xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision ed84ef1c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Generic Work Completion class: records the completion queue id and
 * completion id from the rpc_rdma_cid, plus the WC status.  The vendor
 * error code is captured only when wc->status indicates failure.
 */
23 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
24 	TP_PROTO(
25 		const struct ib_wc *wc,
26 		const struct rpc_rdma_cid *cid
27 	),
28 
29 	TP_ARGS(wc, cid),
30 
31 	TP_STRUCT__entry(
32 		__field(u32, cq_id)
33 		__field(int, completion_id)
34 		__field(unsigned long, status)
35 		__field(unsigned int, vendor_err)
36 	),
37 
38 	TP_fast_assign(
39 		__entry->cq_id = cid->ci_queue_id;
40 		__entry->completion_id = cid->ci_completion_id;
41 		__entry->status = wc->status;
42 		if (wc->status)
43 			__entry->vendor_err = wc->vendor_err;
44 		else
45 			__entry->vendor_err = 0;
46 	),
47 
48 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
49 		__entry->cq_id, __entry->completion_id,
50 		rdma_show_wc_status(__entry->status),
51 		__entry->status, __entry->vendor_err
52 	)
53 );
54 
55 #define DEFINE_COMPLETION_EVENT(name)					\
56 		DEFINE_EVENT(rpcrdma_completion_class, name,		\
57 				TP_PROTO(				\
58 					const struct ib_wc *wc,		\
59 					const struct rpc_rdma_cid *cid	\
60 				),					\
61 				TP_ARGS(wc, cid))
62 
/*
 * Same fields and assignment logic as rpcrdma_completion_class, but the
 * completion id here identifies an MR, so it is printed as "mr.id=".
 */
63 DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
64 	TP_PROTO(
65 		const struct ib_wc *wc,
66 		const struct rpc_rdma_cid *cid
67 	),
68 
69 	TP_ARGS(wc, cid),
70 
71 	TP_STRUCT__entry(
72 		__field(u32, cq_id)
73 		__field(int, completion_id)
74 		__field(unsigned long, status)
75 		__field(unsigned int, vendor_err)
76 	),
77 
78 	TP_fast_assign(
79 		__entry->cq_id = cid->ci_queue_id;
80 		__entry->completion_id = cid->ci_completion_id;
81 		__entry->status = wc->status;
82 		if (wc->status)
83 			__entry->vendor_err = wc->vendor_err;
84 		else
85 			__entry->vendor_err = 0;
86 	),
87 
88 	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
89 		__entry->cq_id, __entry->completion_id,
90 		rdma_show_wc_status(__entry->status),
91 		__entry->status, __entry->vendor_err
92 	)
93 );
94 
95 #define DEFINE_MR_COMPLETION_EVENT(name)				\
96 		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
97 				TP_PROTO(				\
98 					const struct ib_wc *wc,		\
99 					const struct rpc_rdma_cid *cid	\
100 				),					\
101 				TP_ARGS(wc, cid))
102 
/*
 * Receive completion: additionally records the number of bytes received
 * (wc->byte_len), forced to zero when the WC reports a failure.
 */
103 DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
104 	TP_PROTO(
105 		const struct ib_wc *wc,
106 		const struct rpc_rdma_cid *cid
107 	),
108 
109 	TP_ARGS(wc, cid),
110 
111 	TP_STRUCT__entry(
112 		__field(u32, cq_id)
113 		__field(int, completion_id)
114 		__field(u32, received)
115 		__field(unsigned long, status)
116 		__field(unsigned int, vendor_err)
117 	),
118 
119 	TP_fast_assign(
120 		__entry->cq_id = cid->ci_queue_id;
121 		__entry->completion_id = cid->ci_completion_id;
122 		__entry->status = wc->status;
123 		if (wc->status) {
124 			__entry->received = 0;
125 			__entry->vendor_err = wc->vendor_err;
126 		} else {
127 			__entry->received = wc->byte_len;
128 			__entry->vendor_err = 0;
129 		}
130 	),
131 
132 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
133 		__entry->cq_id, __entry->completion_id,
134 		rdma_show_wc_status(__entry->status),
135 		__entry->status, __entry->vendor_err,
136 		__entry->received
137 	)
138 );
139 
140 #define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
141 		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
142 				TP_PROTO(				\
143 					const struct ib_wc *wc,		\
144 					const struct rpc_rdma_cid *cid	\
145 				),					\
146 				TP_ARGS(wc, cid))
147 
/*
 * Reply-header class: captures the xid, RPC/RDMA version, and procedure
 * fields already parsed into the rpcrdma_rep, plus the peer address.
 */
148 DECLARE_EVENT_CLASS(xprtrdma_reply_class,
149 	TP_PROTO(
150 		const struct rpcrdma_rep *rep
151 	),
152 
153 	TP_ARGS(rep),
154 
155 	TP_STRUCT__entry(
156 		__field(u32, xid)
157 		__field(u32, version)
158 		__field(u32, proc)
159 		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
160 		__string(port, rpcrdma_portstr(rep->rr_rxprt))
161 	),
162 
163 	TP_fast_assign(
164 		__entry->xid = be32_to_cpu(rep->rr_xid);
165 		__entry->version = be32_to_cpu(rep->rr_vers);
166 		__entry->proc = be32_to_cpu(rep->rr_proc);
167 		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
168 		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
169 	),
170 
171 	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
172 		__get_str(addr), __get_str(port),
173 		__entry->xid, __entry->version, __entry->proc
174 	)
175 );
176 
/* Instances are named xprtrdma_reply_<name>_err (reply sanity failures). */
177 #define DEFINE_REPLY_EVENT(name)					\
178 		DEFINE_EVENT(xprtrdma_reply_class,			\
179 				xprtrdma_reply_##name##_err,		\
180 				TP_PROTO(				\
181 					const struct rpcrdma_rep *rep	\
182 				),					\
183 				TP_ARGS(rep))
184 
/* Transport-only class: records just the peer address and port strings. */
185 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
186 	TP_PROTO(
187 		const struct rpcrdma_xprt *r_xprt
188 	),
189 
190 	TP_ARGS(r_xprt),
191 
192 	TP_STRUCT__entry(
193 		__string(addr, rpcrdma_addrstr(r_xprt))
194 		__string(port, rpcrdma_portstr(r_xprt))
195 	),
196 
197 	TP_fast_assign(
198 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
199 		__assign_str(port, rpcrdma_portstr(r_xprt));
200 	),
201 
202 	TP_printk("peer=[%s]:%s",
203 		__get_str(addr), __get_str(port)
204 	)
205 );
206 
207 #define DEFINE_RXPRT_EVENT(name)					\
208 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
209 				TP_PROTO(				\
210 					const struct rpcrdma_xprt *r_xprt \
211 				),					\
212 				TP_ARGS(r_xprt))
213 
/*
 * Connection class: records the caller-supplied return code and the
 * endpoint's re_connect_status at the time of the event.
 */
214 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
215 	TP_PROTO(
216 		const struct rpcrdma_xprt *r_xprt,
217 		int rc
218 	),
219 
220 	TP_ARGS(r_xprt, rc),
221 
222 	TP_STRUCT__entry(
223 		__field(int, rc)
224 		__field(int, connect_status)
225 		__string(addr, rpcrdma_addrstr(r_xprt))
226 		__string(port, rpcrdma_portstr(r_xprt))
227 	),
228 
229 	TP_fast_assign(
230 		__entry->rc = rc;
231 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
232 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
233 		__assign_str(port, rpcrdma_portstr(r_xprt));
234 	),
235 
236 	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
237 		__get_str(addr), __get_str(port),
238 		__entry->rc, __entry->connect_status
239 	)
240 );
241 
242 #define DEFINE_CONN_EVENT(name)						\
243 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
244 				TP_PROTO(				\
245 					const struct rpcrdma_xprt *r_xprt, \
246 					int rc				\
247 				),					\
248 				TP_ARGS(r_xprt, rc))
249 
/*
 * Read-chunk class: one event per MR contributing to a Read chunk.
 * "pos" is the chunk position; the trailing "(more)"/"(last)" marker
 * compares mr_nents against the remaining segment count.
 */
250 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
251 	TP_PROTO(
252 		const struct rpc_task *task,
253 		unsigned int pos,
254 		struct rpcrdma_mr *mr,
255 		int nsegs
256 	),
257 
258 	TP_ARGS(task, pos, mr, nsegs),
259 
260 	TP_STRUCT__entry(
261 		__field(unsigned int, task_id)
262 		__field(unsigned int, client_id)
263 		__field(unsigned int, pos)
264 		__field(int, nents)
265 		__field(u32, handle)
266 		__field(u32, length)
267 		__field(u64, offset)
268 		__field(int, nsegs)
269 	),
270 
271 	TP_fast_assign(
272 		__entry->task_id = task->tk_pid;
273 		__entry->client_id = task->tk_client->cl_clid;
274 		__entry->pos = pos;
275 		__entry->nents = mr->mr_nents;
276 		__entry->handle = mr->mr_handle;
277 		__entry->length = mr->mr_length;
278 		__entry->offset = mr->mr_offset;
279 		__entry->nsegs = nsegs;
280 	),
281 
282 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
283 		__entry->task_id, __entry->client_id,
284 		__entry->pos, __entry->length,
285 		(unsigned long long)__entry->offset, __entry->handle,
286 		__entry->nents < __entry->nsegs ? "more" : "last"
287 	)
288 );
289 
290 #define DEFINE_RDCH_EVENT(name)						\
291 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
292 				TP_PROTO(				\
293 					const struct rpc_task *task,	\
294 					unsigned int pos,		\
295 					struct rpcrdma_mr *mr,		\
296 					int nsegs			\
297 				),					\
298 				TP_ARGS(task, pos, mr, nsegs))
299 
/*
 * Write/Reply-chunk class: identical to xprtrdma_rdch_event except there
 * is no "pos" field (Write and Reply chunks carry no position).
 */
300 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
301 	TP_PROTO(
302 		const struct rpc_task *task,
303 		struct rpcrdma_mr *mr,
304 		int nsegs
305 	),
306 
307 	TP_ARGS(task, mr, nsegs),
308 
309 	TP_STRUCT__entry(
310 		__field(unsigned int, task_id)
311 		__field(unsigned int, client_id)
312 		__field(int, nents)
313 		__field(u32, handle)
314 		__field(u32, length)
315 		__field(u64, offset)
316 		__field(int, nsegs)
317 	),
318 
319 	TP_fast_assign(
320 		__entry->task_id = task->tk_pid;
321 		__entry->client_id = task->tk_client->cl_clid;
322 		__entry->nents = mr->mr_nents;
323 		__entry->handle = mr->mr_handle;
324 		__entry->length = mr->mr_length;
325 		__entry->offset = mr->mr_offset;
326 		__entry->nsegs = nsegs;
327 	),
328 
329 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
330 		__entry->task_id, __entry->client_id,
331 		__entry->length, (unsigned long long)__entry->offset,
332 		__entry->handle,
333 		__entry->nents < __entry->nsegs ? "more" : "last"
334 	)
335 );
336 
337 #define DEFINE_WRCH_EVENT(name)						\
338 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
339 				TP_PROTO(				\
340 					const struct rpc_task *task,	\
341 					struct rpcrdma_mr *mr,		\
342 					int nsegs			\
343 				),					\
344 				TP_ARGS(task, mr, nsegs))
345 
/*
 * Export the DMA direction values so that user-space trace tools can
 * resolve the __print_symbolic() names below without kernel headers.
 */
346 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
347 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
348 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
349 TRACE_DEFINE_ENUM(DMA_NONE);
350 
351 #define xprtrdma_show_direction(x)					\
352 		__print_symbolic(x,					\
353 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
354 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
355 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
356 				{ DMA_NONE, "NONE" })
357 
/*
 * MR class for MRs attached to a request: records the owning task (via
 * mr->mr_req->rl_slot.rq_task) along with the MR's id, sg entry count,
 * handle, length, offset, and DMA direction.
 */
358 DECLARE_EVENT_CLASS(xprtrdma_mr_class,
359 	TP_PROTO(
360 		const struct rpcrdma_mr *mr
361 	),
362 
363 	TP_ARGS(mr),
364 
365 	TP_STRUCT__entry(
366 		__field(unsigned int, task_id)
367 		__field(unsigned int, client_id)
368 		__field(u32, mr_id)
369 		__field(int, nents)
370 		__field(u32, handle)
371 		__field(u32, length)
372 		__field(u64, offset)
373 		__field(u32, dir)
374 	),
375 
376 	TP_fast_assign(
377 		const struct rpcrdma_req *req = mr->mr_req;
378 		const struct rpc_task *task = req->rl_slot.rq_task;
379 
380 		__entry->task_id = task->tk_pid;
381 		__entry->client_id = task->tk_client->cl_clid;
382 		__entry->mr_id  = mr->mr_ibmr->res.id;
383 		__entry->nents  = mr->mr_nents;
384 		__entry->handle = mr->mr_handle;
385 		__entry->length = mr->mr_length;
386 		__entry->offset = mr->mr_offset;
387 		__entry->dir    = mr->mr_dir;
388 	),
389 
390 	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
391 		__entry->task_id, __entry->client_id,
392 		__entry->mr_id, __entry->nents, __entry->length,
393 		(unsigned long long)__entry->offset, __entry->handle,
394 		xprtrdma_show_direction(__entry->dir)
395 	)
396 );
397 
398 #define DEFINE_MR_EVENT(name)						\
399 		DEFINE_EVENT(xprtrdma_mr_class,				\
400 				xprtrdma_mr_##name,			\
401 				TP_PROTO(				\
402 					const struct rpcrdma_mr *mr	\
403 				),					\
404 				TP_ARGS(mr))
405 
/*
 * Same as xprtrdma_mr_class but without task/client ids, for MRs that
 * are not (or may no longer be) associated with a request.
 */
406 DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
407 	TP_PROTO(
408 		const struct rpcrdma_mr *mr
409 	),
410 
411 	TP_ARGS(mr),
412 
413 	TP_STRUCT__entry(
414 		__field(u32, mr_id)
415 		__field(int, nents)
416 		__field(u32, handle)
417 		__field(u32, length)
418 		__field(u64, offset)
419 		__field(u32, dir)
420 	),
421 
422 	TP_fast_assign(
423 		__entry->mr_id  = mr->mr_ibmr->res.id;
424 		__entry->nents  = mr->mr_nents;
425 		__entry->handle = mr->mr_handle;
426 		__entry->length = mr->mr_length;
427 		__entry->offset = mr->mr_offset;
428 		__entry->dir    = mr->mr_dir;
429 	),
430 
431 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
432 		__entry->mr_id, __entry->nents, __entry->length,
433 		(unsigned long long)__entry->offset, __entry->handle,
434 		xprtrdma_show_direction(__entry->dir)
435 	)
436 );
437 
438 #define DEFINE_ANON_MR_EVENT(name)					\
439 		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
440 				xprtrdma_mr_##name,			\
441 				TP_PROTO(				\
442 					const struct rpcrdma_mr *mr	\
443 				),					\
444 				TP_ARGS(mr))
445 
/* Backchannel callback class: peer address plus the callback's xid. */
446 DECLARE_EVENT_CLASS(xprtrdma_callback_class,
447 	TP_PROTO(
448 		const struct rpcrdma_xprt *r_xprt,
449 		const struct rpc_rqst *rqst
450 	),
451 
452 	TP_ARGS(r_xprt, rqst),
453 
454 	TP_STRUCT__entry(
455 		__field(u32, xid)
456 		__string(addr, rpcrdma_addrstr(r_xprt))
457 		__string(port, rpcrdma_portstr(r_xprt))
458 	),
459 
460 	TP_fast_assign(
461 		__entry->xid = be32_to_cpu(rqst->rq_xid);
462 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
463 		__assign_str(port, rpcrdma_portstr(r_xprt));
464 	),
465 
466 	TP_printk("peer=[%s]:%s xid=0x%08x",
467 		__get_str(addr), __get_str(port), __entry->xid
468 	)
469 );
470 
471 #define DEFINE_CALLBACK_EVENT(name)					\
472 		DEFINE_EVENT(xprtrdma_callback_class,			\
473 				xprtrdma_cb_##name,			\
474 				TP_PROTO(				\
475 					const struct rpcrdma_xprt *r_xprt, \
476 					const struct rpc_rqst *rqst	\
477 				),					\
478 				TP_ARGS(r_xprt, rqst))
479 
480 /**
481  ** Connection events
482  **/
483 
/*
 * Fires when the connection's inline thresholds are settled: records
 * the negotiated send/recv inline sizes, the computed maxima, and the
 * raw source/destination socket addresses from the rdma_cm_id.
 */
484 TRACE_EVENT(xprtrdma_inline_thresh,
485 	TP_PROTO(
486 		const struct rpcrdma_ep *ep
487 	),
488 
489 	TP_ARGS(ep),
490 
491 	TP_STRUCT__entry(
492 		__field(unsigned int, inline_send)
493 		__field(unsigned int, inline_recv)
494 		__field(unsigned int, max_send)
495 		__field(unsigned int, max_recv)
496 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
497 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
498 	),
499 
500 	TP_fast_assign(
501 		const struct rdma_cm_id *id = ep->re_id;
502 
503 		__entry->inline_send = ep->re_inline_send;
504 		__entry->inline_recv = ep->re_inline_recv;
505 		__entry->max_send = ep->re_max_inline_send;
506 		__entry->max_recv = ep->re_max_inline_recv;
507 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
508 		       sizeof(struct sockaddr_in6));
509 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
510 		       sizeof(struct sockaddr_in6));
511 	),
512 
513 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
514 		__entry->srcaddr, __entry->dstaddr,
515 		__entry->inline_send, __entry->inline_recv,
516 		__entry->max_send, __entry->max_recv
517 	)
518 );
519 
/* Instantiates xprtrdma_connect and xprtrdma_disconnect. */
520 DEFINE_CONN_EVENT(connect);
521 DEFINE_CONN_EVENT(disconnect);
522 
523 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
524 
/* Connect worker scheduled: "delay" is the requeue delay in jiffies. */
525 TRACE_EVENT(xprtrdma_op_connect,
526 	TP_PROTO(
527 		const struct rpcrdma_xprt *r_xprt,
528 		unsigned long delay
529 	),
530 
531 	TP_ARGS(r_xprt, delay),
532 
533 	TP_STRUCT__entry(
534 		__field(unsigned long, delay)
535 		__string(addr, rpcrdma_addrstr(r_xprt))
536 		__string(port, rpcrdma_portstr(r_xprt))
537 	),
538 
539 	TP_fast_assign(
540 		__entry->delay = delay;
541 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
542 		__assign_str(port, rpcrdma_portstr(r_xprt));
543 	),
544 
545 	TP_printk("peer=[%s]:%s delay=%lu",
546 		__get_str(addr), __get_str(port), __entry->delay
547 	)
548 );
549 
550 
/*
 * Connect timeouts updated.  Values are stored in jiffies but printed
 * in seconds (divided by HZ).
 */
551 TRACE_EVENT(xprtrdma_op_set_cto,
552 	TP_PROTO(
553 		const struct rpcrdma_xprt *r_xprt,
554 		unsigned long connect,
555 		unsigned long reconnect
556 	),
557 
558 	TP_ARGS(r_xprt, connect, reconnect),
559 
560 	TP_STRUCT__entry(
561 		__field(unsigned long, connect)
562 		__field(unsigned long, reconnect)
563 		__string(addr, rpcrdma_addrstr(r_xprt))
564 		__string(port, rpcrdma_portstr(r_xprt))
565 	),
566 
567 	TP_fast_assign(
568 		__entry->connect = connect;
569 		__entry->reconnect = reconnect;
570 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
571 		__assign_str(port, rpcrdma_portstr(r_xprt));
572 	),
573 
574 	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
575 		__get_str(addr), __get_str(port),
576 		__entry->connect / HZ, __entry->reconnect / HZ
577 	)
578 );
579 
580 /**
581  ** Call events
582  **/
583 
/* Reports how many new MRs were just allocated for this transport. */
584 TRACE_EVENT(xprtrdma_createmrs,
585 	TP_PROTO(
586 		const struct rpcrdma_xprt *r_xprt,
587 		unsigned int count
588 	),
589 
590 	TP_ARGS(r_xprt, count),
591 
592 	TP_STRUCT__entry(
593 		__string(addr, rpcrdma_addrstr(r_xprt))
594 		__string(port, rpcrdma_portstr(r_xprt))
595 		__field(unsigned int, count)
596 	),
597 
598 	TP_fast_assign(
599 		__entry->count = count;
600 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
601 		__assign_str(port, rpcrdma_portstr(r_xprt));
602 	),
603 
604 	TP_printk("peer=[%s]:%s created %u MRs",
605 		__get_str(addr), __get_str(port), __entry->count
606 	)
607 );
608 
/* A request could not obtain enough MRs; identifies the stalled task. */
609 TRACE_EVENT(xprtrdma_nomrs_err,
610 	TP_PROTO(
611 		const struct rpcrdma_xprt *r_xprt,
612 		const struct rpcrdma_req *req
613 	),
614 
615 	TP_ARGS(r_xprt, req),
616 
617 	TP_STRUCT__entry(
618 		__field(unsigned int, task_id)
619 		__field(unsigned int, client_id)
620 		__string(addr, rpcrdma_addrstr(r_xprt))
621 		__string(port, rpcrdma_portstr(r_xprt))
622 	),
623 
624 	TP_fast_assign(
625 		const struct rpc_rqst *rqst = &req->rl_slot;
626 
627 		__entry->task_id = rqst->rq_task->tk_pid;
628 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
629 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
630 		__assign_str(port, rpcrdma_portstr(r_xprt));
631 	),
632 
633 	TP_printk("peer=[%s]:%s task:%u@%u",
634 		__get_str(addr), __get_str(port),
635 		__entry->task_id, __entry->client_id
636 	)
637 );
638 
/* Instantiates xprtrdma_chunk_read / _write / _reply. */
639 DEFINE_RDCH_EVENT(read);
640 DEFINE_WRCH_EVENT(write);
641 DEFINE_WRCH_EVENT(reply);
642 
/* Export chunk-type enum values for user-space symbolic decoding. */
643 TRACE_DEFINE_ENUM(rpcrdma_noch);
644 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
645 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
646 TRACE_DEFINE_ENUM(rpcrdma_readch);
647 TRACE_DEFINE_ENUM(rpcrdma_areadch);
648 TRACE_DEFINE_ENUM(rpcrdma_writech);
649 TRACE_DEFINE_ENUM(rpcrdma_replych);
650 
651 #define xprtrdma_show_chunktype(x)					\
652 		__print_symbolic(x,					\
653 				{ rpcrdma_noch, "inline" },		\
654 				{ rpcrdma_noch_pullup, "pullup" },	\
655 				{ rpcrdma_noch_mapped, "mapped" },	\
656 				{ rpcrdma_readch, "read list" },	\
657 				{ rpcrdma_areadch, "*read list" },	\
658 				{ rpcrdma_writech, "write list" },	\
659 				{ rpcrdma_replych, "reply chunk" })
660 
/*
 * Records the marshaled request layout: transport header length, the
 * send buffer's head/page/tail lengths, and the chosen Read and Write
 * chunk types (decoded via xprtrdma_show_chunktype).
 */
661 TRACE_EVENT(xprtrdma_marshal,
662 	TP_PROTO(
663 		const struct rpcrdma_req *req,
664 		unsigned int rtype,
665 		unsigned int wtype
666 	),
667 
668 	TP_ARGS(req, rtype, wtype),
669 
670 	TP_STRUCT__entry(
671 		__field(unsigned int, task_id)
672 		__field(unsigned int, client_id)
673 		__field(u32, xid)
674 		__field(unsigned int, hdrlen)
675 		__field(unsigned int, headlen)
676 		__field(unsigned int, pagelen)
677 		__field(unsigned int, taillen)
678 		__field(unsigned int, rtype)
679 		__field(unsigned int, wtype)
680 	),
681 
682 	TP_fast_assign(
683 		const struct rpc_rqst *rqst = &req->rl_slot;
684 
685 		__entry->task_id = rqst->rq_task->tk_pid;
686 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
687 		__entry->xid = be32_to_cpu(rqst->rq_xid);
688 		__entry->hdrlen = req->rl_hdrbuf.len;
689 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
690 		__entry->pagelen = rqst->rq_snd_buf.page_len;
691 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
692 		__entry->rtype = rtype;
693 		__entry->wtype = wtype;
694 	),
695 
696 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
697 		__entry->task_id, __entry->client_id, __entry->xid,
698 		__entry->hdrlen,
699 		__entry->headlen, __entry->pagelen, __entry->taillen,
700 		xprtrdma_show_chunktype(__entry->rtype),
701 		xprtrdma_show_chunktype(__entry->wtype)
702 	)
703 );
704 
/* Marshaling the request failed; "ret" is the error returned. */
705 TRACE_EVENT(xprtrdma_marshal_failed,
706 	TP_PROTO(const struct rpc_rqst *rqst,
707 		 int ret
708 	),
709 
710 	TP_ARGS(rqst, ret),
711 
712 	TP_STRUCT__entry(
713 		__field(unsigned int, task_id)
714 		__field(unsigned int, client_id)
715 		__field(u32, xid)
716 		__field(int, ret)
717 	),
718 
719 	TP_fast_assign(
720 		__entry->task_id = rqst->rq_task->tk_pid;
721 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
722 		__entry->xid = be32_to_cpu(rqst->rq_xid);
723 		__entry->ret = ret;
724 	),
725 
726 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
727 		__entry->task_id, __entry->client_id, __entry->xid,
728 		__entry->ret
729 	)
730 );
731 
/* Preparing the Send WR failed; same fields as xprtrdma_marshal_failed. */
732 TRACE_EVENT(xprtrdma_prepsend_failed,
733 	TP_PROTO(const struct rpc_rqst *rqst,
734 		 int ret
735 	),
736 
737 	TP_ARGS(rqst, ret),
738 
739 	TP_STRUCT__entry(
740 		__field(unsigned int, task_id)
741 		__field(unsigned int, client_id)
742 		__field(u32, xid)
743 		__field(int, ret)
744 	),
745 
746 	TP_fast_assign(
747 		__entry->task_id = rqst->rq_task->tk_pid;
748 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
749 		__entry->xid = be32_to_cpu(rqst->rq_xid);
750 		__entry->ret = ret;
751 	),
752 
753 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
754 		__entry->task_id, __entry->client_id, __entry->xid,
755 		__entry->ret
756 	)
757 );
758 
/*
 * A Send WR is being posted.  client_id is -1 when the rqst has no
 * rpc_clnt (e.g. backchannel); "signaled" reflects IB_SEND_SIGNALED.
 */
759 TRACE_EVENT(xprtrdma_post_send,
760 	TP_PROTO(
761 		const struct rpcrdma_req *req
762 	),
763 
764 	TP_ARGS(req),
765 
766 	TP_STRUCT__entry(
767 		__field(u32, cq_id)
768 		__field(int, completion_id)
769 		__field(unsigned int, task_id)
770 		__field(unsigned int, client_id)
771 		__field(int, num_sge)
772 		__field(int, signaled)
773 	),
774 
775 	TP_fast_assign(
776 		const struct rpc_rqst *rqst = &req->rl_slot;
777 		const struct rpcrdma_sendctx *sc = req->rl_sendctx;
778 
779 		__entry->cq_id = sc->sc_cid.ci_queue_id;
780 		__entry->completion_id = sc->sc_cid.ci_completion_id;
781 		__entry->task_id = rqst->rq_task->tk_pid;
782 		__entry->client_id = rqst->rq_task->tk_client ?
783 				     rqst->rq_task->tk_client->cl_clid : -1;
784 		__entry->num_sge = req->rl_wr.num_sge;
785 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
786 	),
787 
788 	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
789 		__entry->task_id, __entry->client_id,
790 		__entry->cq_id, __entry->completion_id,
791 		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
792 		(__entry->signaled ? "signaled" : "")
793 	)
794 );
795 
/*
 * Posting a Send WR failed with "rc".
 * NOTE(review): cq_id is taken from re_attr.recv_cq even though this is
 * the send path — confirm whether send_cq was intended.
 */
796 TRACE_EVENT(xprtrdma_post_send_err,
797 	TP_PROTO(
798 		const struct rpcrdma_xprt *r_xprt,
799 		const struct rpcrdma_req *req,
800 		int rc
801 	),
802 
803 	TP_ARGS(r_xprt, req, rc),
804 
805 	TP_STRUCT__entry(
806 		__field(u32, cq_id)
807 		__field(unsigned int, task_id)
808 		__field(unsigned int, client_id)
809 		__field(int, rc)
810 	),
811 
812 	TP_fast_assign(
813 		const struct rpc_rqst *rqst = &req->rl_slot;
814 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
815 
816 		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
817 		__entry->task_id = rqst->rq_task->tk_pid;
818 		__entry->client_id = rqst->rq_task->tk_client ?
819 				     rqst->rq_task->tk_client->cl_clid : -1;
820 		__entry->rc = rc;
821 	),
822 
823 	TP_printk("task:%u@%u cq.id=%u rc=%d",
824 		__entry->task_id, __entry->client_id,
825 		__entry->cq_id, __entry->rc
826 	)
827 );
828 
/*
 * A Receive WR is being posted; identifies it by cq.id/cid.
 * NOTE(review): cq_id is u32 but printed with %d here and in the two
 * events below — %u would match the field type.
 */
829 TRACE_EVENT(xprtrdma_post_recv,
830 	TP_PROTO(
831 		const struct rpcrdma_rep *rep
832 	),
833 
834 	TP_ARGS(rep),
835 
836 	TP_STRUCT__entry(
837 		__field(u32, cq_id)
838 		__field(int, completion_id)
839 	),
840 
841 	TP_fast_assign(
842 		__entry->cq_id = rep->rr_cid.ci_queue_id;
843 		__entry->completion_id = rep->rr_cid.ci_completion_id;
844 	),
845 
846 	TP_printk("cq.id=%d cid=%d",
847 		__entry->cq_id, __entry->completion_id
848 	)
849 );
850 
/*
 * A batch of Receives was posted: "count" new WRs, "posted" is the
 * endpoint's running re_receive_count.
 */
851 TRACE_EVENT(xprtrdma_post_recvs,
852 	TP_PROTO(
853 		const struct rpcrdma_xprt *r_xprt,
854 		unsigned int count
855 	),
856 
857 	TP_ARGS(r_xprt, count),
858 
859 	TP_STRUCT__entry(
860 		__field(u32, cq_id)
861 		__field(unsigned int, count)
862 		__field(int, posted)
863 		__string(addr, rpcrdma_addrstr(r_xprt))
864 		__string(port, rpcrdma_portstr(r_xprt))
865 	),
866 
867 	TP_fast_assign(
868 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
869 
870 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
871 		__entry->count = count;
872 		__entry->posted = ep->re_receive_count;
873 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
874 		__assign_str(port, rpcrdma_portstr(r_xprt));
875 	),
876 
877 	TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
878 		__get_str(addr), __get_str(port), __entry->cq_id,
879 		__entry->count, __entry->posted
880 	)
881 );
882 
/* Posting Receives failed; "status" is the ib_post_recv return code. */
883 TRACE_EVENT(xprtrdma_post_recvs_err,
884 	TP_PROTO(
885 		const struct rpcrdma_xprt *r_xprt,
886 		int status
887 	),
888 
889 	TP_ARGS(r_xprt, status),
890 
891 	TP_STRUCT__entry(
892 		__field(u32, cq_id)
893 		__field(int, status)
894 		__string(addr, rpcrdma_addrstr(r_xprt))
895 		__string(port, rpcrdma_portstr(r_xprt))
896 	),
897 
898 	TP_fast_assign(
899 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
900 
901 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
902 		__entry->status = status;
903 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
904 		__assign_str(port, rpcrdma_portstr(r_xprt));
905 	),
906 
907 	TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
908 		__get_str(addr), __get_str(port), __entry->cq_id,
909 		__entry->status
910 	)
911 );
912 
/* Posting LOCAL_INV WRs failed for the task owning this request. */
913 TRACE_EVENT(xprtrdma_post_linv_err,
914 	TP_PROTO(
915 		const struct rpcrdma_req *req,
916 		int status
917 	),
918 
919 	TP_ARGS(req, status),
920 
921 	TP_STRUCT__entry(
922 		__field(unsigned int, task_id)
923 		__field(unsigned int, client_id)
924 		__field(int, status)
925 	),
926 
927 	TP_fast_assign(
928 		const struct rpc_task *task = req->rl_slot.rq_task;
929 
930 		__entry->task_id = task->tk_pid;
931 		__entry->client_id = task->tk_client->cl_clid;
932 		__entry->status = status;
933 	),
934 
935 	TP_printk("task:%u@%u status=%d",
936 		__entry->task_id, __entry->client_id, __entry->status
937 	)
938 );
939 
940 /**
941  ** Completion events
942  **/
943 
/* Per-WC-type completion tracepoints built from the classes above. */
944 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
945 
946 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
947 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
948 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
949 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
950 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
951 
/* FRWR MR allocation result: MR id and return code. */
952 TRACE_EVENT(xprtrdma_frwr_alloc,
953 	TP_PROTO(
954 		const struct rpcrdma_mr *mr,
955 		int rc
956 	),
957 
958 	TP_ARGS(mr, rc),
959 
960 	TP_STRUCT__entry(
961 		__field(u32, mr_id)
962 		__field(int, rc)
963 	),
964 
965 	TP_fast_assign(
966 		__entry->mr_id = mr->mr_ibmr->res.id;
967 		__entry->rc = rc;
968 	),
969 
970 	TP_printk("mr.id=%u: rc=%d",
971 		__entry->mr_id, __entry->rc
972 	)
973 );
974 
/* FRWR MR deregistration: full MR description plus the return code. */
975 TRACE_EVENT(xprtrdma_frwr_dereg,
976 	TP_PROTO(
977 		const struct rpcrdma_mr *mr,
978 		int rc
979 	),
980 
981 	TP_ARGS(mr, rc),
982 
983 	TP_STRUCT__entry(
984 		__field(u32, mr_id)
985 		__field(int, nents)
986 		__field(u32, handle)
987 		__field(u32, length)
988 		__field(u64, offset)
989 		__field(u32, dir)
990 		__field(int, rc)
991 	),
992 
993 	TP_fast_assign(
994 		__entry->mr_id  = mr->mr_ibmr->res.id;
995 		__entry->nents  = mr->mr_nents;
996 		__entry->handle = mr->mr_handle;
997 		__entry->length = mr->mr_length;
998 		__entry->offset = mr->mr_offset;
999 		__entry->dir    = mr->mr_dir;
1000 		__entry->rc	= rc;
1001 	),
1002 
1003 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
1004 		__entry->mr_id, __entry->nents, __entry->length,
1005 		(unsigned long long)__entry->offset, __entry->handle,
1006 		xprtrdma_show_direction(__entry->dir),
1007 		__entry->rc
1008 	)
1009 );
1010 
/* ib_map_mr_sg() consumed fewer/none of the requested sg entries. */
1011 TRACE_EVENT(xprtrdma_frwr_sgerr,
1012 	TP_PROTO(
1013 		const struct rpcrdma_mr *mr,
1014 		int sg_nents
1015 	),
1016 
1017 	TP_ARGS(mr, sg_nents),
1018 
1019 	TP_STRUCT__entry(
1020 		__field(u32, mr_id)
1021 		__field(u64, addr)
1022 		__field(u32, dir)
1023 		__field(int, nents)
1024 	),
1025 
1026 	TP_fast_assign(
1027 		__entry->mr_id = mr->mr_ibmr->res.id;
1028 		__entry->addr = mr->mr_sg->dma_address;
1029 		__entry->dir = mr->mr_dir;
1030 		__entry->nents = sg_nents;
1031 	),
1032 
1033 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
1034 		__entry->mr_id, __entry->addr,
1035 		xprtrdma_show_direction(__entry->dir),
1036 		__entry->nents
1037 	)
1038 );
1039 
/* DMA-mapping an MR's sg list mapped fewer entries than mr_nents. */
1040 TRACE_EVENT(xprtrdma_frwr_maperr,
1041 	TP_PROTO(
1042 		const struct rpcrdma_mr *mr,
1043 		int num_mapped
1044 	),
1045 
1046 	TP_ARGS(mr, num_mapped),
1047 
1048 	TP_STRUCT__entry(
1049 		__field(u32, mr_id)
1050 		__field(u64, addr)
1051 		__field(u32, dir)
1052 		__field(int, num_mapped)
1053 		__field(int, nents)
1054 	),
1055 
1056 	TP_fast_assign(
1057 		__entry->mr_id = mr->mr_ibmr->res.id;
1058 		__entry->addr = mr->mr_sg->dma_address;
1059 		__entry->dir = mr->mr_dir;
1060 		__entry->num_mapped = num_mapped;
1061 		__entry->nents = mr->mr_nents;
1062 	),
1063 
1064 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
1065 		__entry->mr_id, __entry->addr,
1066 		xprtrdma_show_direction(__entry->dir),
1067 		__entry->num_mapped, __entry->nents
1068 	)
1069 );
1070 
/* Per-operation MR tracepoints (xprtrdma_mr_fastreg, _localinv, ...). */
1071 DEFINE_MR_EVENT(fastreg);
1072 DEFINE_MR_EVENT(localinv);
1073 DEFINE_MR_EVENT(reminv);
1074 DEFINE_MR_EVENT(map);
1075 
/* unmap can fire after the MR is detached from its req, hence anonymous. */
1076 DEFINE_ANON_MR_EVENT(unmap);
1077 
/* A DMA mapping attempt failed; records only the offending address. */
1078 TRACE_EVENT(xprtrdma_dma_maperr,
1079 	TP_PROTO(
1080 		u64 addr
1081 	),
1082 
1083 	TP_ARGS(addr),
1084 
1085 	TP_STRUCT__entry(
1086 		__field(u64, addr)
1087 	),
1088 
1089 	TP_fast_assign(
1090 		__entry->addr = addr;
1091 	),
1092 
	/* No trailing "\n": the trace output layer terminates each event,
	 * so one here produced a spurious blank line in the trace log.
	 */
1093 	TP_printk("dma addr=0x%llx", __entry->addr)
1094 );
1095 
1096 /**
1097  ** Reply events
1098  **/
1099 
/* A reply matched a task; "credits" is the server-granted credit count. */
1100 TRACE_EVENT(xprtrdma_reply,
1101 	TP_PROTO(
1102 		const struct rpc_task *task,
1103 		const struct rpcrdma_rep *rep,
1104 		unsigned int credits
1105 	),
1106 
1107 	TP_ARGS(task, rep, credits),
1108 
1109 	TP_STRUCT__entry(
1110 		__field(unsigned int, task_id)
1111 		__field(unsigned int, client_id)
1112 		__field(u32, xid)
1113 		__field(unsigned int, credits)
1114 	),
1115 
1116 	TP_fast_assign(
1117 		__entry->task_id = task->tk_pid;
1118 		__entry->client_id = task->tk_client->cl_clid;
1119 		__entry->xid = be32_to_cpu(rep->rr_xid);
1120 		__entry->credits = credits;
1121 	),
1122 
1123 	TP_printk("task:%u@%u xid=0x%08x credits=%u",
1124 		__entry->task_id, __entry->client_id, __entry->xid,
1125 		__entry->credits
1126 	)
1127 );
1128 
/* Reply sanity failures: xprtrdma_reply_vers_err, _rqst_err, etc. */
1129 DEFINE_REPLY_EVENT(vers);
1130 DEFINE_REPLY_EVENT(rqst);
1131 DEFINE_REPLY_EVENT(short);
1132 DEFINE_REPLY_EVENT(hdr);
1133 
/*
 * Server rejected the RPC/RDMA protocol version; records the min/max
 * versions the server advertised in the error body.
 */
1134 TRACE_EVENT(xprtrdma_err_vers,
1135 	TP_PROTO(
1136 		const struct rpc_rqst *rqst,
1137 		__be32 *min,
1138 		__be32 *max
1139 	),
1140 
1141 	TP_ARGS(rqst, min, max),
1142 
1143 	TP_STRUCT__entry(
1144 		__field(unsigned int, task_id)
1145 		__field(unsigned int, client_id)
1146 		__field(u32, xid)
1147 		__field(u32, min)
1148 		__field(u32, max)
1149 	),
1150 
1151 	TP_fast_assign(
1152 		__entry->task_id = rqst->rq_task->tk_pid;
1153 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1154 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1155 		__entry->min = be32_to_cpup(min);
1156 		__entry->max = be32_to_cpup(max);
1157 	),
1158 
1159 	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
1160 		__entry->task_id, __entry->client_id, __entry->xid,
1161 		__entry->min, __entry->max
1162 	)
1163 );
1164 
/* Server reported a chunk error for this request. */
1165 TRACE_EVENT(xprtrdma_err_chunk,
1166 	TP_PROTO(
1167 		const struct rpc_rqst *rqst
1168 	),
1169 
1170 	TP_ARGS(rqst),
1171 
1172 	TP_STRUCT__entry(
1173 		__field(unsigned int, task_id)
1174 		__field(unsigned int, client_id)
1175 		__field(u32, xid)
1176 	),
1177 
1178 	TP_fast_assign(
1179 		__entry->task_id = rqst->rq_task->tk_pid;
1180 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1181 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1182 	),
1183 
1184 	TP_printk("task:%u@%u xid=0x%08x",
1185 		__entry->task_id, __entry->client_id, __entry->xid
1186 	)
1187 );
1188 
1189 TRACE_EVENT(xprtrdma_err_unrecognized,
1190 	TP_PROTO(
1191 		const struct rpc_rqst *rqst,
1192 		__be32 *procedure
1193 	),
1194 
1195 	TP_ARGS(rqst, procedure),
1196 
1197 	TP_STRUCT__entry(
1198 		__field(unsigned int, task_id)
1199 		__field(unsigned int, client_id)
1200 		__field(u32, xid)
1201 		__field(u32, procedure)
1202 	),
1203 
1204 	TP_fast_assign(
1205 		__entry->task_id = rqst->rq_task->tk_pid;
1206 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1207 		__entry->procedure = be32_to_cpup(procedure);
1208 	),
1209 
1210 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1211 		__entry->task_id, __entry->client_id, __entry->xid,
1212 		__entry->procedure
1213 	)
1214 );
1215 
/*
 * Records a data-copy "fixup" on the receive path: the number of
 * bytes copied (fixup) plus the head/page/tail geometry of the
 * request's receive buffer (rq_rcv_buf) at that moment.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1247 
/*
 * Records one decoded RDMA segment as the conventional
 * length@offset:handle triple.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1274 
/*
 * Marks an MR zap operation; captures only which RPC task (and
 * client) triggered it.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);
1296 
1297 /**
1298  ** Callback events
1299  **/
1300 
/*
 * Backchannel setup: records the peer address/port and how many
 * callback requests were provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s %u reqs",
		__get_str(addr), __get_str(port), __entry->reqs
	)
);
1325 
/* Backchannel call/reply events; the class is declared elsewhere in
 * this file — see the DEFINE_CALLBACK_EVENT macro definition.
 */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1328 
1329 /**
1330  ** Server-side RPC/RDMA events
1331  **/
1332 
/*
 * Event class for server-side connection-accept failures: records
 * the remote peer address string and a long status code.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1355 
/* Expands to a svcrdma_<name>_err event in the accept-failure class. */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

/* One event per failure point during transport accept. */
DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1369 
/*
 * Export the RPC/RDMA procedure enum values so that userspace trace
 * tools can resolve the __print_symbolic() names below.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA procedure number to its symbolic name. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1383 
/*
 * Successful decode of an ingress RPC/RDMA transport header.
 * @p points at the header's four leading __be32 words, consumed in
 * wire order: xid, vers, credits, proc. @hdrlen is the full decoded
 * header length in bytes.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1418 
/*
 * Ingress message too short to contain a complete transport header;
 * records the receive completion ID and the truncated length.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1443 
/*
 * Event class for malformed ingress requests. @p points at the
 * RPC/RDMA header's four leading __be32 words, consumed in wire
 * order: xid, vers, credits, proc.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1474 
/* Expands to a svcrdma_decode_<name>_err event in the badreq class. */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

/* One event per header-decode failure mode. */
DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1488 
/*
 * One Write segment encoded into an egress reply: records the send
 * completion ID, the segment's index within its chunk, and the
 * length@offset:handle triple.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1524 
/*
 * One Read segment decoded from an ingress request: records the
 * chunk's position and the segment's length@offset:handle triple.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		/* NOTE(review): "segno" is assigned ch_segcount (the
		 * chunk's segment count so far), not a segment index —
		 * confirm this is the intended semantics.
		 */
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1560 
/*
 * One Write segment decoded from an ingress request: looks up
 * segment @segno in @chunk and records its length@offset:handle
 * triple.
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1597 
/*
 * Event class for server-side error replies; records only the XID
 * of the RPC being rejected (converted from wire byte order).
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1617 
/* Expands to a svcrdma_err_<name> event in the error class. */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

/* ERR_VERS and ERR_CHUNK error replies. */
DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1627 
1628 /**
1629  ** Server-side RDMA API events
1630  **/
1631 
/*
 * Event class for server-side DMA mapping activity: records the
 * device name, remote peer, and the mapped address/length pair.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1660 
/* Expands to a svcrdma_<name> event in the DMA-map class. */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

/* Page map/unmap and mapping-failure events. */
DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1673 
/*
 * Failure to DMA-map a scatterlist for an RDMA Read/Write: records
 * the device, peer, number of sg entries, and the error status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1702 
1703 TRACE_EVENT(svcrdma_no_rwctx_err,
1704 	TP_PROTO(
1705 		const struct svcxprt_rdma *rdma,
1706 		unsigned int num_sges
1707 	),
1708 
1709 	TP_ARGS(rdma, num_sges),
1710 
1711 	TP_STRUCT__entry(
1712 		__field(unsigned int, num_sges)
1713 		__string(device, rdma->sc_cm_id->device->name)
1714 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1715 	),
1716 
1717 	TP_fast_assign(
1718 		__entry->num_sges = num_sges;
1719 		__assign_str(device, rdma->sc_cm_id->device->name);
1720 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1721 	),
1722 
1723 	TP_printk("addr=%s device=%s num_sges=%d",
1724 		__get_str(addr), __get_str(device), __entry->num_sges
1725 	)
1726 );
1727 
1728 TRACE_EVENT(svcrdma_page_overrun_err,
1729 	TP_PROTO(
1730 		const struct svcxprt_rdma *rdma,
1731 		const struct svc_rqst *rqst,
1732 		unsigned int pageno
1733 	),
1734 
1735 	TP_ARGS(rdma, rqst, pageno),
1736 
1737 	TP_STRUCT__entry(
1738 		__field(unsigned int, pageno)
1739 		__field(u32, xid)
1740 		__string(device, rdma->sc_cm_id->device->name)
1741 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1742 	),
1743 
1744 	TP_fast_assign(
1745 		__entry->pageno = pageno;
1746 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1747 		__assign_str(device, rdma->sc_cm_id->device->name);
1748 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1749 	),
1750 
1751 	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
1752 		__get_str(device), __entry->xid, __entry->pageno
1753 	)
1754 );
1755 
/*
 * Write chunk too small for the payload: records how many bytes
 * remained unconsumed and where in the segment list encoding stopped.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1787 
1788 TRACE_EVENT(svcrdma_send_pullup,
1789 	TP_PROTO(
1790 		const struct svc_rdma_send_ctxt *ctxt,
1791 		unsigned int msglen
1792 	),
1793 
1794 	TP_ARGS(ctxt, msglen),
1795 
1796 	TP_STRUCT__entry(
1797 		__field(u32, cq_id)
1798 		__field(int, completion_id)
1799 		__field(unsigned int, hdrlen)
1800 		__field(unsigned int, msglen)
1801 	),
1802 
1803 	TP_fast_assign(
1804 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1805 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1806 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1807 		__entry->msglen = msglen;
1808 	),
1809 
1810 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1811 		__entry->cq_id, __entry->completion_id,
1812 		__entry->hdrlen, __entry->msglen,
1813 		__entry->hdrlen + __entry->msglen)
1814 );
1815 
1816 TRACE_EVENT(svcrdma_send_err,
1817 	TP_PROTO(
1818 		const struct svc_rqst *rqst,
1819 		int status
1820 	),
1821 
1822 	TP_ARGS(rqst, status),
1823 
1824 	TP_STRUCT__entry(
1825 		__field(int, status)
1826 		__field(u32, xid)
1827 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1828 	),
1829 
1830 	TP_fast_assign(
1831 		__entry->status = status;
1832 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1833 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1834 	),
1835 
1836 	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
1837 		__entry->xid, __entry->status
1838 	)
1839 );
1840 
/*
 * A Send WR is being posted: records its SGE count and, for
 * Send-with-Invalidate, the rkey being invalidated (0 otherwise).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
1870 
/* Send completion, using the generic completion class (see top of file). */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1872 
1873 TRACE_EVENT(svcrdma_post_recv,
1874 	TP_PROTO(
1875 		const struct svc_rdma_recv_ctxt *ctxt
1876 	),
1877 
1878 	TP_ARGS(ctxt),
1879 
1880 	TP_STRUCT__entry(
1881 		__field(u32, cq_id)
1882 		__field(int, completion_id)
1883 	),
1884 
1885 	TP_fast_assign(
1886 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1887 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1888 	),
1889 
1890 	TP_printk("cq.id=%d cid=%d",
1891 		__entry->cq_id, __entry->completion_id
1892 	)
1893 );
1894 
/* Receive completion; class presumably declared earlier in this file. */
DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
1896 
/*
 * Posting a Receive WR failed: records the peer address and the
 * error status.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1919 
/*
 * Event class for posting a chain of chunk WRs: records the
 * completion ID pair and how many SQ entries the chain consumes.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
1945 
/* Expands to a svcrdma_post_<name>_chunk event in the chunk class. */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

/* Chunk post events for Read, Write, and Reply chunks. */
DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

/* Completion events for posted Read and Write chunk chains. */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1961 
/*
 * Asynchronous QP error: records the IB event code, the device, and
 * the peer's presentation address.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		/* NOTE(review): snprintf() NUL-terminates within the
		 * given size, so the "- 1" leaves the array's last byte
		 * untouched (and thus uninitialized in the ring buffer)
		 * — confirm whether that byte should be cleared.
		 */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
1988 
/*
 * Event class snapshotting send-queue occupancy: available SQ
 * entries versus total SQ depth for the transport.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
2012 
/* Expands to a svcrdma_sq_<name> event in the send-queue class. */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

/* SQ exhausted; SQ post being retried. */
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
2022 
/*
 * Posting to the send queue failed: records SQ occupancy at the
 * time of failure along with the error status.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
2050 
2051 #endif /* _TRACE_RPCRDMA_H */
2052 
2053 #include <trace/define_trace.h>
2054