xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 00b8c557)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
/*
 * Fires when an RDMA work completion is processed.  Records the
 * completion queue ID and per-queue completion ID from the cid,
 * plus the wc status and (on failure) the vendor error code.
 */
DECLARE_EVENT_CLASS(rpcrdma_completion_class,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpc_rdma_cid *cid
	),

	TP_ARGS(wc, cid),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned long, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->status = wc->status;
		/* wc->vendor_err is meaningful only when status is non-zero */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
		__entry->cq_id, __entry->completion_id,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one tracepoint based on rpcrdma_completion_class */
#define DEFINE_COMPLETION_EVENT(name)					\
		DEFINE_EVENT(rpcrdma_completion_class, name,		\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpc_rdma_cid *cid	\
				),					\
				TP_ARGS(wc, cid))
62 
/*
 * Records the transport header fields (XID, version, procedure) of a
 * received reply, along with the peer's presentation address and port.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_class,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
		__string(port, rpcrdma_portstr(rep->rr_rxprt))
	),

	TP_fast_assign(
		/* Header fields arrive in network byte order */
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
		__get_str(addr), __get_str(port),
		__entry->xid, __entry->version, __entry->proc
	)
);

/* Instantiate an xprtrdma_reply_<name>_err tracepoint */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_class,			\
				xprtrdma_reply_##name##_err,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
99 
/*
 * Minimal transport event: records the r_xprt pointer and the peer's
 * presentation address and port.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate one tracepoint based on xprtrdma_rxprt */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
130 
/*
 * Connection state change: records the caller-supplied result code
 * and the endpoint's current re_connect_status.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->rc, __entry->connect_status
	)
);

/* Instantiate an xprtrdma_<name> connection tracepoint */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
168 
/*
 * Read chunk event: records one MR-backed segment (handle/length/offset)
 * being added to a Read list, its XDR position, and whether more
 * segments follow (nents < nsegs).
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		/* "last" when this MR covered all remaining segments */
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate an xprtrdma_chunk_<name> Read chunk tracepoint */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
218 
/*
 * Write/Reply chunk event: like xprtrdma_rdch_event but without an
 * XDR position field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		/* "last" when this MR covered all remaining segments */
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate an xprtrdma_chunk_<name> Write/Reply chunk tracepoint */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
264 
/* Export DMA direction values so user space can decode them */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Pretty-print a dma_data_direction value */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
276 
/*
 * MR event for an MR that is attached to a request: records the owning
 * task (found via mr->mr_req->rl_slot.rq_task), the MR's resource ID,
 * and its mapping (nents, handle, length, offset, DMA direction).
 */
DECLARE_EVENT_CLASS(xprtrdma_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		const struct rpcrdma_req *req = mr->mr_req;
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate an xprtrdma_mr_<name> tracepoint */
#define DEFINE_MR_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_mr_class,				\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
324 
/*
 * MR event for an MR with no associated rpc_task (e.g. after the
 * request has completed): same fields as xprtrdma_mr_class minus
 * the task/client IDs.
 */
DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate an xprtrdma_mr_<name> tracepoint without task context */
#define DEFINE_ANON_MR_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
				xprtrdma_mr_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_mr *mr	\
				),					\
				TP_ARGS(mr))
364 
/*
 * Backchannel event: records the rqst's XID and the peer's
 * presentation address and port.
 */
DECLARE_EVENT_CLASS(xprtrdma_callback_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpc_rqst *rqst
	),

	TP_ARGS(r_xprt, rqst),

	TP_STRUCT__entry(
		__field(u32, xid)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s xid=0x%08x",
		__get_str(addr), __get_str(port), __entry->xid
	)
);

/* Instantiate an xprtrdma_cb_<name> backchannel tracepoint */
#define DEFINE_CALLBACK_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_callback_class,			\
				xprtrdma_cb_##name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(r_xprt, rqst))
398 
399 /**
400  ** Connection events
401  **/
402 
/*
 * Reports the negotiated and locally calculated inline send/receive
 * thresholds for an endpoint, plus the connection's source and
 * destination addresses (stored raw, decoded with %pISpc).
 */
TRACE_EVENT(xprtrdma_inline_thresh,
	TP_PROTO(
		const struct rpcrdma_ep *ep
	),

	TP_ARGS(ep),

	TP_STRUCT__entry(
		__field(unsigned int, inline_send)
		__field(unsigned int, inline_recv)
		__field(unsigned int, max_send)
		__field(unsigned int, max_recv)
		/* sockaddr_in6 is large enough for both IPv4 and IPv6 */
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->inline_send = ep->re_inline_send;
		__entry->inline_recv = ep->re_inline_recv;
		__entry->max_send = ep->re_max_inline_send;
		__entry->max_recv = ep->re_max_inline_recv;
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
		__entry->srcaddr, __entry->dstaddr,
		__entry->inline_send, __entry->inline_recv,
		__entry->max_send, __entry->max_recv
	)
);

/* Connection state tracepoints */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

/* Fault-injection tracepoint */
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
443 
/*
 * Fires when a (re)connect is scheduled; records the requested delay
 * in jiffies.
 */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->delay
	)
);
471 
472 
/*
 * Records the connect and reconnect timeouts being set for a
 * transport; values are stored in jiffies and printed in seconds.
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	/* Divide by HZ to display seconds rather than jiffies */
	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
503 
/*
 * Fires on an asynchronous queue-pair event from the RDMA device;
 * records the device name, the event code, and both connection
 * addresses.
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_ep *ep,
		const struct ib_event *event
	),

	TP_ARGS(ep, event),

	TP_STRUCT__entry(
		__field(unsigned long, event)
		__string(name, event->device->name)
		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
	),

	TP_fast_assign(
		const struct rdma_cm_id *id = ep->re_id;

		__entry->event = event->event;
		__assign_str(name, event->device->name);
		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
		       sizeof(struct sockaddr_in6));
		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
		       sizeof(struct sockaddr_in6));
	),

	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
		__entry->srcaddr, __entry->dstaddr, __get_str(name),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
535 
536 /**
537  ** Call events
538  **/
539 
/*
 * Reports how many new MRs were created for a transport.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);
567 
/*
 * Fires when a request could not be marshaled because no MRs were
 * available; identifies the stalled task and the peer.
 */
TRACE_EVENT(xprtrdma_nomrs_err,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct rpcrdma_req *req
	),

	TP_ARGS(r_xprt, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s task:%u@%u",
		__get_str(addr), __get_str(port),
		__entry->task_id, __entry->client_id
	)
);

/* Chunk registration tracepoints */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
601 
/* Export chunk-type values so user space can decode them */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Pretty-print an rpcrdma chunk type */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
619 
/*
 * Records the result of marshaling a request: transport header
 * length, the three send-buffer section lengths, and the chosen
 * Read/Write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
663 
/*
 * Fires when marshaling a request fails; records the error code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
690 
/*
 * Fires when preparing the Send WR for a request fails; records the
 * error code.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
717 
/*
 * Fires when a Send WR is posted: records the send context's
 * completion IDs, the owning task, the SGE count, and whether the
 * WR requested a signaled completion.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;
		const struct rpcrdma_sendctx *sc = req->rl_sendctx;

		__entry->cq_id = sc->sc_cid.ci_queue_id;
		__entry->completion_id = sc->sc_cid.ci_completion_id;
		__entry->task_id = rqst->rq_task->tk_pid;
		/* Backchannel requests may have no rpc_clnt */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
	),

	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
		__entry->task_id, __entry->client_id,
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled" : "")
	)
);
754 
755 TRACE_EVENT(xprtrdma_post_recv,
756 	TP_PROTO(
757 		const struct rpcrdma_rep *rep
758 	),
759 
760 	TP_ARGS(rep),
761 
762 	TP_STRUCT__entry(
763 		__field(u32, cq_id)
764 		__field(int, completion_id)
765 	),
766 
767 	TP_fast_assign(
768 		__entry->cq_id = rep->rr_cid.ci_queue_id;
769 		__entry->completion_id = rep->rr_cid.ci_completion_id;
770 	),
771 
772 	TP_printk("cq.id=%d cid=%d",
773 		__entry->cq_id, __entry->completion_id
774 	)
775 );
776 
/*
 * Summarizes a batch Receive post: how many new WRs were requested,
 * how many Receives the endpoint now has outstanding, and the
 * posting status code.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep->re_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
809 
/*
 * Fires when posting a LOCAL_INV WR fails; records the owning task
 * and the status code.
 */
TRACE_EVENT(xprtrdma_post_linv_err,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_task *task = req->rl_slot.rq_task;

		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->status = status;
	),

	TP_printk("task:%u@%u status=%d",
		__entry->task_id, __entry->client_id, __entry->status
	)
);
836 
837 /**
838  ** Completion events
839  **/
840 
/* Work completion tracepoints, one per completion handler */
DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);
847 
/*
 * Records the result of allocating an FRWR; rc is the allocation
 * status code.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->rc = rc;
	),

	TP_printk("mr.id=%u: rc=%d",
		__entry->mr_id, __entry->rc
	)
);
870 
/*
 * Records the result of deregistering an FRWR, with the MR's full
 * mapping details and the deregistration status code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr_id  = mr->frwr.fr_mr->res.id;
		__entry->nents  = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr_id, __entry->nents, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
906 
/*
 * Fires when ib_map_mr_sg() returns an unexpected scatterlist entry
 * count for an FRWR.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
935 
/*
 * Fires when DMA-mapping an FRWR's scatterlist maps fewer entries
 * than expected (num_mapped of mr_nents).
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(u32, mr_id)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr_id = mr->frwr.fr_mr->res.id;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
		__entry->mr_id, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
966 
/* MR lifecycle tracepoints (with and without task context) */
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);

DEFINE_ANON_MR_EVENT(unmap);
DEFINE_ANON_MR_EVENT(recycle);
972 
973 TRACE_EVENT(xprtrdma_dma_maperr,
974 	TP_PROTO(
975 		u64 addr
976 	),
977 
978 	TP_ARGS(addr),
979 
980 	TP_STRUCT__entry(
981 		__field(u64, addr)
982 	),
983 
984 	TP_fast_assign(
985 		__entry->addr = addr;
986 	),
987 
988 	TP_printk("dma addr=0x%llx\n", __entry->addr)
989 );
990 
991 /**
992  ** Reply events
993  **/
994 
/*
 * Fires when a reply is matched to its rpc_task; records the XID
 * and the credit grant carried by the reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		unsigned int credits
	),

	TP_ARGS(task, rep, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x credits=%u",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits
	)
);

/* Reply processing error tracepoints */
DEFINE_REPLY_EVENT(vers);
DEFINE_REPLY_EVENT(rqst);
DEFINE_REPLY_EVENT(short);
DEFINE_REPLY_EVENT(hdr);
1028 
/*
 * Fires on an RDMA_ERROR reply reporting a version mismatch;
 * records the peer's supported version range.
 */
TRACE_EVENT(xprtrdma_err_vers,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		__be32 *min,
		__be32 *max
	),

	TP_ARGS(rqst, min, max),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(u32, min)
		__field(u32, max)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->min = be32_to_cpup(min);
		__entry->max = be32_to_cpup(max);
	),

	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->min, __entry->max
	)
);
1059 
/*
 * Fires on an RDMA_ERROR reply reporting a chunk error; identifies
 * the affected request by task and XID.
 */
TRACE_EVENT(xprtrdma_err_chunk,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x",
		__entry->task_id, __entry->client_id, __entry->xid
	)
);
1083 
1084 TRACE_EVENT(xprtrdma_err_unrecognized,
1085 	TP_PROTO(
1086 		const struct rpc_rqst *rqst,
1087 		__be32 *procedure
1088 	),
1089 
1090 	TP_ARGS(rqst, procedure),
1091 
1092 	TP_STRUCT__entry(
1093 		__field(unsigned int, task_id)
1094 		__field(unsigned int, client_id)
1095 		__field(u32, xid)
1096 		__field(u32, procedure)
1097 	),
1098 
1099 	TP_fast_assign(
1100 		__entry->task_id = rqst->rq_task->tk_pid;
1101 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1102 		__entry->procedure = be32_to_cpup(procedure);
1103 	),
1104 
1105 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1106 		__entry->task_id, __entry->client_id, __entry->xid,
1107 		__entry->procedure
1108 	)
1109 );
1110 
/*
 * Records how many reply bytes had to be copied ("fixed up") into
 * the receive buffer, plus the receive buffer's section lengths.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1142 
/*
 * Records one decoded RDMA segment: handle, length, and offset.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1169 
/*
 * Fires when a task's MRs are zapped; identifies only the task.
 */
TRACE_EVENT(xprtrdma_mrs_zap,
	TP_PROTO(
		const struct rpc_task *task
	),

	TP_ARGS(task),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
	),

	TP_printk("task:%u@%u",
		__entry->task_id, __entry->client_id
	)
);
1191 
1192 /**
1193  ** Callback events
1194  **/
1195 
/*
 * Reports backchannel setup on a client transport: the rpcrdma_xprt
 * pointer, the number of backchannel requests provisioned, and the
 * peer's address and port.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);
1223 
/*
 * Instantiate the backchannel call and reply events via the
 * DEFINE_CALLBACK_EVENT helper (defined earlier in this header).
 */
DEFINE_CALLBACK_EVENT(call);
DEFINE_CALLBACK_EVENT(reply);
1226 
1227 /**
1228  ** Server-side RPC/RDMA events
1229  **/
1230 
/*
 * Event class for server-side accept-path failures: records the
 * remote peer address and a long status code.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);
1253 
/*
 * DEFINE_ACCEPT_EVENT(name) instantiates an svcrdma_<name>_err
 * event from svcrdma_accept_class.
 */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1267 
/*
 * Export the RPC/RDMA proc enum values to user space so the
 * symbolic names printed by show_rpcrdma_proc() can be resolved.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA proc value to its symbolic name for TP_printk. */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1281 
/*
 * Reports a parsed ingress RPC/RDMA transport header.  The four
 * be32 words at @p are consumed in order: xid, vers, credits, proc.
 * Tagged with the Receive completion ID so the event can be
 * correlated with its Receive completion.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1316 
/*
 * Error event for the "short header" decode failure path: records
 * only the Receive completion ID and the available header length.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		unsigned int hdrlen
	),

	TP_ARGS(ctxt, hdrlen),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("cq.id=%u cid=%d hdrlen=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->hdrlen)
);
1341 
/*
 * Event class for malformed ingress transport headers.  Like
 * svcrdma_decode_rqst, it consumes four be32 words from @p
 * (xid, vers, credits, proc), but prints proc numerically with no
 * symbolic translation.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		const struct svc_rdma_recv_ctxt *ctxt,
		__be32 *p
	),

	TP_ARGS(ctxt, p),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->cq_id, __entry->completion_id,
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1372 
/*
 * DEFINE_BADREQ_EVENT(name) instantiates svcrdma_decode_<name>_err
 * from svcrdma_badreq_event.
 */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					const struct svc_rdma_recv_ctxt *ctxt,	\
					__be32 *p			\
				),					\
				TP_ARGS(ctxt, p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1386 
/*
 * Records one Write segment being encoded into a reply: the segment
 * index plus its handle/length/offset triple, tagged with the Send
 * completion ID taken from the send ctxt.
 */
TRACE_EVENT(svcrdma_encode_wseg,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt,
		u32 segno,
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(ctxt, segno, handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->segno = segno;
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1422 
/*
 * Records a decoded Read segment: its handle/length/offset triple
 * plus the chunk's position.  NOTE(review): the 'segno' field is
 * assigned chunk->ch_segcount (the chunk's segment count at this
 * point), not a caller-supplied segment index.
 */
TRACE_EVENT(svcrdma_decode_rseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		const struct svc_rdma_segment *segment
	),

	TP_ARGS(cid, chunk, segment),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, position)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = chunk->ch_segcount;
		__entry->position = chunk->ch_position;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->position, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1458 
/*
 * Records segment @segno of a decoded Write chunk; the
 * handle/length/offset triple is read from
 * chunk->ch_segments[segno].
 */
TRACE_EVENT(svcrdma_decode_wseg,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		const struct svc_rdma_chunk *chunk,
		u32 segno
	),

	TP_ARGS(cid, chunk, segno),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(u32, segno)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		const struct svc_rdma_segment *segment =
			&chunk->ch_segments[segno];

		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->segno = segno;
		__entry->handle = segment->rs_handle;
		__entry->length = segment->rs_length;
		__entry->offset = segment->rs_offset;
	),

	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->segno, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle
	)
);
1495 
/*
 * Event class for transport-header error events: records only the
 * XID, converted from wire (big-endian) byte order.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1515 
/*
 * DEFINE_ERROR_EVENT(name) instantiates svcrdma_err_<name> from
 * svcrdma_error_event.
 */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1525 
1526 /**
1527  ** Server-side RDMA API events
1528  **/
1529 
/*
 * Event class for server-side DMA-mapping events: records the DMA
 * address and length along with the device name and remote peer
 * address.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);
1558 
/*
 * DEFINE_SVC_DMA_EVENT(name) instantiates svcrdma_<name> from
 * svcrdma_dma_map_class.
 */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_map_err);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1571 
/*
 * Error event for DMA-mapping failures on the RDMA Read/Write path:
 * records the entry count (nents) and the failing status, plus the
 * device name and remote peer address.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1600 
1601 TRACE_EVENT(svcrdma_no_rwctx_err,
1602 	TP_PROTO(
1603 		const struct svcxprt_rdma *rdma,
1604 		unsigned int num_sges
1605 	),
1606 
1607 	TP_ARGS(rdma, num_sges),
1608 
1609 	TP_STRUCT__entry(
1610 		__field(unsigned int, num_sges)
1611 		__string(device, rdma->sc_cm_id->device->name)
1612 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1613 	),
1614 
1615 	TP_fast_assign(
1616 		__entry->num_sges = num_sges;
1617 		__assign_str(device, rdma->sc_cm_id->device->name);
1618 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1619 	),
1620 
1621 	TP_printk("addr=%s device=%s num_sges=%d",
1622 		__get_str(addr), __get_str(device), __entry->num_sges
1623 	)
1624 );
1625 
/*
 * Error event for a page overrun while handling @rqst: records the
 * offending page number and the request's XID, plus the device name
 * and remote peer address.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1653 
/*
 * Error event for a Write chunk too small for the payload: records
 * the unconsumed byte count and which segment (of how many) was
 * being processed, plus the device name and remote peer address.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1685 
1686 TRACE_EVENT(svcrdma_send_pullup,
1687 	TP_PROTO(
1688 		const struct svc_rdma_send_ctxt *ctxt,
1689 		unsigned int msglen
1690 	),
1691 
1692 	TP_ARGS(ctxt, msglen),
1693 
1694 	TP_STRUCT__entry(
1695 		__field(u32, cq_id)
1696 		__field(int, completion_id)
1697 		__field(unsigned int, hdrlen)
1698 		__field(unsigned int, msglen)
1699 	),
1700 
1701 	TP_fast_assign(
1702 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1703 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1704 		__entry->hdrlen = ctxt->sc_hdrbuf.len,
1705 		__entry->msglen = msglen;
1706 	),
1707 
1708 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1709 		__entry->cq_id, __entry->completion_id,
1710 		__entry->hdrlen, __entry->msglen,
1711 		__entry->hdrlen + __entry->msglen)
1712 );
1713 
/*
 * Error event for a failed reply Send: records the request's XID,
 * the error status, and the remote peer address.
 */
TRACE_EVENT(svcrdma_send_err,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
		__entry->xid, __entry->status
	)
);
1738 
/*
 * Records the posting of a Send WR: the completion ID, the number
 * of SGEs, and the rkey to invalidate (zero unless the work request
 * opcode is IB_WR_SEND_WITH_INV).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct svc_rdma_send_ctxt *ctxt
	),

	TP_ARGS(ctxt),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		const struct ib_send_wr *wr = &ctxt->sc_send_wr;

		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
		__entry->num_sge = wr->num_sge;
		/* inv_rkey is only meaningful for Send-with-Invalidate */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
		__entry->cq_id, __entry->completion_id,
		__entry->num_sge, __entry->inv_rkey
	)
);
1768 
/* Completion event for Send WRs, from rpcrdma_completion_class. */
DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1770 
1771 TRACE_EVENT(svcrdma_post_recv,
1772 	TP_PROTO(
1773 		const struct svc_rdma_recv_ctxt *ctxt
1774 	),
1775 
1776 	TP_ARGS(ctxt),
1777 
1778 	TP_STRUCT__entry(
1779 		__field(u32, cq_id)
1780 		__field(int, completion_id)
1781 	),
1782 
1783 	TP_fast_assign(
1784 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1785 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1786 	),
1787 
1788 	TP_printk("cq.id=%d cid=%d",
1789 		__entry->cq_id, __entry->completion_id
1790 	)
1791 );
1792 
/* Completion event for Receive WRs, from rpcrdma_completion_class. */
DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
1794 
/*
 * Error event for a failed Receive post: records the error status
 * and the remote peer address.
 */
TRACE_EVENT(svcrdma_rq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%d",
		__get_str(addr), __entry->status
	)
);
1817 
/*
 * Event class for posting a chain of chunk WRs: records the
 * completion ID and how many Send Queue entries the chain consumes.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id,
		__entry->sqecount
	)
);
1843 
/*
 * DEFINE_POST_CHUNK_EVENT(name) instantiates
 * svcrdma_post_<name>_chunk from svcrdma_post_chunk_class.
 */
#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);

/* Completion events for RDMA Read and Write WRs. */
DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1859 
/*
 * Records an asynchronous QP event: the ib_event code (printed both
 * symbolically and numerically), the device name, and the peer's
 * socket address formatted with %pISpc.
 */
TRACE_EVENT(svcrdma_qp_error,
	TP_PROTO(
		const struct ib_event *event,
		const struct sockaddr *sap
	),

	TP_ARGS(event, sap),

	TP_STRUCT__entry(
		__field(unsigned int, event)
		__string(device, event->device->name)
		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
	),

	TP_fast_assign(
		__entry->event = event->event;
		__assign_str(device, event->device->name);
		/* size - 1 is extra-conservative; snprintf always
		 * NUL-terminates within the given size anyway */
		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
			 "%pISpc", sap);
	),

	TP_printk("addr=%s dev=%s event=%s (%u)",
		__entry->addr, __get_str(device),
		rdma_show_ib_event(__entry->event), __entry->event
	)
);
1886 
/*
 * Event class for Send Queue accounting: samples the available SQ
 * entry count against the queue depth, with the remote peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1910 
/*
 * DEFINE_SQ_EVENT(name) instantiates svcrdma_sq_<name> from
 * svcrdma_sendqueue_event.
 */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1920 
/*
 * Error event for a failed Send Queue post: records the error
 * status together with the SQ avail/depth sample and the remote
 * peer address.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
1948 
1949 #endif /* _TRACE_RPCRDMA_H */
1950 
1951 #include <trace/define_trace.h>
1952