xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 14474950)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
/*
 * Event class for problematic RPC/RDMA replies: records the rep and its
 * owning transport, plus the XID, RPC/RDMA version, and procedure number
 * decoded (big-endian to CPU order) from the reply's transport header.
 */
21 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
22 	TP_PROTO(
23 		const struct rpcrdma_rep *rep
24 	),
25 
26 	TP_ARGS(rep),
27 
28 	TP_STRUCT__entry(
29 		__field(const void *, rep)
30 		__field(const void *, r_xprt)
31 		__field(u32, xid)
32 		__field(u32, version)
33 		__field(u32, proc)
34 	),
35 
36 	TP_fast_assign(
37 		__entry->rep = rep;
38 		__entry->r_xprt = rep->rr_rxprt;
39 		__entry->xid = be32_to_cpu(rep->rr_xid);
40 		__entry->version = be32_to_cpu(rep->rr_vers);
41 		__entry->proc = be32_to_cpu(rep->rr_proc);
42 	),
43 
44 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
45 		__entry->r_xprt, __entry->xid, __entry->rep,
46 		__entry->version, __entry->proc
47 	)
48 );
49 
/* Instantiate a named tracepoint from the xprtrdma_reply_event class. */
50 #define DEFINE_REPLY_EVENT(name)					\
51 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
52 				TP_PROTO(				\
53 					const struct rpcrdma_rep *rep	\
54 				),					\
55 				TP_ARGS(rep))
56 
/*
 * Event class for transport-scoped events: records the rpcrdma_xprt
 * pointer and the peer's presentation address and port strings.
 */
57 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
58 	TP_PROTO(
59 		const struct rpcrdma_xprt *r_xprt
60 	),
61 
62 	TP_ARGS(r_xprt),
63 
64 	TP_STRUCT__entry(
65 		__field(const void *, r_xprt)
66 		__string(addr, rpcrdma_addrstr(r_xprt))
67 		__string(port, rpcrdma_portstr(r_xprt))
68 	),
69 
70 	TP_fast_assign(
71 		__entry->r_xprt = r_xprt;
72 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
73 		__assign_str(port, rpcrdma_portstr(r_xprt));
74 	),
75 
76 	TP_printk("peer=[%s]:%s r_xprt=%p",
77 		__get_str(addr), __get_str(port), __entry->r_xprt
78 	)
79 );
80 
/* Instantiate a named tracepoint from the xprtrdma_rxprt class. */
81 #define DEFINE_RXPRT_EVENT(name)					\
82 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
83 				TP_PROTO(				\
84 					const struct rpcrdma_xprt *r_xprt \
85 				),					\
86 				TP_ARGS(r_xprt))
87 
/*
 * Event class for connection state changes: records the caller-supplied
 * return code plus the endpoint's current re_connect_status, along with
 * the transport pointer and peer address/port strings.
 */
88 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
89 	TP_PROTO(
90 		const struct rpcrdma_xprt *r_xprt,
91 		int rc
92 	),
93 
94 	TP_ARGS(r_xprt, rc),
95 
96 	TP_STRUCT__entry(
97 		__field(const void *, r_xprt)
98 		__field(int, rc)
99 		__field(int, connect_status)
100 		__string(addr, rpcrdma_addrstr(r_xprt))
101 		__string(port, rpcrdma_portstr(r_xprt))
102 	),
103 
104 	TP_fast_assign(
105 		__entry->r_xprt = r_xprt;
106 		__entry->rc = rc;
107 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
108 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
109 		__assign_str(port, rpcrdma_portstr(r_xprt));
110 	),
111 
112 	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
113 		__get_str(addr), __get_str(port), __entry->r_xprt,
114 		__entry->rc, __entry->connect_status
115 	)
116 );
117 
/* Instantiate xprtrdma_<name> from the connect event class. */
118 #define DEFINE_CONN_EVENT(name)						\
119 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
120 				TP_PROTO(				\
121 					const struct rpcrdma_xprt *r_xprt, \
122 					int rc				\
123 				),					\
124 				TP_ARGS(r_xprt, rc))
125 
/*
 * Event class for Read chunk registration: records the owning task,
 * the chunk's position in the RPC message, the MR's handle/length/offset,
 * and whether more segments remain (nents < nsegs -> "more").
 */
126 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
127 	TP_PROTO(
128 		const struct rpc_task *task,
129 		unsigned int pos,
130 		struct rpcrdma_mr *mr,
131 		int nsegs
132 	),
133 
134 	TP_ARGS(task, pos, mr, nsegs),
135 
136 	TP_STRUCT__entry(
137 		__field(unsigned int, task_id)
138 		__field(unsigned int, client_id)
139 		__field(unsigned int, pos)
140 		__field(int, nents)
141 		__field(u32, handle)
142 		__field(u32, length)
143 		__field(u64, offset)
144 		__field(int, nsegs)
145 	),
146 
147 	TP_fast_assign(
148 		__entry->task_id = task->tk_pid;
149 		__entry->client_id = task->tk_client->cl_clid;
150 		__entry->pos = pos;
151 		__entry->nents = mr->mr_nents;
152 		__entry->handle = mr->mr_handle;
153 		__entry->length = mr->mr_length;
154 		__entry->offset = mr->mr_offset;
155 		__entry->nsegs = nsegs;
156 	),
157 
158 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
159 		__entry->task_id, __entry->client_id,
160 		__entry->pos, __entry->length,
161 		(unsigned long long)__entry->offset, __entry->handle,
162 		__entry->nents < __entry->nsegs ? "more" : "last"
163 	)
164 );
165 
/* Instantiate xprtrdma_chunk_<name> from the Read chunk event class. */
166 #define DEFINE_RDCH_EVENT(name)						\
167 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
168 				TP_PROTO(				\
169 					const struct rpc_task *task,	\
170 					unsigned int pos,		\
171 					struct rpcrdma_mr *mr,		\
172 					int nsegs			\
173 				),					\
174 				TP_ARGS(task, pos, mr, nsegs))
175 
/*
 * Event class for Write/Reply chunk registration. Same fields as the
 * Read chunk class minus the "pos" field, which Write and Reply chunks
 * do not carry.
 */
176 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
177 	TP_PROTO(
178 		const struct rpc_task *task,
179 		struct rpcrdma_mr *mr,
180 		int nsegs
181 	),
182 
183 	TP_ARGS(task, mr, nsegs),
184 
185 	TP_STRUCT__entry(
186 		__field(unsigned int, task_id)
187 		__field(unsigned int, client_id)
188 		__field(int, nents)
189 		__field(u32, handle)
190 		__field(u32, length)
191 		__field(u64, offset)
192 		__field(int, nsegs)
193 	),
194 
195 	TP_fast_assign(
196 		__entry->task_id = task->tk_pid;
197 		__entry->client_id = task->tk_client->cl_clid;
198 		__entry->nents = mr->mr_nents;
199 		__entry->handle = mr->mr_handle;
200 		__entry->length = mr->mr_length;
201 		__entry->offset = mr->mr_offset;
202 		__entry->nsegs = nsegs;
203 	),
204 
205 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
206 		__entry->task_id, __entry->client_id,
207 		__entry->length, (unsigned long long)__entry->offset,
208 		__entry->handle,
209 		__entry->nents < __entry->nsegs ? "more" : "last"
210 	)
211 );
212 
/* Instantiate xprtrdma_chunk_<name> from the Write chunk event class. */
213 #define DEFINE_WRCH_EVENT(name)						\
214 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
215 				TP_PROTO(				\
216 					const struct rpc_task *task,	\
217 					struct rpcrdma_mr *mr,		\
218 					int nsegs			\
219 				),					\
220 				TP_ARGS(task, mr, nsegs))
221 
/*
 * Event class for FRWR work-request completions: records the MR resource
 * id and the work completion status. vendor_err is captured only when
 * the completion failed (it is meaningless on success).
 */
222 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
223 	TP_PROTO(
224 		const struct ib_wc *wc,
225 		const struct rpcrdma_frwr *frwr
226 	),
227 
228 	TP_ARGS(wc, frwr),
229 
230 	TP_STRUCT__entry(
231 		__field(u32, mr_id)
232 		__field(unsigned int, status)
233 		__field(unsigned int, vendor_err)
234 	),
235 
236 	TP_fast_assign(
237 		__entry->mr_id = frwr->fr_mr->res.id;
238 		__entry->status = wc->status;
239 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
240 	),
241 
242 	TP_printk(
243 		"mr.id=%u: %s (%u/0x%x)",
244 		__entry->mr_id, rdma_show_wc_status(__entry->status),
245 		__entry->status, __entry->vendor_err
246 	)
247 );
248 
/* Instantiate a named tracepoint from the FRWR completion class. */
249 #define DEFINE_FRWR_DONE_EVENT(name)					\
250 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
251 				TP_PROTO(				\
252 					const struct ib_wc *wc,		\
253 					const struct rpcrdma_frwr *frwr	\
254 				),					\
255 				TP_ARGS(wc, frwr))
256 
/*
 * Export the DMA direction enum values so user space tools can resolve
 * the symbolic names printed by xprtrdma_show_direction() below.
 */
257 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
258 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
259 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
260 TRACE_DEFINE_ENUM(DMA_NONE);
261 
/* Render a DMA data direction value as a human-readable string. */
262 #define xprtrdma_show_direction(x)					\
263 		__print_symbolic(x,					\
264 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
265 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
266 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
267 				{ DMA_NONE, "NONE" })
268 
/*
 * Event class for MR lifecycle events: records the MR's resource id,
 * scatterlist entry count, RDMA handle/length/offset, and DMA direction.
 */
269 DECLARE_EVENT_CLASS(xprtrdma_mr,
270 	TP_PROTO(
271 		const struct rpcrdma_mr *mr
272 	),
273 
274 	TP_ARGS(mr),
275 
276 	TP_STRUCT__entry(
277 		__field(u32, mr_id)
278 		__field(int, nents)
279 		__field(u32, handle)
280 		__field(u32, length)
281 		__field(u64, offset)
282 		__field(u32, dir)
283 	),
284 
285 	TP_fast_assign(
286 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
287 		__entry->nents  = mr->mr_nents;
288 		__entry->handle = mr->mr_handle;
289 		__entry->length = mr->mr_length;
290 		__entry->offset = mr->mr_offset;
291 		__entry->dir    = mr->mr_dir;
292 	),
293 
294 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
295 		__entry->mr_id, __entry->nents, __entry->length,
296 		(unsigned long long)__entry->offset, __entry->handle,
297 		xprtrdma_show_direction(__entry->dir)
298 	)
299 );
300 
/* Instantiate xprtrdma_mr_<name> from the MR event class. */
301 #define DEFINE_MR_EVENT(name) \
302 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
303 				TP_PROTO( \
304 					const struct rpcrdma_mr *mr \
305 				), \
306 				TP_ARGS(mr))
307 
/*
 * Event class for backchannel (callback) RPCs: records the rqst, its
 * rpcrdma_req, the req's cached reply pointer, and the XID.
 */
308 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
309 	TP_PROTO(
310 		const struct rpc_rqst *rqst
311 	),
312 
313 	TP_ARGS(rqst),
314 
315 	TP_STRUCT__entry(
316 		__field(const void *, rqst)
317 		__field(const void *, rep)
318 		__field(const void *, req)
319 		__field(u32, xid)
320 	),
321 
322 	TP_fast_assign(
323 		__entry->rqst = rqst;
324 		__entry->req = rpcr_to_rdmar(rqst);
325 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
326 		__entry->xid = be32_to_cpu(rqst->rq_xid);
327 	),
328 
329 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
330 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
331 	)
332 );
333 
/* Instantiate a named tracepoint from the backchannel event class. */
334 #define DEFINE_CB_EVENT(name)						\
335 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
336 				TP_PROTO(				\
337 					const struct rpc_rqst *rqst	\
338 				),					\
339 				TP_ARGS(rqst))
340 
341 /**
342  ** Connection events
343  **/
344 
/*
 * Records the negotiated and calculated inline send/receive thresholds
 * for an endpoint, along with the connection's source and destination
 * addresses (copied as raw sockaddr_in6-sized buffers for %pISpc).
 */
345 TRACE_EVENT(xprtrdma_inline_thresh,
346 	TP_PROTO(
347 		const struct rpcrdma_ep *ep
348 	),
349 
350 	TP_ARGS(ep),
351 
352 	TP_STRUCT__entry(
353 		__field(unsigned int, inline_send)
354 		__field(unsigned int, inline_recv)
355 		__field(unsigned int, max_send)
356 		__field(unsigned int, max_recv)
357 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
358 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
359 	),
360 
361 	TP_fast_assign(
362 		const struct rdma_cm_id *id = ep->re_id;
363 
364 		__entry->inline_send = ep->re_inline_send;
365 		__entry->inline_recv = ep->re_inline_recv;
366 		__entry->max_send = ep->re_max_inline_send;
367 		__entry->max_recv = ep->re_max_inline_recv;
368 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
369 		       sizeof(struct sockaddr_in6));
370 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
371 		       sizeof(struct sockaddr_in6));
372 	),
373 
374 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
375 		__entry->srcaddr, __entry->dstaddr,
376 		__entry->inline_send, __entry->inline_recv,
377 		__entry->max_send, __entry->max_recv
378 	)
379 );
380 
/* Connection state-change tracepoints (xprtrdma_connect/_disconnect). */
381 DEFINE_CONN_EVENT(connect);
382 DEFINE_CONN_EVENT(disconnect);
383 
/* Transport-scoped operation tracepoints. */
384 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
385 DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
386 
/*
 * Records a connect request on a transport, including the caller's
 * scheduling delay (in jiffies, printed raw).
 */
387 TRACE_EVENT(xprtrdma_op_connect,
388 	TP_PROTO(
389 		const struct rpcrdma_xprt *r_xprt,
390 		unsigned long delay
391 	),
392 
393 	TP_ARGS(r_xprt, delay),
394 
395 	TP_STRUCT__entry(
396 		__field(const void *, r_xprt)
397 		__field(unsigned long, delay)
398 		__string(addr, rpcrdma_addrstr(r_xprt))
399 		__string(port, rpcrdma_portstr(r_xprt))
400 	),
401 
402 	TP_fast_assign(
403 		__entry->r_xprt = r_xprt;
404 		__entry->delay = delay;
405 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
406 		__assign_str(port, rpcrdma_portstr(r_xprt));
407 	),
408 
409 	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
410 		__get_str(addr), __get_str(port), __entry->r_xprt,
411 		__entry->delay
412 	)
413 );
414 
415 
/*
 * Records new connect/reconnect timeout values for a transport. The
 * values are stored in jiffies but printed in seconds (divided by HZ).
 */
416 TRACE_EVENT(xprtrdma_op_set_cto,
417 	TP_PROTO(
418 		const struct rpcrdma_xprt *r_xprt,
419 		unsigned long connect,
420 		unsigned long reconnect
421 	),
422 
423 	TP_ARGS(r_xprt, connect, reconnect),
424 
425 	TP_STRUCT__entry(
426 		__field(const void *, r_xprt)
427 		__field(unsigned long, connect)
428 		__field(unsigned long, reconnect)
429 		__string(addr, rpcrdma_addrstr(r_xprt))
430 		__string(port, rpcrdma_portstr(r_xprt))
431 	),
432 
433 	TP_fast_assign(
434 		__entry->r_xprt = r_xprt;
435 		__entry->connect = connect;
436 		__entry->reconnect = reconnect;
437 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
438 		__assign_str(port, rpcrdma_portstr(r_xprt));
439 	),
440 
441 	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
442 		__get_str(addr), __get_str(port), __entry->r_xprt,
443 		__entry->connect / HZ, __entry->reconnect / HZ
444 	)
445 );
446 
/*
 * Records an IB queue pair event on an endpoint: the event code (with
 * symbolic name via rdma_show_ib_event), the device name, and the
 * connection's source and destination addresses.
 */
447 TRACE_EVENT(xprtrdma_qp_event,
448 	TP_PROTO(
449 		const struct rpcrdma_ep *ep,
450 		const struct ib_event *event
451 	),
452 
453 	TP_ARGS(ep, event),
454 
455 	TP_STRUCT__entry(
456 		__field(unsigned long, event)
457 		__string(name, event->device->name)
458 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
459 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
460 	),
461 
462 	TP_fast_assign(
463 		const struct rdma_cm_id *id = ep->re_id;
464 
465 		__entry->event = event->event;
466 		__assign_str(name, event->device->name);
467 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
468 		       sizeof(struct sockaddr_in6));
469 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
470 		       sizeof(struct sockaddr_in6));
471 	),
472 
473 	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
474 		__entry->srcaddr, __entry->dstaddr, __get_str(name),
475 		rdma_show_ib_event(__entry->event), __entry->event
476 	)
477 );
478 
479 /**
480  ** Call events
481  **/
482 
/*
 * Records how many new MRs were allocated for a transport.
 */
483 TRACE_EVENT(xprtrdma_createmrs,
484 	TP_PROTO(
485 		const struct rpcrdma_xprt *r_xprt,
486 		unsigned int count
487 	),
488 
489 	TP_ARGS(r_xprt, count),
490 
491 	TP_STRUCT__entry(
492 		__field(const void *, r_xprt)
493 		__string(addr, rpcrdma_addrstr(r_xprt))
494 		__string(port, rpcrdma_portstr(r_xprt))
495 		__field(unsigned int, count)
496 	),
497 
498 	TP_fast_assign(
499 		__entry->r_xprt = r_xprt;
500 		__entry->count = count;
501 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
502 		__assign_str(port, rpcrdma_portstr(r_xprt));
503 	),
504 
505 	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
506 		__get_str(addr), __get_str(port), __entry->r_xprt,
507 		__entry->count
508 	)
509 );
510 
/*
 * Records a request's attempt to acquire an MR from the buffer pool:
 * the req, the owning task, and the request's XID.
 */
511 TRACE_EVENT(xprtrdma_mr_get,
512 	TP_PROTO(
513 		const struct rpcrdma_req *req
514 	),
515 
516 	TP_ARGS(req),
517 
518 	TP_STRUCT__entry(
519 		__field(const void *, req)
520 		__field(unsigned int, task_id)
521 		__field(unsigned int, client_id)
522 		__field(u32, xid)
523 	),
524 
525 	TP_fast_assign(
526 		const struct rpc_rqst *rqst = &req->rl_slot;
527 
528 		__entry->req = req;
529 		__entry->task_id = rqst->rq_task->tk_pid;
530 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
531 		__entry->xid = be32_to_cpu(rqst->rq_xid);
532 	),
533 
534 	TP_printk("task:%u@%u xid=0x%08x req=%p",
535 		__entry->task_id, __entry->client_id, __entry->xid,
536 		__entry->req
537 	)
538 );
539 
/*
 * Records MR pool exhaustion for a request. Captures the same fields
 * as xprtrdma_mr_get; the two events differ only in name.
 */
540 TRACE_EVENT(xprtrdma_nomrs,
541 	TP_PROTO(
542 		const struct rpcrdma_req *req
543 	),
544 
545 	TP_ARGS(req),
546 
547 	TP_STRUCT__entry(
548 		__field(const void *, req)
549 		__field(unsigned int, task_id)
550 		__field(unsigned int, client_id)
551 		__field(u32, xid)
552 	),
553 
554 	TP_fast_assign(
555 		const struct rpc_rqst *rqst = &req->rl_slot;
556 
557 		__entry->req = req;
558 		__entry->task_id = rqst->rq_task->tk_pid;
559 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
560 		__entry->xid = be32_to_cpu(rqst->rq_xid);
561 	),
562 
563 	TP_printk("task:%u@%u xid=0x%08x req=%p",
564 		__entry->task_id, __entry->client_id, __entry->xid,
565 		__entry->req
566 	)
567 );
568 
/* Chunk registration tracepoints: xprtrdma_chunk_read/_write/_reply. */
569 DEFINE_RDCH_EVENT(read);
570 DEFINE_WRCH_EVENT(write);
571 DEFINE_WRCH_EVENT(reply);
572 
/*
 * Export the chunk type enum values so user space tools can resolve
 * the symbolic names printed by xprtrdma_show_chunktype() below.
 */
573 TRACE_DEFINE_ENUM(rpcrdma_noch);
574 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
575 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
576 TRACE_DEFINE_ENUM(rpcrdma_readch);
577 TRACE_DEFINE_ENUM(rpcrdma_areadch);
578 TRACE_DEFINE_ENUM(rpcrdma_writech);
579 TRACE_DEFINE_ENUM(rpcrdma_replych);
580 
/* Render an rpcrdma chunk type as a human-readable string. */
581 #define xprtrdma_show_chunktype(x)					\
582 		__print_symbolic(x,					\
583 				{ rpcrdma_noch, "inline" },		\
584 				{ rpcrdma_noch_pullup, "pullup" },	\
585 				{ rpcrdma_noch_mapped, "mapped" },	\
586 				{ rpcrdma_readch, "read list" },	\
587 				{ rpcrdma_areadch, "*read list" },	\
588 				{ rpcrdma_writech, "write list" },	\
589 				{ rpcrdma_replych, "reply chunk" })
590 
/*
 * Records the result of marshaling an RPC Call: the transport header
 * length, the send buffer's head/page/tail sizes, and the chosen Read
 * and Write chunk types.
 */
591 TRACE_EVENT(xprtrdma_marshal,
592 	TP_PROTO(
593 		const struct rpcrdma_req *req,
594 		unsigned int rtype,
595 		unsigned int wtype
596 	),
597 
598 	TP_ARGS(req, rtype, wtype),
599 
600 	TP_STRUCT__entry(
601 		__field(unsigned int, task_id)
602 		__field(unsigned int, client_id)
603 		__field(u32, xid)
604 		__field(unsigned int, hdrlen)
605 		__field(unsigned int, headlen)
606 		__field(unsigned int, pagelen)
607 		__field(unsigned int, taillen)
608 		__field(unsigned int, rtype)
609 		__field(unsigned int, wtype)
610 	),
611 
612 	TP_fast_assign(
613 		const struct rpc_rqst *rqst = &req->rl_slot;
614 
615 		__entry->task_id = rqst->rq_task->tk_pid;
616 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
617 		__entry->xid = be32_to_cpu(rqst->rq_xid);
618 		__entry->hdrlen = req->rl_hdrbuf.len;
619 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
620 		__entry->pagelen = rqst->rq_snd_buf.page_len;
621 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
622 		__entry->rtype = rtype;
623 		__entry->wtype = wtype;
624 	),
625 
626 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
627 		__entry->task_id, __entry->client_id, __entry->xid,
628 		__entry->hdrlen,
629 		__entry->headlen, __entry->pagelen, __entry->taillen,
630 		xprtrdma_show_chunktype(__entry->rtype),
631 		xprtrdma_show_chunktype(__entry->wtype)
632 	)
633 );
634 
/*
 * Records a header marshaling failure: the task, XID, and the negative
 * return code from the marshaling path.
 */
635 TRACE_EVENT(xprtrdma_marshal_failed,
636 	TP_PROTO(const struct rpc_rqst *rqst,
637 		 int ret
638 	),
639 
640 	TP_ARGS(rqst, ret),
641 
642 	TP_STRUCT__entry(
643 		__field(unsigned int, task_id)
644 		__field(unsigned int, client_id)
645 		__field(u32, xid)
646 		__field(int, ret)
647 	),
648 
649 	TP_fast_assign(
650 		__entry->task_id = rqst->rq_task->tk_pid;
651 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
652 		__entry->xid = be32_to_cpu(rqst->rq_xid);
653 		__entry->ret = ret;
654 	),
655 
656 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
657 		__entry->task_id, __entry->client_id, __entry->xid,
658 		__entry->ret
659 	)
660 );
661 
/*
 * Records a failure while preparing the Send WR for a request. Same
 * fields and format as xprtrdma_marshal_failed.
 */
662 TRACE_EVENT(xprtrdma_prepsend_failed,
663 	TP_PROTO(const struct rpc_rqst *rqst,
664 		 int ret
665 	),
666 
667 	TP_ARGS(rqst, ret),
668 
669 	TP_STRUCT__entry(
670 		__field(unsigned int, task_id)
671 		__field(unsigned int, client_id)
672 		__field(u32, xid)
673 		__field(int, ret)
674 	),
675 
676 	TP_fast_assign(
677 		__entry->task_id = rqst->rq_task->tk_pid;
678 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
679 		__entry->xid = be32_to_cpu(rqst->rq_xid);
680 		__entry->ret = ret;
681 	),
682 
683 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
684 		__entry->task_id, __entry->client_id, __entry->xid,
685 		__entry->ret
686 	)
687 );
688 
/*
 * Records posting a Send WR: the req and send context, the number of
 * SGEs, and whether the WR requests a signaled completion.
 */
689 TRACE_EVENT(xprtrdma_post_send,
690 	TP_PROTO(
691 		const struct rpcrdma_req *req
692 	),
693 
694 	TP_ARGS(req),
695 
696 	TP_STRUCT__entry(
697 		__field(const void *, req)
698 		__field(const void *, sc)
699 		__field(unsigned int, task_id)
700 		__field(unsigned int, client_id)
701 		__field(int, num_sge)
702 		__field(int, signaled)
703 	),
704 
705 	TP_fast_assign(
706 		const struct rpc_rqst *rqst = &req->rl_slot;
707 
		/* Backchannel requests may have no rpc_clnt: report -1. */
708 		__entry->task_id = rqst->rq_task->tk_pid;
709 		__entry->client_id = rqst->rq_task->tk_client ?
710 				     rqst->rq_task->tk_client->cl_clid : -1;
711 		__entry->req = req;
712 		__entry->sc = req->rl_sendctx;
713 		__entry->num_sge = req->rl_wr.num_sge;
714 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
715 	),
716 
717 	TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
718 		__entry->task_id, __entry->client_id,
719 		__entry->req, __entry->sc, __entry->num_sge,
720 		(__entry->num_sge == 1 ? "" : "s"),
721 		(__entry->signaled ? "signaled" : "")
722 	)
723 );
724 
/*
 * Records posting a single Receive WR; only the rep pointer is captured.
 */
725 TRACE_EVENT(xprtrdma_post_recv,
726 	TP_PROTO(
727 		const struct rpcrdma_rep *rep
728 	),
729 
730 	TP_ARGS(rep),
731 
732 	TP_STRUCT__entry(
733 		__field(const void *, rep)
734 	),
735 
736 	TP_fast_assign(
737 		__entry->rep = rep;
738 	),
739 
740 	TP_printk("rep=%p",
741 		__entry->rep
742 	)
743 );
744 
/*
 * Records a batch Receive refill: how many new Receives were requested,
 * how many are now outstanding on the endpoint, and the posting rc.
 */
745 TRACE_EVENT(xprtrdma_post_recvs,
746 	TP_PROTO(
747 		const struct rpcrdma_xprt *r_xprt,
748 		unsigned int count,
749 		int status
750 	),
751 
752 	TP_ARGS(r_xprt, count, status),
753 
754 	TP_STRUCT__entry(
755 		__field(const void *, r_xprt)
756 		__field(unsigned int, count)
757 		__field(int, status)
758 		__field(int, posted)
759 		__string(addr, rpcrdma_addrstr(r_xprt))
760 		__string(port, rpcrdma_portstr(r_xprt))
761 	),
762 
763 	TP_fast_assign(
764 		__entry->r_xprt = r_xprt;
765 		__entry->count = count;
766 		__entry->status = status;
767 		__entry->posted = r_xprt->rx_ep->re_receive_count;
768 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
769 		__assign_str(port, rpcrdma_portstr(r_xprt));
770 	),
771 
772 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
773 		__get_str(addr), __get_str(port), __entry->r_xprt,
774 		__entry->count, __entry->posted, __entry->status
775 	)
776 );
777 
/*
 * Records posting LOCAL_INV WRs for a request: the req, its XID, and
 * the posting status.
 */
778 TRACE_EVENT(xprtrdma_post_linv,
779 	TP_PROTO(
780 		const struct rpcrdma_req *req,
781 		int status
782 	),
783 
784 	TP_ARGS(req, status),
785 
786 	TP_STRUCT__entry(
787 		__field(const void *, req)
788 		__field(int, status)
789 		__field(u32, xid)
790 	),
791 
792 	TP_fast_assign(
793 		__entry->req = req;
794 		__entry->status = status;
795 		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
796 	),
797 
798 	TP_printk("req=%p xid=0x%08x status=%d",
799 		__entry->req, __entry->xid, __entry->status
800 	)
801 );
802 
803 /**
804  ** Completion events
805  **/
806 
/*
 * Records a Send completion: the send context, its owning req, how many
 * SGEs were DMA-unmapped, and the completion status. vendor_err is
 * captured only on failure.
 */
807 TRACE_EVENT(xprtrdma_wc_send,
808 	TP_PROTO(
809 		const struct rpcrdma_sendctx *sc,
810 		const struct ib_wc *wc
811 	),
812 
813 	TP_ARGS(sc, wc),
814 
815 	TP_STRUCT__entry(
816 		__field(const void *, req)
817 		__field(const void *, sc)
818 		__field(unsigned int, unmap_count)
819 		__field(unsigned int, status)
820 		__field(unsigned int, vendor_err)
821 	),
822 
823 	TP_fast_assign(
824 		__entry->req = sc->sc_req;
825 		__entry->sc = sc;
826 		__entry->unmap_count = sc->sc_unmap_count;
827 		__entry->status = wc->status;
828 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
829 	),
830 
831 	TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
832 		__entry->req, __entry->sc, __entry->unmap_count,
833 		rdma_show_wc_status(__entry->status),
834 		__entry->status, __entry->vendor_err
835 	)
836 );
837 
/*
 * Records a Receive completion. The rep is recovered from the CQE via
 * container_of. byte_len is valid only on success; vendor_err only on
 * failure — the unused one is zeroed.
 */
838 TRACE_EVENT(xprtrdma_wc_receive,
839 	TP_PROTO(
840 		const struct ib_wc *wc
841 	),
842 
843 	TP_ARGS(wc),
844 
845 	TP_STRUCT__entry(
846 		__field(const void *, rep)
847 		__field(u32, byte_len)
848 		__field(unsigned int, status)
849 		__field(u32, vendor_err)
850 	),
851 
852 	TP_fast_assign(
853 		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
854 					    rr_cqe);
855 		__entry->status = wc->status;
856 		if (wc->status) {
857 			__entry->byte_len = 0;
858 			__entry->vendor_err = wc->vendor_err;
859 		} else {
860 			__entry->byte_len = wc->byte_len;
861 			__entry->vendor_err = 0;
862 		}
863 	),
864 
865 	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
866 		__entry->rep, __entry->byte_len,
867 		rdma_show_wc_status(__entry->status),
868 		__entry->status, __entry->vendor_err
869 	)
870 );
871 
/* FRWR completion tracepoints: FastReg and Local Invalidate variants. */
872 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
873 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
874 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
875 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
876 
/*
 * Records the outcome of allocating FRWR resources for an MR: the MR
 * resource id and the return code.
 */
877 TRACE_EVENT(xprtrdma_frwr_alloc,
878 	TP_PROTO(
879 		const struct rpcrdma_mr *mr,
880 		int rc
881 	),
882 
883 	TP_ARGS(mr, rc),
884 
885 	TP_STRUCT__entry(
886 		__field(u32, mr_id)
887 		__field(int, rc)
888 	),
889 
890 	TP_fast_assign(
891 		__entry->mr_id = mr->frwr.fr_mr->res.id;
892 		__entry->rc = rc;
893 	),
894 
895 	TP_printk("mr.id=%u: rc=%d",
896 		__entry->mr_id, __entry->rc
897 	)
898 );
899 
/*
 * Records deregistration of an FRWR MR: the same identity fields as the
 * xprtrdma_mr class (id, nents, handle/length/offset, DMA direction)
 * plus the deregistration return code.
 */
900 TRACE_EVENT(xprtrdma_frwr_dereg,
901 	TP_PROTO(
902 		const struct rpcrdma_mr *mr,
903 		int rc
904 	),
905 
906 	TP_ARGS(mr, rc),
907 
908 	TP_STRUCT__entry(
909 		__field(u32, mr_id)
910 		__field(int, nents)
911 		__field(u32, handle)
912 		__field(u32, length)
913 		__field(u64, offset)
914 		__field(u32, dir)
915 		__field(int, rc)
916 	),
917 
918 	TP_fast_assign(
919 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
920 		__entry->nents  = mr->mr_nents;
921 		__entry->handle = mr->mr_handle;
922 		__entry->length = mr->mr_length;
923 		__entry->offset = mr->mr_offset;
924 		__entry->dir    = mr->mr_dir;
925 		__entry->rc	= rc;
926 	),
927 
928 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
929 		__entry->mr_id, __entry->nents, __entry->length,
930 		(unsigned long long)__entry->offset, __entry->handle,
931 		xprtrdma_show_direction(__entry->dir),
932 		__entry->rc
933 	)
934 );
935 
/*
 * Records a scatterlist setup failure during FRWR registration: the MR
 * id, the first sg entry's DMA address, the DMA direction, and the
 * (negative) sg_nents value that signalled the error.
 */
936 TRACE_EVENT(xprtrdma_frwr_sgerr,
937 	TP_PROTO(
938 		const struct rpcrdma_mr *mr,
939 		int sg_nents
940 	),
941 
942 	TP_ARGS(mr, sg_nents),
943 
944 	TP_STRUCT__entry(
945 		__field(u32, mr_id)
946 		__field(u64, addr)
947 		__field(u32, dir)
948 		__field(int, nents)
949 	),
950 
951 	TP_fast_assign(
952 		__entry->mr_id = mr->frwr.fr_mr->res.id;
953 		__entry->addr = mr->mr_sg->dma_address;
954 		__entry->dir = mr->mr_dir;
955 		__entry->nents = sg_nents;
956 	),
957 
958 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
959 		__entry->mr_id, __entry->addr,
960 		xprtrdma_show_direction(__entry->dir),
961 		__entry->nents
962 	)
963 );
964 
/*
 * Records a DMA mapping shortfall during FRWR registration: fewer
 * entries were mapped (num_mapped) than the MR needed (mr_nents).
 */
965 TRACE_EVENT(xprtrdma_frwr_maperr,
966 	TP_PROTO(
967 		const struct rpcrdma_mr *mr,
968 		int num_mapped
969 	),
970 
971 	TP_ARGS(mr, num_mapped),
972 
973 	TP_STRUCT__entry(
974 		__field(u32, mr_id)
975 		__field(u64, addr)
976 		__field(u32, dir)
977 		__field(int, num_mapped)
978 		__field(int, nents)
979 	),
980 
981 	TP_fast_assign(
982 		__entry->mr_id = mr->frwr.fr_mr->res.id;
983 		__entry->addr = mr->mr_sg->dma_address;
984 		__entry->dir = mr->mr_dir;
985 		__entry->num_mapped = num_mapped;
986 		__entry->nents = mr->mr_nents;
987 	),
988 
989 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
990 		__entry->mr_id, __entry->addr,
991 		xprtrdma_show_direction(__entry->dir),
992 		__entry->num_mapped, __entry->nents
993 	)
994 );
995 
/* MR lifecycle tracepoints: xprtrdma_mr_<name> for each stage below. */
996 DEFINE_MR_EVENT(localinv);
997 DEFINE_MR_EVENT(map);
998 DEFINE_MR_EVENT(unmap);
999 DEFINE_MR_EVENT(reminv);
1000 DEFINE_MR_EVENT(recycle);
1001 
/*
 * Records a DMA mapping failure; only the offending DMA address is
 * captured.
 */
1002 TRACE_EVENT(xprtrdma_dma_maperr,
1003 	TP_PROTO(
1004 		u64 addr
1005 	),
1006 
1007 	TP_ARGS(addr),
1008 
1009 	TP_STRUCT__entry(
1010 		__field(u64, addr)
1011 	),
1012 
1013 	TP_fast_assign(
1014 		__entry->addr = addr;
1015 	),
1016 
	/* No trailing "\n": trace output is already line-terminated, and a
	 * newline in the TP_printk format produces blank lines in the
	 * trace buffer (flagged by checkpatch).
	 */
1017 	TP_printk("dma addr=0x%llx", __entry->addr)
1018 );
1019 
1020 /**
1021  ** Reply events
1022  **/
1023 
/*
 * Records a successfully matched reply: the waking task, the rep and
 * the req it was matched to, the XID, and the server-granted credits.
 */
1024 TRACE_EVENT(xprtrdma_reply,
1025 	TP_PROTO(
1026 		const struct rpc_task *task,
1027 		const struct rpcrdma_rep *rep,
1028 		const struct rpcrdma_req *req,
1029 		unsigned int credits
1030 	),
1031 
1032 	TP_ARGS(task, rep, req, credits),
1033 
1034 	TP_STRUCT__entry(
1035 		__field(unsigned int, task_id)
1036 		__field(unsigned int, client_id)
1037 		__field(const void *, rep)
1038 		__field(const void *, req)
1039 		__field(u32, xid)
1040 		__field(unsigned int, credits)
1041 	),
1042 
1043 	TP_fast_assign(
1044 		__entry->task_id = task->tk_pid;
1045 		__entry->client_id = task->tk_client->cl_clid;
1046 		__entry->rep = rep;
1047 		__entry->req = req;
1048 		__entry->xid = be32_to_cpu(rep->rr_xid);
1049 		__entry->credits = credits;
1050 	),
1051 
1052 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
1053 		__entry->task_id, __entry->client_id, __entry->xid,
1054 		__entry->credits, __entry->rep, __entry->req
1055 	)
1056 );
1057 
/*
 * Records deferral of reply completion processing. The task identity
 * is taken from rep->rr_rqst, so the rep must already be matched to a
 * request when this fires.
 */
1058 TRACE_EVENT(xprtrdma_defer_cmp,
1059 	TP_PROTO(
1060 		const struct rpcrdma_rep *rep
1061 	),
1062 
1063 	TP_ARGS(rep),
1064 
1065 	TP_STRUCT__entry(
1066 		__field(unsigned int, task_id)
1067 		__field(unsigned int, client_id)
1068 		__field(const void *, rep)
1069 		__field(u32, xid)
1070 	),
1071 
1072 	TP_fast_assign(
1073 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
1074 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
1075 		__entry->rep = rep;
1076 		__entry->xid = be32_to_cpu(rep->rr_xid);
1077 	),
1078 
1079 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1080 		__entry->task_id, __entry->client_id, __entry->xid,
1081 		__entry->rep
1082 	)
1083 );
1084 
/* Reply sanity-check tracepoints (bad version, no rqst, short, bad hdr). */
1085 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
1086 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
1087 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
1088 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1089 
/*
 * Records post-receive data placement fix-up: how many bytes were
 * copied (fixup) and the receive buffer's head/page/tail geometry.
 */
1090 TRACE_EVENT(xprtrdma_fixup,
1091 	TP_PROTO(
1092 		const struct rpc_rqst *rqst,
1093 		unsigned long fixup
1094 	),
1095 
1096 	TP_ARGS(rqst, fixup),
1097 
1098 	TP_STRUCT__entry(
1099 		__field(unsigned int, task_id)
1100 		__field(unsigned int, client_id)
1101 		__field(unsigned long, fixup)
1102 		__field(size_t, headlen)
1103 		__field(unsigned int, pagelen)
1104 		__field(size_t, taillen)
1105 	),
1106 
1107 	TP_fast_assign(
1108 		__entry->task_id = rqst->rq_task->tk_pid;
1109 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1110 		__entry->fixup = fixup;
1111 		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1112 		__entry->pagelen = rqst->rq_rcv_buf.page_len;
1113 		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1114 	),
1115 
1116 	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
1117 		__entry->task_id, __entry->client_id, __entry->fixup,
1118 		__entry->headlen, __entry->pagelen, __entry->taillen
1119 	)
1120 );
1121 
/*
 * Records one decoded RDMA segment from a received transport header:
 * its handle, length, and offset.
 */
1122 TRACE_EVENT(xprtrdma_decode_seg,
1123 	TP_PROTO(
1124 		u32 handle,
1125 		u32 length,
1126 		u64 offset
1127 	),
1128 
1129 	TP_ARGS(handle, length, offset),
1130 
1131 	TP_STRUCT__entry(
1132 		__field(u32, handle)
1133 		__field(u32, length)
1134 		__field(u64, offset)
1135 	),
1136 
1137 	TP_fast_assign(
1138 		__entry->handle = handle;
1139 		__entry->length = length;
1140 		__entry->offset = offset;
1141 	),
1142 
1143 	TP_printk("%u@0x%016llx:0x%08x",
1144 		__entry->length, (unsigned long long)__entry->offset,
1145 		__entry->handle
1146 	)
1147 );
1148 
1149 /**
1150  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1151  **/
1152 
/*
 * Records allocation of transport buffers for an RPC: the rpcrdma_req
 * (NULL on failure) and the requested call/receive buffer sizes.
 */
1153 TRACE_EVENT(xprtrdma_op_allocate,
1154 	TP_PROTO(
1155 		const struct rpc_task *task,
1156 		const struct rpcrdma_req *req
1157 	),
1158 
1159 	TP_ARGS(task, req),
1160 
1161 	TP_STRUCT__entry(
1162 		__field(unsigned int, task_id)
1163 		__field(unsigned int, client_id)
1164 		__field(const void *, req)
1165 		__field(size_t, callsize)
1166 		__field(size_t, rcvsize)
1167 	),
1168 
1169 	TP_fast_assign(
1170 		__entry->task_id = task->tk_pid;
1171 		__entry->client_id = task->tk_client->cl_clid;
1172 		__entry->req = req;
1173 		__entry->callsize = task->tk_rqstp->rq_callsize;
1174 		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
1175 	),
1176 
1177 	TP_printk("task:%u@%u req=%p (%zu, %zu)",
1178 		__entry->task_id, __entry->client_id,
1179 		__entry->req, __entry->callsize, __entry->rcvsize
1180 	)
1181 );
1182 
/*
 * Records release of a request's transport buffers: the req and its
 * currently cached reply (may be NULL).
 */
1183 TRACE_EVENT(xprtrdma_op_free,
1184 	TP_PROTO(
1185 		const struct rpc_task *task,
1186 		const struct rpcrdma_req *req
1187 	),
1188 
1189 	TP_ARGS(task, req),
1190 
1191 	TP_STRUCT__entry(
1192 		__field(unsigned int, task_id)
1193 		__field(unsigned int, client_id)
1194 		__field(const void *, req)
1195 		__field(const void *, rep)
1196 	),
1197 
1198 	TP_fast_assign(
1199 		__entry->task_id = task->tk_pid;
1200 		__entry->client_id = task->tk_client->cl_clid;
1201 		__entry->req = req;
1202 		__entry->rep = req->rl_reply;
1203 	),
1204 
1205 	TP_printk("task:%u@%u req=%p rep=%p",
1206 		__entry->task_id, __entry->client_id,
1207 		__entry->req, __entry->rep
1208 	)
1209 );
1210 
1211 /**
1212  ** Callback events
1213  **/
1214 
/*
 * Records backchannel setup on a transport: how many backchannel
 * requests were provisioned.
 */
1215 TRACE_EVENT(xprtrdma_cb_setup,
1216 	TP_PROTO(
1217 		const struct rpcrdma_xprt *r_xprt,
1218 		unsigned int reqs
1219 	),
1220 
1221 	TP_ARGS(r_xprt, reqs),
1222 
1223 	TP_STRUCT__entry(
1224 		__field(const void *, r_xprt)
1225 		__field(unsigned int, reqs)
1226 		__string(addr, rpcrdma_addrstr(r_xprt))
1227 		__string(port, rpcrdma_portstr(r_xprt))
1228 	),
1229 
1230 	TP_fast_assign(
1231 		__entry->r_xprt = r_xprt;
1232 		__entry->reqs = reqs;
1233 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1234 		__assign_str(port, rpcrdma_portstr(r_xprt));
1235 	),
1236 
1237 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1238 		__get_str(addr), __get_str(port),
1239 		__entry->r_xprt, __entry->reqs
1240 	)
1241 );
1242 
/* Backchannel call and reply tracepoints. */
1243 DEFINE_CB_EVENT(xprtrdma_cb_call);
1244 DEFINE_CB_EVENT(xprtrdma_cb_reply);
1245 
/*
 * Records an rpcrdma_rep that could not be matched back to its rqst,
 * identified by the rqst's XID and owning task.
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		/* XID is stored CPU-endian so it prints consistently */
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1273 
1274 /**
1275  ** Server-side RPC/RDMA events
1276  **/
1277 
/*
 * Event class for server-side connection-accept failures: records the
 * remote peer address and a long status code.
 */
DECLARE_EVENT_CLASS(svcrdma_accept_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		long status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(long, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s status=%ld",
		__get_str(addr), __entry->status
	)
);

/* Expands to an event named svcrdma_<name>_err */
#define DEFINE_ACCEPT_EVENT(name) \
		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
				TP_PROTO( \
					const struct svcxprt_rdma *rdma, \
					long status \
				), \
				TP_ARGS(rdma, status))

/* One event per accept-path failure site */
DEFINE_ACCEPT_EVENT(pd);
DEFINE_ACCEPT_EVENT(qp);
DEFINE_ACCEPT_EVENT(fabric);
DEFINE_ACCEPT_EVENT(initdepth);
DEFINE_ACCEPT_EVENT(accept);
1314 
/* Export the RPC/RDMA procedure enum values so userspace trace tools
 * can resolve the __print_symbolic() names below.
 */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Map an RPC/RDMA procedure number to its symbolic name */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1328 
/*
 * Records a successfully decoded RPC/RDMA transport header.
 * @p points at the start of the header; @hdrlen is its decoded length.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		/* Walk the four header words in wire order:
		 * xid, version, credits, procedure.
		 */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1357 
/*
 * Records a received message too short to contain a complete
 * RPC/RDMA transport header; @hdrlen is the available length.
 */
TRACE_EVENT(svcrdma_decode_short_err,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);
1375 
/*
 * Event class for header-decode failures: captures the four transport
 * header words (numeric procedure, since it may be unrecognized).
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		/* Header words in wire order: xid, version, credits, proc */
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);

/* Expands to an event named svcrdma_decode_<name>_err */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event,			\
			     svcrdma_decode_##name##_err,		\
				TP_PROTO(				\
					__be32 *p			\
				),					\
				TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1413 
/*
 * Event class for a single RDMA segment: an (rkey handle, length,
 * remote offset) triple.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	/* Output format: <length>@<offset>:<handle> */
	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);

/* Expands to an event named svcrdma_<name> */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(decode_wseg);
DEFINE_SEGMENT_EVENT(encode_rseg);
DEFINE_SEGMENT_EVENT(send_rseg);
DEFINE_SEGMENT_EVENT(encode_wseg);
DEFINE_SEGMENT_EVENT(send_wseg);
1455 
/*
 * Event class for whole-chunk operations: records only the chunk's
 * byte length.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);

/* Expands to an event named svcrdma_<name> */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(send_pzr);
DEFINE_CHUNK_EVENT(encode_write_chunk);
DEFINE_CHUNK_EVENT(send_write_chunk);
DEFINE_CHUNK_EVENT(encode_read_chunk);
DEFINE_CHUNK_EVENT(send_reply_chunk);
1488 
/*
 * Like the chunk events above, but a Read chunk also carries a
 * position (XDR offset) field, so it gets its own event.
 */
TRACE_EVENT(svcrdma_send_read_chunk,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1511 
/*
 * Event class for RPC/RDMA error replies sent by the server,
 * identified by the offending request's XID.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);

/* Expands to an event named svcrdma_err_<name> */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1541 
1542 /**
1543  ** Server-side RDMA API events
1544  **/
1545 
/*
 * Event class for DMA map/unmap of a page: records the DMA address,
 * mapped length, device name, and remote peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

/* Expands to an event named svcrdma_<name> */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1586 
/*
 * Records a failed DMA mapping for an RDMA Read/Write: the number of
 * scatterlist entries involved and the error status.
 */
TRACE_EVENT(svcrdma_dma_map_rw_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int nents,
		int status
	),

	TP_ARGS(rdma, nents, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(unsigned int, nents)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->nents = nents;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s nents=%u status=%d",
		__get_str(addr), __get_str(device), __entry->nents,
		__entry->status
	)
);
1615 
1616 TRACE_EVENT(svcrdma_no_rwctx_err,
1617 	TP_PROTO(
1618 		const struct svcxprt_rdma *rdma,
1619 		unsigned int num_sges
1620 	),
1621 
1622 	TP_ARGS(rdma, num_sges),
1623 
1624 	TP_STRUCT__entry(
1625 		__field(unsigned int, num_sges)
1626 		__string(device, rdma->sc_cm_id->device->name)
1627 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1628 	),
1629 
1630 	TP_fast_assign(
1631 		__entry->num_sges = num_sges;
1632 		__assign_str(device, rdma->sc_cm_id->device->name);
1633 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1634 	),
1635 
1636 	TP_printk("addr=%s device=%s num_sges=%d",
1637 		__get_str(addr), __get_str(device), __entry->num_sges
1638 	)
1639 );
1640 
/*
 * Records a request that would overrun the rqst's page array:
 * captures the offending page index and the request's XID.
 */
TRACE_EVENT(svcrdma_page_overrun_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		const struct svc_rqst *rqst,
		unsigned int pageno
	),

	TP_ARGS(rdma, rqst, pageno),

	TP_STRUCT__entry(
		__field(unsigned int, pageno)
		__field(u32, xid)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->pageno = pageno;
		/* NOTE(review): uses __be32_to_cpu() where most events in
		 * this file use be32_to_cpu(); behavior is the same.
		 */
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
		__get_str(device), __entry->xid, __entry->pageno
	)
);
1668 
/*
 * Records a Write chunk too small for the payload: how many bytes
 * remained unconsumed at segment @seg_no of @num_segs.
 */
TRACE_EVENT(svcrdma_small_wrch_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		unsigned int remaining,
		unsigned int seg_no,
		unsigned int num_segs
	),

	TP_ARGS(rdma, remaining, seg_no, num_segs),

	TP_STRUCT__entry(
		__field(unsigned int, remaining)
		__field(unsigned int, seg_no)
		__field(unsigned int, num_segs)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->remaining = remaining;
		__entry->seg_no = seg_no;
		__entry->num_segs = num_segs;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
		__get_str(addr), __get_str(device), __entry->remaining,
		__entry->seg_no, __entry->num_segs
	)
);
1700 
/*
 * Records the byte count of a Send payload that was pulled up
 * (copied) into a single buffer before posting.
 */
TRACE_EVENT(svcrdma_send_pullup,
	TP_PROTO(
		unsigned int len
	),

	TP_ARGS(len),

	TP_STRUCT__entry(
		__field(unsigned int, len)
	),

	TP_fast_assign(
		__entry->len = len;
	),

	TP_printk("len=%u", __entry->len)
);
1718 
/*
 * Records a failure to send an RPC reply: the request's XID, its
 * transport, and the negative status code.
 */
TRACE_EVENT(svcrdma_send_failed,
	TP_PROTO(
		const struct svc_rqst *rqst,
		int status
	),

	TP_ARGS(rqst, status),

	TP_STRUCT__entry(
		__field(int, status)
		__field(u32, xid)
		__field(const void *, xprt)
		__string(addr, rqst->rq_xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__entry->xid = __be32_to_cpu(rqst->rq_xid);
		__entry->xprt = rqst->rq_xprt;
		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
		__entry->xprt, __get_str(addr),
		__entry->xid, __entry->status
	)
);
1746 
/*
 * Event class for Send/Read/Write completions: records the work
 * completion's cqe pointer and status.
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		/* vendor_err is meaningful only on failed completions */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Expands to an event named svcrdma_wc_<name> */
#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))
1781 
/*
 * Records a Send WR about to be posted: its cqe, SGE count, and, for
 * Send-with-Invalidate, the rkey being invalidated (0 otherwise).
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr
	),

	TP_ARGS(wr),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey
	)
);

DEFINE_SENDCOMP_EVENT(send);
1809 
/*
 * Records the result of posting a Receive WR: the WR's cqe and the
 * post-call return status.
 */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);
1832 
/*
 * Records a Receive completion. On success, byte_len holds the number
 * of bytes received; on failure, vendor_err holds the HCA's code.
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		/* byte_len and vendor_err are each valid only for one
		 * of the two completion outcomes; zero the other.
		 */
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1865 
/*
 * Records an RDMA Read/Write chain being posted: the chain's cqe and
 * how many SQ entries it consumes.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount
	),

	TP_ARGS(cqe, sqecount),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
	),

	TP_printk("cqe=%p sqecount=%d",
		__entry->cqe, __entry->sqecount
	)
);

/* Completion events for RDMA Read and Write chains */
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
1891 
1892 TRACE_EVENT(svcrdma_qp_error,
1893 	TP_PROTO(
1894 		const struct ib_event *event,
1895 		const struct sockaddr *sap
1896 	),
1897 
1898 	TP_ARGS(event, sap),
1899 
1900 	TP_STRUCT__entry(
1901 		__field(unsigned int, event)
1902 		__string(device, event->device->name)
1903 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1904 	),
1905 
1906 	TP_fast_assign(
1907 		__entry->event = event->event;
1908 		__assign_str(device, event->device->name);
1909 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1910 			 "%pISpc", sap);
1911 	),
1912 
1913 	TP_printk("addr=%s dev=%s event=%s (%u)",
1914 		__entry->addr, __get_str(device),
1915 		rdma_show_ib_event(__entry->event), __entry->event
1916 	)
1917 );
1918 
/*
 * Event class for Send Queue accounting: snapshots available SQ
 * entries (sc_sq_avail) against total depth (sc_sq_depth).
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);

/* Expands to an event named svcrdma_sq_<name> */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1952 
/*
 * Records a failed Send Queue post: the error status plus the same
 * SQ-availability snapshot as the sendqueue events above.
 */
TRACE_EVENT(svcrdma_sq_post_err,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__field(int, status)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__entry->status = status;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
		__get_str(addr), __entry->avail, __entry->depth,
		__entry->status
	)
);
1980 
1981 #endif /* _TRACE_RPCRDMA_H */
1982 
1983 #include <trace/define_trace.h>
1984