xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision b2e7467f26d7813d98cbaad5e62b54960f2c071b)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
23 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
24 	TP_PROTO(
25 		const struct ib_wc *wc,
26 		const struct rpc_rdma_cid *cid
27 	),
28 
29 	TP_ARGS(wc, cid),
30 
31 	TP_STRUCT__entry(
32 		__field(u32, cq_id)
33 		__field(int, completion_id)
34 		__field(unsigned long, status)
35 		__field(unsigned int, vendor_err)
36 	),
37 
38 	TP_fast_assign(
39 		__entry->cq_id = cid->ci_queue_id;
40 		__entry->completion_id = cid->ci_completion_id;
41 		__entry->status = wc->status;
42 		if (wc->status)
43 			__entry->vendor_err = wc->vendor_err;
44 		else
45 			__entry->vendor_err = 0;
46 	),
47 
48 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
49 		__entry->cq_id, __entry->completion_id,
50 		rdma_show_wc_status(__entry->status),
51 		__entry->status, __entry->vendor_err
52 	)
53 );
54 
55 #define DEFINE_COMPLETION_EVENT(name)					\
56 		DEFINE_EVENT(rpcrdma_completion_class, name,		\
57 				TP_PROTO(				\
58 					const struct ib_wc *wc,		\
59 					const struct rpc_rdma_cid *cid	\
60 				),					\
61 				TP_ARGS(wc, cid))
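/*
 * The rpcrdma_completion_class ties a work completion to the cq.id/cid
 * pair that was stamped on the WR when it was posted.  Instances come
 * from DEFINE_COMPLETION_EVENT(); a minimal sketch of how a completion
 * handler might fire one (assumed handler shape, not the authoritative
 * call site):
 *
 *	static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct rpcrdma_sendctx *sc =
 *			container_of(wc->wr_cqe, struct rpcrdma_sendctx,
 *				     sc_cqe);
 *
 *		trace_xprtrdma_wc_send(wc, &sc->sc_cid);
 *		...
 *	}
 */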
62 
63 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
64 	TP_PROTO(
65 		const struct rpcrdma_rep *rep
66 	),
67 
68 	TP_ARGS(rep),
69 
70 	TP_STRUCT__entry(
71 		__field(const void *, rep)
72 		__field(const void *, r_xprt)
73 		__field(u32, xid)
74 		__field(u32, version)
75 		__field(u32, proc)
76 	),
77 
78 	TP_fast_assign(
79 		__entry->rep = rep;
80 		__entry->r_xprt = rep->rr_rxprt;
81 		__entry->xid = be32_to_cpu(rep->rr_xid);
82 		__entry->version = be32_to_cpu(rep->rr_vers);
83 		__entry->proc = be32_to_cpu(rep->rr_proc);
84 	),
85 
86 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
87 		__entry->r_xprt, __entry->xid, __entry->rep,
88 		__entry->version, __entry->proc
89 	)
90 );
91 
92 #define DEFINE_REPLY_EVENT(name)					\
93 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
94 				TP_PROTO(				\
95 					const struct rpcrdma_rep *rep	\
96 				),					\
97 				TP_ARGS(rep))
98 
99 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
100 	TP_PROTO(
101 		const struct rpcrdma_xprt *r_xprt
102 	),
103 
104 	TP_ARGS(r_xprt),
105 
106 	TP_STRUCT__entry(
107 		__field(const void *, r_xprt)
108 		__string(addr, rpcrdma_addrstr(r_xprt))
109 		__string(port, rpcrdma_portstr(r_xprt))
110 	),
111 
112 	TP_fast_assign(
113 		__entry->r_xprt = r_xprt;
114 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
115 		__assign_str(port, rpcrdma_portstr(r_xprt));
116 	),
117 
118 	TP_printk("peer=[%s]:%s r_xprt=%p",
119 		__get_str(addr), __get_str(port), __entry->r_xprt
120 	)
121 );
122 
123 #define DEFINE_RXPRT_EVENT(name)					\
124 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
125 				TP_PROTO(				\
126 					const struct rpcrdma_xprt *r_xprt \
127 				),					\
128 				TP_ARGS(r_xprt))
129 
130 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
131 	TP_PROTO(
132 		const struct rpcrdma_xprt *r_xprt,
133 		int rc
134 	),
135 
136 	TP_ARGS(r_xprt, rc),
137 
138 	TP_STRUCT__entry(
139 		__field(const void *, r_xprt)
140 		__field(int, rc)
141 		__field(int, connect_status)
142 		__string(addr, rpcrdma_addrstr(r_xprt))
143 		__string(port, rpcrdma_portstr(r_xprt))
144 	),
145 
146 	TP_fast_assign(
147 		__entry->r_xprt = r_xprt;
148 		__entry->rc = rc;
149 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
150 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
151 		__assign_str(port, rpcrdma_portstr(r_xprt));
152 	),
153 
154 	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
155 		__get_str(addr), __get_str(port), __entry->r_xprt,
156 		__entry->rc, __entry->connect_status
157 	)
158 );
159 
160 #define DEFINE_CONN_EVENT(name)						\
161 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
162 				TP_PROTO(				\
163 					const struct rpcrdma_xprt *r_xprt, \
164 					int rc				\
165 				),					\
166 				TP_ARGS(r_xprt, rc))
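/*
 * DEFINE_CONN_EVENT() pastes the "xprtrdma_" prefix onto @name, so
 * DEFINE_CONN_EVENT(connect) creates a tracepoint invoked as
 *
 *	trace_xprtrdma_connect(r_xprt, rc);
 *
 * once a connect or disconnect attempt has a result (illustrative; the
 * actual call sites live in the transport code).
 */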
167 
168 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
169 	TP_PROTO(
170 		const struct rpc_task *task,
171 		unsigned int pos,
172 		struct rpcrdma_mr *mr,
173 		int nsegs
174 	),
175 
176 	TP_ARGS(task, pos, mr, nsegs),
177 
178 	TP_STRUCT__entry(
179 		__field(unsigned int, task_id)
180 		__field(unsigned int, client_id)
181 		__field(unsigned int, pos)
182 		__field(int, nents)
183 		__field(u32, handle)
184 		__field(u32, length)
185 		__field(u64, offset)
186 		__field(int, nsegs)
187 	),
188 
189 	TP_fast_assign(
190 		__entry->task_id = task->tk_pid;
191 		__entry->client_id = task->tk_client->cl_clid;
192 		__entry->pos = pos;
193 		__entry->nents = mr->mr_nents;
194 		__entry->handle = mr->mr_handle;
195 		__entry->length = mr->mr_length;
196 		__entry->offset = mr->mr_offset;
197 		__entry->nsegs = nsegs;
198 	),
199 
200 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
201 		__entry->task_id, __entry->client_id,
202 		__entry->pos, __entry->length,
203 		(unsigned long long)__entry->offset, __entry->handle,
204 		__entry->nents < __entry->nsegs ? "more" : "last"
205 	)
206 );
207 
208 #define DEFINE_RDCH_EVENT(name)						\
209 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
210 				TP_PROTO(				\
211 					const struct rpc_task *task,	\
212 					unsigned int pos,		\
213 					struct rpcrdma_mr *mr,		\
214 					int nsegs			\
215 				),					\
216 				TP_ARGS(task, pos, mr, nsegs))
217 
218 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
219 	TP_PROTO(
220 		const struct rpc_task *task,
221 		struct rpcrdma_mr *mr,
222 		int nsegs
223 	),
224 
225 	TP_ARGS(task, mr, nsegs),
226 
227 	TP_STRUCT__entry(
228 		__field(unsigned int, task_id)
229 		__field(unsigned int, client_id)
230 		__field(int, nents)
231 		__field(u32, handle)
232 		__field(u32, length)
233 		__field(u64, offset)
234 		__field(int, nsegs)
235 	),
236 
237 	TP_fast_assign(
238 		__entry->task_id = task->tk_pid;
239 		__entry->client_id = task->tk_client->cl_clid;
240 		__entry->nents = mr->mr_nents;
241 		__entry->handle = mr->mr_handle;
242 		__entry->length = mr->mr_length;
243 		__entry->offset = mr->mr_offset;
244 		__entry->nsegs = nsegs;
245 	),
246 
247 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
248 		__entry->task_id, __entry->client_id,
249 		__entry->length, (unsigned long long)__entry->offset,
250 		__entry->handle,
251 		__entry->nents < __entry->nsegs ? "more" : "last"
252 	)
253 );
254 
255 #define DEFINE_WRCH_EVENT(name)						\
256 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
257 				TP_PROTO(				\
258 					const struct rpc_task *task,	\
259 					struct rpcrdma_mr *mr,		\
260 					int nsegs			\
261 				),					\
262 				TP_ARGS(task, mr, nsegs))
263 
264 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
265 	TP_PROTO(
266 		const struct ib_wc *wc,
267 		const struct rpcrdma_frwr *frwr
268 	),
269 
270 	TP_ARGS(wc, frwr),
271 
272 	TP_STRUCT__entry(
273 		__field(u32, mr_id)
274 		__field(unsigned int, status)
275 		__field(unsigned int, vendor_err)
276 	),
277 
278 	TP_fast_assign(
279 		__entry->mr_id = frwr->fr_mr->res.id;
280 		__entry->status = wc->status;
281 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
282 	),
283 
284 	TP_printk(
285 		"mr.id=%u: %s (%u/0x%x)",
286 		__entry->mr_id, rdma_show_wc_status(__entry->status),
287 		__entry->status, __entry->vendor_err
288 	)
289 );
290 
291 #define DEFINE_FRWR_DONE_EVENT(name)					\
292 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
293 				TP_PROTO(				\
294 					const struct ib_wc *wc,		\
295 					const struct rpcrdma_frwr *frwr	\
296 				),					\
297 				TP_ARGS(wc, frwr))
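/*
 * FRWR completion events are intended to be raised from the ib_cqe done
 * callbacks for FastReg and LocalInv work requests.  A rough sketch of
 * an assumed handler (for illustration only):
 *
 *	static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct rpcrdma_frwr *frwr =
 *			container_of(wc->wr_cqe, struct rpcrdma_frwr, fr_cqe);
 *
 *		trace_xprtrdma_wc_fastreg(wc, frwr);
 *		...
 *	}
 */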
298 
299 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
300 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
301 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
302 TRACE_DEFINE_ENUM(DMA_NONE);
303 
304 #define xprtrdma_show_direction(x)					\
305 		__print_symbolic(x,					\
306 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
307 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
308 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
309 				{ DMA_NONE, "NONE" })
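/*
 * TRACE_DEFINE_ENUM() exports the numeric value of each DMA direction so
 * that user-space tools (trace-cmd, perf) can resolve the
 * __print_symbolic() names above when parsing the event's print format.
 * A decoded xprtrdma_mr_map line might then look like this (illustrative
 * output only):
 *
 *	mr.id=4 nents=1 512@0x00000000abcd0000:0x00000a42 (TO_DEVICE)
 */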
310 
311 DECLARE_EVENT_CLASS(xprtrdma_mr,
312 	TP_PROTO(
313 		const struct rpcrdma_mr *mr
314 	),
315 
316 	TP_ARGS(mr),
317 
318 	TP_STRUCT__entry(
319 		__field(u32, mr_id)
320 		__field(int, nents)
321 		__field(u32, handle)
322 		__field(u32, length)
323 		__field(u64, offset)
324 		__field(u32, dir)
325 	),
326 
327 	TP_fast_assign(
328 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
329 		__entry->nents  = mr->mr_nents;
330 		__entry->handle = mr->mr_handle;
331 		__entry->length = mr->mr_length;
332 		__entry->offset = mr->mr_offset;
333 		__entry->dir    = mr->mr_dir;
334 	),
335 
336 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
337 		__entry->mr_id, __entry->nents, __entry->length,
338 		(unsigned long long)__entry->offset, __entry->handle,
339 		xprtrdma_show_direction(__entry->dir)
340 	)
341 );
342 
343 #define DEFINE_MR_EVENT(name) \
344 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
345 				TP_PROTO( \
346 					const struct rpcrdma_mr *mr \
347 				), \
348 				TP_ARGS(mr))
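/*
 * DEFINE_MR_EVENT(map), for example, creates trace_xprtrdma_mr_map(mr),
 * which a caller might fire right after the MR's scatterlist has been
 * DMA-mapped.  Sketch of an assumed call site (variable names are
 * illustrative):
 *
 *	mr->mr_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg,
 *				     sg_count, mr->mr_dir);
 *	if (!mr->mr_nents)
 *		goto out_dmamap_err;
 *	trace_xprtrdma_mr_map(mr);
 */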
349 
350 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
351 	TP_PROTO(
352 		const struct rpc_rqst *rqst
353 	),
354 
355 	TP_ARGS(rqst),
356 
357 	TP_STRUCT__entry(
358 		__field(const void *, rqst)
359 		__field(const void *, rep)
360 		__field(const void *, req)
361 		__field(u32, xid)
362 	),
363 
364 	TP_fast_assign(
365 		__entry->rqst = rqst;
366 		__entry->req = rpcr_to_rdmar(rqst);
367 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
368 		__entry->xid = be32_to_cpu(rqst->rq_xid);
369 	),
370 
371 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
372 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
373 	)
374 );
375 
376 #define DEFINE_CB_EVENT(name)						\
377 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
378 				TP_PROTO(				\
379 					const struct rpc_rqst *rqst	\
380 				),					\
381 				TP_ARGS(rqst))
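/*
 * Backchannel events take the reverse-direction rpc_rqst directly: for
 * example, trace_xprtrdma_cb_call(rqst) when a server-initiated Call is
 * received, and trace_xprtrdma_cb_reply(rqst) when its Reply is sent
 * (illustrative description of the intended call sites).
 */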
382 
383 /**
384  ** Connection events
385  **/
386 
387 TRACE_EVENT(xprtrdma_inline_thresh,
388 	TP_PROTO(
389 		const struct rpcrdma_ep *ep
390 	),
391 
392 	TP_ARGS(ep),
393 
394 	TP_STRUCT__entry(
395 		__field(unsigned int, inline_send)
396 		__field(unsigned int, inline_recv)
397 		__field(unsigned int, max_send)
398 		__field(unsigned int, max_recv)
399 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
400 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
401 	),
402 
403 	TP_fast_assign(
404 		const struct rdma_cm_id *id = ep->re_id;
405 
406 		__entry->inline_send = ep->re_inline_send;
407 		__entry->inline_recv = ep->re_inline_recv;
408 		__entry->max_send = ep->re_max_inline_send;
409 		__entry->max_recv = ep->re_max_inline_recv;
410 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
411 		       sizeof(struct sockaddr_in6));
412 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
413 		       sizeof(struct sockaddr_in6));
414 	),
415 
416 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
417 		__entry->srcaddr, __entry->dstaddr,
418 		__entry->inline_send, __entry->inline_recv,
419 		__entry->max_send, __entry->max_recv
420 	)
421 );
422 
423 DEFINE_CONN_EVENT(connect);
424 DEFINE_CONN_EVENT(disconnect);
425 
426 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
427 
428 TRACE_EVENT(xprtrdma_op_connect,
429 	TP_PROTO(
430 		const struct rpcrdma_xprt *r_xprt,
431 		unsigned long delay
432 	),
433 
434 	TP_ARGS(r_xprt, delay),
435 
436 	TP_STRUCT__entry(
437 		__field(const void *, r_xprt)
438 		__field(unsigned long, delay)
439 		__string(addr, rpcrdma_addrstr(r_xprt))
440 		__string(port, rpcrdma_portstr(r_xprt))
441 	),
442 
443 	TP_fast_assign(
444 		__entry->r_xprt = r_xprt;
445 		__entry->delay = delay;
446 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
447 		__assign_str(port, rpcrdma_portstr(r_xprt));
448 	),
449 
450 	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
451 		__get_str(addr), __get_str(port), __entry->r_xprt,
452 		__entry->delay
453 	)
454 );
455 
457 TRACE_EVENT(xprtrdma_op_set_cto,
458 	TP_PROTO(
459 		const struct rpcrdma_xprt *r_xprt,
460 		unsigned long connect,
461 		unsigned long reconnect
462 	),
463 
464 	TP_ARGS(r_xprt, connect, reconnect),
465 
466 	TP_STRUCT__entry(
467 		__field(const void *, r_xprt)
468 		__field(unsigned long, connect)
469 		__field(unsigned long, reconnect)
470 		__string(addr, rpcrdma_addrstr(r_xprt))
471 		__string(port, rpcrdma_portstr(r_xprt))
472 	),
473 
474 	TP_fast_assign(
475 		__entry->r_xprt = r_xprt;
476 		__entry->connect = connect;
477 		__entry->reconnect = reconnect;
478 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
479 		__assign_str(port, rpcrdma_portstr(r_xprt));
480 	),
481 
482 	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
483 		__get_str(addr), __get_str(port), __entry->r_xprt,
484 		__entry->connect / HZ, __entry->reconnect / HZ
485 	)
486 );
487 
488 TRACE_EVENT(xprtrdma_qp_event,
489 	TP_PROTO(
490 		const struct rpcrdma_ep *ep,
491 		const struct ib_event *event
492 	),
493 
494 	TP_ARGS(ep, event),
495 
496 	TP_STRUCT__entry(
497 		__field(unsigned long, event)
498 		__string(name, event->device->name)
499 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
500 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
501 	),
502 
503 	TP_fast_assign(
504 		const struct rdma_cm_id *id = ep->re_id;
505 
506 		__entry->event = event->event;
507 		__assign_str(name, event->device->name);
508 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
509 		       sizeof(struct sockaddr_in6));
510 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
511 		       sizeof(struct sockaddr_in6));
512 	),
513 
514 	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
515 		__entry->srcaddr, __entry->dstaddr, __get_str(name),
516 		rdma_show_ib_event(__entry->event), __entry->event
517 	)
518 );
519 
520 /**
521  ** Call events
522  **/
523 
524 TRACE_EVENT(xprtrdma_createmrs,
525 	TP_PROTO(
526 		const struct rpcrdma_xprt *r_xprt,
527 		unsigned int count
528 	),
529 
530 	TP_ARGS(r_xprt, count),
531 
532 	TP_STRUCT__entry(
533 		__field(const void *, r_xprt)
534 		__string(addr, rpcrdma_addrstr(r_xprt))
535 		__string(port, rpcrdma_portstr(r_xprt))
536 		__field(unsigned int, count)
537 	),
538 
539 	TP_fast_assign(
540 		__entry->r_xprt = r_xprt;
541 		__entry->count = count;
542 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
543 		__assign_str(port, rpcrdma_portstr(r_xprt));
544 	),
545 
546 	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
547 		__get_str(addr), __get_str(port), __entry->r_xprt,
548 		__entry->count
549 	)
550 );
551 
552 TRACE_EVENT(xprtrdma_mr_get,
553 	TP_PROTO(
554 		const struct rpcrdma_req *req
555 	),
556 
557 	TP_ARGS(req),
558 
559 	TP_STRUCT__entry(
560 		__field(const void *, req)
561 		__field(unsigned int, task_id)
562 		__field(unsigned int, client_id)
563 		__field(u32, xid)
564 	),
565 
566 	TP_fast_assign(
567 		const struct rpc_rqst *rqst = &req->rl_slot;
568 
569 		__entry->req = req;
570 		__entry->task_id = rqst->rq_task->tk_pid;
571 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
572 		__entry->xid = be32_to_cpu(rqst->rq_xid);
573 	),
574 
575 	TP_printk("task:%u@%u xid=0x%08x req=%p",
576 		__entry->task_id, __entry->client_id, __entry->xid,
577 		__entry->req
578 	)
579 );
580 
581 TRACE_EVENT(xprtrdma_nomrs,
582 	TP_PROTO(
583 		const struct rpcrdma_req *req
584 	),
585 
586 	TP_ARGS(req),
587 
588 	TP_STRUCT__entry(
589 		__field(const void *, req)
590 		__field(unsigned int, task_id)
591 		__field(unsigned int, client_id)
592 		__field(u32, xid)
593 	),
594 
595 	TP_fast_assign(
596 		const struct rpc_rqst *rqst = &req->rl_slot;
597 
598 		__entry->req = req;
599 		__entry->task_id = rqst->rq_task->tk_pid;
600 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
601 		__entry->xid = be32_to_cpu(rqst->rq_xid);
602 	),
603 
604 	TP_printk("task:%u@%u xid=0x%08x req=%p",
605 		__entry->task_id, __entry->client_id, __entry->xid,
606 		__entry->req
607 	)
608 );
609 
610 DEFINE_RDCH_EVENT(read);
611 DEFINE_WRCH_EVENT(write);
612 DEFINE_WRCH_EVENT(reply);
613 
614 TRACE_DEFINE_ENUM(rpcrdma_noch);
615 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
616 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
617 TRACE_DEFINE_ENUM(rpcrdma_readch);
618 TRACE_DEFINE_ENUM(rpcrdma_areadch);
619 TRACE_DEFINE_ENUM(rpcrdma_writech);
620 TRACE_DEFINE_ENUM(rpcrdma_replych);
621 
622 #define xprtrdma_show_chunktype(x)					\
623 		__print_symbolic(x,					\
624 				{ rpcrdma_noch, "inline" },		\
625 				{ rpcrdma_noch_pullup, "pullup" },	\
626 				{ rpcrdma_noch_mapped, "mapped" },	\
627 				{ rpcrdma_readch, "read list" },	\
628 				{ rpcrdma_areadch, "*read list" },	\
629 				{ rpcrdma_writech, "write list" },	\
630 				{ rpcrdma_replych, "reply chunk" })
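/*
 * These chunk-type symbols decode the rtype/wtype values recorded by
 * xprtrdma_marshal below.  For instance, a small request that was pulled
 * up into the inline send buffer with an inline reply would be reported
 * roughly as (illustrative output only):
 *
 *	task:54@5 xid=0x0a2b3c4d: hdr=28 xdr=148/0/0 pullup/inline
 */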
631 
632 TRACE_EVENT(xprtrdma_marshal,
633 	TP_PROTO(
634 		const struct rpcrdma_req *req,
635 		unsigned int rtype,
636 		unsigned int wtype
637 	),
638 
639 	TP_ARGS(req, rtype, wtype),
640 
641 	TP_STRUCT__entry(
642 		__field(unsigned int, task_id)
643 		__field(unsigned int, client_id)
644 		__field(u32, xid)
645 		__field(unsigned int, hdrlen)
646 		__field(unsigned int, headlen)
647 		__field(unsigned int, pagelen)
648 		__field(unsigned int, taillen)
649 		__field(unsigned int, rtype)
650 		__field(unsigned int, wtype)
651 	),
652 
653 	TP_fast_assign(
654 		const struct rpc_rqst *rqst = &req->rl_slot;
655 
656 		__entry->task_id = rqst->rq_task->tk_pid;
657 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
658 		__entry->xid = be32_to_cpu(rqst->rq_xid);
659 		__entry->hdrlen = req->rl_hdrbuf.len;
660 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
661 		__entry->pagelen = rqst->rq_snd_buf.page_len;
662 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
663 		__entry->rtype = rtype;
664 		__entry->wtype = wtype;
665 	),
666 
667 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
668 		__entry->task_id, __entry->client_id, __entry->xid,
669 		__entry->hdrlen,
670 		__entry->headlen, __entry->pagelen, __entry->taillen,
671 		xprtrdma_show_chunktype(__entry->rtype),
672 		xprtrdma_show_chunktype(__entry->wtype)
673 	)
674 );
675 
676 TRACE_EVENT(xprtrdma_marshal_failed,
677 	TP_PROTO(const struct rpc_rqst *rqst,
678 		 int ret
679 	),
680 
681 	TP_ARGS(rqst, ret),
682 
683 	TP_STRUCT__entry(
684 		__field(unsigned int, task_id)
685 		__field(unsigned int, client_id)
686 		__field(u32, xid)
687 		__field(int, ret)
688 	),
689 
690 	TP_fast_assign(
691 		__entry->task_id = rqst->rq_task->tk_pid;
692 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
693 		__entry->xid = be32_to_cpu(rqst->rq_xid);
694 		__entry->ret = ret;
695 	),
696 
697 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
698 		__entry->task_id, __entry->client_id, __entry->xid,
699 		__entry->ret
700 	)
701 );
702 
703 TRACE_EVENT(xprtrdma_prepsend_failed,
704 	TP_PROTO(const struct rpc_rqst *rqst,
705 		 int ret
706 	),
707 
708 	TP_ARGS(rqst, ret),
709 
710 	TP_STRUCT__entry(
711 		__field(unsigned int, task_id)
712 		__field(unsigned int, client_id)
713 		__field(u32, xid)
714 		__field(int, ret)
715 	),
716 
717 	TP_fast_assign(
718 		__entry->task_id = rqst->rq_task->tk_pid;
719 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
720 		__entry->xid = be32_to_cpu(rqst->rq_xid);
721 		__entry->ret = ret;
722 	),
723 
724 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
725 		__entry->task_id, __entry->client_id, __entry->xid,
726 		__entry->ret
727 	)
728 );
729 
730 TRACE_EVENT(xprtrdma_post_send,
731 	TP_PROTO(
732 		const struct rpcrdma_req *req
733 	),
734 
735 	TP_ARGS(req),
736 
737 	TP_STRUCT__entry(
738 		__field(u32, cq_id)
739 		__field(int, completion_id)
740 		__field(unsigned int, task_id)
741 		__field(unsigned int, client_id)
742 		__field(int, num_sge)
743 		__field(int, signaled)
744 	),
745 
746 	TP_fast_assign(
747 		const struct rpc_rqst *rqst = &req->rl_slot;
748 		const struct rpcrdma_sendctx *sc = req->rl_sendctx;
749 
750 		__entry->cq_id = sc->sc_cid.ci_queue_id;
751 		__entry->completion_id = sc->sc_cid.ci_completion_id;
752 		__entry->task_id = rqst->rq_task->tk_pid;
753 		__entry->client_id = rqst->rq_task->tk_client ?
754 				     rqst->rq_task->tk_client->cl_clid : -1;
755 		__entry->num_sge = req->rl_wr.num_sge;
756 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
757 	),
758 
759 	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
760 		__entry->task_id, __entry->client_id,
761 		__entry->cq_id, __entry->completion_id,
762 		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
763 		(__entry->signaled ? "signaled" : "")
764 	)
765 );
766 
767 TRACE_EVENT(xprtrdma_post_recv,
768 	TP_PROTO(
769 		const struct rpcrdma_rep *rep
770 	),
771 
772 	TP_ARGS(rep),
773 
774 	TP_STRUCT__entry(
775 		__field(u32, cq_id)
776 		__field(int, completion_id)
777 	),
778 
779 	TP_fast_assign(
780 		__entry->cq_id = rep->rr_cid.ci_queue_id;
781 		__entry->completion_id = rep->rr_cid.ci_completion_id;
782 	),
783 
784 	TP_printk("cq.id=%u cid=%d",
785 		__entry->cq_id, __entry->completion_id
786 	)
787 );
788 
789 TRACE_EVENT(xprtrdma_post_recvs,
790 	TP_PROTO(
791 		const struct rpcrdma_xprt *r_xprt,
792 		unsigned int count,
793 		int status
794 	),
795 
796 	TP_ARGS(r_xprt, count, status),
797 
798 	TP_STRUCT__entry(
799 		__field(const void *, r_xprt)
800 		__field(unsigned int, count)
801 		__field(int, status)
802 		__field(int, posted)
803 		__string(addr, rpcrdma_addrstr(r_xprt))
804 		__string(port, rpcrdma_portstr(r_xprt))
805 	),
806 
807 	TP_fast_assign(
808 		__entry->r_xprt = r_xprt;
809 		__entry->count = count;
810 		__entry->status = status;
811 		__entry->posted = r_xprt->rx_ep->re_receive_count;
812 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
813 		__assign_str(port, rpcrdma_portstr(r_xprt));
814 	),
815 
816 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
817 		__get_str(addr), __get_str(port), __entry->r_xprt,
818 		__entry->count, __entry->posted, __entry->status
819 	)
820 );
821 
822 TRACE_EVENT(xprtrdma_post_linv,
823 	TP_PROTO(
824 		const struct rpcrdma_req *req,
825 		int status
826 	),
827 
828 	TP_ARGS(req, status),
829 
830 	TP_STRUCT__entry(
831 		__field(const void *, req)
832 		__field(int, status)
833 		__field(u32, xid)
834 	),
835 
836 	TP_fast_assign(
837 		__entry->req = req;
838 		__entry->status = status;
839 		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
840 	),
841 
842 	TP_printk("req=%p xid=0x%08x status=%d",
843 		__entry->req, __entry->xid, __entry->status
844 	)
845 );
846 
847 /**
848  ** Completion events
849  **/
850 
851 DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
852 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
853 
854 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
855 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
856 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
857 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
858 
859 TRACE_EVENT(xprtrdma_frwr_alloc,
860 	TP_PROTO(
861 		const struct rpcrdma_mr *mr,
862 		int rc
863 	),
864 
865 	TP_ARGS(mr, rc),
866 
867 	TP_STRUCT__entry(
868 		__field(u32, mr_id)
869 		__field(int, rc)
870 	),
871 
872 	TP_fast_assign(
873 		__entry->mr_id = mr->frwr.fr_mr->res.id;
874 		__entry->rc = rc;
875 	),
876 
877 	TP_printk("mr.id=%u: rc=%d",
878 		__entry->mr_id, __entry->rc
879 	)
880 );
881 
882 TRACE_EVENT(xprtrdma_frwr_dereg,
883 	TP_PROTO(
884 		const struct rpcrdma_mr *mr,
885 		int rc
886 	),
887 
888 	TP_ARGS(mr, rc),
889 
890 	TP_STRUCT__entry(
891 		__field(u32, mr_id)
892 		__field(int, nents)
893 		__field(u32, handle)
894 		__field(u32, length)
895 		__field(u64, offset)
896 		__field(u32, dir)
897 		__field(int, rc)
898 	),
899 
900 	TP_fast_assign(
901 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
902 		__entry->nents  = mr->mr_nents;
903 		__entry->handle = mr->mr_handle;
904 		__entry->length = mr->mr_length;
905 		__entry->offset = mr->mr_offset;
906 		__entry->dir    = mr->mr_dir;
907 		__entry->rc	= rc;
908 	),
909 
910 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
911 		__entry->mr_id, __entry->nents, __entry->length,
912 		(unsigned long long)__entry->offset, __entry->handle,
913 		xprtrdma_show_direction(__entry->dir),
914 		__entry->rc
915 	)
916 );
917 
918 TRACE_EVENT(xprtrdma_frwr_sgerr,
919 	TP_PROTO(
920 		const struct rpcrdma_mr *mr,
921 		int sg_nents
922 	),
923 
924 	TP_ARGS(mr, sg_nents),
925 
926 	TP_STRUCT__entry(
927 		__field(u32, mr_id)
928 		__field(u64, addr)
929 		__field(u32, dir)
930 		__field(int, nents)
931 	),
932 
933 	TP_fast_assign(
934 		__entry->mr_id = mr->frwr.fr_mr->res.id;
935 		__entry->addr = mr->mr_sg->dma_address;
936 		__entry->dir = mr->mr_dir;
937 		__entry->nents = sg_nents;
938 	),
939 
940 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
941 		__entry->mr_id, __entry->addr,
942 		xprtrdma_show_direction(__entry->dir),
943 		__entry->nents
944 	)
945 );
946 
947 TRACE_EVENT(xprtrdma_frwr_maperr,
948 	TP_PROTO(
949 		const struct rpcrdma_mr *mr,
950 		int num_mapped
951 	),
952 
953 	TP_ARGS(mr, num_mapped),
954 
955 	TP_STRUCT__entry(
956 		__field(u32, mr_id)
957 		__field(u64, addr)
958 		__field(u32, dir)
959 		__field(int, num_mapped)
960 		__field(int, nents)
961 	),
962 
963 	TP_fast_assign(
964 		__entry->mr_id = mr->frwr.fr_mr->res.id;
965 		__entry->addr = mr->mr_sg->dma_address;
966 		__entry->dir = mr->mr_dir;
967 		__entry->num_mapped = num_mapped;
968 		__entry->nents = mr->mr_nents;
969 	),
970 
971 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
972 		__entry->mr_id, __entry->addr,
973 		xprtrdma_show_direction(__entry->dir),
974 		__entry->num_mapped, __entry->nents
975 	)
976 );
977 
978 DEFINE_MR_EVENT(localinv);
979 DEFINE_MR_EVENT(map);
980 DEFINE_MR_EVENT(unmap);
981 DEFINE_MR_EVENT(reminv);
982 DEFINE_MR_EVENT(recycle);
983 
984 TRACE_EVENT(xprtrdma_dma_maperr,
985 	TP_PROTO(
986 		u64 addr
987 	),
988 
989 	TP_ARGS(addr),
990 
991 	TP_STRUCT__entry(
992 		__field(u64, addr)
993 	),
994 
995 	TP_fast_assign(
996 		__entry->addr = addr;
997 	),
998 
999 	TP_printk("dma addr=0x%llx", __entry->addr)
1000 );
1001 
1002 /**
1003  ** Reply events
1004  **/
1005 
1006 TRACE_EVENT(xprtrdma_reply,
1007 	TP_PROTO(
1008 		const struct rpc_task *task,
1009 		const struct rpcrdma_rep *rep,
1010 		const struct rpcrdma_req *req,
1011 		unsigned int credits
1012 	),
1013 
1014 	TP_ARGS(task, rep, req, credits),
1015 
1016 	TP_STRUCT__entry(
1017 		__field(unsigned int, task_id)
1018 		__field(unsigned int, client_id)
1019 		__field(const void *, rep)
1020 		__field(const void *, req)
1021 		__field(u32, xid)
1022 		__field(unsigned int, credits)
1023 	),
1024 
1025 	TP_fast_assign(
1026 		__entry->task_id = task->tk_pid;
1027 		__entry->client_id = task->tk_client->cl_clid;
1028 		__entry->rep = rep;
1029 		__entry->req = req;
1030 		__entry->xid = be32_to_cpu(rep->rr_xid);
1031 		__entry->credits = credits;
1032 	),
1033 
1034 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
1035 		__entry->task_id, __entry->client_id, __entry->xid,
1036 		__entry->credits, __entry->rep, __entry->req
1037 	)
1038 );
1039 
1040 TRACE_EVENT(xprtrdma_defer_cmp,
1041 	TP_PROTO(
1042 		const struct rpcrdma_rep *rep
1043 	),
1044 
1045 	TP_ARGS(rep),
1046 
1047 	TP_STRUCT__entry(
1048 		__field(unsigned int, task_id)
1049 		__field(unsigned int, client_id)
1050 		__field(const void *, rep)
1051 		__field(u32, xid)
1052 	),
1053 
1054 	TP_fast_assign(
1055 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
1056 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
1057 		__entry->rep = rep;
1058 		__entry->xid = be32_to_cpu(rep->rr_xid);
1059 	),
1060 
1061 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1062 		__entry->task_id, __entry->client_id, __entry->xid,
1063 		__entry->rep
1064 	)
1065 );
1066 
1067 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
1068 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
1069 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
1070 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1071 
1072 TRACE_EVENT(xprtrdma_err_vers,
1073 	TP_PROTO(
1074 		const struct rpc_rqst *rqst,
1075 		__be32 *min,
1076 		__be32 *max
1077 	),
1078 
1079 	TP_ARGS(rqst, min, max),
1080 
1081 	TP_STRUCT__entry(
1082 		__field(unsigned int, task_id)
1083 		__field(unsigned int, client_id)
1084 		__field(u32, xid)
1085 		__field(u32, min)
1086 		__field(u32, max)
1087 	),
1088 
1089 	TP_fast_assign(
1090 		__entry->task_id = rqst->rq_task->tk_pid;
1091 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1092 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1093 		__entry->min = be32_to_cpup(min);
1094 		__entry->max = be32_to_cpup(max);
1095 	),
1096 
1097 	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
1098 		__entry->task_id, __entry->client_id, __entry->xid,
1099 		__entry->min, __entry->max
1100 	)
1101 );
1102 
1103 TRACE_EVENT(xprtrdma_err_chunk,
1104 	TP_PROTO(
1105 		const struct rpc_rqst *rqst
1106 	),
1107 
1108 	TP_ARGS(rqst),
1109 
1110 	TP_STRUCT__entry(
1111 		__field(unsigned int, task_id)
1112 		__field(unsigned int, client_id)
1113 		__field(u32, xid)
1114 	),
1115 
1116 	TP_fast_assign(
1117 		__entry->task_id = rqst->rq_task->tk_pid;
1118 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1119 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1120 	),
1121 
1122 	TP_printk("task:%u@%u xid=0x%08x",
1123 		__entry->task_id, __entry->client_id, __entry->xid
1124 	)
1125 );
1126 
1127 TRACE_EVENT(xprtrdma_err_unrecognized,
1128 	TP_PROTO(
1129 		const struct rpc_rqst *rqst,
1130 		__be32 *procedure
1131 	),
1132 
1133 	TP_ARGS(rqst, procedure),
1134 
1135 	TP_STRUCT__entry(
1136 		__field(unsigned int, task_id)
1137 		__field(unsigned int, client_id)
1138 		__field(u32, xid)
1139 		__field(u32, procedure)
1140 	),
1141 
1142 	TP_fast_assign(
1143 		__entry->task_id = rqst->rq_task->tk_pid;
1144 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1145 		__entry->procedure = be32_to_cpup(procedure);
		__entry->xid = be32_to_cpu(rqst->rq_xid);
1146 	),
1147 
1148 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1149 		__entry->task_id, __entry->client_id, __entry->xid,
1150 		__entry->procedure
1151 	)
1152 );
1153 
1154 TRACE_EVENT(xprtrdma_fixup,
1155 	TP_PROTO(
1156 		const struct rpc_rqst *rqst,
1157 		unsigned long fixup
1158 	),
1159 
1160 	TP_ARGS(rqst, fixup),
1161 
1162 	TP_STRUCT__entry(
1163 		__field(unsigned int, task_id)
1164 		__field(unsigned int, client_id)
1165 		__field(unsigned long, fixup)
1166 		__field(size_t, headlen)
1167 		__field(unsigned int, pagelen)
1168 		__field(size_t, taillen)
1169 	),
1170 
1171 	TP_fast_assign(
1172 		__entry->task_id = rqst->rq_task->tk_pid;
1173 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1174 		__entry->fixup = fixup;
1175 		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1176 		__entry->pagelen = rqst->rq_rcv_buf.page_len;
1177 		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1178 	),
1179 
1180 	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
1181 		__entry->task_id, __entry->client_id, __entry->fixup,
1182 		__entry->headlen, __entry->pagelen, __entry->taillen
1183 	)
1184 );
1185 
1186 TRACE_EVENT(xprtrdma_decode_seg,
1187 	TP_PROTO(
1188 		u32 handle,
1189 		u32 length,
1190 		u64 offset
1191 	),
1192 
1193 	TP_ARGS(handle, length, offset),
1194 
1195 	TP_STRUCT__entry(
1196 		__field(u32, handle)
1197 		__field(u32, length)
1198 		__field(u64, offset)
1199 	),
1200 
1201 	TP_fast_assign(
1202 		__entry->handle = handle;
1203 		__entry->length = length;
1204 		__entry->offset = offset;
1205 	),
1206 
1207 	TP_printk("%u@0x%016llx:0x%08x",
1208 		__entry->length, (unsigned long long)__entry->offset,
1209 		__entry->handle
1210 	)
1211 );
1212 
1213 /**
1214  ** Callback events
1215  **/
1216 
1217 TRACE_EVENT(xprtrdma_cb_setup,
1218 	TP_PROTO(
1219 		const struct rpcrdma_xprt *r_xprt,
1220 		unsigned int reqs
1221 	),
1222 
1223 	TP_ARGS(r_xprt, reqs),
1224 
1225 	TP_STRUCT__entry(
1226 		__field(const void *, r_xprt)
1227 		__field(unsigned int, reqs)
1228 		__string(addr, rpcrdma_addrstr(r_xprt))
1229 		__string(port, rpcrdma_portstr(r_xprt))
1230 	),
1231 
1232 	TP_fast_assign(
1233 		__entry->r_xprt = r_xprt;
1234 		__entry->reqs = reqs;
1235 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1236 		__assign_str(port, rpcrdma_portstr(r_xprt));
1237 	),
1238 
1239 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1240 		__get_str(addr), __get_str(port),
1241 		__entry->r_xprt, __entry->reqs
1242 	)
1243 );
1244 
1245 DEFINE_CB_EVENT(xprtrdma_cb_call);
1246 DEFINE_CB_EVENT(xprtrdma_cb_reply);
1247 
1248 TRACE_EVENT(xprtrdma_leaked_rep,
1249 	TP_PROTO(
1250 		const struct rpc_rqst *rqst,
1251 		const struct rpcrdma_rep *rep
1252 	),
1253 
1254 	TP_ARGS(rqst, rep),
1255 
1256 	TP_STRUCT__entry(
1257 		__field(unsigned int, task_id)
1258 		__field(unsigned int, client_id)
1259 		__field(u32, xid)
1260 		__field(const void *, rep)
1261 	),
1262 
1263 	TP_fast_assign(
1264 		__entry->task_id = rqst->rq_task->tk_pid;
1265 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1266 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1267 		__entry->rep = rep;
1268 	),
1269 
1270 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1271 		__entry->task_id, __entry->client_id, __entry->xid,
1272 		__entry->rep
1273 	)
1274 );
1275 
1276 /**
1277  ** Server-side RPC/RDMA events
1278  **/
1279 
1280 DECLARE_EVENT_CLASS(svcrdma_accept_class,
1281 	TP_PROTO(
1282 		const struct svcxprt_rdma *rdma,
1283 		long status
1284 	),
1285 
1286 	TP_ARGS(rdma, status),
1287 
1288 	TP_STRUCT__entry(
1289 		__field(long, status)
1290 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1291 	),
1292 
1293 	TP_fast_assign(
1294 		__entry->status = status;
1295 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1296 	),
1297 
1298 	TP_printk("addr=%s status=%ld",
1299 		__get_str(addr), __entry->status
1300 	)
1301 );
1302 
1303 #define DEFINE_ACCEPT_EVENT(name) \
1304 		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
1305 				TP_PROTO( \
1306 					const struct svcxprt_rdma *rdma, \
1307 					long status \
1308 				), \
1309 				TP_ARGS(rdma, status))
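/*
 * DEFINE_ACCEPT_EVENT(pd), for example, creates svcrdma_pd_err, which the
 * accept path might report when Protection Domain allocation fails.
 * Sketch of an assumed call site, shown only to illustrate the naming
 * convention:
 *
 *	newxprt->sc_pd = ib_alloc_pd(dev, 0);
 *	if (IS_ERR(newxprt->sc_pd)) {
 *		trace_svcrdma_pd_err(newxprt, PTR_ERR(newxprt->sc_pd));
 *		goto errout;
 *	}
 */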
1310 
1311 DEFINE_ACCEPT_EVENT(pd);
1312 DEFINE_ACCEPT_EVENT(qp);
1313 DEFINE_ACCEPT_EVENT(fabric);
1314 DEFINE_ACCEPT_EVENT(initdepth);
1315 DEFINE_ACCEPT_EVENT(accept);
1316 
1317 TRACE_DEFINE_ENUM(RDMA_MSG);
1318 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1319 TRACE_DEFINE_ENUM(RDMA_MSGP);
1320 TRACE_DEFINE_ENUM(RDMA_DONE);
1321 TRACE_DEFINE_ENUM(RDMA_ERROR);
1322 
1323 #define show_rpcrdma_proc(x)						\
1324 		__print_symbolic(x,					\
1325 				{ RDMA_MSG, "RDMA_MSG" },		\
1326 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1327 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1328 				{ RDMA_DONE, "RDMA_DONE" },		\
1329 				{ RDMA_ERROR, "RDMA_ERROR" })
1330 
1331 TRACE_EVENT(svcrdma_decode_rqst,
1332 	TP_PROTO(
1333 		const struct svc_rdma_recv_ctxt *ctxt,
1334 		__be32 *p,
1335 		unsigned int hdrlen
1336 	),
1337 
1338 	TP_ARGS(ctxt, p, hdrlen),
1339 
1340 	TP_STRUCT__entry(
1341 		__field(u32, cq_id)
1342 		__field(int, completion_id)
1343 		__field(u32, xid)
1344 		__field(u32, vers)
1345 		__field(u32, proc)
1346 		__field(u32, credits)
1347 		__field(unsigned int, hdrlen)
1348 	),
1349 
1350 	TP_fast_assign(
1351 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1352 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1353 		__entry->xid = be32_to_cpup(p++);
1354 		__entry->vers = be32_to_cpup(p++);
1355 		__entry->credits = be32_to_cpup(p++);
1356 		__entry->proc = be32_to_cpup(p);
1357 		__entry->hdrlen = hdrlen;
1358 	),
1359 
1360 	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1361 		__entry->cq_id, __entry->completion_id,
1362 		__entry->xid, __entry->vers, __entry->credits,
1363 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1364 );
1365 
1366 TRACE_EVENT(svcrdma_decode_short_err,
1367 	TP_PROTO(
1368 		const struct svc_rdma_recv_ctxt *ctxt,
1369 		unsigned int hdrlen
1370 	),
1371 
1372 	TP_ARGS(ctxt, hdrlen),
1373 
1374 	TP_STRUCT__entry(
1375 		__field(u32, cq_id)
1376 		__field(int, completion_id)
1377 		__field(unsigned int, hdrlen)
1378 	),
1379 
1380 	TP_fast_assign(
1381 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1382 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1383 		__entry->hdrlen = hdrlen;
1384 	),
1385 
1386 	TP_printk("cq.id=%u cid=%d hdrlen=%u",
1387 		__entry->cq_id, __entry->completion_id,
1388 		__entry->hdrlen)
1389 );
1390 
1391 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1392 	TP_PROTO(
1393 		const struct svc_rdma_recv_ctxt *ctxt,
1394 		__be32 *p
1395 	),
1396 
1397 	TP_ARGS(ctxt, p),
1398 
1399 	TP_STRUCT__entry(
1400 		__field(u32, cq_id)
1401 		__field(int, completion_id)
1402 		__field(u32, xid)
1403 		__field(u32, vers)
1404 		__field(u32, proc)
1405 		__field(u32, credits)
1406 	),
1407 
1408 	TP_fast_assign(
1409 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1410 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1411 		__entry->xid = be32_to_cpup(p++);
1412 		__entry->vers = be32_to_cpup(p++);
1413 		__entry->credits = be32_to_cpup(p++);
1414 		__entry->proc = be32_to_cpup(p);
1415 	),
1416 
1417 	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
1418 		__entry->cq_id, __entry->completion_id,
1419 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1420 );
1421 
1422 #define DEFINE_BADREQ_EVENT(name)					\
1423 		DEFINE_EVENT(svcrdma_badreq_event,			\
1424 			     svcrdma_decode_##name##_err,		\
1425 				TP_PROTO(				\
1426 					const struct svc_rdma_recv_ctxt *ctxt,	\
1427 					__be32 *p			\
1428 				),					\
1429 				TP_ARGS(ctxt, p))
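/*
 * The badreq events fire when a received transport header cannot be
 * used: badvers for an unsupported rpcrdma version, badproc for an
 * unrecognized procedure, parse for a malformed chunk list, and drop
 * when the message is one the server simply discards.  Each is invoked
 * with the receive context and a pointer to the start of the transport
 * header, e.g. trace_svcrdma_decode_badvers_err(ctxt, rdma_argp)
 * (illustrative).
 */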
1430 
1431 DEFINE_BADREQ_EVENT(badvers);
1432 DEFINE_BADREQ_EVENT(drop);
1433 DEFINE_BADREQ_EVENT(badproc);
1434 DEFINE_BADREQ_EVENT(parse);
1435 
1436 DECLARE_EVENT_CLASS(svcrdma_segment_event,
1437 	TP_PROTO(
1438 		u32 handle,
1439 		u32 length,
1440 		u64 offset
1441 	),
1442 
1443 	TP_ARGS(handle, length, offset),
1444 
1445 	TP_STRUCT__entry(
1446 		__field(u32, handle)
1447 		__field(u32, length)
1448 		__field(u64, offset)
1449 	),
1450 
1451 	TP_fast_assign(
1452 		__entry->handle = handle;
1453 		__entry->length = length;
1454 		__entry->offset = offset;
1455 	),
1456 
1457 	TP_printk("%u@0x%016llx:0x%08x",
1458 		__entry->length, (unsigned long long)__entry->offset,
1459 		__entry->handle
1460 	)
1461 );
1462 
1463 #define DEFINE_SEGMENT_EVENT(name)					\
1464 		DEFINE_EVENT(svcrdma_segment_event, svcrdma_##name,\
1465 				TP_PROTO(				\
1466 					u32 handle,			\
1467 					u32 length,			\
1468 					u64 offset			\
1469 				),					\
1470 				TP_ARGS(handle, length, offset))
1471 
1472 DEFINE_SEGMENT_EVENT(decode_wseg);
1473 DEFINE_SEGMENT_EVENT(encode_rseg);
1474 DEFINE_SEGMENT_EVENT(send_rseg);
1475 DEFINE_SEGMENT_EVENT(encode_wseg);
1476 DEFINE_SEGMENT_EVENT(send_wseg);
1477 
1478 DECLARE_EVENT_CLASS(svcrdma_chunk_event,
1479 	TP_PROTO(
1480 		u32 length
1481 	),
1482 
1483 	TP_ARGS(length),
1484 
1485 	TP_STRUCT__entry(
1486 		__field(u32, length)
1487 	),
1488 
1489 	TP_fast_assign(
1490 		__entry->length = length;
1491 	),
1492 
1493 	TP_printk("length=%u",
1494 		__entry->length
1495 	)
1496 );
1497 
1498 #define DEFINE_CHUNK_EVENT(name)					\
1499 		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_##name,	\
1500 				TP_PROTO(				\
1501 					u32 length			\
1502 				),					\
1503 				TP_ARGS(length))
1504 
1505 DEFINE_CHUNK_EVENT(send_pzr);
1506 DEFINE_CHUNK_EVENT(encode_write_chunk);
1507 DEFINE_CHUNK_EVENT(send_write_chunk);
1508 DEFINE_CHUNK_EVENT(encode_read_chunk);
1509 DEFINE_CHUNK_EVENT(send_reply_chunk);
1510 
1511 TRACE_EVENT(svcrdma_send_read_chunk,
1512 	TP_PROTO(
1513 		u32 length,
1514 		u32 position
1515 	),
1516 
1517 	TP_ARGS(length, position),
1518 
1519 	TP_STRUCT__entry(
1520 		__field(u32, length)
1521 		__field(u32, position)
1522 	),
1523 
1524 	TP_fast_assign(
1525 		__entry->length = length;
1526 		__entry->position = position;
1527 	),
1528 
1529 	TP_printk("length=%u position=%u",
1530 		__entry->length, __entry->position
1531 	)
1532 );
1533 
1534 DECLARE_EVENT_CLASS(svcrdma_error_event,
1535 	TP_PROTO(
1536 		__be32 xid
1537 	),
1538 
1539 	TP_ARGS(xid),
1540 
1541 	TP_STRUCT__entry(
1542 		__field(u32, xid)
1543 	),
1544 
1545 	TP_fast_assign(
1546 		__entry->xid = be32_to_cpu(xid);
1547 	),
1548 
1549 	TP_printk("xid=0x%08x",
1550 		__entry->xid
1551 	)
1552 );
1553 
1554 #define DEFINE_ERROR_EVENT(name)					\
1555 		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1556 				TP_PROTO(				\
1557 					__be32 xid			\
1558 				),					\
1559 				TP_ARGS(xid))
1560 
1561 DEFINE_ERROR_EVENT(vers);
1562 DEFINE_ERROR_EVENT(chunk);
1563 
1564 /**
1565  ** Server-side RDMA API events
1566  **/
1567 
1568 DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
1569 	TP_PROTO(
1570 		const struct svcxprt_rdma *rdma,
1571 		u64 dma_addr,
1572 		u32 length
1573 	),
1574 
1575 	TP_ARGS(rdma, dma_addr, length),
1576 
1577 	TP_STRUCT__entry(
1578 		__field(u64, dma_addr)
1579 		__field(u32, length)
1580 		__string(device, rdma->sc_cm_id->device->name)
1581 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1582 	),
1583 
1584 	TP_fast_assign(
1585 		__entry->dma_addr = dma_addr;
1586 		__entry->length = length;
1587 		__assign_str(device, rdma->sc_cm_id->device->name);
1588 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1589 	),
1590 
1591 	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
1592 		__get_str(addr), __get_str(device),
1593 		__entry->dma_addr, __entry->length
1594 	)
1595 );
1596 
1597 #define DEFINE_SVC_DMA_EVENT(name)					\
1598 		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
1599 				TP_PROTO(				\
1600 					const struct svcxprt_rdma *rdma,\
1601 					u64 dma_addr,			\
1602 					u32 length			\
1603 				),					\
1604 				TP_ARGS(rdma, dma_addr, length))
1605 
1606 DEFINE_SVC_DMA_EVENT(dma_map_page);
1607 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1608 
1609 TRACE_EVENT(svcrdma_dma_map_rw_err,
1610 	TP_PROTO(
1611 		const struct svcxprt_rdma *rdma,
1612 		unsigned int nents,
1613 		int status
1614 	),
1615 
1616 	TP_ARGS(rdma, nents, status),
1617 
1618 	TP_STRUCT__entry(
1619 		__field(int, status)
1620 		__field(unsigned int, nents)
1621 		__string(device, rdma->sc_cm_id->device->name)
1622 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1623 	),
1624 
1625 	TP_fast_assign(
1626 		__entry->status = status;
1627 		__entry->nents = nents;
1628 		__assign_str(device, rdma->sc_cm_id->device->name);
1629 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1630 	),
1631 
1632 	TP_printk("addr=%s device=%s nents=%u status=%d",
1633 		__get_str(addr), __get_str(device), __entry->nents,
1634 		__entry->status
1635 	)
1636 );
1637 
1638 TRACE_EVENT(svcrdma_no_rwctx_err,
1639 	TP_PROTO(
1640 		const struct svcxprt_rdma *rdma,
1641 		unsigned int num_sges
1642 	),
1643 
1644 	TP_ARGS(rdma, num_sges),
1645 
1646 	TP_STRUCT__entry(
1647 		__field(unsigned int, num_sges)
1648 		__string(device, rdma->sc_cm_id->device->name)
1649 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1650 	),
1651 
1652 	TP_fast_assign(
1653 		__entry->num_sges = num_sges;
1654 		__assign_str(device, rdma->sc_cm_id->device->name);
1655 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1656 	),
1657 
1658 	TP_printk("addr=%s device=%s num_sges=%d",
1659 		__get_str(addr), __get_str(device), __entry->num_sges
1660 	)
1661 );
1662 
1663 TRACE_EVENT(svcrdma_page_overrun_err,
1664 	TP_PROTO(
1665 		const struct svcxprt_rdma *rdma,
1666 		const struct svc_rqst *rqst,
1667 		unsigned int pageno
1668 	),
1669 
1670 	TP_ARGS(rdma, rqst, pageno),
1671 
1672 	TP_STRUCT__entry(
1673 		__field(unsigned int, pageno)
1674 		__field(u32, xid)
1675 		__string(device, rdma->sc_cm_id->device->name)
1676 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1677 	),
1678 
1679 	TP_fast_assign(
1680 		__entry->pageno = pageno;
1681 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1682 		__assign_str(device, rdma->sc_cm_id->device->name);
1683 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1684 	),
1685 
1686 	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
1687 		__get_str(device), __entry->xid, __entry->pageno
1688 	)
1689 );
1690 
1691 TRACE_EVENT(svcrdma_small_wrch_err,
1692 	TP_PROTO(
1693 		const struct svcxprt_rdma *rdma,
1694 		unsigned int remaining,
1695 		unsigned int seg_no,
1696 		unsigned int num_segs
1697 	),
1698 
1699 	TP_ARGS(rdma, remaining, seg_no, num_segs),
1700 
1701 	TP_STRUCT__entry(
1702 		__field(unsigned int, remaining)
1703 		__field(unsigned int, seg_no)
1704 		__field(unsigned int, num_segs)
1705 		__string(device, rdma->sc_cm_id->device->name)
1706 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1707 	),
1708 
1709 	TP_fast_assign(
1710 		__entry->remaining = remaining;
1711 		__entry->seg_no = seg_no;
1712 		__entry->num_segs = num_segs;
1713 		__assign_str(device, rdma->sc_cm_id->device->name);
1714 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1715 	),
1716 
1717 	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
1718 		__get_str(addr), __get_str(device), __entry->remaining,
1719 		__entry->seg_no, __entry->num_segs
1720 	)
1721 );
1722 
1723 TRACE_EVENT(svcrdma_send_pullup,
1724 	TP_PROTO(
1725 		unsigned int len
1726 	),
1727 
1728 	TP_ARGS(len),
1729 
1730 	TP_STRUCT__entry(
1731 		__field(unsigned int, len)
1732 	),
1733 
1734 	TP_fast_assign(
1735 		__entry->len = len;
1736 	),
1737 
1738 	TP_printk("len=%u", __entry->len)
1739 );
1740 
1741 TRACE_EVENT(svcrdma_send_err,
1742 	TP_PROTO(
1743 		const struct svc_rqst *rqst,
1744 		int status
1745 	),
1746 
1747 	TP_ARGS(rqst, status),
1748 
1749 	TP_STRUCT__entry(
1750 		__field(int, status)
1751 		__field(u32, xid)
1752 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1753 	),
1754 
1755 	TP_fast_assign(
1756 		__entry->status = status;
1757 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1758 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1759 	),
1760 
1761 	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
1762 		__entry->xid, __entry->status
1763 	)
1764 );
1765 
1766 TRACE_EVENT(svcrdma_post_send,
1767 	TP_PROTO(
1768 		const struct svc_rdma_send_ctxt *ctxt
1769 	),
1770 
1771 	TP_ARGS(ctxt),
1772 
1773 	TP_STRUCT__entry(
1774 		__field(u32, cq_id)
1775 		__field(int, completion_id)
1776 		__field(unsigned int, num_sge)
1777 		__field(u32, inv_rkey)
1778 	),
1779 
1780 	TP_fast_assign(
1781 		const struct ib_send_wr *wr = &ctxt->sc_send_wr;
1782 
1783 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1784 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1785 		__entry->num_sge = wr->num_sge;
1786 		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1787 					wr->ex.invalidate_rkey : 0;
1788 	),
1789 
1790 	TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
1791 		__entry->cq_id, __entry->completion_id,
1792 		__entry->num_sge, __entry->inv_rkey
1793 	)
1794 );
1795 
1796 DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1797 
1798 TRACE_EVENT(svcrdma_post_recv,
1799 	TP_PROTO(
1800 		const struct svc_rdma_recv_ctxt *ctxt
1801 	),
1802 
1803 	TP_ARGS(ctxt),
1804 
1805 	TP_STRUCT__entry(
1806 		__field(u32, cq_id)
1807 		__field(int, completion_id)
1808 	),
1809 
1810 	TP_fast_assign(
1811 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1812 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1813 	),
1814 
1815 	TP_printk("cq.id=%u cid=%d",
1816 		__entry->cq_id, __entry->completion_id
1817 	)
1818 );
1819 
1820 DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
1821 
1822 TRACE_EVENT(svcrdma_rq_post_err,
1823 	TP_PROTO(
1824 		const struct svcxprt_rdma *rdma,
1825 		int status
1826 	),
1827 
1828 	TP_ARGS(rdma, status),
1829 
1830 	TP_STRUCT__entry(
1831 		__field(int, status)
1832 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1833 	),
1834 
1835 	TP_fast_assign(
1836 		__entry->status = status;
1837 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1838 	),
1839 
1840 	TP_printk("addr=%s status=%d",
1841 		__get_str(addr), __entry->status
1842 	)
1843 );
1844 
1845 TRACE_EVENT(svcrdma_post_chunk,
1846 	TP_PROTO(
1847 		const struct rpc_rdma_cid *cid,
1848 		int sqecount
1849 	),
1850 
1851 	TP_ARGS(cid, sqecount),
1852 
1853 	TP_STRUCT__entry(
1854 		__field(u32, cq_id)
1855 		__field(int, completion_id)
1856 		__field(int, sqecount)
1857 	),
1858 
1859 	TP_fast_assign(
1860 		__entry->cq_id = cid->ci_queue_id;
1861 		__entry->completion_id = cid->ci_completion_id;
1862 		__entry->sqecount = sqecount;
1863 	),
1864 
1865 	TP_printk("cq.id=%u cid=%d sqecount=%d",
1866 		__entry->cq_id, __entry->completion_id,
1867 		__entry->sqecount
1868 	)
1869 );
1870 
1871 DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
1872 DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1873 
1874 TRACE_EVENT(svcrdma_qp_error,
1875 	TP_PROTO(
1876 		const struct ib_event *event,
1877 		const struct sockaddr *sap
1878 	),
1879 
1880 	TP_ARGS(event, sap),
1881 
1882 	TP_STRUCT__entry(
1883 		__field(unsigned int, event)
1884 		__string(device, event->device->name)
1885 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1886 	),
1887 
1888 	TP_fast_assign(
1889 		__entry->event = event->event;
1890 		__assign_str(device, event->device->name);
1891 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1892 			 "%pISpc", sap);
1893 	),
1894 
1895 	TP_printk("addr=%s dev=%s event=%s (%u)",
1896 		__entry->addr, __get_str(device),
1897 		rdma_show_ib_event(__entry->event), __entry->event
1898 	)
1899 );
1900 
1901 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1902 	TP_PROTO(
1903 		const struct svcxprt_rdma *rdma
1904 	),
1905 
1906 	TP_ARGS(rdma),
1907 
1908 	TP_STRUCT__entry(
1909 		__field(int, avail)
1910 		__field(int, depth)
1911 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1912 	),
1913 
1914 	TP_fast_assign(
1915 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1916 		__entry->depth = rdma->sc_sq_depth;
1917 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1918 	),
1919 
1920 	TP_printk("addr=%s sc_sq_avail=%d/%d",
1921 		__get_str(addr), __entry->avail, __entry->depth
1922 	)
1923 );
1924 
1925 #define DEFINE_SQ_EVENT(name)						\
1926 		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1927 				TP_PROTO(				\
1928 					const struct svcxprt_rdma *rdma \
1929 				),					\
1930 				TP_ARGS(rdma))
1931 
1932 DEFINE_SQ_EVENT(full);
1933 DEFINE_SQ_EVENT(retry);
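/*
 * Send Queue accounting: the svcrdma send path decrements sc_sq_avail
 * before posting and waits for completions when the SQ is exhausted.  A
 * rough sketch of how the full/retry pair might be used (assumed shape
 * of the post loop, not the authoritative code):
 *
 *	while (1) {
 *		if (atomic_sub_return(1, &rdma->sc_sq_avail) < 0) {
 *			atomic_inc(&rdma->sc_sq_avail);
 *			trace_svcrdma_sq_full(rdma);
 *			wait_event(rdma->sc_send_wait,
 *				   atomic_read(&rdma->sc_sq_avail) > 0);
 *			trace_svcrdma_sq_retry(rdma);
 *			continue;
 *		}
 *		... post the WR ...
 *		break;
 *	}
 */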
1934 
1935 TRACE_EVENT(svcrdma_sq_post_err,
1936 	TP_PROTO(
1937 		const struct svcxprt_rdma *rdma,
1938 		int status
1939 	),
1940 
1941 	TP_ARGS(rdma, status),
1942 
1943 	TP_STRUCT__entry(
1944 		__field(int, avail)
1945 		__field(int, depth)
1946 		__field(int, status)
1947 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1948 	),
1949 
1950 	TP_fast_assign(
1951 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1952 		__entry->depth = rdma->sc_sq_depth;
1953 		__entry->status = status;
1954 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1955 	),
1956 
1957 	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
1958 		__get_str(addr), __entry->avail, __entry->depth,
1959 		__entry->status
1960 	)
1961 );
1962 
1963 #endif /* _TRACE_RPCRDMA_H */
1964 
1965 #include <trace/define_trace.h>
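/*
 * Like all tracepoint headers, this file is read twice: once normally,
 * and once with TRACE_HEADER_MULTI_READ set by <trace/define_trace.h>
 * when exactly one compilation unit in the module defines
 * CREATE_TRACE_POINTS before including it, so the static tracepoint
 * structures are instantiated once.  Every other consumer just includes
 * <trace/events/rpcrdma.h> and calls the trace_*() helpers.
 */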
1966