1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
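/*
 * These events are registered under the "rpcrdma" TRACE_SYSTEM and are
 * therefore exposed through tracefs under events/rpcrdma/.  A rough
 * usage sketch (the tracefs mount point may differ on your system):
 *
 *	echo 1 > /sys/kernel/tracing/events/rpcrdma/enable
 *	echo 1 > /sys/kernel/tracing/events/rpcrdma/xprtrdma_wc_send/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */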
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 
18 #include <trace/events/rdma.h>
19 #include <trace/events/sunrpc_base.h>
20 
21 /**
22  ** Event classes
23  **/
24 
25 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
26 	TP_PROTO(
27 		const struct ib_wc *wc,
28 		const struct rpc_rdma_cid *cid
29 	),
30 
31 	TP_ARGS(wc, cid),
32 
33 	TP_STRUCT__entry(
34 		__field(u32, cq_id)
35 		__field(int, completion_id)
36 		__field(unsigned long, status)
37 		__field(unsigned int, vendor_err)
38 	),
39 
40 	TP_fast_assign(
41 		__entry->cq_id = cid->ci_queue_id;
42 		__entry->completion_id = cid->ci_completion_id;
43 		__entry->status = wc->status;
44 		if (wc->status)
45 			__entry->vendor_err = wc->vendor_err;
46 		else
47 			__entry->vendor_err = 0;
48 	),
49 
50 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
51 		__entry->cq_id, __entry->completion_id,
52 		rdma_show_wc_status(__entry->status),
53 		__entry->status, __entry->vendor_err
54 	)
55 );
56 
57 #define DEFINE_COMPLETION_EVENT(name)					\
58 		DEFINE_EVENT(rpcrdma_completion_class, name,		\
59 				TP_PROTO(				\
60 					const struct ib_wc *wc,		\
61 					const struct rpc_rdma_cid *cid	\
62 				),					\
63 				TP_ARGS(wc, cid))
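/*
 * The classes in this section follow the usual DECLARE_EVENT_CLASS()/
 * DEFINE_EVENT() pattern: the class defines the record layout and
 * format string once, and each DEFINE_*_EVENT() wrapper stamps out a
 * named tracepoint that reuses them.  For example, the later
 * DEFINE_COMPLETION_EVENT(xprtrdma_wc_send) produces a
 * trace_xprtrdma_wc_send(wc, cid) call point with this class's output.
 */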
64 
65 DECLARE_EVENT_CLASS(rpcrdma_mr_completion_class,
66 	TP_PROTO(
67 		const struct ib_wc *wc,
68 		const struct rpc_rdma_cid *cid
69 	),
70 
71 	TP_ARGS(wc, cid),
72 
73 	TP_STRUCT__entry(
74 		__field(u32, cq_id)
75 		__field(int, completion_id)
76 		__field(unsigned long, status)
77 		__field(unsigned int, vendor_err)
78 	),
79 
80 	TP_fast_assign(
81 		__entry->cq_id = cid->ci_queue_id;
82 		__entry->completion_id = cid->ci_completion_id;
83 		__entry->status = wc->status;
84 		if (wc->status)
85 			__entry->vendor_err = wc->vendor_err;
86 		else
87 			__entry->vendor_err = 0;
88 	),
89 
90 	TP_printk("cq.id=%u mr.id=%d status=%s (%lu/0x%x)",
91 		__entry->cq_id, __entry->completion_id,
92 		rdma_show_wc_status(__entry->status),
93 		__entry->status, __entry->vendor_err
94 	)
95 );
96 
97 #define DEFINE_MR_COMPLETION_EVENT(name)				\
98 		DEFINE_EVENT(rpcrdma_mr_completion_class, name,		\
99 				TP_PROTO(				\
100 					const struct ib_wc *wc,		\
101 					const struct rpc_rdma_cid *cid	\
102 				),					\
103 				TP_ARGS(wc, cid))
104 
105 DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
106 	TP_PROTO(
107 		const struct ib_wc *wc,
108 		const struct rpc_rdma_cid *cid
109 	),
110 
111 	TP_ARGS(wc, cid),
112 
113 	TP_STRUCT__entry(
114 		__field(u32, cq_id)
115 		__field(int, completion_id)
116 		__field(u32, received)
117 		__field(unsigned long, status)
118 		__field(unsigned int, vendor_err)
119 	),
120 
121 	TP_fast_assign(
122 		__entry->cq_id = cid->ci_queue_id;
123 		__entry->completion_id = cid->ci_completion_id;
124 		__entry->status = wc->status;
125 		if (wc->status) {
126 			__entry->received = 0;
127 			__entry->vendor_err = wc->vendor_err;
128 		} else {
129 			__entry->received = wc->byte_len;
130 			__entry->vendor_err = 0;
131 		}
132 	),
133 
134 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
135 		__entry->cq_id, __entry->completion_id,
136 		rdma_show_wc_status(__entry->status),
137 		__entry->status, __entry->vendor_err,
138 		__entry->received
139 	)
140 );
141 
142 #define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
143 		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
144 				TP_PROTO(				\
145 					const struct ib_wc *wc,		\
146 					const struct rpc_rdma_cid *cid	\
147 				),					\
148 				TP_ARGS(wc, cid))
149 
150 DECLARE_EVENT_CLASS(xprtrdma_reply_class,
151 	TP_PROTO(
152 		const struct rpcrdma_rep *rep
153 	),
154 
155 	TP_ARGS(rep),
156 
157 	TP_STRUCT__entry(
158 		__field(u32, xid)
159 		__field(u32, version)
160 		__field(u32, proc)
161 		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
162 		__string(port, rpcrdma_portstr(rep->rr_rxprt))
163 	),
164 
165 	TP_fast_assign(
166 		__entry->xid = be32_to_cpu(rep->rr_xid);
167 		__entry->version = be32_to_cpu(rep->rr_vers);
168 		__entry->proc = be32_to_cpu(rep->rr_proc);
169 		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
170 		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
171 	),
172 
173 	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
174 		__get_str(addr), __get_str(port),
175 		__entry->xid, __entry->version, __entry->proc
176 	)
177 );
178 
179 #define DEFINE_REPLY_EVENT(name)					\
180 		DEFINE_EVENT(xprtrdma_reply_class,			\
181 				xprtrdma_reply_##name##_err,		\
182 				TP_PROTO(				\
183 					const struct rpcrdma_rep *rep	\
184 				),					\
185 				TP_ARGS(rep))
186 
187 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
188 	TP_PROTO(
189 		const struct rpcrdma_xprt *r_xprt
190 	),
191 
192 	TP_ARGS(r_xprt),
193 
194 	TP_STRUCT__entry(
195 		__string(addr, rpcrdma_addrstr(r_xprt))
196 		__string(port, rpcrdma_portstr(r_xprt))
197 	),
198 
199 	TP_fast_assign(
200 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
201 		__assign_str(port, rpcrdma_portstr(r_xprt));
202 	),
203 
204 	TP_printk("peer=[%s]:%s",
205 		__get_str(addr), __get_str(port)
206 	)
207 );
208 
209 #define DEFINE_RXPRT_EVENT(name)					\
210 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
211 				TP_PROTO(				\
212 					const struct rpcrdma_xprt *r_xprt \
213 				),					\
214 				TP_ARGS(r_xprt))
215 
216 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
217 	TP_PROTO(
218 		const struct rpcrdma_xprt *r_xprt,
219 		int rc
220 	),
221 
222 	TP_ARGS(r_xprt, rc),
223 
224 	TP_STRUCT__entry(
225 		__field(int, rc)
226 		__field(int, connect_status)
227 		__string(addr, rpcrdma_addrstr(r_xprt))
228 		__string(port, rpcrdma_portstr(r_xprt))
229 	),
230 
231 	TP_fast_assign(
232 		__entry->rc = rc;
233 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
234 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
235 		__assign_str(port, rpcrdma_portstr(r_xprt));
236 	),
237 
238 	TP_printk("peer=[%s]:%s rc=%d connection status=%d",
239 		__get_str(addr), __get_str(port),
240 		__entry->rc, __entry->connect_status
241 	)
242 );
243 
244 #define DEFINE_CONN_EVENT(name)						\
245 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
246 				TP_PROTO(				\
247 					const struct rpcrdma_xprt *r_xprt, \
248 					int rc				\
249 				),					\
250 				TP_ARGS(r_xprt, rc))
251 
252 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
253 	TP_PROTO(
254 		const struct rpc_task *task,
255 		unsigned int pos,
256 		struct rpcrdma_mr *mr,
257 		int nsegs
258 	),
259 
260 	TP_ARGS(task, pos, mr, nsegs),
261 
262 	TP_STRUCT__entry(
263 		__field(unsigned int, task_id)
264 		__field(unsigned int, client_id)
265 		__field(unsigned int, pos)
266 		__field(int, nents)
267 		__field(u32, handle)
268 		__field(u32, length)
269 		__field(u64, offset)
270 		__field(int, nsegs)
271 	),
272 
273 	TP_fast_assign(
274 		__entry->task_id = task->tk_pid;
275 		__entry->client_id = task->tk_client->cl_clid;
276 		__entry->pos = pos;
277 		__entry->nents = mr->mr_nents;
278 		__entry->handle = mr->mr_handle;
279 		__entry->length = mr->mr_length;
280 		__entry->offset = mr->mr_offset;
281 		__entry->nsegs = nsegs;
282 	),
283 
284 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
285 		  " pos=%u %u@0x%016llx:0x%08x (%s)",
286 		__entry->task_id, __entry->client_id,
287 		__entry->pos, __entry->length,
288 		(unsigned long long)__entry->offset, __entry->handle,
289 		__entry->nents < __entry->nsegs ? "more" : "last"
290 	)
291 );
292 
293 #define DEFINE_RDCH_EVENT(name)						\
294 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
295 				TP_PROTO(				\
296 					const struct rpc_task *task,	\
297 					unsigned int pos,		\
298 					struct rpcrdma_mr *mr,		\
299 					int nsegs			\
300 				),					\
301 				TP_ARGS(task, pos, mr, nsegs))
302 
303 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
304 	TP_PROTO(
305 		const struct rpc_task *task,
306 		struct rpcrdma_mr *mr,
307 		int nsegs
308 	),
309 
310 	TP_ARGS(task, mr, nsegs),
311 
312 	TP_STRUCT__entry(
313 		__field(unsigned int, task_id)
314 		__field(unsigned int, client_id)
315 		__field(int, nents)
316 		__field(u32, handle)
317 		__field(u32, length)
318 		__field(u64, offset)
319 		__field(int, nsegs)
320 	),
321 
322 	TP_fast_assign(
323 		__entry->task_id = task->tk_pid;
324 		__entry->client_id = task->tk_client->cl_clid;
325 		__entry->nents = mr->mr_nents;
326 		__entry->handle = mr->mr_handle;
327 		__entry->length = mr->mr_length;
328 		__entry->offset = mr->mr_offset;
329 		__entry->nsegs = nsegs;
330 	),
331 
332 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
333 		  " %u@0x%016llx:0x%08x (%s)",
334 		__entry->task_id, __entry->client_id,
335 		__entry->length, (unsigned long long)__entry->offset,
336 		__entry->handle,
337 		__entry->nents < __entry->nsegs ? "more" : "last"
338 	)
339 );
340 
341 #define DEFINE_WRCH_EVENT(name)						\
342 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
343 				TP_PROTO(				\
344 					const struct rpc_task *task,	\
345 					struct rpcrdma_mr *mr,		\
346 					int nsegs			\
347 				),					\
348 				TP_ARGS(task, mr, nsegs))
349 
350 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
351 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
352 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
353 TRACE_DEFINE_ENUM(DMA_NONE);
354 
355 #define xprtrdma_show_direction(x)					\
356 		__print_symbolic(x,					\
357 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
358 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
359 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
360 				{ DMA_NONE, "NONE" })
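/*
 * TRACE_DEFINE_ENUM() exports the numeric values of the DMA direction
 * enum so that user-space tools parsing the event format strings can
 * resolve the __print_symbolic() table in xprtrdma_show_direction();
 * in trace output the direction then appears as "TO_DEVICE",
 * "FROM_DEVICE", and so on rather than as a raw number.
 */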
361 
362 DECLARE_EVENT_CLASS(xprtrdma_mr_class,
363 	TP_PROTO(
364 		const struct rpcrdma_mr *mr
365 	),
366 
367 	TP_ARGS(mr),
368 
369 	TP_STRUCT__entry(
370 		__field(unsigned int, task_id)
371 		__field(unsigned int, client_id)
372 		__field(u32, mr_id)
373 		__field(int, nents)
374 		__field(u32, handle)
375 		__field(u32, length)
376 		__field(u64, offset)
377 		__field(u32, dir)
378 	),
379 
380 	TP_fast_assign(
381 		const struct rpcrdma_req *req = mr->mr_req;
382 
383 		if (req) {
384 			const struct rpc_task *task = req->rl_slot.rq_task;
385 
386 			__entry->task_id = task->tk_pid;
387 			__entry->client_id = task->tk_client->cl_clid;
388 		} else {
389 			__entry->task_id = 0;
390 			__entry->client_id = -1;
391 		}
392 		__entry->mr_id  = mr->mr_ibmr->res.id;
393 		__entry->nents  = mr->mr_nents;
394 		__entry->handle = mr->mr_handle;
395 		__entry->length = mr->mr_length;
396 		__entry->offset = mr->mr_offset;
397 		__entry->dir    = mr->mr_dir;
398 	),
399 
400 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
401 		  " mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
402 		__entry->task_id, __entry->client_id,
403 		__entry->mr_id, __entry->nents, __entry->length,
404 		(unsigned long long)__entry->offset, __entry->handle,
405 		xprtrdma_show_direction(__entry->dir)
406 	)
407 );
408 
409 #define DEFINE_MR_EVENT(name)						\
410 		DEFINE_EVENT(xprtrdma_mr_class,				\
411 				xprtrdma_mr_##name,			\
412 				TP_PROTO(				\
413 					const struct rpcrdma_mr *mr	\
414 				),					\
415 				TP_ARGS(mr))
416 
417 DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
418 	TP_PROTO(
419 		const struct rpcrdma_mr *mr
420 	),
421 
422 	TP_ARGS(mr),
423 
424 	TP_STRUCT__entry(
425 		__field(u32, mr_id)
426 		__field(int, nents)
427 		__field(u32, handle)
428 		__field(u32, length)
429 		__field(u64, offset)
430 		__field(u32, dir)
431 	),
432 
433 	TP_fast_assign(
434 		__entry->mr_id  = mr->mr_ibmr->res.id;
435 		__entry->nents  = mr->mr_nents;
436 		__entry->handle = mr->mr_handle;
437 		__entry->length = mr->mr_length;
438 		__entry->offset = mr->mr_offset;
439 		__entry->dir    = mr->mr_dir;
440 	),
441 
442 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
443 		__entry->mr_id, __entry->nents, __entry->length,
444 		(unsigned long long)__entry->offset, __entry->handle,
445 		xprtrdma_show_direction(__entry->dir)
446 	)
447 );
448 
449 #define DEFINE_ANON_MR_EVENT(name)					\
450 		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
451 				xprtrdma_mr_##name,			\
452 				TP_PROTO(				\
453 					const struct rpcrdma_mr *mr	\
454 				),					\
455 				TP_ARGS(mr))
456 
457 DECLARE_EVENT_CLASS(xprtrdma_callback_class,
458 	TP_PROTO(
459 		const struct rpcrdma_xprt *r_xprt,
460 		const struct rpc_rqst *rqst
461 	),
462 
463 	TP_ARGS(r_xprt, rqst),
464 
465 	TP_STRUCT__entry(
466 		__field(u32, xid)
467 		__string(addr, rpcrdma_addrstr(r_xprt))
468 		__string(port, rpcrdma_portstr(r_xprt))
469 	),
470 
471 	TP_fast_assign(
472 		__entry->xid = be32_to_cpu(rqst->rq_xid);
473 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
474 		__assign_str(port, rpcrdma_portstr(r_xprt));
475 	),
476 
477 	TP_printk("peer=[%s]:%s xid=0x%08x",
478 		__get_str(addr), __get_str(port), __entry->xid
479 	)
480 );
481 
482 #define DEFINE_CALLBACK_EVENT(name)					\
483 		DEFINE_EVENT(xprtrdma_callback_class,			\
484 				xprtrdma_cb_##name,			\
485 				TP_PROTO(				\
486 					const struct rpcrdma_xprt *r_xprt, \
487 					const struct rpc_rqst *rqst	\
488 				),					\
489 				TP_ARGS(r_xprt, rqst))
490 
491 /**
492  ** Connection events
493  **/
494 
495 TRACE_EVENT(xprtrdma_inline_thresh,
496 	TP_PROTO(
497 		const struct rpcrdma_ep *ep
498 	),
499 
500 	TP_ARGS(ep),
501 
502 	TP_STRUCT__entry(
503 		__field(unsigned int, inline_send)
504 		__field(unsigned int, inline_recv)
505 		__field(unsigned int, max_send)
506 		__field(unsigned int, max_recv)
507 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
508 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
509 	),
510 
511 	TP_fast_assign(
512 		const struct rdma_cm_id *id = ep->re_id;
513 
514 		__entry->inline_send = ep->re_inline_send;
515 		__entry->inline_recv = ep->re_inline_recv;
516 		__entry->max_send = ep->re_max_inline_send;
517 		__entry->max_recv = ep->re_max_inline_recv;
518 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
519 		       sizeof(struct sockaddr_in6));
520 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
521 		       sizeof(struct sockaddr_in6));
522 	),
523 
524 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
525 		__entry->srcaddr, __entry->dstaddr,
526 		__entry->inline_send, __entry->inline_recv,
527 		__entry->max_send, __entry->max_recv
528 	)
529 );
530 
531 DEFINE_CONN_EVENT(connect);
532 DEFINE_CONN_EVENT(disconnect);
533 
534 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
535 
536 TRACE_EVENT(xprtrdma_op_connect,
537 	TP_PROTO(
538 		const struct rpcrdma_xprt *r_xprt,
539 		unsigned long delay
540 	),
541 
542 	TP_ARGS(r_xprt, delay),
543 
544 	TP_STRUCT__entry(
545 		__field(unsigned long, delay)
546 		__string(addr, rpcrdma_addrstr(r_xprt))
547 		__string(port, rpcrdma_portstr(r_xprt))
548 	),
549 
550 	TP_fast_assign(
551 		__entry->delay = delay;
552 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
553 		__assign_str(port, rpcrdma_portstr(r_xprt));
554 	),
555 
556 	TP_printk("peer=[%s]:%s delay=%lu",
557 		__get_str(addr), __get_str(port), __entry->delay
558 	)
559 );
560 
562 TRACE_EVENT(xprtrdma_op_set_cto,
563 	TP_PROTO(
564 		const struct rpcrdma_xprt *r_xprt,
565 		unsigned long connect,
566 		unsigned long reconnect
567 	),
568 
569 	TP_ARGS(r_xprt, connect, reconnect),
570 
571 	TP_STRUCT__entry(
572 		__field(unsigned long, connect)
573 		__field(unsigned long, reconnect)
574 		__string(addr, rpcrdma_addrstr(r_xprt))
575 		__string(port, rpcrdma_portstr(r_xprt))
576 	),
577 
578 	TP_fast_assign(
579 		__entry->connect = connect;
580 		__entry->reconnect = reconnect;
581 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
582 		__assign_str(port, rpcrdma_portstr(r_xprt));
583 	),
584 
585 	TP_printk("peer=[%s]:%s connect=%lu reconnect=%lu",
586 		__get_str(addr), __get_str(port),
587 		__entry->connect / HZ, __entry->reconnect / HZ
588 	)
589 );
590 
591 /**
592  ** Call events
593  **/
594 
595 TRACE_EVENT(xprtrdma_createmrs,
596 	TP_PROTO(
597 		const struct rpcrdma_xprt *r_xprt,
598 		unsigned int count
599 	),
600 
601 	TP_ARGS(r_xprt, count),
602 
603 	TP_STRUCT__entry(
604 		__string(addr, rpcrdma_addrstr(r_xprt))
605 		__string(port, rpcrdma_portstr(r_xprt))
606 		__field(unsigned int, count)
607 	),
608 
609 	TP_fast_assign(
610 		__entry->count = count;
611 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
612 		__assign_str(port, rpcrdma_portstr(r_xprt));
613 	),
614 
615 	TP_printk("peer=[%s]:%s created %u MRs",
616 		__get_str(addr), __get_str(port), __entry->count
617 	)
618 );
619 
620 TRACE_EVENT(xprtrdma_nomrs_err,
621 	TP_PROTO(
622 		const struct rpcrdma_xprt *r_xprt,
623 		const struct rpcrdma_req *req
624 	),
625 
626 	TP_ARGS(r_xprt, req),
627 
628 	TP_STRUCT__entry(
629 		__field(unsigned int, task_id)
630 		__field(unsigned int, client_id)
631 		__string(addr, rpcrdma_addrstr(r_xprt))
632 		__string(port, rpcrdma_portstr(r_xprt))
633 	),
634 
635 	TP_fast_assign(
636 		const struct rpc_rqst *rqst = &req->rl_slot;
637 
638 		__entry->task_id = rqst->rq_task->tk_pid;
639 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
640 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
641 		__assign_str(port, rpcrdma_portstr(r_xprt));
642 	),
643 
644 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " peer=[%s]:%s",
645 		__entry->task_id, __entry->client_id,
646 		__get_str(addr), __get_str(port)
647 	)
648 );
649 
650 DEFINE_RDCH_EVENT(read);
651 DEFINE_WRCH_EVENT(write);
652 DEFINE_WRCH_EVENT(reply);
653 DEFINE_WRCH_EVENT(wp);
654 
655 TRACE_DEFINE_ENUM(rpcrdma_noch);
656 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
657 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
658 TRACE_DEFINE_ENUM(rpcrdma_readch);
659 TRACE_DEFINE_ENUM(rpcrdma_areadch);
660 TRACE_DEFINE_ENUM(rpcrdma_writech);
661 TRACE_DEFINE_ENUM(rpcrdma_replych);
662 
663 #define xprtrdma_show_chunktype(x)					\
664 		__print_symbolic(x,					\
665 				{ rpcrdma_noch, "inline" },		\
666 				{ rpcrdma_noch_pullup, "pullup" },	\
667 				{ rpcrdma_noch_mapped, "mapped" },	\
668 				{ rpcrdma_readch, "read list" },	\
669 				{ rpcrdma_areadch, "*read list" },	\
670 				{ rpcrdma_writech, "write list" },	\
671 				{ rpcrdma_replych, "reply chunk" })
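/*
 * These chunk types mirror how xprtrdma marshals each RPC: small
 * messages are sent inline (optionally pulled up into the Send buffer
 * or DMA-mapped in place), larger arguments are conveyed via a Read
 * list, and larger results via a Write list or Reply chunk, in the
 * sense of RFC 8166.
 */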
672 
673 TRACE_EVENT(xprtrdma_marshal,
674 	TP_PROTO(
675 		const struct rpcrdma_req *req,
676 		unsigned int rtype,
677 		unsigned int wtype
678 	),
679 
680 	TP_ARGS(req, rtype, wtype),
681 
682 	TP_STRUCT__entry(
683 		__field(unsigned int, task_id)
684 		__field(unsigned int, client_id)
685 		__field(u32, xid)
686 		__field(unsigned int, hdrlen)
687 		__field(unsigned int, headlen)
688 		__field(unsigned int, pagelen)
689 		__field(unsigned int, taillen)
690 		__field(unsigned int, rtype)
691 		__field(unsigned int, wtype)
692 	),
693 
694 	TP_fast_assign(
695 		const struct rpc_rqst *rqst = &req->rl_slot;
696 
697 		__entry->task_id = rqst->rq_task->tk_pid;
698 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
699 		__entry->xid = be32_to_cpu(rqst->rq_xid);
700 		__entry->hdrlen = req->rl_hdrbuf.len;
701 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
702 		__entry->pagelen = rqst->rq_snd_buf.page_len;
703 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
704 		__entry->rtype = rtype;
705 		__entry->wtype = wtype;
706 	),
707 
708 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
709 		  " xid=0x%08x hdr=%u xdr=%u/%u/%u %s/%s",
710 		__entry->task_id, __entry->client_id, __entry->xid,
711 		__entry->hdrlen,
712 		__entry->headlen, __entry->pagelen, __entry->taillen,
713 		xprtrdma_show_chunktype(__entry->rtype),
714 		xprtrdma_show_chunktype(__entry->wtype)
715 	)
716 );
717 
718 TRACE_EVENT(xprtrdma_marshal_failed,
719 	TP_PROTO(const struct rpc_rqst *rqst,
720 		 int ret
721 	),
722 
723 	TP_ARGS(rqst, ret),
724 
725 	TP_STRUCT__entry(
726 		__field(unsigned int, task_id)
727 		__field(unsigned int, client_id)
728 		__field(u32, xid)
729 		__field(int, ret)
730 	),
731 
732 	TP_fast_assign(
733 		__entry->task_id = rqst->rq_task->tk_pid;
734 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
735 		__entry->xid = be32_to_cpu(rqst->rq_xid);
736 		__entry->ret = ret;
737 	),
738 
739 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
740 		__entry->task_id, __entry->client_id, __entry->xid,
741 		__entry->ret
742 	)
743 );
744 
745 TRACE_EVENT(xprtrdma_prepsend_failed,
746 	TP_PROTO(const struct rpc_rqst *rqst,
747 		 int ret
748 	),
749 
750 	TP_ARGS(rqst, ret),
751 
752 	TP_STRUCT__entry(
753 		__field(unsigned int, task_id)
754 		__field(unsigned int, client_id)
755 		__field(u32, xid)
756 		__field(int, ret)
757 	),
758 
759 	TP_fast_assign(
760 		__entry->task_id = rqst->rq_task->tk_pid;
761 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
762 		__entry->xid = be32_to_cpu(rqst->rq_xid);
763 		__entry->ret = ret;
764 	),
765 
766 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x ret=%d",
767 		__entry->task_id, __entry->client_id, __entry->xid,
768 		__entry->ret
769 	)
770 );
771 
772 TRACE_EVENT(xprtrdma_post_send,
773 	TP_PROTO(
774 		const struct rpcrdma_req *req
775 	),
776 
777 	TP_ARGS(req),
778 
779 	TP_STRUCT__entry(
780 		__field(u32, cq_id)
781 		__field(int, completion_id)
782 		__field(unsigned int, task_id)
783 		__field(unsigned int, client_id)
784 		__field(int, num_sge)
785 		__field(int, signaled)
786 	),
787 
788 	TP_fast_assign(
789 		const struct rpc_rqst *rqst = &req->rl_slot;
790 		const struct rpcrdma_sendctx *sc = req->rl_sendctx;
791 
792 		__entry->cq_id = sc->sc_cid.ci_queue_id;
793 		__entry->completion_id = sc->sc_cid.ci_completion_id;
794 		__entry->task_id = rqst->rq_task->tk_pid;
795 		__entry->client_id = rqst->rq_task->tk_client ?
796 				     rqst->rq_task->tk_client->cl_clid : -1;
797 		__entry->num_sge = req->rl_wr.num_sge;
798 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
799 	),
800 
801 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u cid=%d (%d SGE%s) %s",
802 		__entry->task_id, __entry->client_id,
803 		__entry->cq_id, __entry->completion_id,
804 		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
805 		(__entry->signaled ? "signaled" : "")
806 	)
807 );
808 
809 TRACE_EVENT(xprtrdma_post_send_err,
810 	TP_PROTO(
811 		const struct rpcrdma_xprt *r_xprt,
812 		const struct rpcrdma_req *req,
813 		int rc
814 	),
815 
816 	TP_ARGS(r_xprt, req, rc),
817 
818 	TP_STRUCT__entry(
819 		__field(u32, cq_id)
820 		__field(unsigned int, task_id)
821 		__field(unsigned int, client_id)
822 		__field(int, rc)
823 	),
824 
825 	TP_fast_assign(
826 		const struct rpc_rqst *rqst = &req->rl_slot;
827 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
828 
829 		__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
830 		__entry->task_id = rqst->rq_task->tk_pid;
831 		__entry->client_id = rqst->rq_task->tk_client ?
832 				     rqst->rq_task->tk_client->cl_clid : -1;
833 		__entry->rc = rc;
834 	),
835 
836 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " cq.id=%u rc=%d",
837 		__entry->task_id, __entry->client_id,
838 		__entry->cq_id, __entry->rc
839 	)
840 );
841 
842 TRACE_EVENT(xprtrdma_post_recv,
843 	TP_PROTO(
844 		const struct rpcrdma_rep *rep
845 	),
846 
847 	TP_ARGS(rep),
848 
849 	TP_STRUCT__entry(
850 		__field(u32, cq_id)
851 		__field(int, completion_id)
852 	),
853 
854 	TP_fast_assign(
855 		__entry->cq_id = rep->rr_cid.ci_queue_id;
856 		__entry->completion_id = rep->rr_cid.ci_completion_id;
857 	),
858 
859 	TP_printk("cq.id=%u cid=%d",
860 		__entry->cq_id, __entry->completion_id
861 	)
862 );
863 
864 TRACE_EVENT(xprtrdma_post_recvs,
865 	TP_PROTO(
866 		const struct rpcrdma_xprt *r_xprt,
867 		unsigned int count
868 	),
869 
870 	TP_ARGS(r_xprt, count),
871 
872 	TP_STRUCT__entry(
873 		__field(u32, cq_id)
874 		__field(unsigned int, count)
875 		__field(int, posted)
876 		__string(addr, rpcrdma_addrstr(r_xprt))
877 		__string(port, rpcrdma_portstr(r_xprt))
878 	),
879 
880 	TP_fast_assign(
881 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
882 
883 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
884 		__entry->count = count;
885 		__entry->posted = ep->re_receive_count;
886 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
887 		__assign_str(port, rpcrdma_portstr(r_xprt));
888 	),
889 
890 	TP_printk("peer=[%s]:%s cq.id=%u %u new recvs, %d active",
891 		__get_str(addr), __get_str(port), __entry->cq_id,
892 		__entry->count, __entry->posted
893 	)
894 );
895 
896 TRACE_EVENT(xprtrdma_post_recvs_err,
897 	TP_PROTO(
898 		const struct rpcrdma_xprt *r_xprt,
899 		int status
900 	),
901 
902 	TP_ARGS(r_xprt, status),
903 
904 	TP_STRUCT__entry(
905 		__field(u32, cq_id)
906 		__field(int, status)
907 		__string(addr, rpcrdma_addrstr(r_xprt))
908 		__string(port, rpcrdma_portstr(r_xprt))
909 	),
910 
911 	TP_fast_assign(
912 		const struct rpcrdma_ep *ep = r_xprt->rx_ep;
913 
914 		__entry->cq_id = ep->re_attr.recv_cq->res.id;
915 		__entry->status = status;
916 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
917 		__assign_str(port, rpcrdma_portstr(r_xprt));
918 	),
919 
920 	TP_printk("peer=[%s]:%s cq.id=%u rc=%d",
921 		__get_str(addr), __get_str(port), __entry->cq_id,
922 		__entry->status
923 	)
924 );
925 
926 TRACE_EVENT(xprtrdma_post_linv_err,
927 	TP_PROTO(
928 		const struct rpcrdma_req *req,
929 		int status
930 	),
931 
932 	TP_ARGS(req, status),
933 
934 	TP_STRUCT__entry(
935 		__field(unsigned int, task_id)
936 		__field(unsigned int, client_id)
937 		__field(int, status)
938 	),
939 
940 	TP_fast_assign(
941 		const struct rpc_task *task = req->rl_slot.rq_task;
942 
943 		__entry->task_id = task->tk_pid;
944 		__entry->client_id = task->tk_client->cl_clid;
945 		__entry->status = status;
946 	),
947 
948 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " status=%d",
949 		__entry->task_id, __entry->client_id, __entry->status
950 	)
951 );
952 
953 /**
954  ** Completion events
955  **/
956 
957 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
958 
959 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
960 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_fastreg);
961 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li);
962 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_wake);
963 DEFINE_MR_COMPLETION_EVENT(xprtrdma_wc_li_done);
964 
965 TRACE_EVENT(xprtrdma_frwr_alloc,
966 	TP_PROTO(
967 		const struct rpcrdma_mr *mr,
968 		int rc
969 	),
970 
971 	TP_ARGS(mr, rc),
972 
973 	TP_STRUCT__entry(
974 		__field(u32, mr_id)
975 		__field(int, rc)
976 	),
977 
978 	TP_fast_assign(
979 		__entry->mr_id = mr->mr_ibmr->res.id;
980 		__entry->rc = rc;
981 	),
982 
983 	TP_printk("mr.id=%u: rc=%d",
984 		__entry->mr_id, __entry->rc
985 	)
986 );
987 
988 TRACE_EVENT(xprtrdma_frwr_dereg,
989 	TP_PROTO(
990 		const struct rpcrdma_mr *mr,
991 		int rc
992 	),
993 
994 	TP_ARGS(mr, rc),
995 
996 	TP_STRUCT__entry(
997 		__field(u32, mr_id)
998 		__field(int, nents)
999 		__field(u32, handle)
1000 		__field(u32, length)
1001 		__field(u64, offset)
1002 		__field(u32, dir)
1003 		__field(int, rc)
1004 	),
1005 
1006 	TP_fast_assign(
1007 		__entry->mr_id  = mr->mr_ibmr->res.id;
1008 		__entry->nents  = mr->mr_nents;
1009 		__entry->handle = mr->mr_handle;
1010 		__entry->length = mr->mr_length;
1011 		__entry->offset = mr->mr_offset;
1012 		__entry->dir    = mr->mr_dir;
1013 		__entry->rc	= rc;
1014 	),
1015 
1016 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
1017 		__entry->mr_id, __entry->nents, __entry->length,
1018 		(unsigned long long)__entry->offset, __entry->handle,
1019 		xprtrdma_show_direction(__entry->dir),
1020 		__entry->rc
1021 	)
1022 );
1023 
1024 TRACE_EVENT(xprtrdma_frwr_sgerr,
1025 	TP_PROTO(
1026 		const struct rpcrdma_mr *mr,
1027 		int sg_nents
1028 	),
1029 
1030 	TP_ARGS(mr, sg_nents),
1031 
1032 	TP_STRUCT__entry(
1033 		__field(u32, mr_id)
1034 		__field(u64, addr)
1035 		__field(u32, dir)
1036 		__field(int, nents)
1037 	),
1038 
1039 	TP_fast_assign(
1040 		__entry->mr_id = mr->mr_ibmr->res.id;
1041 		__entry->addr = mr->mr_sg->dma_address;
1042 		__entry->dir = mr->mr_dir;
1043 		__entry->nents = sg_nents;
1044 	),
1045 
1046 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
1047 		__entry->mr_id, __entry->addr,
1048 		xprtrdma_show_direction(__entry->dir),
1049 		__entry->nents
1050 	)
1051 );
1052 
1053 TRACE_EVENT(xprtrdma_frwr_maperr,
1054 	TP_PROTO(
1055 		const struct rpcrdma_mr *mr,
1056 		int num_mapped
1057 	),
1058 
1059 	TP_ARGS(mr, num_mapped),
1060 
1061 	TP_STRUCT__entry(
1062 		__field(u32, mr_id)
1063 		__field(u64, addr)
1064 		__field(u32, dir)
1065 		__field(int, num_mapped)
1066 		__field(int, nents)
1067 	),
1068 
1069 	TP_fast_assign(
1070 		__entry->mr_id = mr->mr_ibmr->res.id;
1071 		__entry->addr = mr->mr_sg->dma_address;
1072 		__entry->dir = mr->mr_dir;
1073 		__entry->num_mapped = num_mapped;
1074 		__entry->nents = mr->mr_nents;
1075 	),
1076 
1077 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
1078 		__entry->mr_id, __entry->addr,
1079 		xprtrdma_show_direction(__entry->dir),
1080 		__entry->num_mapped, __entry->nents
1081 	)
1082 );
1083 
1084 DEFINE_MR_EVENT(fastreg);
1085 DEFINE_MR_EVENT(localinv);
1086 DEFINE_MR_EVENT(reminv);
1087 DEFINE_MR_EVENT(map);
1088 
1089 DEFINE_ANON_MR_EVENT(unmap);
1090 
1091 TRACE_EVENT(xprtrdma_dma_maperr,
1092 	TP_PROTO(
1093 		u64 addr
1094 	),
1095 
1096 	TP_ARGS(addr),
1097 
1098 	TP_STRUCT__entry(
1099 		__field(u64, addr)
1100 	),
1101 
1102 	TP_fast_assign(
1103 		__entry->addr = addr;
1104 	),
1105 
1106 	TP_printk("dma addr=0x%llx", __entry->addr)
1107 );
1108 
1109 /**
1110  ** Reply events
1111  **/
1112 
1113 TRACE_EVENT(xprtrdma_reply,
1114 	TP_PROTO(
1115 		const struct rpc_task *task,
1116 		const struct rpcrdma_rep *rep,
1117 		unsigned int credits
1118 	),
1119 
1120 	TP_ARGS(task, rep, credits),
1121 
1122 	TP_STRUCT__entry(
1123 		__field(unsigned int, task_id)
1124 		__field(unsigned int, client_id)
1125 		__field(u32, xid)
1126 		__field(unsigned int, credits)
1127 	),
1128 
1129 	TP_fast_assign(
1130 		__entry->task_id = task->tk_pid;
1131 		__entry->client_id = task->tk_client->cl_clid;
1132 		__entry->xid = be32_to_cpu(rep->rr_xid);
1133 		__entry->credits = credits;
1134 	),
1135 
1136 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x credits=%u",
1137 		__entry->task_id, __entry->client_id, __entry->xid,
1138 		__entry->credits
1139 	)
1140 );
1141 
1142 DEFINE_REPLY_EVENT(vers);
1143 DEFINE_REPLY_EVENT(rqst);
1144 DEFINE_REPLY_EVENT(short);
1145 DEFINE_REPLY_EVENT(hdr);
1146 
1147 TRACE_EVENT(xprtrdma_err_vers,
1148 	TP_PROTO(
1149 		const struct rpc_rqst *rqst,
1150 		__be32 *min,
1151 		__be32 *max
1152 	),
1153 
1154 	TP_ARGS(rqst, min, max),
1155 
1156 	TP_STRUCT__entry(
1157 		__field(unsigned int, task_id)
1158 		__field(unsigned int, client_id)
1159 		__field(u32, xid)
1160 		__field(u32, min)
1161 		__field(u32, max)
1162 	),
1163 
1164 	TP_fast_assign(
1165 		__entry->task_id = rqst->rq_task->tk_pid;
1166 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1167 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1168 		__entry->min = be32_to_cpup(min);
1169 		__entry->max = be32_to_cpup(max);
1170 	),
1171 
1172 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x versions=[%u, %u]",
1173 		__entry->task_id, __entry->client_id, __entry->xid,
1174 		__entry->min, __entry->max
1175 	)
1176 );
1177 
1178 TRACE_EVENT(xprtrdma_err_chunk,
1179 	TP_PROTO(
1180 		const struct rpc_rqst *rqst
1181 	),
1182 
1183 	TP_ARGS(rqst),
1184 
1185 	TP_STRUCT__entry(
1186 		__field(unsigned int, task_id)
1187 		__field(unsigned int, client_id)
1188 		__field(u32, xid)
1189 	),
1190 
1191 	TP_fast_assign(
1192 		__entry->task_id = rqst->rq_task->tk_pid;
1193 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1194 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1195 	),
1196 
1197 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x",
1198 		__entry->task_id, __entry->client_id, __entry->xid
1199 	)
1200 );
1201 
1202 TRACE_EVENT(xprtrdma_err_unrecognized,
1203 	TP_PROTO(
1204 		const struct rpc_rqst *rqst,
1205 		__be32 *procedure
1206 	),
1207 
1208 	TP_ARGS(rqst, procedure),
1209 
1210 	TP_STRUCT__entry(
1211 		__field(unsigned int, task_id)
1212 		__field(unsigned int, client_id)
1213 		__field(u32, xid)
1214 		__field(u32, procedure)
1215 	),
1216 
1217 	TP_fast_assign(
1218 		__entry->task_id = rqst->rq_task->tk_pid;
1219 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
1220 		__entry->procedure = be32_to_cpup(procedure);
1221 	),
1222 
1223 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x procedure=%u",
1224 		__entry->task_id, __entry->client_id, __entry->xid,
1225 		__entry->procedure
1226 	)
1227 );
1228 
1229 TRACE_EVENT(xprtrdma_fixup,
1230 	TP_PROTO(
1231 		const struct rpc_rqst *rqst,
1232 		unsigned long fixup
1233 	),
1234 
1235 	TP_ARGS(rqst, fixup),
1236 
1237 	TP_STRUCT__entry(
1238 		__field(unsigned int, task_id)
1239 		__field(unsigned int, client_id)
1240 		__field(unsigned long, fixup)
1241 		__field(size_t, headlen)
1242 		__field(unsigned int, pagelen)
1243 		__field(size_t, taillen)
1244 	),
1245 
1246 	TP_fast_assign(
1247 		__entry->task_id = rqst->rq_task->tk_pid;
1248 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1249 		__entry->fixup = fixup;
1250 		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1251 		__entry->pagelen = rqst->rq_rcv_buf.page_len;
1252 		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1253 	),
1254 
1255 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " fixup=%lu xdr=%zu/%u/%zu",
1256 		__entry->task_id, __entry->client_id, __entry->fixup,
1257 		__entry->headlen, __entry->pagelen, __entry->taillen
1258 	)
1259 );
1260 
1261 TRACE_EVENT(xprtrdma_decode_seg,
1262 	TP_PROTO(
1263 		u32 handle,
1264 		u32 length,
1265 		u64 offset
1266 	),
1267 
1268 	TP_ARGS(handle, length, offset),
1269 
1270 	TP_STRUCT__entry(
1271 		__field(u32, handle)
1272 		__field(u32, length)
1273 		__field(u64, offset)
1274 	),
1275 
1276 	TP_fast_assign(
1277 		__entry->handle = handle;
1278 		__entry->length = length;
1279 		__entry->offset = offset;
1280 	),
1281 
1282 	TP_printk("%u@0x%016llx:0x%08x",
1283 		__entry->length, (unsigned long long)__entry->offset,
1284 		__entry->handle
1285 	)
1286 );
1287 
1288 TRACE_EVENT(xprtrdma_mrs_zap,
1289 	TP_PROTO(
1290 		const struct rpc_task *task
1291 	),
1292 
1293 	TP_ARGS(task),
1294 
1295 	TP_STRUCT__entry(
1296 		__field(unsigned int, task_id)
1297 		__field(unsigned int, client_id)
1298 	),
1299 
1300 	TP_fast_assign(
1301 		__entry->task_id = task->tk_pid;
1302 		__entry->client_id = task->tk_client->cl_clid;
1303 	),
1304 
1305 	TP_printk(SUNRPC_TRACE_TASK_SPECIFIER,
1306 		__entry->task_id, __entry->client_id
1307 	)
1308 );
1309 
1310 /**
1311  ** Callback events
1312  **/
1313 
1314 TRACE_EVENT(xprtrdma_cb_setup,
1315 	TP_PROTO(
1316 		const struct rpcrdma_xprt *r_xprt,
1317 		unsigned int reqs
1318 	),
1319 
1320 	TP_ARGS(r_xprt, reqs),
1321 
1322 	TP_STRUCT__entry(
1323 		__field(unsigned int, reqs)
1324 		__string(addr, rpcrdma_addrstr(r_xprt))
1325 		__string(port, rpcrdma_portstr(r_xprt))
1326 	),
1327 
1328 	TP_fast_assign(
1329 		__entry->reqs = reqs;
1330 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1331 		__assign_str(port, rpcrdma_portstr(r_xprt));
1332 	),
1333 
1334 	TP_printk("peer=[%s]:%s %u reqs",
1335 		__get_str(addr), __get_str(port), __entry->reqs
1336 	)
1337 );
1338 
1339 DEFINE_CALLBACK_EVENT(call);
1340 DEFINE_CALLBACK_EVENT(reply);
1341 
1342 /**
1343  ** Server-side RPC/RDMA events
1344  **/
1345 
1346 DECLARE_EVENT_CLASS(svcrdma_accept_class,
1347 	TP_PROTO(
1348 		const struct svcxprt_rdma *rdma,
1349 		long status
1350 	),
1351 
1352 	TP_ARGS(rdma, status),
1353 
1354 	TP_STRUCT__entry(
1355 		__field(long, status)
1356 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1357 	),
1358 
1359 	TP_fast_assign(
1360 		__entry->status = status;
1361 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1362 	),
1363 
1364 	TP_printk("addr=%s status=%ld",
1365 		__get_str(addr), __entry->status
1366 	)
1367 );
1368 
1369 #define DEFINE_ACCEPT_EVENT(name) \
1370 		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
1371 				TP_PROTO( \
1372 					const struct svcxprt_rdma *rdma, \
1373 					long status \
1374 				), \
1375 				TP_ARGS(rdma, status))
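/*
 * Each DEFINE_ACCEPT_EVENT(name) below generates an svcrdma_<name>_err
 * tracepoint; DEFINE_ACCEPT_EVENT(pd), for instance, yields
 * trace_svcrdma_pd_err(rdma, status), fired when protection domain
 * allocation fails while accepting a connection (call sites are under
 * net/sunrpc/xprtrdma/).
 */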
1376 
1377 DEFINE_ACCEPT_EVENT(pd);
1378 DEFINE_ACCEPT_EVENT(qp);
1379 DEFINE_ACCEPT_EVENT(fabric);
1380 DEFINE_ACCEPT_EVENT(initdepth);
1381 DEFINE_ACCEPT_EVENT(accept);
1382 
1383 TRACE_DEFINE_ENUM(RDMA_MSG);
1384 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1385 TRACE_DEFINE_ENUM(RDMA_MSGP);
1386 TRACE_DEFINE_ENUM(RDMA_DONE);
1387 TRACE_DEFINE_ENUM(RDMA_ERROR);
1388 
1389 #define show_rpcrdma_proc(x)						\
1390 		__print_symbolic(x,					\
1391 				{ RDMA_MSG, "RDMA_MSG" },		\
1392 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1393 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1394 				{ RDMA_DONE, "RDMA_DONE" },		\
1395 				{ RDMA_ERROR, "RDMA_ERROR" })
1396 
1397 TRACE_EVENT(svcrdma_decode_rqst,
1398 	TP_PROTO(
1399 		const struct svc_rdma_recv_ctxt *ctxt,
1400 		__be32 *p,
1401 		unsigned int hdrlen
1402 	),
1403 
1404 	TP_ARGS(ctxt, p, hdrlen),
1405 
1406 	TP_STRUCT__entry(
1407 		__field(u32, cq_id)
1408 		__field(int, completion_id)
1409 		__field(u32, xid)
1410 		__field(u32, vers)
1411 		__field(u32, proc)
1412 		__field(u32, credits)
1413 		__field(unsigned int, hdrlen)
1414 	),
1415 
1416 	TP_fast_assign(
1417 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1418 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1419 		__entry->xid = be32_to_cpup(p++);
1420 		__entry->vers = be32_to_cpup(p++);
1421 		__entry->credits = be32_to_cpup(p++);
1422 		__entry->proc = be32_to_cpup(p);
1423 		__entry->hdrlen = hdrlen;
1424 	),
1425 
1426 	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1427 		__entry->cq_id, __entry->completion_id,
1428 		__entry->xid, __entry->vers, __entry->credits,
1429 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1430 );
1431 
1432 TRACE_EVENT(svcrdma_decode_short_err,
1433 	TP_PROTO(
1434 		const struct svc_rdma_recv_ctxt *ctxt,
1435 		unsigned int hdrlen
1436 	),
1437 
1438 	TP_ARGS(ctxt, hdrlen),
1439 
1440 	TP_STRUCT__entry(
1441 		__field(u32, cq_id)
1442 		__field(int, completion_id)
1443 		__field(unsigned int, hdrlen)
1444 	),
1445 
1446 	TP_fast_assign(
1447 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1448 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1449 		__entry->hdrlen = hdrlen;
1450 	),
1451 
1452 	TP_printk("cq.id=%u cid=%d hdrlen=%u",
1453 		__entry->cq_id, __entry->completion_id,
1454 		__entry->hdrlen)
1455 );
1456 
1457 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1458 	TP_PROTO(
1459 		const struct svc_rdma_recv_ctxt *ctxt,
1460 		__be32 *p
1461 	),
1462 
1463 	TP_ARGS(ctxt, p),
1464 
1465 	TP_STRUCT__entry(
1466 		__field(u32, cq_id)
1467 		__field(int, completion_id)
1468 		__field(u32, xid)
1469 		__field(u32, vers)
1470 		__field(u32, proc)
1471 		__field(u32, credits)
1472 	),
1473 
1474 	TP_fast_assign(
1475 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1476 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1477 		__entry->xid = be32_to_cpup(p++);
1478 		__entry->vers = be32_to_cpup(p++);
1479 		__entry->credits = be32_to_cpup(p++);
1480 		__entry->proc = be32_to_cpup(p);
1481 	),
1482 
1483 	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
1484 		__entry->cq_id, __entry->completion_id,
1485 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1486 );
1487 
1488 #define DEFINE_BADREQ_EVENT(name)					\
1489 		DEFINE_EVENT(svcrdma_badreq_event,			\
1490 			     svcrdma_decode_##name##_err,		\
1491 				TP_PROTO(				\
1492 					const struct svc_rdma_recv_ctxt *ctxt,	\
1493 					__be32 *p			\
1494 				),					\
1495 				TP_ARGS(ctxt, p))
1496 
1497 DEFINE_BADREQ_EVENT(badvers);
1498 DEFINE_BADREQ_EVENT(drop);
1499 DEFINE_BADREQ_EVENT(badproc);
1500 DEFINE_BADREQ_EVENT(parse);
1501 
1502 TRACE_EVENT(svcrdma_encode_wseg,
1503 	TP_PROTO(
1504 		const struct svc_rdma_send_ctxt *ctxt,
1505 		u32 segno,
1506 		u32 handle,
1507 		u32 length,
1508 		u64 offset
1509 	),
1510 
1511 	TP_ARGS(ctxt, segno, handle, length, offset),
1512 
1513 	TP_STRUCT__entry(
1514 		__field(u32, cq_id)
1515 		__field(int, completion_id)
1516 		__field(u32, segno)
1517 		__field(u32, handle)
1518 		__field(u32, length)
1519 		__field(u64, offset)
1520 	),
1521 
1522 	TP_fast_assign(
1523 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1524 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1525 		__entry->segno = segno;
1526 		__entry->handle = handle;
1527 		__entry->length = length;
1528 		__entry->offset = offset;
1529 	),
1530 
1531 	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1532 		__entry->cq_id, __entry->completion_id,
1533 		__entry->segno, __entry->length,
1534 		(unsigned long long)__entry->offset, __entry->handle
1535 	)
1536 );
1537 
1538 TRACE_EVENT(svcrdma_decode_rseg,
1539 	TP_PROTO(
1540 		const struct rpc_rdma_cid *cid,
1541 		const struct svc_rdma_chunk *chunk,
1542 		const struct svc_rdma_segment *segment
1543 	),
1544 
1545 	TP_ARGS(cid, chunk, segment),
1546 
1547 	TP_STRUCT__entry(
1548 		__field(u32, cq_id)
1549 		__field(int, completion_id)
1550 		__field(u32, segno)
1551 		__field(u32, position)
1552 		__field(u32, handle)
1553 		__field(u32, length)
1554 		__field(u64, offset)
1555 	),
1556 
1557 	TP_fast_assign(
1558 		__entry->cq_id = cid->ci_queue_id;
1559 		__entry->completion_id = cid->ci_completion_id;
1560 		__entry->segno = chunk->ch_segcount;
1561 		__entry->position = chunk->ch_position;
1562 		__entry->handle = segment->rs_handle;
1563 		__entry->length = segment->rs_length;
1564 		__entry->offset = segment->rs_offset;
1565 	),
1566 
1567 	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
1568 		__entry->cq_id, __entry->completion_id,
1569 		__entry->segno, __entry->position, __entry->length,
1570 		(unsigned long long)__entry->offset, __entry->handle
1571 	)
1572 );
1573 
1574 TRACE_EVENT(svcrdma_decode_wseg,
1575 	TP_PROTO(
1576 		const struct rpc_rdma_cid *cid,
1577 		const struct svc_rdma_chunk *chunk,
1578 		u32 segno
1579 	),
1580 
1581 	TP_ARGS(cid, chunk, segno),
1582 
1583 	TP_STRUCT__entry(
1584 		__field(u32, cq_id)
1585 		__field(int, completion_id)
1586 		__field(u32, segno)
1587 		__field(u32, handle)
1588 		__field(u32, length)
1589 		__field(u64, offset)
1590 	),
1591 
1592 	TP_fast_assign(
1593 		const struct svc_rdma_segment *segment =
1594 			&chunk->ch_segments[segno];
1595 
1596 		__entry->cq_id = cid->ci_queue_id;
1597 		__entry->completion_id = cid->ci_completion_id;
1598 		__entry->segno = segno;
1599 		__entry->handle = segment->rs_handle;
1600 		__entry->length = segment->rs_length;
1601 		__entry->offset = segment->rs_offset;
1602 	),
1603 
1604 	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1605 		__entry->cq_id, __entry->completion_id,
1606 		__entry->segno, __entry->length,
1607 		(unsigned long long)__entry->offset, __entry->handle
1608 	)
1609 );
1610 
1611 DECLARE_EVENT_CLASS(svcrdma_error_event,
1612 	TP_PROTO(
1613 		__be32 xid
1614 	),
1615 
1616 	TP_ARGS(xid),
1617 
1618 	TP_STRUCT__entry(
1619 		__field(u32, xid)
1620 	),
1621 
1622 	TP_fast_assign(
1623 		__entry->xid = be32_to_cpu(xid);
1624 	),
1625 
1626 	TP_printk("xid=0x%08x",
1627 		__entry->xid
1628 	)
1629 );
1630 
1631 #define DEFINE_ERROR_EVENT(name)					\
1632 		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1633 				TP_PROTO(				\
1634 					__be32 xid			\
1635 				),					\
1636 				TP_ARGS(xid))
1637 
1638 DEFINE_ERROR_EVENT(vers);
1639 DEFINE_ERROR_EVENT(chunk);
1640 
1641 /**
1642  ** Server-side RDMA API events
1643  **/
1644 
1645 DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
1646 	TP_PROTO(
1647 		const struct svcxprt_rdma *rdma,
1648 		u64 dma_addr,
1649 		u32 length
1650 	),
1651 
1652 	TP_ARGS(rdma, dma_addr, length),
1653 
1654 	TP_STRUCT__entry(
1655 		__field(u64, dma_addr)
1656 		__field(u32, length)
1657 		__string(device, rdma->sc_cm_id->device->name)
1658 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1659 	),
1660 
1661 	TP_fast_assign(
1662 		__entry->dma_addr = dma_addr;
1663 		__entry->length = length;
1664 		__assign_str(device, rdma->sc_cm_id->device->name);
1665 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1666 	),
1667 
1668 	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
1669 		__get_str(addr), __get_str(device),
1670 		__entry->dma_addr, __entry->length
1671 	)
1672 );
1673 
1674 #define DEFINE_SVC_DMA_EVENT(name)					\
1675 		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
1676 				TP_PROTO(				\
1677 					const struct svcxprt_rdma *rdma,\
1678 					u64 dma_addr,			\
1679 					u32 length			\
1680 				),					\
1681 				TP_ARGS(rdma, dma_addr, length))
1682 
1683 DEFINE_SVC_DMA_EVENT(dma_map_page);
1684 DEFINE_SVC_DMA_EVENT(dma_map_err);
1685 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1686 
1687 TRACE_EVENT(svcrdma_dma_map_rw_err,
1688 	TP_PROTO(
1689 		const struct svcxprt_rdma *rdma,
1690 		unsigned int nents,
1691 		int status
1692 	),
1693 
1694 	TP_ARGS(rdma, nents, status),
1695 
1696 	TP_STRUCT__entry(
1697 		__field(int, status)
1698 		__field(unsigned int, nents)
1699 		__string(device, rdma->sc_cm_id->device->name)
1700 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1701 	),
1702 
1703 	TP_fast_assign(
1704 		__entry->status = status;
1705 		__entry->nents = nents;
1706 		__assign_str(device, rdma->sc_cm_id->device->name);
1707 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1708 	),
1709 
1710 	TP_printk("addr=%s device=%s nents=%u status=%d",
1711 		__get_str(addr), __get_str(device), __entry->nents,
1712 		__entry->status
1713 	)
1714 );
1715 
1716 TRACE_EVENT(svcrdma_no_rwctx_err,
1717 	TP_PROTO(
1718 		const struct svcxprt_rdma *rdma,
1719 		unsigned int num_sges
1720 	),
1721 
1722 	TP_ARGS(rdma, num_sges),
1723 
1724 	TP_STRUCT__entry(
1725 		__field(unsigned int, num_sges)
1726 		__string(device, rdma->sc_cm_id->device->name)
1727 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1728 	),
1729 
1730 	TP_fast_assign(
1731 		__entry->num_sges = num_sges;
1732 		__assign_str(device, rdma->sc_cm_id->device->name);
1733 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1734 	),
1735 
1736 	TP_printk("addr=%s device=%s num_sges=%d",
1737 		__get_str(addr), __get_str(device), __entry->num_sges
1738 	)
1739 );
1740 
1741 TRACE_EVENT(svcrdma_page_overrun_err,
1742 	TP_PROTO(
1743 		const struct svcxprt_rdma *rdma,
1744 		const struct svc_rqst *rqst,
1745 		unsigned int pageno
1746 	),
1747 
1748 	TP_ARGS(rdma, rqst, pageno),
1749 
1750 	TP_STRUCT__entry(
1751 		__field(unsigned int, pageno)
1752 		__field(u32, xid)
1753 		__string(device, rdma->sc_cm_id->device->name)
1754 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1755 	),
1756 
1757 	TP_fast_assign(
1758 		__entry->pageno = pageno;
1759 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1760 		__assign_str(device, rdma->sc_cm_id->device->name);
1761 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1762 	),
1763 
1764 	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
1765 		__get_str(device), __entry->xid, __entry->pageno
1766 	)
1767 );
1768 
1769 TRACE_EVENT(svcrdma_small_wrch_err,
1770 	TP_PROTO(
1771 		const struct svcxprt_rdma *rdma,
1772 		unsigned int remaining,
1773 		unsigned int seg_no,
1774 		unsigned int num_segs
1775 	),
1776 
1777 	TP_ARGS(rdma, remaining, seg_no, num_segs),
1778 
1779 	TP_STRUCT__entry(
1780 		__field(unsigned int, remaining)
1781 		__field(unsigned int, seg_no)
1782 		__field(unsigned int, num_segs)
1783 		__string(device, rdma->sc_cm_id->device->name)
1784 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1785 	),
1786 
1787 	TP_fast_assign(
1788 		__entry->remaining = remaining;
1789 		__entry->seg_no = seg_no;
1790 		__entry->num_segs = num_segs;
1791 		__assign_str(device, rdma->sc_cm_id->device->name);
1792 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1793 	),
1794 
1795 	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
1796 		__get_str(addr), __get_str(device), __entry->remaining,
1797 		__entry->seg_no, __entry->num_segs
1798 	)
1799 );
1800 
1801 TRACE_EVENT(svcrdma_send_pullup,
1802 	TP_PROTO(
1803 		const struct svc_rdma_send_ctxt *ctxt,
1804 		unsigned int msglen
1805 	),
1806 
1807 	TP_ARGS(ctxt, msglen),
1808 
1809 	TP_STRUCT__entry(
1810 		__field(u32, cq_id)
1811 		__field(int, completion_id)
1812 		__field(unsigned int, hdrlen)
1813 		__field(unsigned int, msglen)
1814 	),
1815 
1816 	TP_fast_assign(
1817 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1818 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1819 		__entry->hdrlen = ctxt->sc_hdrbuf.len;
1820 		__entry->msglen = msglen;
1821 	),
1822 
1823 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1824 		__entry->cq_id, __entry->completion_id,
1825 		__entry->hdrlen, __entry->msglen,
1826 		__entry->hdrlen + __entry->msglen)
1827 );
1828 
1829 TRACE_EVENT(svcrdma_send_err,
1830 	TP_PROTO(
1831 		const struct svc_rqst *rqst,
1832 		int status
1833 	),
1834 
1835 	TP_ARGS(rqst, status),
1836 
1837 	TP_STRUCT__entry(
1838 		__field(int, status)
1839 		__field(u32, xid)
1840 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1841 	),
1842 
1843 	TP_fast_assign(
1844 		__entry->status = status;
1845 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1846 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1847 	),
1848 
1849 	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
1850 		__entry->xid, __entry->status
1851 	)
1852 );
1853 
1854 TRACE_EVENT(svcrdma_post_send,
1855 	TP_PROTO(
1856 		const struct svc_rdma_send_ctxt *ctxt
1857 	),
1858 
1859 	TP_ARGS(ctxt),
1860 
1861 	TP_STRUCT__entry(
1862 		__field(u32, cq_id)
1863 		__field(int, completion_id)
1864 		__field(unsigned int, num_sge)
1865 		__field(u32, inv_rkey)
1866 	),
1867 
1868 	TP_fast_assign(
1869 		const struct ib_send_wr *wr = &ctxt->sc_send_wr;
1870 
1871 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1872 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1873 		__entry->num_sge = wr->num_sge;
1874 		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1875 					wr->ex.invalidate_rkey : 0;
1876 	),
1877 
1878 	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
1879 		__entry->cq_id, __entry->completion_id,
1880 		__entry->num_sge, __entry->inv_rkey
1881 	)
1882 );
1883 
1884 DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1885 
1886 TRACE_EVENT(svcrdma_post_recv,
1887 	TP_PROTO(
1888 		const struct svc_rdma_recv_ctxt *ctxt
1889 	),
1890 
1891 	TP_ARGS(ctxt),
1892 
1893 	TP_STRUCT__entry(
1894 		__field(u32, cq_id)
1895 		__field(int, completion_id)
1896 	),
1897 
1898 	TP_fast_assign(
1899 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1900 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1901 	),
1902 
1903 	TP_printk("cq.id=%u cid=%d",
1904 		__entry->cq_id, __entry->completion_id
1905 	)
1906 );
1907 
1908 DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
1909 
1910 TRACE_EVENT(svcrdma_rq_post_err,
1911 	TP_PROTO(
1912 		const struct svcxprt_rdma *rdma,
1913 		int status
1914 	),
1915 
1916 	TP_ARGS(rdma, status),
1917 
1918 	TP_STRUCT__entry(
1919 		__field(int, status)
1920 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1921 	),
1922 
1923 	TP_fast_assign(
1924 		__entry->status = status;
1925 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1926 	),
1927 
1928 	TP_printk("addr=%s status=%d",
1929 		__get_str(addr), __entry->status
1930 	)
1931 );
1932 
1933 DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
1934 	TP_PROTO(
1935 		const struct rpc_rdma_cid *cid,
1936 		int sqecount
1937 	),
1938 
1939 	TP_ARGS(cid, sqecount),
1940 
1941 	TP_STRUCT__entry(
1942 		__field(u32, cq_id)
1943 		__field(int, completion_id)
1944 		__field(int, sqecount)
1945 	),
1946 
1947 	TP_fast_assign(
1948 		__entry->cq_id = cid->ci_queue_id;
1949 		__entry->completion_id = cid->ci_completion_id;
1950 		__entry->sqecount = sqecount;
1951 	),
1952 
1953 	TP_printk("cq.id=%u cid=%d sqecount=%d",
1954 		__entry->cq_id, __entry->completion_id,
1955 		__entry->sqecount
1956 	)
1957 );
1958 
1959 #define DEFINE_POST_CHUNK_EVENT(name)					\
1960 		DEFINE_EVENT(svcrdma_post_chunk_class,			\
1961 				svcrdma_post_##name##_chunk,		\
1962 				TP_PROTO(				\
1963 					const struct rpc_rdma_cid *cid,	\
1964 					int sqecount			\
1965 				),					\
1966 				TP_ARGS(cid, sqecount))
1967 
1968 DEFINE_POST_CHUNK_EVENT(read);
1969 DEFINE_POST_CHUNK_EVENT(write);
1970 DEFINE_POST_CHUNK_EVENT(reply);
1971 
1972 DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
1973 DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1974 
1975 TRACE_EVENT(svcrdma_qp_error,
1976 	TP_PROTO(
1977 		const struct ib_event *event,
1978 		const struct sockaddr *sap
1979 	),
1980 
1981 	TP_ARGS(event, sap),
1982 
1983 	TP_STRUCT__entry(
1984 		__field(unsigned int, event)
1985 		__string(device, event->device->name)
1986 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1987 	),
1988 
1989 	TP_fast_assign(
1990 		__entry->event = event->event;
1991 		__assign_str(device, event->device->name);
1992 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1993 			 "%pISpc", sap);
1994 	),
1995 
1996 	TP_printk("addr=%s dev=%s event=%s (%u)",
1997 		__entry->addr, __get_str(device),
1998 		rdma_show_ib_event(__entry->event), __entry->event
1999 	)
2000 );
2001 
2002 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
2003 	TP_PROTO(
2004 		const struct svcxprt_rdma *rdma
2005 	),
2006 
2007 	TP_ARGS(rdma),
2008 
2009 	TP_STRUCT__entry(
2010 		__field(int, avail)
2011 		__field(int, depth)
2012 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
2013 	),
2014 
2015 	TP_fast_assign(
2016 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
2017 		__entry->depth = rdma->sc_sq_depth;
2018 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
2019 	),
2020 
2021 	TP_printk("addr=%s sc_sq_avail=%d/%d",
2022 		__get_str(addr), __entry->avail, __entry->depth
2023 	)
2024 );
2025 
2026 #define DEFINE_SQ_EVENT(name)						\
2027 		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
2028 				TP_PROTO(				\
2029 					const struct svcxprt_rdma *rdma \
2030 				),					\
2031 				TP_ARGS(rdma))
2032 
2033 DEFINE_SQ_EVENT(full);
2034 DEFINE_SQ_EVENT(retry);
2035 
2036 TRACE_EVENT(svcrdma_sq_post_err,
2037 	TP_PROTO(
2038 		const struct svcxprt_rdma *rdma,
2039 		int status
2040 	),
2041 
2042 	TP_ARGS(rdma, status),
2043 
2044 	TP_STRUCT__entry(
2045 		__field(int, avail)
2046 		__field(int, depth)
2047 		__field(int, status)
2048 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
2049 	),
2050 
2051 	TP_fast_assign(
2052 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
2053 		__entry->depth = rdma->sc_sq_depth;
2054 		__entry->status = status;
2055 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
2056 	),
2057 
2058 	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
2059 		__get_str(addr), __entry->avail, __entry->depth,
2060 		__entry->status
2061 	)
2062 );
2063 
2064 #endif /* _TRACE_RPCRDMA_H */
2065 
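/*
 * As with all trace event headers, this file is deliberately included a
 * second time via trace/define_trace.h (when a source file defines
 * CREATE_TRACE_POINTS) with TRACE_HEADER_MULTI_READ set; that second
 * pass is what expands the TRACE_EVENT()/DECLARE_EVENT_CLASS() macros
 * above into the actual tracepoint definitions.
 */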
2066 #include <trace/define_trace.h>
2067