xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision c30f259a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
18 
19 /**
20  ** Event classes
21  **/
22 
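/*
 * The event classes in this section are shared templates: each one is
 * instantiated by the DEFINE_*_EVENT() wrapper macro that follows it,
 * so every instance (for example xprtrdma_wc_send and svcrdma_wc_send
 * below) reuses the same entry layout and print format. As with any
 * TRACE_EVENT()/DEFINE_EVENT() definition, an instance named "foo" is
 * invoked from C code as trace_foo().
 */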
23 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
24 	TP_PROTO(
25 		const struct ib_wc *wc,
26 		const struct rpc_rdma_cid *cid
27 	),
28 
29 	TP_ARGS(wc, cid),
30 
31 	TP_STRUCT__entry(
32 		__field(u32, cq_id)
33 		__field(int, completion_id)
34 		__field(unsigned long, status)
35 		__field(unsigned int, vendor_err)
36 	),
37 
38 	TP_fast_assign(
39 		__entry->cq_id = cid->ci_queue_id;
40 		__entry->completion_id = cid->ci_completion_id;
41 		__entry->status = wc->status;
42 		if (wc->status)
43 			__entry->vendor_err = wc->vendor_err;
44 		else
45 			__entry->vendor_err = 0;
46 	),
47 
48 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
49 		__entry->cq_id, __entry->completion_id,
50 		rdma_show_wc_status(__entry->status),
51 		__entry->status, __entry->vendor_err
52 	)
53 );
54 
55 #define DEFINE_COMPLETION_EVENT(name)					\
56 		DEFINE_EVENT(rpcrdma_completion_class, name,		\
57 				TP_PROTO(				\
58 					const struct ib_wc *wc,		\
59 					const struct rpc_rdma_cid *cid	\
60 				),					\
61 				TP_ARGS(wc, cid))
62 
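/*
 * Like rpcrdma_completion_class, but for Receive completions: on
 * success the number of bytes received (wc->byte_len) is recorded as
 * well; on error it is reported as zero.
 */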
63 DECLARE_EVENT_CLASS(rpcrdma_receive_completion_class,
64 	TP_PROTO(
65 		const struct ib_wc *wc,
66 		const struct rpc_rdma_cid *cid
67 	),
68 
69 	TP_ARGS(wc, cid),
70 
71 	TP_STRUCT__entry(
72 		__field(u32, cq_id)
73 		__field(int, completion_id)
74 		__field(u32, received)
75 		__field(unsigned long, status)
76 		__field(unsigned int, vendor_err)
77 	),
78 
79 	TP_fast_assign(
80 		__entry->cq_id = cid->ci_queue_id;
81 		__entry->completion_id = cid->ci_completion_id;
82 		__entry->status = wc->status;
83 		if (wc->status) {
84 			__entry->received = 0;
85 			__entry->vendor_err = wc->vendor_err;
86 		} else {
87 			__entry->received = wc->byte_len;
88 			__entry->vendor_err = 0;
89 		}
90 	),
91 
92 	TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x) received=%u",
93 		__entry->cq_id, __entry->completion_id,
94 		rdma_show_wc_status(__entry->status),
95 		__entry->status, __entry->vendor_err,
96 		__entry->received
97 	)
98 );
99 
100 #define DEFINE_RECEIVE_COMPLETION_EVENT(name)				\
101 		DEFINE_EVENT(rpcrdma_receive_completion_class, name,	\
102 				TP_PROTO(				\
103 					const struct ib_wc *wc,		\
104 					const struct rpc_rdma_cid *cid	\
105 				),					\
106 				TP_ARGS(wc, cid))
107 
108 DECLARE_EVENT_CLASS(xprtrdma_reply_class,
109 	TP_PROTO(
110 		const struct rpcrdma_rep *rep
111 	),
112 
113 	TP_ARGS(rep),
114 
115 	TP_STRUCT__entry(
116 		__field(u32, xid)
117 		__field(u32, version)
118 		__field(u32, proc)
119 		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
120 		__string(port, rpcrdma_portstr(rep->rr_rxprt))
121 	),
122 
123 	TP_fast_assign(
124 		__entry->xid = be32_to_cpu(rep->rr_xid);
125 		__entry->version = be32_to_cpu(rep->rr_vers);
126 		__entry->proc = be32_to_cpu(rep->rr_proc);
127 		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
128 		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
129 	),
130 
131 	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
132 		__get_str(addr), __get_str(port),
133 		__entry->xid, __entry->version, __entry->proc
134 	)
135 );
136 
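/*
 * DEFINE_REPLY_EVENT(foo) generates a tracepoint named
 * xprtrdma_reply_foo_err; see the vers/rqst/short/hdr instances in
 * the Reply events section below.
 */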
137 #define DEFINE_REPLY_EVENT(name)					\
138 		DEFINE_EVENT(xprtrdma_reply_class,			\
139 				xprtrdma_reply_##name##_err,		\
140 				TP_PROTO(				\
141 					const struct rpcrdma_rep *rep	\
142 				),					\
143 				TP_ARGS(rep))
144 
145 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
146 	TP_PROTO(
147 		const struct rpcrdma_xprt *r_xprt
148 	),
149 
150 	TP_ARGS(r_xprt),
151 
152 	TP_STRUCT__entry(
153 		__field(const void *, r_xprt)
154 		__string(addr, rpcrdma_addrstr(r_xprt))
155 		__string(port, rpcrdma_portstr(r_xprt))
156 	),
157 
158 	TP_fast_assign(
159 		__entry->r_xprt = r_xprt;
160 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
161 		__assign_str(port, rpcrdma_portstr(r_xprt));
162 	),
163 
164 	TP_printk("peer=[%s]:%s r_xprt=%p",
165 		__get_str(addr), __get_str(port), __entry->r_xprt
166 	)
167 );
168 
169 #define DEFINE_RXPRT_EVENT(name)					\
170 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
171 				TP_PROTO(				\
172 					const struct rpcrdma_xprt *r_xprt \
173 				),					\
174 				TP_ARGS(r_xprt))
175 
176 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
177 	TP_PROTO(
178 		const struct rpcrdma_xprt *r_xprt,
179 		int rc
180 	),
181 
182 	TP_ARGS(r_xprt, rc),
183 
184 	TP_STRUCT__entry(
185 		__field(const void *, r_xprt)
186 		__field(int, rc)
187 		__field(int, connect_status)
188 		__string(addr, rpcrdma_addrstr(r_xprt))
189 		__string(port, rpcrdma_portstr(r_xprt))
190 	),
191 
192 	TP_fast_assign(
193 		__entry->r_xprt = r_xprt;
194 		__entry->rc = rc;
195 		__entry->connect_status = r_xprt->rx_ep->re_connect_status;
196 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
197 		__assign_str(port, rpcrdma_portstr(r_xprt));
198 	),
199 
200 	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
201 		__get_str(addr), __get_str(port), __entry->r_xprt,
202 		__entry->rc, __entry->connect_status
203 	)
204 );
205 
206 #define DEFINE_CONN_EVENT(name)						\
207 		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
208 				TP_PROTO(				\
209 					const struct rpcrdma_xprt *r_xprt, \
210 					int rc				\
211 				),					\
212 				TP_ARGS(r_xprt, rc))
213 
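/*
 * Chunk encoding events for the Read list and the Write/Reply chunks.
 * The trailing "(more)" or "(last)" in the output reports whether this
 * MR mapped fewer segments than were requested (nents < nsegs).
 */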
214 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
215 	TP_PROTO(
216 		const struct rpc_task *task,
217 		unsigned int pos,
218 		struct rpcrdma_mr *mr,
219 		int nsegs
220 	),
221 
222 	TP_ARGS(task, pos, mr, nsegs),
223 
224 	TP_STRUCT__entry(
225 		__field(unsigned int, task_id)
226 		__field(unsigned int, client_id)
227 		__field(unsigned int, pos)
228 		__field(int, nents)
229 		__field(u32, handle)
230 		__field(u32, length)
231 		__field(u64, offset)
232 		__field(int, nsegs)
233 	),
234 
235 	TP_fast_assign(
236 		__entry->task_id = task->tk_pid;
237 		__entry->client_id = task->tk_client->cl_clid;
238 		__entry->pos = pos;
239 		__entry->nents = mr->mr_nents;
240 		__entry->handle = mr->mr_handle;
241 		__entry->length = mr->mr_length;
242 		__entry->offset = mr->mr_offset;
243 		__entry->nsegs = nsegs;
244 	),
245 
246 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
247 		__entry->task_id, __entry->client_id,
248 		__entry->pos, __entry->length,
249 		(unsigned long long)__entry->offset, __entry->handle,
250 		__entry->nents < __entry->nsegs ? "more" : "last"
251 	)
252 );
253 
254 #define DEFINE_RDCH_EVENT(name)						\
255 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
256 				TP_PROTO(				\
257 					const struct rpc_task *task,	\
258 					unsigned int pos,		\
259 					struct rpcrdma_mr *mr,		\
260 					int nsegs			\
261 				),					\
262 				TP_ARGS(task, pos, mr, nsegs))
263 
264 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
265 	TP_PROTO(
266 		const struct rpc_task *task,
267 		struct rpcrdma_mr *mr,
268 		int nsegs
269 	),
270 
271 	TP_ARGS(task, mr, nsegs),
272 
273 	TP_STRUCT__entry(
274 		__field(unsigned int, task_id)
275 		__field(unsigned int, client_id)
276 		__field(int, nents)
277 		__field(u32, handle)
278 		__field(u32, length)
279 		__field(u64, offset)
280 		__field(int, nsegs)
281 	),
282 
283 	TP_fast_assign(
284 		__entry->task_id = task->tk_pid;
285 		__entry->client_id = task->tk_client->cl_clid;
286 		__entry->nents = mr->mr_nents;
287 		__entry->handle = mr->mr_handle;
288 		__entry->length = mr->mr_length;
289 		__entry->offset = mr->mr_offset;
290 		__entry->nsegs = nsegs;
291 	),
292 
293 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
294 		__entry->task_id, __entry->client_id,
295 		__entry->length, (unsigned long long)__entry->offset,
296 		__entry->handle,
297 		__entry->nents < __entry->nsegs ? "more" : "last"
298 	)
299 );
300 
301 #define DEFINE_WRCH_EVENT(name)						\
302 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
303 				TP_PROTO(				\
304 					const struct rpc_task *task,	\
305 					struct rpcrdma_mr *mr,		\
306 					int nsegs			\
307 				),					\
308 				TP_ARGS(task, mr, nsegs))
309 
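/*
 * TRACE_DEFINE_ENUM() exports these enum values so that user-space
 * trace tools can resolve the symbolic DMA direction names produced
 * by xprtrdma_show_direction().
 */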
310 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
311 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
312 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
313 TRACE_DEFINE_ENUM(DMA_NONE);
314 
315 #define xprtrdma_show_direction(x)					\
316 		__print_symbolic(x,					\
317 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
318 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
319 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
320 				{ DMA_NONE, "NONE" })
321 
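/*
 * xprtrdma_mr_class is for MRs that are still attached to an
 * rpcrdma_req, so the owning RPC task can be recorded. The
 * "anonymous" variant below omits the task and client IDs and never
 * dereferences mr->mr_req.
 */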
322 DECLARE_EVENT_CLASS(xprtrdma_mr_class,
323 	TP_PROTO(
324 		const struct rpcrdma_mr *mr
325 	),
326 
327 	TP_ARGS(mr),
328 
329 	TP_STRUCT__entry(
330 		__field(unsigned int, task_id)
331 		__field(unsigned int, client_id)
332 		__field(u32, mr_id)
333 		__field(int, nents)
334 		__field(u32, handle)
335 		__field(u32, length)
336 		__field(u64, offset)
337 		__field(u32, dir)
338 	),
339 
340 	TP_fast_assign(
341 		const struct rpcrdma_req *req = mr->mr_req;
342 		const struct rpc_task *task = req->rl_slot.rq_task;
343 
344 		__entry->task_id = task->tk_pid;
345 		__entry->client_id = task->tk_client->cl_clid;
346 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
347 		__entry->nents  = mr->mr_nents;
348 		__entry->handle = mr->mr_handle;
349 		__entry->length = mr->mr_length;
350 		__entry->offset = mr->mr_offset;
351 		__entry->dir    = mr->mr_dir;
352 	),
353 
354 	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
355 		__entry->task_id, __entry->client_id,
356 		__entry->mr_id, __entry->nents, __entry->length,
357 		(unsigned long long)__entry->offset, __entry->handle,
358 		xprtrdma_show_direction(__entry->dir)
359 	)
360 );
361 
362 #define DEFINE_MR_EVENT(name)						\
363 		DEFINE_EVENT(xprtrdma_mr_class,				\
364 				xprtrdma_mr_##name,			\
365 				TP_PROTO(				\
366 					const struct rpcrdma_mr *mr	\
367 				),					\
368 				TP_ARGS(mr))
369 
370 DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
371 	TP_PROTO(
372 		const struct rpcrdma_mr *mr
373 	),
374 
375 	TP_ARGS(mr),
376 
377 	TP_STRUCT__entry(
378 		__field(u32, mr_id)
379 		__field(int, nents)
380 		__field(u32, handle)
381 		__field(u32, length)
382 		__field(u64, offset)
383 		__field(u32, dir)
384 	),
385 
386 	TP_fast_assign(
387 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
388 		__entry->nents  = mr->mr_nents;
389 		__entry->handle = mr->mr_handle;
390 		__entry->length = mr->mr_length;
391 		__entry->offset = mr->mr_offset;
392 		__entry->dir    = mr->mr_dir;
393 	),
394 
395 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
396 		__entry->mr_id, __entry->nents, __entry->length,
397 		(unsigned long long)__entry->offset, __entry->handle,
398 		xprtrdma_show_direction(__entry->dir)
399 	)
400 );
401 
402 #define DEFINE_ANON_MR_EVENT(name)					\
403 		DEFINE_EVENT(xprtrdma_anonymous_mr_class,		\
404 				xprtrdma_mr_##name,			\
405 				TP_PROTO(				\
406 					const struct rpcrdma_mr *mr	\
407 				),					\
408 				TP_ARGS(mr))
409 
410 DECLARE_EVENT_CLASS(xprtrdma_callback_class,
411 	TP_PROTO(
412 		const struct rpcrdma_xprt *r_xprt,
413 		const struct rpc_rqst *rqst
414 	),
415 
416 	TP_ARGS(r_xprt, rqst),
417 
418 	TP_STRUCT__entry(
419 		__field(u32, xid)
420 		__string(addr, rpcrdma_addrstr(r_xprt))
421 		__string(port, rpcrdma_portstr(r_xprt))
422 	),
423 
424 	TP_fast_assign(
425 		__entry->xid = be32_to_cpu(rqst->rq_xid);
426 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
427 		__assign_str(port, rpcrdma_portstr(r_xprt));
428 	),
429 
430 	TP_printk("peer=[%s]:%s xid=0x%08x",
431 		__get_str(addr), __get_str(port), __entry->xid
432 	)
433 );
434 
435 #define DEFINE_CALLBACK_EVENT(name)					\
436 		DEFINE_EVENT(xprtrdma_callback_class,			\
437 				xprtrdma_cb_##name,			\
438 				TP_PROTO(				\
439 					const struct rpcrdma_xprt *r_xprt, \
440 					const struct rpc_rqst *rqst	\
441 				),					\
442 				TP_ARGS(r_xprt, rqst))
443 
444 /**
445  ** Connection events
446  **/
447 
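/*
 * Reports a connection's negotiated and calculated inline thresholds,
 * together with the source and destination addresses.
 */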
448 TRACE_EVENT(xprtrdma_inline_thresh,
449 	TP_PROTO(
450 		const struct rpcrdma_ep *ep
451 	),
452 
453 	TP_ARGS(ep),
454 
455 	TP_STRUCT__entry(
456 		__field(unsigned int, inline_send)
457 		__field(unsigned int, inline_recv)
458 		__field(unsigned int, max_send)
459 		__field(unsigned int, max_recv)
460 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
461 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
462 	),
463 
464 	TP_fast_assign(
465 		const struct rdma_cm_id *id = ep->re_id;
466 
467 		__entry->inline_send = ep->re_inline_send;
468 		__entry->inline_recv = ep->re_inline_recv;
469 		__entry->max_send = ep->re_max_inline_send;
470 		__entry->max_recv = ep->re_max_inline_recv;
471 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
472 		       sizeof(struct sockaddr_in6));
473 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
474 		       sizeof(struct sockaddr_in6));
475 	),
476 
477 	TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
478 		__entry->srcaddr, __entry->dstaddr,
479 		__entry->inline_send, __entry->inline_recv,
480 		__entry->max_send, __entry->max_recv
481 	)
482 );
483 
484 DEFINE_CONN_EVENT(connect);
485 DEFINE_CONN_EVENT(disconnect);
486 
487 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
488 
489 TRACE_EVENT(xprtrdma_op_connect,
490 	TP_PROTO(
491 		const struct rpcrdma_xprt *r_xprt,
492 		unsigned long delay
493 	),
494 
495 	TP_ARGS(r_xprt, delay),
496 
497 	TP_STRUCT__entry(
498 		__field(const void *, r_xprt)
499 		__field(unsigned long, delay)
500 		__string(addr, rpcrdma_addrstr(r_xprt))
501 		__string(port, rpcrdma_portstr(r_xprt))
502 	),
503 
504 	TP_fast_assign(
505 		__entry->r_xprt = r_xprt;
506 		__entry->delay = delay;
507 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
508 		__assign_str(port, rpcrdma_portstr(r_xprt));
509 	),
510 
511 	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
512 		__get_str(addr), __get_str(port), __entry->r_xprt,
513 		__entry->delay
514 	)
515 );
516 
518 TRACE_EVENT(xprtrdma_op_set_cto,
519 	TP_PROTO(
520 		const struct rpcrdma_xprt *r_xprt,
521 		unsigned long connect,
522 		unsigned long reconnect
523 	),
524 
525 	TP_ARGS(r_xprt, connect, reconnect),
526 
527 	TP_STRUCT__entry(
528 		__field(const void *, r_xprt)
529 		__field(unsigned long, connect)
530 		__field(unsigned long, reconnect)
531 		__string(addr, rpcrdma_addrstr(r_xprt))
532 		__string(port, rpcrdma_portstr(r_xprt))
533 	),
534 
535 	TP_fast_assign(
536 		__entry->r_xprt = r_xprt;
537 		__entry->connect = connect;
538 		__entry->reconnect = reconnect;
539 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
540 		__assign_str(port, rpcrdma_portstr(r_xprt));
541 	),
542 
543 	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
544 		__get_str(addr), __get_str(port), __entry->r_xprt,
545 		__entry->connect / HZ, __entry->reconnect / HZ
546 	)
547 );
548 
549 TRACE_EVENT(xprtrdma_qp_event,
550 	TP_PROTO(
551 		const struct rpcrdma_ep *ep,
552 		const struct ib_event *event
553 	),
554 
555 	TP_ARGS(ep, event),
556 
557 	TP_STRUCT__entry(
558 		__field(unsigned long, event)
559 		__string(name, event->device->name)
560 		__array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
561 		__array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
562 	),
563 
564 	TP_fast_assign(
565 		const struct rdma_cm_id *id = ep->re_id;
566 
567 		__entry->event = event->event;
568 		__assign_str(name, event->device->name);
569 		memcpy(__entry->srcaddr, &id->route.addr.src_addr,
570 		       sizeof(struct sockaddr_in6));
571 		memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
572 		       sizeof(struct sockaddr_in6));
573 	),
574 
575 	TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
576 		__entry->srcaddr, __entry->dstaddr, __get_str(name),
577 		rdma_show_ib_event(__entry->event), __entry->event
578 	)
579 );
580 
581 /**
582  ** Call events
583  **/
584 
585 TRACE_EVENT(xprtrdma_createmrs,
586 	TP_PROTO(
587 		const struct rpcrdma_xprt *r_xprt,
588 		unsigned int count
589 	),
590 
591 	TP_ARGS(r_xprt, count),
592 
593 	TP_STRUCT__entry(
594 		__field(const void *, r_xprt)
595 		__string(addr, rpcrdma_addrstr(r_xprt))
596 		__string(port, rpcrdma_portstr(r_xprt))
597 		__field(unsigned int, count)
598 	),
599 
600 	TP_fast_assign(
601 		__entry->r_xprt = r_xprt;
602 		__entry->count = count;
603 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
604 		__assign_str(port, rpcrdma_portstr(r_xprt));
605 	),
606 
607 	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
608 		__get_str(addr), __get_str(port), __entry->r_xprt,
609 		__entry->count
610 	)
611 );
612 
613 TRACE_EVENT(xprtrdma_nomrs_err,
614 	TP_PROTO(
615 		const struct rpcrdma_xprt *r_xprt,
616 		const struct rpcrdma_req *req
617 	),
618 
619 	TP_ARGS(r_xprt, req),
620 
621 	TP_STRUCT__entry(
622 		__field(unsigned int, task_id)
623 		__field(unsigned int, client_id)
624 		__string(addr, rpcrdma_addrstr(r_xprt))
625 		__string(port, rpcrdma_portstr(r_xprt))
626 	),
627 
628 	TP_fast_assign(
629 		const struct rpc_rqst *rqst = &req->rl_slot;
630 
631 		__entry->task_id = rqst->rq_task->tk_pid;
632 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
633 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
634 		__assign_str(port, rpcrdma_portstr(r_xprt));
635 	),
636 
637 	TP_printk("peer=[%s]:%s task:%u@%u",
638 		__get_str(addr), __get_str(port),
639 		__entry->task_id, __entry->client_id
640 	)
641 );
642 
643 DEFINE_RDCH_EVENT(read);
644 DEFINE_WRCH_EVENT(write);
645 DEFINE_WRCH_EVENT(reply);
646 
647 TRACE_DEFINE_ENUM(rpcrdma_noch);
648 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
649 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
650 TRACE_DEFINE_ENUM(rpcrdma_readch);
651 TRACE_DEFINE_ENUM(rpcrdma_areadch);
652 TRACE_DEFINE_ENUM(rpcrdma_writech);
653 TRACE_DEFINE_ENUM(rpcrdma_replych);
654 
655 #define xprtrdma_show_chunktype(x)					\
656 		__print_symbolic(x,					\
657 				{ rpcrdma_noch, "inline" },		\
658 				{ rpcrdma_noch_pullup, "pullup" },	\
659 				{ rpcrdma_noch_mapped, "mapped" },	\
660 				{ rpcrdma_readch, "read list" },	\
661 				{ rpcrdma_areadch, "*read list" },	\
662 				{ rpcrdma_writech, "write list" },	\
663 				{ rpcrdma_replych, "reply chunk" })
664 
665 TRACE_EVENT(xprtrdma_marshal,
666 	TP_PROTO(
667 		const struct rpcrdma_req *req,
668 		unsigned int rtype,
669 		unsigned int wtype
670 	),
671 
672 	TP_ARGS(req, rtype, wtype),
673 
674 	TP_STRUCT__entry(
675 		__field(unsigned int, task_id)
676 		__field(unsigned int, client_id)
677 		__field(u32, xid)
678 		__field(unsigned int, hdrlen)
679 		__field(unsigned int, headlen)
680 		__field(unsigned int, pagelen)
681 		__field(unsigned int, taillen)
682 		__field(unsigned int, rtype)
683 		__field(unsigned int, wtype)
684 	),
685 
686 	TP_fast_assign(
687 		const struct rpc_rqst *rqst = &req->rl_slot;
688 
689 		__entry->task_id = rqst->rq_task->tk_pid;
690 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
691 		__entry->xid = be32_to_cpu(rqst->rq_xid);
692 		__entry->hdrlen = req->rl_hdrbuf.len;
693 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
694 		__entry->pagelen = rqst->rq_snd_buf.page_len;
695 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
696 		__entry->rtype = rtype;
697 		__entry->wtype = wtype;
698 	),
699 
700 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
701 		__entry->task_id, __entry->client_id, __entry->xid,
702 		__entry->hdrlen,
703 		__entry->headlen, __entry->pagelen, __entry->taillen,
704 		xprtrdma_show_chunktype(__entry->rtype),
705 		xprtrdma_show_chunktype(__entry->wtype)
706 	)
707 );
708 
709 TRACE_EVENT(xprtrdma_marshal_failed,
710 	TP_PROTO(const struct rpc_rqst *rqst,
711 		 int ret
712 	),
713 
714 	TP_ARGS(rqst, ret),
715 
716 	TP_STRUCT__entry(
717 		__field(unsigned int, task_id)
718 		__field(unsigned int, client_id)
719 		__field(u32, xid)
720 		__field(int, ret)
721 	),
722 
723 	TP_fast_assign(
724 		__entry->task_id = rqst->rq_task->tk_pid;
725 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
726 		__entry->xid = be32_to_cpu(rqst->rq_xid);
727 		__entry->ret = ret;
728 	),
729 
730 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
731 		__entry->task_id, __entry->client_id, __entry->xid,
732 		__entry->ret
733 	)
734 );
735 
736 TRACE_EVENT(xprtrdma_prepsend_failed,
737 	TP_PROTO(const struct rpc_rqst *rqst,
738 		 int ret
739 	),
740 
741 	TP_ARGS(rqst, ret),
742 
743 	TP_STRUCT__entry(
744 		__field(unsigned int, task_id)
745 		__field(unsigned int, client_id)
746 		__field(u32, xid)
747 		__field(int, ret)
748 	),
749 
750 	TP_fast_assign(
751 		__entry->task_id = rqst->rq_task->tk_pid;
752 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
753 		__entry->xid = be32_to_cpu(rqst->rq_xid);
754 		__entry->ret = ret;
755 	),
756 
757 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
758 		__entry->task_id, __entry->client_id, __entry->xid,
759 		__entry->ret
760 	)
761 );
762 
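/*
 * Records a posted Send WR: the CQ and completion IDs, the number of
 * SGEs, and whether the WR was signaled. client_id is set to -1 when
 * the request has no rpc_clnt attached.
 */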
763 TRACE_EVENT(xprtrdma_post_send,
764 	TP_PROTO(
765 		const struct rpcrdma_req *req
766 	),
767 
768 	TP_ARGS(req),
769 
770 	TP_STRUCT__entry(
771 		__field(u32, cq_id)
772 		__field(int, completion_id)
773 		__field(unsigned int, task_id)
774 		__field(unsigned int, client_id)
775 		__field(int, num_sge)
776 		__field(int, signaled)
777 	),
778 
779 	TP_fast_assign(
780 		const struct rpc_rqst *rqst = &req->rl_slot;
781 		const struct rpcrdma_sendctx *sc = req->rl_sendctx;
782 
783 		__entry->cq_id = sc->sc_cid.ci_queue_id;
784 		__entry->completion_id = sc->sc_cid.ci_completion_id;
785 		__entry->task_id = rqst->rq_task->tk_pid;
786 		__entry->client_id = rqst->rq_task->tk_client ?
787 				     rqst->rq_task->tk_client->cl_clid : -1;
788 		__entry->num_sge = req->rl_wr.num_sge;
789 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
790 	),
791 
792 	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
793 		__entry->task_id, __entry->client_id,
794 		__entry->cq_id, __entry->completion_id,
795 		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
796 		(__entry->signaled ? "signaled" : "")
797 	)
798 );
799 
800 TRACE_EVENT(xprtrdma_post_recv,
801 	TP_PROTO(
802 		const struct rpcrdma_rep *rep
803 	),
804 
805 	TP_ARGS(rep),
806 
807 	TP_STRUCT__entry(
808 		__field(u32, cq_id)
809 		__field(int, completion_id)
810 	),
811 
812 	TP_fast_assign(
813 		__entry->cq_id = rep->rr_cid.ci_queue_id;
814 		__entry->completion_id = rep->rr_cid.ci_completion_id;
815 	),
816 
817 	TP_printk("cq.id=%u cid=%d",
818 		__entry->cq_id, __entry->completion_id
819 	)
820 );
821 
822 TRACE_EVENT(xprtrdma_post_recvs,
823 	TP_PROTO(
824 		const struct rpcrdma_xprt *r_xprt,
825 		unsigned int count,
826 		int status
827 	),
828 
829 	TP_ARGS(r_xprt, count, status),
830 
831 	TP_STRUCT__entry(
832 		__field(const void *, r_xprt)
833 		__field(unsigned int, count)
834 		__field(int, status)
835 		__field(int, posted)
836 		__string(addr, rpcrdma_addrstr(r_xprt))
837 		__string(port, rpcrdma_portstr(r_xprt))
838 	),
839 
840 	TP_fast_assign(
841 		__entry->r_xprt = r_xprt;
842 		__entry->count = count;
843 		__entry->status = status;
844 		__entry->posted = r_xprt->rx_ep->re_receive_count;
845 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
846 		__assign_str(port, rpcrdma_portstr(r_xprt));
847 	),
848 
849 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
850 		__get_str(addr), __get_str(port), __entry->r_xprt,
851 		__entry->count, __entry->posted, __entry->status
852 	)
853 );
854 
855 TRACE_EVENT(xprtrdma_post_linv_err,
856 	TP_PROTO(
857 		const struct rpcrdma_req *req,
858 		int status
859 	),
860 
861 	TP_ARGS(req, status),
862 
863 	TP_STRUCT__entry(
864 		__field(unsigned int, task_id)
865 		__field(unsigned int, client_id)
866 		__field(int, status)
867 	),
868 
869 	TP_fast_assign(
870 		const struct rpc_task *task = req->rl_slot.rq_task;
871 
872 		__entry->task_id = task->tk_pid;
873 		__entry->client_id = task->tk_client->cl_clid;
874 		__entry->status = status;
875 	),
876 
877 	TP_printk("task:%u@%u status=%d",
878 		__entry->task_id, __entry->client_id, __entry->status
879 	)
880 );
881 
882 /**
883  ** Completion events
884  **/
885 
886 DEFINE_RECEIVE_COMPLETION_EVENT(xprtrdma_wc_receive);
887 
888 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
889 DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
890 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
891 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
892 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);
893 
894 TRACE_EVENT(xprtrdma_frwr_alloc,
895 	TP_PROTO(
896 		const struct rpcrdma_mr *mr,
897 		int rc
898 	),
899 
900 	TP_ARGS(mr, rc),
901 
902 	TP_STRUCT__entry(
903 		__field(u32, mr_id)
904 		__field(int, rc)
905 	),
906 
907 	TP_fast_assign(
908 		__entry->mr_id = mr->frwr.fr_mr->res.id;
909 		__entry->rc = rc;
910 	),
911 
912 	TP_printk("mr.id=%u: rc=%d",
913 		__entry->mr_id, __entry->rc
914 	)
915 );
916 
917 TRACE_EVENT(xprtrdma_frwr_dereg,
918 	TP_PROTO(
919 		const struct rpcrdma_mr *mr,
920 		int rc
921 	),
922 
923 	TP_ARGS(mr, rc),
924 
925 	TP_STRUCT__entry(
926 		__field(u32, mr_id)
927 		__field(int, nents)
928 		__field(u32, handle)
929 		__field(u32, length)
930 		__field(u64, offset)
931 		__field(u32, dir)
932 		__field(int, rc)
933 	),
934 
935 	TP_fast_assign(
936 		__entry->mr_id  = mr->frwr.fr_mr->res.id;
937 		__entry->nents  = mr->mr_nents;
938 		__entry->handle = mr->mr_handle;
939 		__entry->length = mr->mr_length;
940 		__entry->offset = mr->mr_offset;
941 		__entry->dir    = mr->mr_dir;
942 		__entry->rc	= rc;
943 	),
944 
945 	TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
946 		__entry->mr_id, __entry->nents, __entry->length,
947 		(unsigned long long)__entry->offset, __entry->handle,
948 		xprtrdma_show_direction(__entry->dir),
949 		__entry->rc
950 	)
951 );
952 
953 TRACE_EVENT(xprtrdma_frwr_sgerr,
954 	TP_PROTO(
955 		const struct rpcrdma_mr *mr,
956 		int sg_nents
957 	),
958 
959 	TP_ARGS(mr, sg_nents),
960 
961 	TP_STRUCT__entry(
962 		__field(u32, mr_id)
963 		__field(u64, addr)
964 		__field(u32, dir)
965 		__field(int, nents)
966 	),
967 
968 	TP_fast_assign(
969 		__entry->mr_id = mr->frwr.fr_mr->res.id;
970 		__entry->addr = mr->mr_sg->dma_address;
971 		__entry->dir = mr->mr_dir;
972 		__entry->nents = sg_nents;
973 	),
974 
975 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
976 		__entry->mr_id, __entry->addr,
977 		xprtrdma_show_direction(__entry->dir),
978 		__entry->nents
979 	)
980 );
981 
982 TRACE_EVENT(xprtrdma_frwr_maperr,
983 	TP_PROTO(
984 		const struct rpcrdma_mr *mr,
985 		int num_mapped
986 	),
987 
988 	TP_ARGS(mr, num_mapped),
989 
990 	TP_STRUCT__entry(
991 		__field(u32, mr_id)
992 		__field(u64, addr)
993 		__field(u32, dir)
994 		__field(int, num_mapped)
995 		__field(int, nents)
996 	),
997 
998 	TP_fast_assign(
999 		__entry->mr_id = mr->frwr.fr_mr->res.id;
1000 		__entry->addr = mr->mr_sg->dma_address;
1001 		__entry->dir = mr->mr_dir;
1002 		__entry->num_mapped = num_mapped;
1003 		__entry->nents = mr->mr_nents;
1004 	),
1005 
1006 	TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
1007 		__entry->mr_id, __entry->addr,
1008 		xprtrdma_show_direction(__entry->dir),
1009 		__entry->num_mapped, __entry->nents
1010 	)
1011 );
1012 
1013 DEFINE_MR_EVENT(localinv);
1014 DEFINE_MR_EVENT(map);
1015 
1016 DEFINE_ANON_MR_EVENT(unmap);
1017 DEFINE_ANON_MR_EVENT(recycle);
1018 
1019 TRACE_EVENT(xprtrdma_dma_maperr,
1020 	TP_PROTO(
1021 		u64 addr
1022 	),
1023 
1024 	TP_ARGS(addr),
1025 
1026 	TP_STRUCT__entry(
1027 		__field(u64, addr)
1028 	),
1029 
1030 	TP_fast_assign(
1031 		__entry->addr = addr;
1032 	),
1033 
1034 	TP_printk("dma addr=0x%llx", __entry->addr)
1035 );
1036 
1037 /**
1038  ** Reply events
1039  **/
1040 
1041 TRACE_EVENT(xprtrdma_reply,
1042 	TP_PROTO(
1043 		const struct rpc_task *task,
1044 		const struct rpcrdma_rep *rep,
1045 		unsigned int credits
1046 	),
1047 
1048 	TP_ARGS(task, rep, credits),
1049 
1050 	TP_STRUCT__entry(
1051 		__field(unsigned int, task_id)
1052 		__field(unsigned int, client_id)
1053 		__field(u32, xid)
1054 		__field(unsigned int, credits)
1055 	),
1056 
1057 	TP_fast_assign(
1058 		__entry->task_id = task->tk_pid;
1059 		__entry->client_id = task->tk_client->cl_clid;
1060 		__entry->xid = be32_to_cpu(rep->rr_xid);
1061 		__entry->credits = credits;
1062 	),
1063 
1064 	TP_printk("task:%u@%u xid=0x%08x credits=%u",
1065 		__entry->task_id, __entry->client_id, __entry->xid,
1066 		__entry->credits
1067 	)
1068 );
1069 
1070 DEFINE_REPLY_EVENT(vers);
1071 DEFINE_REPLY_EVENT(rqst);
1072 DEFINE_REPLY_EVENT(short);
1073 DEFINE_REPLY_EVENT(hdr);
1074 
1075 TRACE_EVENT(xprtrdma_err_vers,
1076 	TP_PROTO(
1077 		const struct rpc_rqst *rqst,
1078 		__be32 *min,
1079 		__be32 *max
1080 	),
1081 
1082 	TP_ARGS(rqst, min, max),
1083 
1084 	TP_STRUCT__entry(
1085 		__field(unsigned int, task_id)
1086 		__field(unsigned int, client_id)
1087 		__field(u32, xid)
1088 		__field(u32, min)
1089 		__field(u32, max)
1090 	),
1091 
1092 	TP_fast_assign(
1093 		__entry->task_id = rqst->rq_task->tk_pid;
1094 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1095 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1096 		__entry->min = be32_to_cpup(min);
1097 		__entry->max = be32_to_cpup(max);
1098 	),
1099 
1100 	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
1101 		__entry->task_id, __entry->client_id, __entry->xid,
1102 		__entry->min, __entry->max
1103 	)
1104 );
1105 
1106 TRACE_EVENT(xprtrdma_err_chunk,
1107 	TP_PROTO(
1108 		const struct rpc_rqst *rqst
1109 	),
1110 
1111 	TP_ARGS(rqst),
1112 
1113 	TP_STRUCT__entry(
1114 		__field(unsigned int, task_id)
1115 		__field(unsigned int, client_id)
1116 		__field(u32, xid)
1117 	),
1118 
1119 	TP_fast_assign(
1120 		__entry->task_id = rqst->rq_task->tk_pid;
1121 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1122 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1123 	),
1124 
1125 	TP_printk("task:%u@%u xid=0x%08x",
1126 		__entry->task_id, __entry->client_id, __entry->xid
1127 	)
1128 );
1129 
1130 TRACE_EVENT(xprtrdma_err_unrecognized,
1131 	TP_PROTO(
1132 		const struct rpc_rqst *rqst,
1133 		__be32 *procedure
1134 	),
1135 
1136 	TP_ARGS(rqst, procedure),
1137 
1138 	TP_STRUCT__entry(
1139 		__field(unsigned int, task_id)
1140 		__field(unsigned int, client_id)
1141 		__field(u32, xid)
1142 		__field(u32, procedure)
1143 	),
1144 
1145 	TP_fast_assign(
1146 		__entry->task_id = rqst->rq_task->tk_pid;
1147 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
1148 		__entry->procedure = be32_to_cpup(procedure);
1149 	),
1150 
1151 	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1152 		__entry->task_id, __entry->client_id, __entry->xid,
1153 		__entry->procedure
1154 	)
1155 );
1156 
1157 TRACE_EVENT(xprtrdma_fixup,
1158 	TP_PROTO(
1159 		const struct rpc_rqst *rqst,
1160 		unsigned long fixup
1161 	),
1162 
1163 	TP_ARGS(rqst, fixup),
1164 
1165 	TP_STRUCT__entry(
1166 		__field(unsigned int, task_id)
1167 		__field(unsigned int, client_id)
1168 		__field(unsigned long, fixup)
1169 		__field(size_t, headlen)
1170 		__field(unsigned int, pagelen)
1171 		__field(size_t, taillen)
1172 	),
1173 
1174 	TP_fast_assign(
1175 		__entry->task_id = rqst->rq_task->tk_pid;
1176 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1177 		__entry->fixup = fixup;
1178 		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1179 		__entry->pagelen = rqst->rq_rcv_buf.page_len;
1180 		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1181 	),
1182 
1183 	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
1184 		__entry->task_id, __entry->client_id, __entry->fixup,
1185 		__entry->headlen, __entry->pagelen, __entry->taillen
1186 	)
1187 );
1188 
1189 TRACE_EVENT(xprtrdma_decode_seg,
1190 	TP_PROTO(
1191 		u32 handle,
1192 		u32 length,
1193 		u64 offset
1194 	),
1195 
1196 	TP_ARGS(handle, length, offset),
1197 
1198 	TP_STRUCT__entry(
1199 		__field(u32, handle)
1200 		__field(u32, length)
1201 		__field(u64, offset)
1202 	),
1203 
1204 	TP_fast_assign(
1205 		__entry->handle = handle;
1206 		__entry->length = length;
1207 		__entry->offset = offset;
1208 	),
1209 
1210 	TP_printk("%u@0x%016llx:0x%08x",
1211 		__entry->length, (unsigned long long)__entry->offset,
1212 		__entry->handle
1213 	)
1214 );
1215 
1216 TRACE_EVENT(xprtrdma_mrs_zap,
1217 	TP_PROTO(
1218 		const struct rpc_task *task
1219 	),
1220 
1221 	TP_ARGS(task),
1222 
1223 	TP_STRUCT__entry(
1224 		__field(unsigned int, task_id)
1225 		__field(unsigned int, client_id)
1226 	),
1227 
1228 	TP_fast_assign(
1229 		__entry->task_id = task->tk_pid;
1230 		__entry->client_id = task->tk_client->cl_clid;
1231 	),
1232 
1233 	TP_printk("task:%u@%u",
1234 		__entry->task_id, __entry->client_id
1235 	)
1236 );
1237 
1238 /**
1239  ** Callback events
1240  **/
1241 
1242 TRACE_EVENT(xprtrdma_cb_setup,
1243 	TP_PROTO(
1244 		const struct rpcrdma_xprt *r_xprt,
1245 		unsigned int reqs
1246 	),
1247 
1248 	TP_ARGS(r_xprt, reqs),
1249 
1250 	TP_STRUCT__entry(
1251 		__field(const void *, r_xprt)
1252 		__field(unsigned int, reqs)
1253 		__string(addr, rpcrdma_addrstr(r_xprt))
1254 		__string(port, rpcrdma_portstr(r_xprt))
1255 	),
1256 
1257 	TP_fast_assign(
1258 		__entry->r_xprt = r_xprt;
1259 		__entry->reqs = reqs;
1260 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1261 		__assign_str(port, rpcrdma_portstr(r_xprt));
1262 	),
1263 
1264 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1265 		__get_str(addr), __get_str(port),
1266 		__entry->r_xprt, __entry->reqs
1267 	)
1268 );
1269 
1270 DEFINE_CALLBACK_EVENT(call);
1271 DEFINE_CALLBACK_EVENT(reply);
1272 
1273 /**
1274  ** Server-side RPC/RDMA events
1275  **/
1276 
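/*
 * svcrdma_accept_class reports failures while setting up a new
 * server-side transport; DEFINE_ACCEPT_EVENT(foo) instantiates it as
 * svcrdma_foo_err.
 */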
1277 DECLARE_EVENT_CLASS(svcrdma_accept_class,
1278 	TP_PROTO(
1279 		const struct svcxprt_rdma *rdma,
1280 		long status
1281 	),
1282 
1283 	TP_ARGS(rdma, status),
1284 
1285 	TP_STRUCT__entry(
1286 		__field(long, status)
1287 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1288 	),
1289 
1290 	TP_fast_assign(
1291 		__entry->status = status;
1292 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1293 	),
1294 
1295 	TP_printk("addr=%s status=%ld",
1296 		__get_str(addr), __entry->status
1297 	)
1298 );
1299 
1300 #define DEFINE_ACCEPT_EVENT(name) \
1301 		DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
1302 				TP_PROTO( \
1303 					const struct svcxprt_rdma *rdma, \
1304 					long status \
1305 				), \
1306 				TP_ARGS(rdma, status))
1307 
1308 DEFINE_ACCEPT_EVENT(pd);
1309 DEFINE_ACCEPT_EVENT(qp);
1310 DEFINE_ACCEPT_EVENT(fabric);
1311 DEFINE_ACCEPT_EVENT(initdepth);
1312 DEFINE_ACCEPT_EVENT(accept);
1313 
1314 TRACE_DEFINE_ENUM(RDMA_MSG);
1315 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1316 TRACE_DEFINE_ENUM(RDMA_MSGP);
1317 TRACE_DEFINE_ENUM(RDMA_DONE);
1318 TRACE_DEFINE_ENUM(RDMA_ERROR);
1319 
1320 #define show_rpcrdma_proc(x)						\
1321 		__print_symbolic(x,					\
1322 				{ RDMA_MSG, "RDMA_MSG" },		\
1323 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1324 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1325 				{ RDMA_DONE, "RDMA_DONE" },		\
1326 				{ RDMA_ERROR, "RDMA_ERROR" })
1327 
1328 TRACE_EVENT(svcrdma_decode_rqst,
1329 	TP_PROTO(
1330 		const struct svc_rdma_recv_ctxt *ctxt,
1331 		__be32 *p,
1332 		unsigned int hdrlen
1333 	),
1334 
1335 	TP_ARGS(ctxt, p, hdrlen),
1336 
1337 	TP_STRUCT__entry(
1338 		__field(u32, cq_id)
1339 		__field(int, completion_id)
1340 		__field(u32, xid)
1341 		__field(u32, vers)
1342 		__field(u32, proc)
1343 		__field(u32, credits)
1344 		__field(unsigned int, hdrlen)
1345 	),
1346 
1347 	TP_fast_assign(
1348 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1349 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1350 		__entry->xid = be32_to_cpup(p++);
1351 		__entry->vers = be32_to_cpup(p++);
1352 		__entry->credits = be32_to_cpup(p++);
1353 		__entry->proc = be32_to_cpup(p);
1354 		__entry->hdrlen = hdrlen;
1355 	),
1356 
1357 	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1358 		__entry->cq_id, __entry->completion_id,
1359 		__entry->xid, __entry->vers, __entry->credits,
1360 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1361 );
1362 
1363 TRACE_EVENT(svcrdma_decode_short_err,
1364 	TP_PROTO(
1365 		const struct svc_rdma_recv_ctxt *ctxt,
1366 		unsigned int hdrlen
1367 	),
1368 
1369 	TP_ARGS(ctxt, hdrlen),
1370 
1371 	TP_STRUCT__entry(
1372 		__field(u32, cq_id)
1373 		__field(int, completion_id)
1374 		__field(unsigned int, hdrlen)
1375 	),
1376 
1377 	TP_fast_assign(
1378 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1379 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1380 		__entry->hdrlen = hdrlen;
1381 	),
1382 
1383 	TP_printk("cq.id=%u cid=%d hdrlen=%u",
1384 		__entry->cq_id, __entry->completion_id,
1385 		__entry->hdrlen)
1386 );
1387 
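/*
 * svcrdma_badreq_event captures the first four header words (xid,
 * vers, credits, proc) of an inbound RPC/RDMA message that could not
 * be processed; DEFINE_BADREQ_EVENT(foo) instantiates it as
 * svcrdma_decode_foo_err.
 */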
1388 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1389 	TP_PROTO(
1390 		const struct svc_rdma_recv_ctxt *ctxt,
1391 		__be32 *p
1392 	),
1393 
1394 	TP_ARGS(ctxt, p),
1395 
1396 	TP_STRUCT__entry(
1397 		__field(u32, cq_id)
1398 		__field(int, completion_id)
1399 		__field(u32, xid)
1400 		__field(u32, vers)
1401 		__field(u32, proc)
1402 		__field(u32, credits)
1403 	),
1404 
1405 	TP_fast_assign(
1406 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1407 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1408 		__entry->xid = be32_to_cpup(p++);
1409 		__entry->vers = be32_to_cpup(p++);
1410 		__entry->credits = be32_to_cpup(p++);
1411 		__entry->proc = be32_to_cpup(p);
1412 	),
1413 
1414 	TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
1415 		__entry->cq_id, __entry->completion_id,
1416 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1417 );
1418 
1419 #define DEFINE_BADREQ_EVENT(name)					\
1420 		DEFINE_EVENT(svcrdma_badreq_event,			\
1421 			     svcrdma_decode_##name##_err,		\
1422 				TP_PROTO(				\
1423 					const struct svc_rdma_recv_ctxt *ctxt,	\
1424 					__be32 *p			\
1425 				),					\
1426 				TP_ARGS(ctxt, p))
1427 
1428 DEFINE_BADREQ_EVENT(badvers);
1429 DEFINE_BADREQ_EVENT(drop);
1430 DEFINE_BADREQ_EVENT(badproc);
1431 DEFINE_BADREQ_EVENT(parse);
1432 
1433 TRACE_EVENT(svcrdma_encode_wseg,
1434 	TP_PROTO(
1435 		const struct svc_rdma_send_ctxt *ctxt,
1436 		u32 segno,
1437 		u32 handle,
1438 		u32 length,
1439 		u64 offset
1440 	),
1441 
1442 	TP_ARGS(ctxt, segno, handle, length, offset),
1443 
1444 	TP_STRUCT__entry(
1445 		__field(u32, cq_id)
1446 		__field(int, completion_id)
1447 		__field(u32, segno)
1448 		__field(u32, handle)
1449 		__field(u32, length)
1450 		__field(u64, offset)
1451 	),
1452 
1453 	TP_fast_assign(
1454 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1455 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1456 		__entry->segno = segno;
1457 		__entry->handle = handle;
1458 		__entry->length = length;
1459 		__entry->offset = offset;
1460 	),
1461 
1462 	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1463 		__entry->cq_id, __entry->completion_id,
1464 		__entry->segno, __entry->length,
1465 		(unsigned long long)__entry->offset, __entry->handle
1466 	)
1467 );
1468 
1469 TRACE_EVENT(svcrdma_decode_rseg,
1470 	TP_PROTO(
1471 		const struct rpc_rdma_cid *cid,
1472 		const struct svc_rdma_chunk *chunk,
1473 		const struct svc_rdma_segment *segment
1474 	),
1475 
1476 	TP_ARGS(cid, chunk, segment),
1477 
1478 	TP_STRUCT__entry(
1479 		__field(u32, cq_id)
1480 		__field(int, completion_id)
1481 		__field(u32, segno)
1482 		__field(u32, position)
1483 		__field(u32, handle)
1484 		__field(u32, length)
1485 		__field(u64, offset)
1486 	),
1487 
1488 	TP_fast_assign(
1489 		__entry->cq_id = cid->ci_queue_id;
1490 		__entry->completion_id = cid->ci_completion_id;
1491 		__entry->segno = chunk->ch_segcount;
1492 		__entry->position = chunk->ch_position;
1493 		__entry->handle = segment->rs_handle;
1494 		__entry->length = segment->rs_length;
1495 		__entry->offset = segment->rs_offset;
1496 	),
1497 
1498 	TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
1499 		__entry->cq_id, __entry->completion_id,
1500 		__entry->segno, __entry->position, __entry->length,
1501 		(unsigned long long)__entry->offset, __entry->handle
1502 	)
1503 );
1504 
1505 TRACE_EVENT(svcrdma_decode_wseg,
1506 	TP_PROTO(
1507 		const struct rpc_rdma_cid *cid,
1508 		const struct svc_rdma_chunk *chunk,
1509 		u32 segno
1510 	),
1511 
1512 	TP_ARGS(cid, chunk, segno),
1513 
1514 	TP_STRUCT__entry(
1515 		__field(u32, cq_id)
1516 		__field(int, completion_id)
1517 		__field(u32, segno)
1518 		__field(u32, handle)
1519 		__field(u32, length)
1520 		__field(u64, offset)
1521 	),
1522 
1523 	TP_fast_assign(
1524 		const struct svc_rdma_segment *segment =
1525 			&chunk->ch_segments[segno];
1526 
1527 		__entry->cq_id = cid->ci_queue_id;
1528 		__entry->completion_id = cid->ci_completion_id;
1529 		__entry->segno = segno;
1530 		__entry->handle = segment->rs_handle;
1531 		__entry->length = segment->rs_length;
1532 		__entry->offset = segment->rs_offset;
1533 	),
1534 
1535 	TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1536 		__entry->cq_id, __entry->completion_id,
1537 		__entry->segno, __entry->length,
1538 		(unsigned long long)__entry->offset, __entry->handle
1539 	)
1540 );
1541 
1542 DECLARE_EVENT_CLASS(svcrdma_error_event,
1543 	TP_PROTO(
1544 		__be32 xid
1545 	),
1546 
1547 	TP_ARGS(xid),
1548 
1549 	TP_STRUCT__entry(
1550 		__field(u32, xid)
1551 	),
1552 
1553 	TP_fast_assign(
1554 		__entry->xid = be32_to_cpu(xid);
1555 	),
1556 
1557 	TP_printk("xid=0x%08x",
1558 		__entry->xid
1559 	)
1560 );
1561 
1562 #define DEFINE_ERROR_EVENT(name)					\
1563 		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1564 				TP_PROTO(				\
1565 					__be32 xid			\
1566 				),					\
1567 				TP_ARGS(xid))
1568 
1569 DEFINE_ERROR_EVENT(vers);
1570 DEFINE_ERROR_EVENT(chunk);
1571 
1572 /**
1573  ** Server-side RDMA API events
1574  **/
1575 
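/*
 * svcrdma_dma_map_class records the device name, peer address, DMA
 * address, and length for server-side page map and unmap events,
 * including the dma_map_err failure case.
 */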
1576 DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
1577 	TP_PROTO(
1578 		const struct svcxprt_rdma *rdma,
1579 		u64 dma_addr,
1580 		u32 length
1581 	),
1582 
1583 	TP_ARGS(rdma, dma_addr, length),
1584 
1585 	TP_STRUCT__entry(
1586 		__field(u64, dma_addr)
1587 		__field(u32, length)
1588 		__string(device, rdma->sc_cm_id->device->name)
1589 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1590 	),
1591 
1592 	TP_fast_assign(
1593 		__entry->dma_addr = dma_addr;
1594 		__entry->length = length;
1595 		__assign_str(device, rdma->sc_cm_id->device->name);
1596 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1597 	),
1598 
1599 	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
1600 		__get_str(addr), __get_str(device),
1601 		__entry->dma_addr, __entry->length
1602 	)
1603 );
1604 
1605 #define DEFINE_SVC_DMA_EVENT(name)					\
1606 		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
1607 				TP_PROTO(				\
1608 					const struct svcxprt_rdma *rdma,\
1609 					u64 dma_addr,			\
1610 					u32 length			\
1611 				),					\
1612 				TP_ARGS(rdma, dma_addr, length))
1613 
1614 DEFINE_SVC_DMA_EVENT(dma_map_page);
1615 DEFINE_SVC_DMA_EVENT(dma_map_err);
1616 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1617 
1618 TRACE_EVENT(svcrdma_dma_map_rw_err,
1619 	TP_PROTO(
1620 		const struct svcxprt_rdma *rdma,
1621 		unsigned int nents,
1622 		int status
1623 	),
1624 
1625 	TP_ARGS(rdma, nents, status),
1626 
1627 	TP_STRUCT__entry(
1628 		__field(int, status)
1629 		__field(unsigned int, nents)
1630 		__string(device, rdma->sc_cm_id->device->name)
1631 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1632 	),
1633 
1634 	TP_fast_assign(
1635 		__entry->status = status;
1636 		__entry->nents = nents;
1637 		__assign_str(device, rdma->sc_cm_id->device->name);
1638 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1639 	),
1640 
1641 	TP_printk("addr=%s device=%s nents=%u status=%d",
1642 		__get_str(addr), __get_str(device), __entry->nents,
1643 		__entry->status
1644 	)
1645 );
1646 
1647 TRACE_EVENT(svcrdma_no_rwctx_err,
1648 	TP_PROTO(
1649 		const struct svcxprt_rdma *rdma,
1650 		unsigned int num_sges
1651 	),
1652 
1653 	TP_ARGS(rdma, num_sges),
1654 
1655 	TP_STRUCT__entry(
1656 		__field(unsigned int, num_sges)
1657 		__string(device, rdma->sc_cm_id->device->name)
1658 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1659 	),
1660 
1661 	TP_fast_assign(
1662 		__entry->num_sges = num_sges;
1663 		__assign_str(device, rdma->sc_cm_id->device->name);
1664 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1665 	),
1666 
1667 	TP_printk("addr=%s device=%s num_sges=%u",
1668 		__get_str(addr), __get_str(device), __entry->num_sges
1669 	)
1670 );
1671 
1672 TRACE_EVENT(svcrdma_page_overrun_err,
1673 	TP_PROTO(
1674 		const struct svcxprt_rdma *rdma,
1675 		const struct svc_rqst *rqst,
1676 		unsigned int pageno
1677 	),
1678 
1679 	TP_ARGS(rdma, rqst, pageno),
1680 
1681 	TP_STRUCT__entry(
1682 		__field(unsigned int, pageno)
1683 		__field(u32, xid)
1684 		__string(device, rdma->sc_cm_id->device->name)
1685 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1686 	),
1687 
1688 	TP_fast_assign(
1689 		__entry->pageno = pageno;
1690 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1691 		__assign_str(device, rdma->sc_cm_id->device->name);
1692 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1693 	),
1694 
1695 	TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
1696 		__get_str(device), __entry->xid, __entry->pageno
1697 	)
1698 );
1699 
1700 TRACE_EVENT(svcrdma_small_wrch_err,
1701 	TP_PROTO(
1702 		const struct svcxprt_rdma *rdma,
1703 		unsigned int remaining,
1704 		unsigned int seg_no,
1705 		unsigned int num_segs
1706 	),
1707 
1708 	TP_ARGS(rdma, remaining, seg_no, num_segs),
1709 
1710 	TP_STRUCT__entry(
1711 		__field(unsigned int, remaining)
1712 		__field(unsigned int, seg_no)
1713 		__field(unsigned int, num_segs)
1714 		__string(device, rdma->sc_cm_id->device->name)
1715 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1716 	),
1717 
1718 	TP_fast_assign(
1719 		__entry->remaining = remaining;
1720 		__entry->seg_no = seg_no;
1721 		__entry->num_segs = num_segs;
1722 		__assign_str(device, rdma->sc_cm_id->device->name);
1723 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1724 	),
1725 
1726 	TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
1727 		__get_str(addr), __get_str(device), __entry->remaining,
1728 		__entry->seg_no, __entry->num_segs
1729 	)
1730 );
1731 
1732 TRACE_EVENT(svcrdma_send_pullup,
1733 	TP_PROTO(
1734 		const struct svc_rdma_send_ctxt *ctxt,
1735 		unsigned int msglen
1736 	),
1737 
1738 	TP_ARGS(ctxt, msglen),
1739 
1740 	TP_STRUCT__entry(
1741 		__field(u32, cq_id)
1742 		__field(int, completion_id)
1743 		__field(unsigned int, hdrlen)
1744 		__field(unsigned int, msglen)
1745 	),
1746 
1747 	TP_fast_assign(
1748 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1749 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1750 		__entry->hdrlen = ctxt->sc_hdrbuf.len;
1751 		__entry->msglen = msglen;
1752 	),
1753 
1754 	TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1755 		__entry->cq_id, __entry->completion_id,
1756 		__entry->hdrlen, __entry->msglen,
1757 		__entry->hdrlen + __entry->msglen)
1758 );
1759 
1760 TRACE_EVENT(svcrdma_send_err,
1761 	TP_PROTO(
1762 		const struct svc_rqst *rqst,
1763 		int status
1764 	),
1765 
1766 	TP_ARGS(rqst, status),
1767 
1768 	TP_STRUCT__entry(
1769 		__field(int, status)
1770 		__field(u32, xid)
1771 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1772 	),
1773 
1774 	TP_fast_assign(
1775 		__entry->status = status;
1776 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1777 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1778 	),
1779 
1780 	TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
1781 		__entry->xid, __entry->status
1782 	)
1783 );
1784 
1785 TRACE_EVENT(svcrdma_post_send,
1786 	TP_PROTO(
1787 		const struct svc_rdma_send_ctxt *ctxt
1788 	),
1789 
1790 	TP_ARGS(ctxt),
1791 
1792 	TP_STRUCT__entry(
1793 		__field(u32, cq_id)
1794 		__field(int, completion_id)
1795 		__field(unsigned int, num_sge)
1796 		__field(u32, inv_rkey)
1797 	),
1798 
1799 	TP_fast_assign(
1800 		const struct ib_send_wr *wr = &ctxt->sc_send_wr;
1801 
1802 		__entry->cq_id = ctxt->sc_cid.ci_queue_id;
1803 		__entry->completion_id = ctxt->sc_cid.ci_completion_id;
1804 		__entry->num_sge = wr->num_sge;
1805 		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1806 					wr->ex.invalidate_rkey : 0;
1807 	),
1808 
1809 	TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
1810 		__entry->cq_id, __entry->completion_id,
1811 		__entry->num_sge, __entry->inv_rkey
1812 	)
1813 );
1814 
1815 DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1816 
1817 TRACE_EVENT(svcrdma_post_recv,
1818 	TP_PROTO(
1819 		const struct svc_rdma_recv_ctxt *ctxt
1820 	),
1821 
1822 	TP_ARGS(ctxt),
1823 
1824 	TP_STRUCT__entry(
1825 		__field(u32, cq_id)
1826 		__field(int, completion_id)
1827 	),
1828 
1829 	TP_fast_assign(
1830 		__entry->cq_id = ctxt->rc_cid.ci_queue_id;
1831 		__entry->completion_id = ctxt->rc_cid.ci_completion_id;
1832 	),
1833 
1834 	TP_printk("cq.id=%u cid=%d",
1835 		__entry->cq_id, __entry->completion_id
1836 	)
1837 );
1838 
1839 DEFINE_RECEIVE_COMPLETION_EVENT(svcrdma_wc_receive);
1840 
1841 TRACE_EVENT(svcrdma_rq_post_err,
1842 	TP_PROTO(
1843 		const struct svcxprt_rdma *rdma,
1844 		int status
1845 	),
1846 
1847 	TP_ARGS(rdma, status),
1848 
1849 	TP_STRUCT__entry(
1850 		__field(int, status)
1851 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1852 	),
1853 
1854 	TP_fast_assign(
1855 		__entry->status = status;
1856 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1857 	),
1858 
1859 	TP_printk("addr=%s status=%d",
1860 		__get_str(addr), __entry->status
1861 	)
1862 );
1863 
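/*
 * svcrdma_post_chunk_class records how many Send Queue entries were
 * consumed when posting the WR chain for a Read, Write, or Reply
 * chunk (see the DEFINE_POST_CHUNK_EVENT instances below).
 */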
1864 DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
1865 	TP_PROTO(
1866 		const struct rpc_rdma_cid *cid,
1867 		int sqecount
1868 	),
1869 
1870 	TP_ARGS(cid, sqecount),
1871 
1872 	TP_STRUCT__entry(
1873 		__field(u32, cq_id)
1874 		__field(int, completion_id)
1875 		__field(int, sqecount)
1876 	),
1877 
1878 	TP_fast_assign(
1879 		__entry->cq_id = cid->ci_queue_id;
1880 		__entry->completion_id = cid->ci_completion_id;
1881 		__entry->sqecount = sqecount;
1882 	),
1883 
1884 	TP_printk("cq.id=%u cid=%d sqecount=%d",
1885 		__entry->cq_id, __entry->completion_id,
1886 		__entry->sqecount
1887 	)
1888 );
1889 
1890 #define DEFINE_POST_CHUNK_EVENT(name)					\
1891 		DEFINE_EVENT(svcrdma_post_chunk_class,			\
1892 				svcrdma_post_##name##_chunk,		\
1893 				TP_PROTO(				\
1894 					const struct rpc_rdma_cid *cid,	\
1895 					int sqecount			\
1896 				),					\
1897 				TP_ARGS(cid, sqecount))
1898 
1899 DEFINE_POST_CHUNK_EVENT(read);
1900 DEFINE_POST_CHUNK_EVENT(write);
1901 DEFINE_POST_CHUNK_EVENT(reply);
1902 
1903 DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
1904 DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1905 
1906 TRACE_EVENT(svcrdma_qp_error,
1907 	TP_PROTO(
1908 		const struct ib_event *event,
1909 		const struct sockaddr *sap
1910 	),
1911 
1912 	TP_ARGS(event, sap),
1913 
1914 	TP_STRUCT__entry(
1915 		__field(unsigned int, event)
1916 		__string(device, event->device->name)
1917 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1918 	),
1919 
1920 	TP_fast_assign(
1921 		__entry->event = event->event;
1922 		__assign_str(device, event->device->name);
1923 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1924 			 "%pISpc", sap);
1925 	),
1926 
1927 	TP_printk("addr=%s dev=%s event=%s (%u)",
1928 		__entry->addr, __get_str(device),
1929 		rdma_show_ib_event(__entry->event), __entry->event
1930 	)
1931 );
1932 
1933 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1934 	TP_PROTO(
1935 		const struct svcxprt_rdma *rdma
1936 	),
1937 
1938 	TP_ARGS(rdma),
1939 
1940 	TP_STRUCT__entry(
1941 		__field(int, avail)
1942 		__field(int, depth)
1943 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1944 	),
1945 
1946 	TP_fast_assign(
1947 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1948 		__entry->depth = rdma->sc_sq_depth;
1949 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1950 	),
1951 
1952 	TP_printk("addr=%s sc_sq_avail=%d/%d",
1953 		__get_str(addr), __entry->avail, __entry->depth
1954 	)
1955 );
1956 
1957 #define DEFINE_SQ_EVENT(name)						\
1958 		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1959 				TP_PROTO(				\
1960 					const struct svcxprt_rdma *rdma \
1961 				),					\
1962 				TP_ARGS(rdma))
1963 
1964 DEFINE_SQ_EVENT(full);
1965 DEFINE_SQ_EVENT(retry);
1966 
1967 TRACE_EVENT(svcrdma_sq_post_err,
1968 	TP_PROTO(
1969 		const struct svcxprt_rdma *rdma,
1970 		int status
1971 	),
1972 
1973 	TP_ARGS(rdma, status),
1974 
1975 	TP_STRUCT__entry(
1976 		__field(int, avail)
1977 		__field(int, depth)
1978 		__field(int, status)
1979 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1980 	),
1981 
1982 	TP_fast_assign(
1983 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1984 		__entry->depth = rdma->sc_sq_depth;
1985 		__entry->status = status;
1986 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1987 	),
1988 
1989 	TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
1990 		__get_str(addr), __entry->avail, __entry->depth,
1991 		__entry->status
1992 	)
1993 );
1994 
1995 #endif /* _TRACE_RPCRDMA_H */
1996 
1997 #include <trace/define_trace.h>
1998