xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 2fa49589)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
/* Event class for RPC/RDMA transport-header replies: records the rep and
 * its owning transport, plus the XID, version, and procedure fields from
 * the received header, converted to CPU byte order.
 */
21 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
22 	TP_PROTO(
23 		const struct rpcrdma_rep *rep
24 	),
25 
26 	TP_ARGS(rep),
27 
28 	TP_STRUCT__entry(
29 		__field(const void *, rep)
30 		__field(const void *, r_xprt)
31 		__field(u32, xid)
32 		__field(u32, version)
33 		__field(u32, proc)
34 	),
35 
36 	TP_fast_assign(
37 		__entry->rep = rep;
38 		__entry->r_xprt = rep->rr_rxprt;
39 		__entry->xid = be32_to_cpu(rep->rr_xid);
40 		__entry->version = be32_to_cpu(rep->rr_vers);
41 		__entry->proc = be32_to_cpu(rep->rr_proc);
42 	),
43 
44 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
45 		__entry->r_xprt, __entry->xid, __entry->rep,
46 		__entry->version, __entry->proc
47 	)
48 );
49 
/* Instantiate a trace event named @name from class xprtrdma_reply_event. */
50 #define DEFINE_REPLY_EVENT(name)					\
51 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
52 				TP_PROTO(				\
53 					const struct rpcrdma_rep *rep	\
54 				),					\
55 				TP_ARGS(rep))
56 
/* Event class for events that need only identify a transport: records the
 * r_xprt pointer and the peer's presentation address and port strings.
 */
57 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
58 	TP_PROTO(
59 		const struct rpcrdma_xprt *r_xprt
60 	),
61 
62 	TP_ARGS(r_xprt),
63 
64 	TP_STRUCT__entry(
65 		__field(const void *, r_xprt)
66 		__string(addr, rpcrdma_addrstr(r_xprt))
67 		__string(port, rpcrdma_portstr(r_xprt))
68 	),
69 
70 	TP_fast_assign(
71 		__entry->r_xprt = r_xprt;
72 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
73 		__assign_str(port, rpcrdma_portstr(r_xprt));
74 	),
75 
76 	TP_printk("peer=[%s]:%s r_xprt=%p",
77 		__get_str(addr), __get_str(port), __entry->r_xprt
78 	)
79 );
80 
/* Instantiate a trace event named @name from class xprtrdma_rxprt. */
81 #define DEFINE_RXPRT_EVENT(name)					\
82 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
83 				TP_PROTO(				\
84 					const struct rpcrdma_xprt *r_xprt \
85 				),					\
86 				TP_ARGS(r_xprt))
87 
/* Event class for Read chunk registration: records the owning RPC task,
 * the chunk position, and the MR's nents/handle/length/offset.  The
 * "more"/"last" marker compares mr_nents against the remaining segment
 * count to show whether further segments follow in this chunk.
 */
88 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
89 	TP_PROTO(
90 		const struct rpc_task *task,
91 		unsigned int pos,
92 		struct rpcrdma_mr *mr,
93 		int nsegs
94 	),
95 
96 	TP_ARGS(task, pos, mr, nsegs),
97 
98 	TP_STRUCT__entry(
99 		__field(unsigned int, task_id)
100 		__field(unsigned int, client_id)
101 		__field(unsigned int, pos)
102 		__field(int, nents)
103 		__field(u32, handle)
104 		__field(u32, length)
105 		__field(u64, offset)
106 		__field(int, nsegs)
107 	),
108 
109 	TP_fast_assign(
110 		__entry->task_id = task->tk_pid;
111 		__entry->client_id = task->tk_client->cl_clid;
112 		__entry->pos = pos;
113 		__entry->nents = mr->mr_nents;
114 		__entry->handle = mr->mr_handle;
115 		__entry->length = mr->mr_length;
116 		__entry->offset = mr->mr_offset;
117 		__entry->nsegs = nsegs;
118 	),
119 
120 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
121 		__entry->task_id, __entry->client_id,
122 		__entry->pos, __entry->length,
123 		(unsigned long long)__entry->offset, __entry->handle,
124 		__entry->nents < __entry->nsegs ? "more" : "last"
125 	)
126 );
127 
/* Instantiate xprtrdma_chunk_##name from class xprtrdma_rdch_event. */
128 #define DEFINE_RDCH_EVENT(name)						\
129 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
130 				TP_PROTO(				\
131 					const struct rpc_task *task,	\
132 					unsigned int pos,		\
133 					struct rpcrdma_mr *mr,		\
134 					int nsegs			\
135 				),					\
136 				TP_ARGS(task, pos, mr, nsegs))
137 
/* Event class for Write/Reply chunk registration: identical to the Read
 * chunk class except that Write and Reply chunks carry no XDR position.
 */
138 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
139 	TP_PROTO(
140 		const struct rpc_task *task,
141 		struct rpcrdma_mr *mr,
142 		int nsegs
143 	),
144 
145 	TP_ARGS(task, mr, nsegs),
146 
147 	TP_STRUCT__entry(
148 		__field(unsigned int, task_id)
149 		__field(unsigned int, client_id)
150 		__field(int, nents)
151 		__field(u32, handle)
152 		__field(u32, length)
153 		__field(u64, offset)
154 		__field(int, nsegs)
155 	),
156 
157 	TP_fast_assign(
158 		__entry->task_id = task->tk_pid;
159 		__entry->client_id = task->tk_client->cl_clid;
160 		__entry->nents = mr->mr_nents;
161 		__entry->handle = mr->mr_handle;
162 		__entry->length = mr->mr_length;
163 		__entry->offset = mr->mr_offset;
164 		__entry->nsegs = nsegs;
165 	),
166 
167 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
168 		__entry->task_id, __entry->client_id,
169 		__entry->length, (unsigned long long)__entry->offset,
170 		__entry->handle,
171 		__entry->nents < __entry->nsegs ? "more" : "last"
172 	)
173 );
174 
/* Instantiate xprtrdma_chunk_##name from class xprtrdma_wrch_event. */
175 #define DEFINE_WRCH_EVENT(name)						\
176 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
177 				TP_PROTO(				\
178 					const struct rpc_task *task,	\
179 					struct rpcrdma_mr *mr,		\
180 					int nsegs			\
181 				),					\
182 				TP_ARGS(task, mr, nsegs))
183 
/* Export the FRWR state enum values so user space can decode the
 * symbolic names printed by xprtrdma_show_frwr_state() below.
 */
184 TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
185 TRACE_DEFINE_ENUM(FRWR_IS_VALID);
186 TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
187 TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
188 
189 #define xprtrdma_show_frwr_state(x)					\
190 		__print_symbolic(x,					\
191 				{ FRWR_IS_INVALID, "INVALID" },		\
192 				{ FRWR_IS_VALID, "VALID" },		\
193 				{ FRWR_FLUSHED_FR, "FLUSHED_FR" },	\
194 				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })
195 
/* Event class for FRWR completions: records the MR (recovered from the
 * embedded frwr via container_of), its fr_state, and the WC status.  The
 * vendor error is captured only when the completion failed.
 */
196 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
197 	TP_PROTO(
198 		const struct ib_wc *wc,
199 		const struct rpcrdma_frwr *frwr
200 	),
201 
202 	TP_ARGS(wc, frwr),
203 
204 	TP_STRUCT__entry(
205 		__field(const void *, mr)
206 		__field(unsigned int, state)
207 		__field(unsigned int, status)
208 		__field(unsigned int, vendor_err)
209 	),
210 
211 	TP_fast_assign(
212 		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
213 		__entry->state = frwr->fr_state;
214 		__entry->status = wc->status;
215 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
216 	),
217 
218 	TP_printk(
219 		"mr=%p state=%s: %s (%u/0x%x)",
220 		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
221 		rdma_show_wc_status(__entry->status),
222 		__entry->status, __entry->vendor_err
223 	)
224 );
225 
/* Instantiate a trace event named @name from class xprtrdma_frwr_done. */
226 #define DEFINE_FRWR_DONE_EVENT(name)					\
227 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
228 				TP_PROTO(				\
229 					const struct ib_wc *wc,		\
230 					const struct rpcrdma_frwr *frwr	\
231 				),					\
232 				TP_ARGS(wc, frwr))
233 
/* Export the DMA data-direction enum values for symbolic decoding by
 * xprtrdma_show_direction() below.
 */
234 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
235 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
236 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
237 TRACE_DEFINE_ENUM(DMA_NONE);
238 
239 #define xprtrdma_show_direction(x)					\
240 		__print_symbolic(x,					\
241 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
242 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
243 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
244 				{ DMA_NONE, "NONE" })
245 
/* Event class for MR lifecycle events: records the MR pointer, its RDMA
 * handle/length/offset, and its DMA mapping direction.
 */
246 DECLARE_EVENT_CLASS(xprtrdma_mr,
247 	TP_PROTO(
248 		const struct rpcrdma_mr *mr
249 	),
250 
251 	TP_ARGS(mr),
252 
253 	TP_STRUCT__entry(
254 		__field(const void *, mr)
255 		__field(u32, handle)
256 		__field(u32, length)
257 		__field(u64, offset)
258 		__field(u32, dir)
259 	),
260 
261 	TP_fast_assign(
262 		__entry->mr = mr;
263 		__entry->handle = mr->mr_handle;
264 		__entry->length = mr->mr_length;
265 		__entry->offset = mr->mr_offset;
266 		__entry->dir    = mr->mr_dir;
267 	),
268 
269 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
270 		__entry->mr, __entry->length,
271 		(unsigned long long)__entry->offset, __entry->handle,
272 		xprtrdma_show_direction(__entry->dir)
273 	)
274 );
275 
/* Instantiate xprtrdma_mr_##name from class xprtrdma_mr. */
276 #define DEFINE_MR_EVENT(name) \
277 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
278 				TP_PROTO( \
279 					const struct rpcrdma_mr *mr \
280 				), \
281 				TP_ARGS(mr))
282 
/* Event class for backchannel (callback) requests: records the rqst, its
 * rpcrdma_req, that req's cached reply, and the request XID.
 */
283 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
284 	TP_PROTO(
285 		const struct rpc_rqst *rqst
286 	),
287 
288 	TP_ARGS(rqst),
289 
290 	TP_STRUCT__entry(
291 		__field(const void *, rqst)
292 		__field(const void *, rep)
293 		__field(const void *, req)
294 		__field(u32, xid)
295 	),
296 
297 	TP_fast_assign(
298 		__entry->rqst = rqst;
299 		__entry->req = rpcr_to_rdmar(rqst);
300 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
301 		__entry->xid = be32_to_cpu(rqst->rq_xid);
302 	),
303 
304 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
305 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
306 	)
307 );
308 
/* Instantiate a trace event named @name from class xprtrdma_cb_event. */
309 #define DEFINE_CB_EVENT(name)						\
310 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
311 				TP_PROTO(				\
312 					const struct rpc_rqst *rqst	\
313 				),					\
314 				TP_ARGS(rqst))
315 
316 /**
317  ** Connection events
318  **/
319 
/* Fires on an RDMA connection manager upcall: records the CM event code
 * (decoded symbolically via rdma_show_cm_event) and its status.
 */
320 TRACE_EVENT(xprtrdma_cm_event,
321 	TP_PROTO(
322 		const struct rpcrdma_xprt *r_xprt,
323 		struct rdma_cm_event *event
324 	),
325 
326 	TP_ARGS(r_xprt, event),
327 
328 	TP_STRUCT__entry(
329 		__field(const void *, r_xprt)
330 		__field(unsigned int, event)
331 		__field(int, status)
332 		__string(addr, rpcrdma_addrstr(r_xprt))
333 		__string(port, rpcrdma_portstr(r_xprt))
334 	),
335 
336 	TP_fast_assign(
337 		__entry->r_xprt = r_xprt;
338 		__entry->event = event->event;
339 		__entry->status = event->status;
340 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
341 		__assign_str(port, rpcrdma_portstr(r_xprt));
342 	),
343 
344 	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
345 		__get_str(addr), __get_str(port),
346 		__entry->r_xprt, rdma_show_cm_event(__entry->event),
347 		__entry->event, __entry->status
348 	)
349 );
350 
/* Fires when a transport disconnects: records the disconnect status and
 * the ep's rep_connected value ("still connected" when it is 1).
 */
351 TRACE_EVENT(xprtrdma_disconnect,
352 	TP_PROTO(
353 		const struct rpcrdma_xprt *r_xprt,
354 		int status
355 	),
356 
357 	TP_ARGS(r_xprt, status),
358 
359 	TP_STRUCT__entry(
360 		__field(const void *, r_xprt)
361 		__field(int, status)
362 		__field(int, connected)
363 		__string(addr, rpcrdma_addrstr(r_xprt))
364 		__string(port, rpcrdma_portstr(r_xprt))
365 	),
366 
367 	TP_fast_assign(
368 		__entry->r_xprt = r_xprt;
369 		__entry->status = status;
370 		__entry->connected = r_xprt->rx_ep.rep_connected;
371 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
372 		__assign_str(port, rpcrdma_portstr(r_xprt));
373 	),
374 
375 	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
376 		__get_str(addr), __get_str(port),
377 		__entry->r_xprt, __entry->status,
378 		__entry->connected == 1 ? "still " : "dis"
379 	)
380 );
381 
/* Transport lifetime events that record only the r_xprt and peer. */
382 DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
383 DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
384 DEFINE_RXPRT_EVENT(xprtrdma_create);
385 DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
386 DEFINE_RXPRT_EVENT(xprtrdma_remove);
387 DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
388 DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
389 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
390 DEFINE_RXPRT_EVENT(xprtrdma_op_close);
391 DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
392 
/* Fires on an IB queue pair event: records the device name and the IB
 * event code (decoded symbolically via rdma_show_ib_event).
 */
393 TRACE_EVENT(xprtrdma_qp_event,
394 	TP_PROTO(
395 		const struct rpcrdma_xprt *r_xprt,
396 		const struct ib_event *event
397 	),
398 
399 	TP_ARGS(r_xprt, event),
400 
401 	TP_STRUCT__entry(
402 		__field(const void *, r_xprt)
403 		__field(unsigned int, event)
404 		__string(name, event->device->name)
405 		__string(addr, rpcrdma_addrstr(r_xprt))
406 		__string(port, rpcrdma_portstr(r_xprt))
407 	),
408 
409 	TP_fast_assign(
410 		__entry->r_xprt = r_xprt;
411 		__entry->event = event->event;
412 		__assign_str(name, event->device->name);
413 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
414 		__assign_str(port, rpcrdma_portstr(r_xprt));
415 	),
416 
417 	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
418 		__get_str(addr), __get_str(port), __entry->r_xprt,
419 		__get_str(name), rdma_show_ib_event(__entry->event),
420 		__entry->event
421 	)
422 );
423 
424 /**
425  ** Call events
426  **/
427 
/* Fires after a batch of MRs is allocated for a transport: records how
 * many were created.
 */
428 TRACE_EVENT(xprtrdma_createmrs,
429 	TP_PROTO(
430 		const struct rpcrdma_xprt *r_xprt,
431 		unsigned int count
432 	),
433 
434 	TP_ARGS(r_xprt, count),
435 
436 	TP_STRUCT__entry(
437 		__field(const void *, r_xprt)
438 		__field(unsigned int, count)
439 	),
440 
441 	TP_fast_assign(
442 		__entry->r_xprt = r_xprt;
443 		__entry->count = count;
444 	),
445 
446 	TP_printk("r_xprt=%p: created %u MRs",
447 		__entry->r_xprt, __entry->count
448 	)
449 );
450 
/* Fires when a transport has run out of MRs. */
451 DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
452 
/* Per-chunk registration events (Read, Write, Reply chunks). */
453 DEFINE_RDCH_EVENT(read);
454 DEFINE_WRCH_EVENT(write);
455 DEFINE_WRCH_EVENT(reply);
456 
/* Export the chunk-type enum values for symbolic decoding by
 * xprtrdma_show_chunktype() below.
 */
457 TRACE_DEFINE_ENUM(rpcrdma_noch);
458 TRACE_DEFINE_ENUM(rpcrdma_readch);
459 TRACE_DEFINE_ENUM(rpcrdma_areadch);
460 TRACE_DEFINE_ENUM(rpcrdma_writech);
461 TRACE_DEFINE_ENUM(rpcrdma_replych);
462 
463 #define xprtrdma_show_chunktype(x)					\
464 		__print_symbolic(x,					\
465 				{ rpcrdma_noch, "inline" },		\
466 				{ rpcrdma_readch, "read list" },	\
467 				{ rpcrdma_areadch, "*read list" },	\
468 				{ rpcrdma_writech, "write list" },	\
469 				{ rpcrdma_replych, "reply chunk" })
470 
/* Fires when a request is marshaled: records the transport header length,
 * the head/page/tail lengths of the send buffer, and the chosen read and
 * write chunk types.
 */
471 TRACE_EVENT(xprtrdma_marshal,
472 	TP_PROTO(
473 		const struct rpc_rqst *rqst,
474 		unsigned int hdrlen,
475 		unsigned int rtype,
476 		unsigned int wtype
477 	),
478 
479 	TP_ARGS(rqst, hdrlen, rtype, wtype),
480 
481 	TP_STRUCT__entry(
482 		__field(unsigned int, task_id)
483 		__field(unsigned int, client_id)
484 		__field(u32, xid)
485 		__field(unsigned int, hdrlen)
486 		__field(unsigned int, headlen)
487 		__field(unsigned int, pagelen)
488 		__field(unsigned int, taillen)
489 		__field(unsigned int, rtype)
490 		__field(unsigned int, wtype)
491 	),
492 
493 	TP_fast_assign(
494 		__entry->task_id = rqst->rq_task->tk_pid;
495 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
496 		__entry->xid = be32_to_cpu(rqst->rq_xid);
497 		__entry->hdrlen = hdrlen;
498 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
499 		__entry->pagelen = rqst->rq_snd_buf.page_len;
500 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
501 		__entry->rtype = rtype;
502 		__entry->wtype = wtype;
503 	),
504 
505 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
506 		__entry->task_id, __entry->client_id, __entry->xid,
507 		__entry->hdrlen,
508 		__entry->headlen, __entry->pagelen, __entry->taillen,
509 		xprtrdma_show_chunktype(__entry->rtype),
510 		xprtrdma_show_chunktype(__entry->wtype)
511 	)
512 );
513 
/* Fires when a Send WR is posted: records the SGE count, whether the WR
 * requested a signaled completion, and the post status.
 */
514 TRACE_EVENT(xprtrdma_post_send,
515 	TP_PROTO(
516 		const struct rpcrdma_req *req,
517 		int status
518 	),
519 
520 	TP_ARGS(req, status),
521 
522 	TP_STRUCT__entry(
523 		__field(const void *, req)
524 		__field(int, num_sge)
525 		__field(int, signaled)
526 		__field(int, status)
527 	),
528 
529 	TP_fast_assign(
530 		__entry->req = req;
531 		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
532 		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
533 				    IB_SEND_SIGNALED;
534 		__entry->status = status;
535 	),
536 
537 	TP_printk("req=%p, %d SGEs%s, status=%d",
538 		__entry->req, __entry->num_sge,
539 		(__entry->signaled ? ", signaled" : ""),
540 		__entry->status
541 	)
542 );
543 
/* Fires when a single Receive WR is posted: records only its CQE. */
544 TRACE_EVENT(xprtrdma_post_recv,
545 	TP_PROTO(
546 		const struct ib_cqe *cqe
547 	),
548 
549 	TP_ARGS(cqe),
550 
551 	TP_STRUCT__entry(
552 		__field(const void *, cqe)
553 	),
554 
555 	TP_fast_assign(
556 		__entry->cqe = cqe;
557 	),
558 
559 	TP_printk("cqe=%p",
560 		__entry->cqe
561 	)
562 );
563 
/* Fires when a batch of Receive WRs is posted: records how many were
 * requested, the ep's current rep_receive_count, and the post result.
 */
564 TRACE_EVENT(xprtrdma_post_recvs,
565 	TP_PROTO(
566 		const struct rpcrdma_xprt *r_xprt,
567 		unsigned int count,
568 		int status
569 	),
570 
571 	TP_ARGS(r_xprt, count, status),
572 
573 	TP_STRUCT__entry(
574 		__field(const void *, r_xprt)
575 		__field(unsigned int, count)
576 		__field(int, status)
577 		__field(int, posted)
578 		__string(addr, rpcrdma_addrstr(r_xprt))
579 		__string(port, rpcrdma_portstr(r_xprt))
580 	),
581 
582 	TP_fast_assign(
583 		__entry->r_xprt = r_xprt;
584 		__entry->count = count;
585 		__entry->status = status;
586 		__entry->posted = r_xprt->rx_ep.rep_receive_count;
587 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
588 		__assign_str(port, rpcrdma_portstr(r_xprt));
589 	),
590 
591 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
592 		__get_str(addr), __get_str(port), __entry->r_xprt,
593 		__entry->count, __entry->posted, __entry->status
594 	)
595 );
596 
597 /**
598  ** Completion events
599  **/
600 
/* Fires on a Send completion: records the owning req, the number of
 * unmapped SGEs, and the WC status (vendor error only when it failed).
 */
601 TRACE_EVENT(xprtrdma_wc_send,
602 	TP_PROTO(
603 		const struct rpcrdma_sendctx *sc,
604 		const struct ib_wc *wc
605 	),
606 
607 	TP_ARGS(sc, wc),
608 
609 	TP_STRUCT__entry(
610 		__field(const void *, req)
611 		__field(unsigned int, unmap_count)
612 		__field(unsigned int, status)
613 		__field(unsigned int, vendor_err)
614 	),
615 
616 	TP_fast_assign(
617 		__entry->req = sc->sc_req;
618 		__entry->unmap_count = sc->sc_unmap_count;
619 		__entry->status = wc->status;
620 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
621 	),
622 
623 	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
624 		__entry->req, __entry->unmap_count,
625 		rdma_show_wc_status(__entry->status),
626 		__entry->status, __entry->vendor_err
627 	)
628 );
629 
/* Fires on a Receive completion: records the CQE and status.  byte_len
 * is valid only on success, so it is zeroed (and vendor_err captured)
 * when the completion failed.
 */
630 TRACE_EVENT(xprtrdma_wc_receive,
631 	TP_PROTO(
632 		const struct ib_wc *wc
633 	),
634 
635 	TP_ARGS(wc),
636 
637 	TP_STRUCT__entry(
638 		__field(const void *, cqe)
639 		__field(u32, byte_len)
640 		__field(unsigned int, status)
641 		__field(u32, vendor_err)
642 	),
643 
644 	TP_fast_assign(
645 		__entry->cqe = wc->wr_cqe;
646 		__entry->status = wc->status;
647 		if (wc->status) {
648 			__entry->byte_len = 0;
649 			__entry->vendor_err = wc->vendor_err;
650 		} else {
651 			__entry->byte_len = wc->byte_len;
652 			__entry->vendor_err = 0;
653 		}
654 	),
655 
656 	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
657 		__entry->cqe, __entry->byte_len,
658 		rdma_show_wc_status(__entry->status),
659 		__entry->status, __entry->vendor_err
660 	)
661 );
662 
/* FRWR completion events: FastReg, LocalInv, and LocalInv-with-waker. */
663 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
664 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
665 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
666 
/* Fires when FRWR resources are allocated for an MR: records the result
 * code of the allocation.
 */
667 TRACE_EVENT(xprtrdma_frwr_alloc,
668 	TP_PROTO(
669 		const struct rpcrdma_mr *mr,
670 		int rc
671 	),
672 
673 	TP_ARGS(mr, rc),
674 
675 	TP_STRUCT__entry(
676 		__field(const void *, mr)
677 		__field(int, rc)
678 	),
679 
680 	TP_fast_assign(
681 		__entry->mr = mr;
682 		__entry->rc	= rc;
683 	),
684 
685 	TP_printk("mr=%p: rc=%d",
686 		__entry->mr, __entry->rc
687 	)
688 );
689 
/* Fires when an FRWR MR is deregistered: records the MR's handle,
 * length, offset, DMA direction, and the deregistration result code.
 */
690 TRACE_EVENT(xprtrdma_frwr_dereg,
691 	TP_PROTO(
692 		const struct rpcrdma_mr *mr,
693 		int rc
694 	),
695 
696 	TP_ARGS(mr, rc),
697 
698 	TP_STRUCT__entry(
699 		__field(const void *, mr)
700 		__field(u32, handle)
701 		__field(u32, length)
702 		__field(u64, offset)
703 		__field(u32, dir)
704 		__field(int, rc)
705 	),
706 
707 	TP_fast_assign(
708 		__entry->mr = mr;
709 		__entry->handle = mr->mr_handle;
710 		__entry->length = mr->mr_length;
711 		__entry->offset = mr->mr_offset;
712 		__entry->dir    = mr->mr_dir;
713 		__entry->rc	= rc;
714 	),
715 
716 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
717 		__entry->mr, __entry->length,
718 		(unsigned long long)__entry->offset, __entry->handle,
719 		xprtrdma_show_direction(__entry->dir),
720 		__entry->rc
721 	)
722 );
723 
/* Fires on a scatterlist error during FRWR registration: records the
 * first segment's DMA address, the direction, and the sg_nents value.
 */
724 TRACE_EVENT(xprtrdma_frwr_sgerr,
725 	TP_PROTO(
726 		const struct rpcrdma_mr *mr,
727 		int sg_nents
728 	),
729 
730 	TP_ARGS(mr, sg_nents),
731 
732 	TP_STRUCT__entry(
733 		__field(const void *, mr)
734 		__field(u64, addr)
735 		__field(u32, dir)
736 		__field(int, nents)
737 	),
738 
739 	TP_fast_assign(
740 		__entry->mr = mr;
741 		__entry->addr = mr->mr_sg->dma_address;
742 		__entry->dir = mr->mr_dir;
743 		__entry->nents = sg_nents;
744 	),
745 
746 	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
747 		__entry->mr, __entry->addr,
748 		xprtrdma_show_direction(__entry->dir),
749 		__entry->nents
750 	)
751 );
752 
/* Fires on a DMA mapping error during FRWR registration: records how
 * many segments were mapped versus the MR's total mr_nents.
 */
753 TRACE_EVENT(xprtrdma_frwr_maperr,
754 	TP_PROTO(
755 		const struct rpcrdma_mr *mr,
756 		int num_mapped
757 	),
758 
759 	TP_ARGS(mr, num_mapped),
760 
761 	TP_STRUCT__entry(
762 		__field(const void *, mr)
763 		__field(u64, addr)
764 		__field(u32, dir)
765 		__field(int, num_mapped)
766 		__field(int, nents)
767 	),
768 
769 	TP_fast_assign(
770 		__entry->mr = mr;
771 		__entry->addr = mr->mr_sg->dma_address;
772 		__entry->dir = mr->mr_dir;
773 		__entry->num_mapped = num_mapped;
774 		__entry->nents = mr->mr_nents;
775 	),
776 
777 	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
778 		__entry->mr, __entry->addr,
779 		xprtrdma_show_direction(__entry->dir),
780 		__entry->num_mapped, __entry->nents
781 	)
782 );
783 
/* MR lifecycle events: local/remote invalidation, map, unmap, recycle. */
784 DEFINE_MR_EVENT(localinv);
785 DEFINE_MR_EVENT(map);
786 DEFINE_MR_EVENT(unmap);
787 DEFINE_MR_EVENT(remoteinv);
788 DEFINE_MR_EVENT(recycle);
789 
/* Fires on a DMA mapping failure: records the address that failed to map.
 *
 * Fix: dropped the stray "\n" from the TP_printk() format string.  The
 * trace output framework appends the line terminator itself, so an
 * embedded newline produced a spurious blank line in the trace buffer
 * (and is flagged by checkpatch).
 */
790 TRACE_EVENT(xprtrdma_dma_maperr,
791 	TP_PROTO(
792 		u64 addr
793 	),
794 
795 	TP_ARGS(addr),
796 
797 	TP_STRUCT__entry(
798 		__field(u64, addr)
799 	),
800 
801 	TP_fast_assign(
802 		__entry->addr = addr;
803 	),
804 
805 	TP_printk("dma addr=0x%llx", __entry->addr)
806 );
807 
808 /**
809  ** Reply events
810  **/
811 
/* Fires when an incoming reply is matched to its request: records the
 * task, rep, req, the reply XID, and the credit grant from the server.
 */
812 TRACE_EVENT(xprtrdma_reply,
813 	TP_PROTO(
814 		const struct rpc_task *task,
815 		const struct rpcrdma_rep *rep,
816 		const struct rpcrdma_req *req,
817 		unsigned int credits
818 	),
819 
820 	TP_ARGS(task, rep, req, credits),
821 
822 	TP_STRUCT__entry(
823 		__field(unsigned int, task_id)
824 		__field(unsigned int, client_id)
825 		__field(const void *, rep)
826 		__field(const void *, req)
827 		__field(u32, xid)
828 		__field(unsigned int, credits)
829 	),
830 
831 	TP_fast_assign(
832 		__entry->task_id = task->tk_pid;
833 		__entry->client_id = task->tk_client->cl_clid;
834 		__entry->rep = rep;
835 		__entry->req = req;
836 		__entry->xid = be32_to_cpu(rep->rr_xid);
837 		__entry->credits = credits;
838 	),
839 
840 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
841 		__entry->task_id, __entry->client_id, __entry->xid,
842 		__entry->credits, __entry->rep, __entry->req
843 	)
844 );
845 
/* Fires when reply completion is deferred: records the task owning the
 * rep's matched rqst and the reply XID.
 */
846 TRACE_EVENT(xprtrdma_defer_cmp,
847 	TP_PROTO(
848 		const struct rpcrdma_rep *rep
849 	),
850 
851 	TP_ARGS(rep),
852 
853 	TP_STRUCT__entry(
854 		__field(unsigned int, task_id)
855 		__field(unsigned int, client_id)
856 		__field(const void *, rep)
857 		__field(u32, xid)
858 	),
859 
860 	TP_fast_assign(
861 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
862 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
863 		__entry->rep = rep;
864 		__entry->xid = be32_to_cpu(rep->rr_xid);
865 	),
866 
867 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
868 		__entry->task_id, __entry->client_id, __entry->xid,
869 		__entry->rep
870 	)
871 );
872 
/* Reply sanity-check failure events from class xprtrdma_reply_event. */
873 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
874 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
875 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
876 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
877 
/* Fires when reply data is copied into the receive buffer: records the
 * head iov base plus the data and header lengths involved.
 */
878 TRACE_EVENT(xprtrdma_fixup,
879 	TP_PROTO(
880 		const struct rpc_rqst *rqst,
881 		int len,
882 		int hdrlen
883 	),
884 
885 	TP_ARGS(rqst, len, hdrlen),
886 
887 	TP_STRUCT__entry(
888 		__field(unsigned int, task_id)
889 		__field(unsigned int, client_id)
890 		__field(const void *, base)
891 		__field(int, len)
892 		__field(int, hdrlen)
893 	),
894 
895 	TP_fast_assign(
896 		__entry->task_id = rqst->rq_task->tk_pid;
897 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
898 		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
899 		__entry->len = len;
900 		__entry->hdrlen = hdrlen;
901 	),
902 
903 	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
904 		__entry->task_id, __entry->client_id,
905 		__entry->base, __entry->len, __entry->hdrlen
906 	)
907 );
908 
/* Per-page variant of xprtrdma_fixup: records the page number, the copy
 * position, and the total and per-page lengths.
 */
909 TRACE_EVENT(xprtrdma_fixup_pg,
910 	TP_PROTO(
911 		const struct rpc_rqst *rqst,
912 		int pageno,
913 		const void *pos,
914 		int len,
915 		int curlen
916 	),
917 
918 	TP_ARGS(rqst, pageno, pos, len, curlen),
919 
920 	TP_STRUCT__entry(
921 		__field(unsigned int, task_id)
922 		__field(unsigned int, client_id)
923 		__field(const void *, pos)
924 		__field(int, pageno)
925 		__field(int, len)
926 		__field(int, curlen)
927 	),
928 
929 	TP_fast_assign(
930 		__entry->task_id = rqst->rq_task->tk_pid;
931 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
932 		__entry->pos = pos;
933 		__entry->pageno = pageno;
934 		__entry->len = len;
935 		__entry->curlen = curlen;
936 	),
937 
938 	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
939 		__entry->task_id, __entry->client_id,
940 		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
941 	)
942 );
943 
/* Fires when an RDMA segment is decoded from a transport header: records
 * the segment's handle, length, and offset.
 */
944 TRACE_EVENT(xprtrdma_decode_seg,
945 	TP_PROTO(
946 		u32 handle,
947 		u32 length,
948 		u64 offset
949 	),
950 
951 	TP_ARGS(handle, length, offset),
952 
953 	TP_STRUCT__entry(
954 		__field(u32, handle)
955 		__field(u32, length)
956 		__field(u64, offset)
957 	),
958 
959 	TP_fast_assign(
960 		__entry->handle = handle;
961 		__entry->length = length;
962 		__entry->offset = offset;
963 	),
964 
965 	TP_printk("%u@0x%016llx:0x%08x",
966 		__entry->length, (unsigned long long)__entry->offset,
967 		__entry->handle
968 	)
969 );
970 
971 /**
972  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
973  **/
974 
/* Fires when transport buffers are allocated for a task: records the req
 * and the rqst's call and receive buffer sizes.
 */
975 TRACE_EVENT(xprtrdma_op_allocate,
976 	TP_PROTO(
977 		const struct rpc_task *task,
978 		const struct rpcrdma_req *req
979 	),
980 
981 	TP_ARGS(task, req),
982 
983 	TP_STRUCT__entry(
984 		__field(unsigned int, task_id)
985 		__field(unsigned int, client_id)
986 		__field(const void *, req)
987 		__field(size_t, callsize)
988 		__field(size_t, rcvsize)
989 	),
990 
991 	TP_fast_assign(
992 		__entry->task_id = task->tk_pid;
993 		__entry->client_id = task->tk_client->cl_clid;
994 		__entry->req = req;
995 		__entry->callsize = task->tk_rqstp->rq_callsize;
996 		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
997 	),
998 
999 	TP_printk("task:%u@%u req=%p (%zu, %zu)",
1000 		__entry->task_id, __entry->client_id,
1001 		__entry->req, __entry->callsize, __entry->rcvsize
1002 	)
1003 );
1004 
/* Fires when a req is released: records the req and its attached reply. */
1005 TRACE_EVENT(xprtrdma_op_free,
1006 	TP_PROTO(
1007 		const struct rpc_task *task,
1008 		const struct rpcrdma_req *req
1009 	),
1010 
1011 	TP_ARGS(task, req),
1012 
1013 	TP_STRUCT__entry(
1014 		__field(unsigned int, task_id)
1015 		__field(unsigned int, client_id)
1016 		__field(const void *, req)
1017 		__field(const void *, rep)
1018 	),
1019 
1020 	TP_fast_assign(
1021 		__entry->task_id = task->tk_pid;
1022 		__entry->client_id = task->tk_client->cl_clid;
1023 		__entry->req = req;
1024 		__entry->rep = req->rl_reply;
1025 	),
1026 
1027 	TP_printk("task:%u@%u req=%p rep=%p",
1028 		__entry->task_id, __entry->client_id,
1029 		__entry->req, __entry->rep
1030 	)
1031 );
1032 
1033 /**
1034  ** Callback events
1035  **/
1036 
/* Fires when backchannel resources are set up: records the number of
 * backchannel requests provisioned for the transport.
 */
1037 TRACE_EVENT(xprtrdma_cb_setup,
1038 	TP_PROTO(
1039 		const struct rpcrdma_xprt *r_xprt,
1040 		unsigned int reqs
1041 	),
1042 
1043 	TP_ARGS(r_xprt, reqs),
1044 
1045 	TP_STRUCT__entry(
1046 		__field(const void *, r_xprt)
1047 		__field(unsigned int, reqs)
1048 		__string(addr, rpcrdma_addrstr(r_xprt))
1049 		__string(port, rpcrdma_portstr(r_xprt))
1050 	),
1051 
1052 	TP_fast_assign(
1053 		__entry->r_xprt = r_xprt;
1054 		__entry->reqs = reqs;
1055 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1056 		__assign_str(port, rpcrdma_portstr(r_xprt));
1057 	),
1058 
1059 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1060 		__get_str(addr), __get_str(port),
1061 		__entry->r_xprt, __entry->reqs
1062 	)
1063 );
1064 
/* Backchannel call received / reply sent events. */
1065 DEFINE_CB_EVENT(xprtrdma_cb_call);
1066 DEFINE_CB_EVENT(xprtrdma_cb_reply);
1067 
/* Fires when a rep cannot be released back to the transport and is
 * therefore leaked: records the owning task, the request XID, and rep.
 */
1068 TRACE_EVENT(xprtrdma_leaked_rep,
1069 	TP_PROTO(
1070 		const struct rpc_rqst *rqst,
1071 		const struct rpcrdma_rep *rep
1072 	),
1073 
1074 	TP_ARGS(rqst, rep),
1075 
1076 	TP_STRUCT__entry(
1077 		__field(unsigned int, task_id)
1078 		__field(unsigned int, client_id)
1079 		__field(u32, xid)
1080 		__field(const void *, rep)
1081 	),
1082 
1083 	TP_fast_assign(
1084 		__entry->task_id = rqst->rq_task->tk_pid;
1085 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1086 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1087 		__entry->rep = rep;
1088 	),
1089 
1090 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1091 		__entry->task_id, __entry->client_id, __entry->xid,
1092 		__entry->rep
1093 	)
1094 );
1095 
1096 /**
1097  ** Server-side RPC/RDMA events
1098  **/
1099 
/* Server-side event class: records a svc_xprt and its remote address
 * string (xpt_remotebuf).
 */
1100 DECLARE_EVENT_CLASS(svcrdma_xprt_event,
1101 	TP_PROTO(
1102 		const struct svc_xprt *xprt
1103 	),
1104 
1105 	TP_ARGS(xprt),
1106 
1107 	TP_STRUCT__entry(
1108 		__field(const void *, xprt)
1109 		__string(addr, xprt->xpt_remotebuf)
1110 	),
1111 
1112 	TP_fast_assign(
1113 		__entry->xprt = xprt;
1114 		__assign_str(addr, xprt->xpt_remotebuf);
1115 	),
1116 
1117 	TP_printk("xprt=%p addr=%s",
1118 		__entry->xprt, __get_str(addr)
1119 	)
1120 );
1121 
/* Instantiate svcrdma_xprt_##name from class svcrdma_xprt_event. */
1122 #define DEFINE_XPRT_EVENT(name)						\
1123 		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
1124 				TP_PROTO(				\
1125 					const struct svc_xprt *xprt	\
1126 				),					\
1127 				TP_ARGS(xprt))
1128 
/* Server transport lifecycle: accept, accept failure, and free. */
1129 DEFINE_XPRT_EVENT(accept);
1130 DEFINE_XPRT_EVENT(fail);
1131 DEFINE_XPRT_EVENT(free);
1132 
/* Export the RPC/RDMA procedure values for symbolic decoding by
 * show_rpcrdma_proc() below.
 */
1133 TRACE_DEFINE_ENUM(RDMA_MSG);
1134 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1135 TRACE_DEFINE_ENUM(RDMA_MSGP);
1136 TRACE_DEFINE_ENUM(RDMA_DONE);
1137 TRACE_DEFINE_ENUM(RDMA_ERROR);
1138 
1139 #define show_rpcrdma_proc(x)						\
1140 		__print_symbolic(x,					\
1141 				{ RDMA_MSG, "RDMA_MSG" },		\
1142 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1143 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1144 				{ RDMA_DONE, "RDMA_DONE" },		\
1145 				{ RDMA_ERROR, "RDMA_ERROR" })
1146 
/* Fires when the server decodes an incoming transport header: walks the
 * header's first four words (xid, version, credits, procedure) and
 * records them with the total header length.
 */
1147 TRACE_EVENT(svcrdma_decode_rqst,
1148 	TP_PROTO(
1149 		__be32 *p,
1150 		unsigned int hdrlen
1151 	),
1152 
1153 	TP_ARGS(p, hdrlen),
1154 
1155 	TP_STRUCT__entry(
1156 		__field(u32, xid)
1157 		__field(u32, vers)
1158 		__field(u32, proc)
1159 		__field(u32, credits)
1160 		__field(unsigned int, hdrlen)
1161 	),
1162 
1163 	TP_fast_assign(
1164 		__entry->xid = be32_to_cpup(p++);
1165 		__entry->vers = be32_to_cpup(p++);
1166 		__entry->credits = be32_to_cpup(p++);
1167 		__entry->proc = be32_to_cpup(p);
1168 		__entry->hdrlen = hdrlen;
1169 	),
1170 
1171 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1172 		__entry->xid, __entry->vers, __entry->credits,
1173 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1174 );
1175 
/* Fires when an incoming message is too short to contain a complete
 * transport header: records only the received length.
 */
1176 TRACE_EVENT(svcrdma_decode_short,
1177 	TP_PROTO(
1178 		unsigned int hdrlen
1179 	),
1180 
1181 	TP_ARGS(hdrlen),
1182 
1183 	TP_STRUCT__entry(
1184 		__field(unsigned int, hdrlen)
1185 	),
1186 
1187 	TP_fast_assign(
1188 		__entry->hdrlen = hdrlen;
1189 	),
1190 
1191 	TP_printk("hdrlen=%u", __entry->hdrlen)
1192 );
1193 
/* Event class for malformed incoming requests: decodes the header's
 * xid, version, credits, and procedure fields from the raw XDR stream.
 */
1194 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1195 	TP_PROTO(
1196 		__be32 *p
1197 	),
1198 
1199 	TP_ARGS(p),
1200 
1201 	TP_STRUCT__entry(
1202 		__field(u32, xid)
1203 		__field(u32, vers)
1204 		__field(u32, proc)
1205 		__field(u32, credits)
1206 	),
1207 
1208 	TP_fast_assign(
1209 		__entry->xid = be32_to_cpup(p++);
1210 		__entry->vers = be32_to_cpup(p++);
1211 		__entry->credits = be32_to_cpup(p++);
1212 		__entry->proc = be32_to_cpup(p);
1213 	),
1214 
1215 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
1216 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1217 );
1218 
/* Instantiate svcrdma_decode_##name from class svcrdma_badreq_event. */
1219 #define DEFINE_BADREQ_EVENT(name)					\
1220 		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
1221 				TP_PROTO(				\
1222 					__be32 *p			\
1223 				),					\
1224 				TP_ARGS(p))
1225 
/* Reasons an incoming request was rejected or dropped. */
1226 DEFINE_BADREQ_EVENT(badvers);
1227 DEFINE_BADREQ_EVENT(drop);
1228 DEFINE_BADREQ_EVENT(badproc);
1229 DEFINE_BADREQ_EVENT(parse);
1230 
/* Event class for encoding one RDMA segment: records its handle,
 * length, and offset.
 */
1231 DECLARE_EVENT_CLASS(svcrdma_segment_event,
1232 	TP_PROTO(
1233 		u32 handle,
1234 		u32 length,
1235 		u64 offset
1236 	),
1237 
1238 	TP_ARGS(handle, length, offset),
1239 
1240 	TP_STRUCT__entry(
1241 		__field(u32, handle)
1242 		__field(u32, length)
1243 		__field(u64, offset)
1244 	),
1245 
1246 	TP_fast_assign(
1247 		__entry->handle = handle;
1248 		__entry->length = length;
1249 		__entry->offset = offset;
1250 	),
1251 
1252 	TP_printk("%u@0x%016llx:0x%08x",
1253 		__entry->length, (unsigned long long)__entry->offset,
1254 		__entry->handle
1255 	)
1256 );
1257 
/* Instantiate svcrdma_encode_##name from class svcrdma_segment_event. */
1258 #define DEFINE_SEGMENT_EVENT(name)					\
1259 		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
1260 				TP_PROTO(				\
1261 					u32 handle,			\
1262 					u32 length,			\
1263 					u64 offset			\
1264 				),					\
1265 				TP_ARGS(handle, length, offset))
1266 
/* Read segment and write segment encoding events. */
1267 DEFINE_SEGMENT_EVENT(rseg);
1268 DEFINE_SEGMENT_EVENT(wseg);
1269 
/*
 * Event class recording only the byte length of a chunk being
 * encoded into a reply's transport header.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);
1289 
/*
 * Instantiate svcrdma_encode_<name> chunk tracepoints:
 * pzr = position-zero Read chunk, write = Write chunk,
 * reply = Reply chunk.
 */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);
1300 
/*
 * Records a Read chunk's length and position. NOTE(review):
 * @position is presumably the XDR offset in the RPC message where
 * the chunk's payload belongs (per RPC-over-RDMA read-chunk
 * semantics) -- confirm against the encoder that emits this event.
 */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1323 
/*
 * Event class for transport-level error replies. Takes the XID in
 * wire (big-endian) byte order and stores it converted to CPU order.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1343 
/*
 * Instantiate svcrdma_err_<name> tracepoints from the
 * svcrdma_error_event class: vers = version mismatch,
 * chunk = chunk-list processing error.
 */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1353 
1354 /**
1355  ** Server-side RDMA API events
1356  **/
1357 
1358 TRACE_EVENT(svcrdma_dma_map_page,
1359 	TP_PROTO(
1360 		const struct svcxprt_rdma *rdma,
1361 		const void *page
1362 	),
1363 
1364 	TP_ARGS(rdma, page),
1365 
1366 	TP_STRUCT__entry(
1367 		__field(const void *, page);
1368 		__string(device, rdma->sc_cm_id->device->name)
1369 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1370 	),
1371 
1372 	TP_fast_assign(
1373 		__entry->page = page;
1374 		__assign_str(device, rdma->sc_cm_id->device->name);
1375 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1376 	),
1377 
1378 	TP_printk("addr=%s device=%s page=%p",
1379 		__get_str(addr), __get_str(device), __entry->page
1380 	)
1381 );
1382 
/*
 * Records a status code together with the device and peer address
 * of the transport. NOTE(review): presumably reports a failure to
 * DMA-map an RDMA read/write context -- confirm against callers.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);
1407 
1408 TRACE_EVENT(svcrdma_send_failed,
1409 	TP_PROTO(
1410 		const struct svc_rqst *rqst,
1411 		int status
1412 	),
1413 
1414 	TP_ARGS(rqst, status),
1415 
1416 	TP_STRUCT__entry(
1417 		__field(int, status)
1418 		__field(u32, xid)
1419 		__field(const void *, xprt)
1420 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1421 	),
1422 
1423 	TP_fast_assign(
1424 		__entry->status = status;
1425 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1426 		__entry->xprt = rqst->rq_xprt;
1427 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1428 	),
1429 
1430 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1431 		__entry->xprt, __get_str(addr),
1432 		__entry->xid, __entry->status
1433 	)
1434 );
1435 
/*
 * Event class for Send-side work completions. Captures the cqe
 * pointer and completion status; the vendor error code is recorded
 * only when the completion actually failed (non-zero status).
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		/* vendor_err is only meaningful on failure */
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1463 
/*
 * Instantiate svcrdma_wc_<name> completion tracepoints from the
 * svcrdma_sendcomp_event class.
 */
#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))
1470 
/*
 * Records the posting of a Send WR: cqe pointer, SGE count, the
 * rkey being remotely invalidated (zero unless the opcode is
 * IB_WR_SEND_WITH_INV), and the post status.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		/* invalidate_rkey is valid only for Send-with-Invalidate */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);
1499 
/* Completion of a Send WR: trace event svcrdma_wc_send */
DEFINE_SENDCOMP_EVENT(send);
1501 
/*
 * Records the posting of a Receive WR: the cqe pointer and the
 * return status of the post operation.
 */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);
1524 
/*
 * Records a Receive completion. On success the received byte count
 * is captured; on failure byte_len is zeroed (it is not valid) and
 * the vendor error code is captured instead.
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			/* byte_len is undefined on a failed completion */
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1557 
/*
 * Records the posting of an RDMA Read/Write chain: the cqe pointer,
 * the number of send-queue entries consumed, and the post status.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);
1583 
/* Completions of RDMA Read/Write WRs: svcrdma_wc_read, svcrdma_wc_write */
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
1586 
1587 TRACE_EVENT(svcrdma_cm_event,
1588 	TP_PROTO(
1589 		const struct rdma_cm_event *event,
1590 		const struct sockaddr *sap
1591 	),
1592 
1593 	TP_ARGS(event, sap),
1594 
1595 	TP_STRUCT__entry(
1596 		__field(unsigned int, event)
1597 		__field(int, status)
1598 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1599 	),
1600 
1601 	TP_fast_assign(
1602 		__entry->event = event->event;
1603 		__entry->status = event->status;
1604 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1605 			 "%pISpc", sap);
1606 	),
1607 
1608 	TP_printk("addr=%s event=%s (%u/%d)",
1609 		__entry->addr,
1610 		rdma_show_cm_event(__entry->event),
1611 		__entry->event, __entry->status
1612 	)
1613 );
1614 
1615 TRACE_EVENT(svcrdma_qp_error,
1616 	TP_PROTO(
1617 		const struct ib_event *event,
1618 		const struct sockaddr *sap
1619 	),
1620 
1621 	TP_ARGS(event, sap),
1622 
1623 	TP_STRUCT__entry(
1624 		__field(unsigned int, event)
1625 		__string(device, event->device->name)
1626 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1627 	),
1628 
1629 	TP_fast_assign(
1630 		__entry->event = event->event;
1631 		__assign_str(device, event->device->name);
1632 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1633 			 "%pISpc", sap);
1634 	),
1635 
1636 	TP_printk("addr=%s dev=%s event=%s (%u)",
1637 		__entry->addr, __get_str(device),
1638 		rdma_show_ib_event(__entry->event), __entry->event
1639 	)
1640 );
1641 
/*
 * Event class snapshotting send-queue accounting: the current
 * available SQE count (atomic read of sc_sq_avail) against the
 * configured queue depth, plus the peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1665 
/*
 * Instantiate svcrdma_sq_<name> tracepoints from the
 * svcrdma_sendqueue_event class: full = send queue exhausted,
 * retry = retrying after waiting for space.
 */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1675 
1676 #endif /* _TRACE_RPCRDMA_H */
1677 
1678 #include <trace/define_trace.h>
1679