xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 5b628549)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
/*
 * Event class shared by reply-header trace points: records the rep and
 * its owning transport, plus the XID, version, and procedure words
 * decoded (big-endian to CPU order) from the received RPC/RDMA header.
 */
21 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
22 	TP_PROTO(
23 		const struct rpcrdma_rep *rep
24 	),
25 
26 	TP_ARGS(rep),
27 
28 	TP_STRUCT__entry(
29 		__field(const void *, rep)
30 		__field(const void *, r_xprt)
31 		__field(u32, xid)
32 		__field(u32, version)
33 		__field(u32, proc)
34 	),
35 
36 	TP_fast_assign(
37 		__entry->rep = rep;
38 		__entry->r_xprt = rep->rr_rxprt;
39 		__entry->xid = be32_to_cpu(rep->rr_xid);
40 		__entry->version = be32_to_cpu(rep->rr_vers);
41 		__entry->proc = be32_to_cpu(rep->rr_proc);
42 	),
43 
44 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
45 		__entry->r_xprt, __entry->xid, __entry->rep,
46 		__entry->version, __entry->proc
47 	)
48 );
49 
/* Instantiate one trace point from the xprtrdma_reply_event class. */
50 #define DEFINE_REPLY_EVENT(name)					\
51 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
52 				TP_PROTO(				\
53 					const struct rpcrdma_rep *rep	\
54 				),					\
55 				TP_ARGS(rep))
56 
/*
 * Event class for transport-scoped events: records the r_xprt pointer
 * and captures the peer address/port strings at trace time.
 */
57 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
58 	TP_PROTO(
59 		const struct rpcrdma_xprt *r_xprt
60 	),
61 
62 	TP_ARGS(r_xprt),
63 
64 	TP_STRUCT__entry(
65 		__field(const void *, r_xprt)
66 		__string(addr, rpcrdma_addrstr(r_xprt))
67 		__string(port, rpcrdma_portstr(r_xprt))
68 	),
69 
70 	TP_fast_assign(
71 		__entry->r_xprt = r_xprt;
72 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
73 		__assign_str(port, rpcrdma_portstr(r_xprt));
74 	),
75 
76 	TP_printk("peer=[%s]:%s r_xprt=%p",
77 		__get_str(addr), __get_str(port), __entry->r_xprt
78 	)
79 );
80 
/* Instantiate one trace point from the xprtrdma_rxprt class. */
81 #define DEFINE_RXPRT_EVENT(name)					\
82 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
83 				TP_PROTO(				\
84 					const struct rpcrdma_xprt *r_xprt \
85 				),					\
86 				TP_ARGS(r_xprt))
87 
/*
 * Event class for Read chunk registration: records the owning RPC task,
 * the chunk position, and the MR's nents/handle/length/offset.  The
 * "(more)"/"(last)" suffix reports whether further segments remain
 * (mr_nents < nsegs).
 */
88 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
89 	TP_PROTO(
90 		const struct rpc_task *task,
91 		unsigned int pos,
92 		struct rpcrdma_mr *mr,
93 		int nsegs
94 	),
95 
96 	TP_ARGS(task, pos, mr, nsegs),
97 
98 	TP_STRUCT__entry(
99 		__field(unsigned int, task_id)
100 		__field(unsigned int, client_id)
101 		__field(unsigned int, pos)
102 		__field(int, nents)
103 		__field(u32, handle)
104 		__field(u32, length)
105 		__field(u64, offset)
106 		__field(int, nsegs)
107 	),
108 
109 	TP_fast_assign(
110 		__entry->task_id = task->tk_pid;
111 		__entry->client_id = task->tk_client->cl_clid;
112 		__entry->pos = pos;
113 		__entry->nents = mr->mr_nents;
114 		__entry->handle = mr->mr_handle;
115 		__entry->length = mr->mr_length;
116 		__entry->offset = mr->mr_offset;
117 		__entry->nsegs = nsegs;
118 	),
119 
120 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
121 		__entry->task_id, __entry->client_id,
122 		__entry->pos, __entry->length,
123 		(unsigned long long)__entry->offset, __entry->handle,
124 		__entry->nents < __entry->nsegs ? "more" : "last"
125 	)
126 );
127 
/* Instantiate a Read chunk trace point named xprtrdma_chunk_<name>. */
128 #define DEFINE_RDCH_EVENT(name)						\
129 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
130 				TP_PROTO(				\
131 					const struct rpc_task *task,	\
132 					unsigned int pos,		\
133 					struct rpcrdma_mr *mr,		\
134 					int nsegs			\
135 				),					\
136 				TP_ARGS(task, pos, mr, nsegs))
137 
/*
 * Event class for Write/Reply chunk registration.  Same fields as the
 * Read chunk class minus the "pos" field (Write/Reply chunks have no
 * position argument in the RPC/RDMA header).
 */
138 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
139 	TP_PROTO(
140 		const struct rpc_task *task,
141 		struct rpcrdma_mr *mr,
142 		int nsegs
143 	),
144 
145 	TP_ARGS(task, mr, nsegs),
146 
147 	TP_STRUCT__entry(
148 		__field(unsigned int, task_id)
149 		__field(unsigned int, client_id)
150 		__field(int, nents)
151 		__field(u32, handle)
152 		__field(u32, length)
153 		__field(u64, offset)
154 		__field(int, nsegs)
155 	),
156 
157 	TP_fast_assign(
158 		__entry->task_id = task->tk_pid;
159 		__entry->client_id = task->tk_client->cl_clid;
160 		__entry->nents = mr->mr_nents;
161 		__entry->handle = mr->mr_handle;
162 		__entry->length = mr->mr_length;
163 		__entry->offset = mr->mr_offset;
164 		__entry->nsegs = nsegs;
165 	),
166 
167 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
168 		__entry->task_id, __entry->client_id,
169 		__entry->length, (unsigned long long)__entry->offset,
170 		__entry->handle,
171 		__entry->nents < __entry->nsegs ? "more" : "last"
172 	)
173 );
174 
/* Instantiate a Write/Reply chunk trace point named xprtrdma_chunk_<name>. */
175 #define DEFINE_WRCH_EVENT(name)						\
176 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
177 				TP_PROTO(				\
178 					const struct rpc_task *task,	\
179 					struct rpcrdma_mr *mr,		\
180 					int nsegs			\
181 				),					\
182 				TP_ARGS(task, mr, nsegs))
183 
/*
 * Export the FRWR state enum values so user space can decode the
 * __print_symbolic() table below.
 */
184 TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
185 TRACE_DEFINE_ENUM(FRWR_IS_VALID);
186 TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
187 TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
188 
/* Map an FRWR state value to a human-readable string in trace output. */
189 #define xprtrdma_show_frwr_state(x)					\
190 		__print_symbolic(x,					\
191 				{ FRWR_IS_INVALID, "INVALID" },		\
192 				{ FRWR_IS_VALID, "VALID" },		\
193 				{ FRWR_FLUSHED_FR, "FLUSHED_FR" },	\
194 				{ FRWR_FLUSHED_LI, "FLUSHED_LI" })
195 
/*
 * Event class for FRWR completion events: records the enclosing MR
 * (recovered via container_of), the frwr state, and the work
 * completion status/vendor error.  vendor_err is captured only when
 * the completion failed.
 */
196 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
197 	TP_PROTO(
198 		const struct ib_wc *wc,
199 		const struct rpcrdma_frwr *frwr
200 	),
201 
202 	TP_ARGS(wc, frwr),
203 
204 	TP_STRUCT__entry(
205 		__field(const void *, mr)
206 		__field(unsigned int, state)
207 		__field(unsigned int, status)
208 		__field(unsigned int, vendor_err)
209 	),
210 
211 	TP_fast_assign(
212 		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
213 		__entry->state = frwr->fr_state;
214 		__entry->status = wc->status;
215 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
216 	),
217 
218 	TP_printk(
219 		"mr=%p state=%s: %s (%u/0x%x)",
220 		__entry->mr, xprtrdma_show_frwr_state(__entry->state),
221 		rdma_show_wc_status(__entry->status),
222 		__entry->status, __entry->vendor_err
223 	)
224 );
225 
/* Instantiate one trace point from the xprtrdma_frwr_done class. */
226 #define DEFINE_FRWR_DONE_EVENT(name)					\
227 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
228 				TP_PROTO(				\
229 					const struct ib_wc *wc,		\
230 					const struct rpcrdma_frwr *frwr	\
231 				),					\
232 				TP_ARGS(wc, frwr))
233 
/* Export DMA direction enum values for user-space decoding. */
234 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
235 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
236 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
237 TRACE_DEFINE_ENUM(DMA_NONE);
238 
/* Map a DMA direction value to a human-readable string in trace output. */
239 #define xprtrdma_show_direction(x)					\
240 		__print_symbolic(x,					\
241 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
242 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
243 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
244 				{ DMA_NONE, "NONE" })
245 
/*
 * Event class for MR life-cycle events: records the MR pointer and its
 * handle, length, offset, and DMA mapping direction.
 */
246 DECLARE_EVENT_CLASS(xprtrdma_mr,
247 	TP_PROTO(
248 		const struct rpcrdma_mr *mr
249 	),
250 
251 	TP_ARGS(mr),
252 
253 	TP_STRUCT__entry(
254 		__field(const void *, mr)
255 		__field(u32, handle)
256 		__field(u32, length)
257 		__field(u64, offset)
258 		__field(u32, dir)
259 	),
260 
261 	TP_fast_assign(
262 		__entry->mr = mr;
263 		__entry->handle = mr->mr_handle;
264 		__entry->length = mr->mr_length;
265 		__entry->offset = mr->mr_offset;
266 		__entry->dir    = mr->mr_dir;
267 	),
268 
269 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
270 		__entry->mr, __entry->length,
271 		(unsigned long long)__entry->offset, __entry->handle,
272 		xprtrdma_show_direction(__entry->dir)
273 	)
274 );
275 
/* Instantiate an MR trace point named xprtrdma_mr_<name>. */
276 #define DEFINE_MR_EVENT(name) \
277 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
278 				TP_PROTO( \
279 					const struct rpcrdma_mr *mr \
280 				), \
281 				TP_ARGS(mr))
282 
/*
 * Event class for backchannel (callback) events: records the rqst, its
 * rpcrdma_req/rep, and the XID.
 */
283 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
284 	TP_PROTO(
285 		const struct rpc_rqst *rqst
286 	),
287 
288 	TP_ARGS(rqst),
289 
290 	TP_STRUCT__entry(
291 		__field(const void *, rqst)
292 		__field(const void *, rep)
293 		__field(const void *, req)
294 		__field(u32, xid)
295 	),
296 
297 	TP_fast_assign(
298 		__entry->rqst = rqst;
299 		__entry->req = rpcr_to_rdmar(rqst);
300 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
301 		__entry->xid = be32_to_cpu(rqst->rq_xid);
302 	),
303 
304 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
305 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
306 	)
307 );
308 
/* Instantiate one trace point from the xprtrdma_cb_event class. */
309 #define DEFINE_CB_EVENT(name)						\
310 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
311 				TP_PROTO(				\
312 					const struct rpc_rqst *rqst	\
313 				),					\
314 				TP_ARGS(rqst))
315 
316 /**
317  ** Connection events
318  **/
319 
/*
 * Fires on an RDMA connection manager upcall: records the event code,
 * its status, and the peer address/port of the affected transport.
 */
320 TRACE_EVENT(xprtrdma_cm_event,
321 	TP_PROTO(
322 		const struct rpcrdma_xprt *r_xprt,
323 		struct rdma_cm_event *event
324 	),
325 
326 	TP_ARGS(r_xprt, event),
327 
328 	TP_STRUCT__entry(
329 		__field(const void *, r_xprt)
330 		__field(unsigned int, event)
331 		__field(int, status)
332 		__string(addr, rpcrdma_addrstr(r_xprt))
333 		__string(port, rpcrdma_portstr(r_xprt))
334 	),
335 
336 	TP_fast_assign(
337 		__entry->r_xprt = r_xprt;
338 		__entry->event = event->event;
339 		__entry->status = event->status;
340 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
341 		__assign_str(port, rpcrdma_portstr(r_xprt));
342 	),
343 
344 	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
345 		__get_str(addr), __get_str(port),
346 		__entry->r_xprt, rdma_show_cm_event(__entry->event),
347 		__entry->event, __entry->status
348 	)
349 );
350 
/*
 * Fires when a transport disconnects: records the disconnect status and
 * whether the endpoint still reports itself connected (rep_connected == 1
 * prints "still connected", anything else prints "disconnected").
 */
351 TRACE_EVENT(xprtrdma_disconnect,
352 	TP_PROTO(
353 		const struct rpcrdma_xprt *r_xprt,
354 		int status
355 	),
356 
357 	TP_ARGS(r_xprt, status),
358 
359 	TP_STRUCT__entry(
360 		__field(const void *, r_xprt)
361 		__field(int, status)
362 		__field(int, connected)
363 		__string(addr, rpcrdma_addrstr(r_xprt))
364 		__string(port, rpcrdma_portstr(r_xprt))
365 	),
366 
367 	TP_fast_assign(
368 		__entry->r_xprt = r_xprt;
369 		__entry->status = status;
370 		__entry->connected = r_xprt->rx_ep.rep_connected;
371 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
372 		__assign_str(port, rpcrdma_portstr(r_xprt));
373 	),
374 
375 	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
376 		__get_str(addr), __get_str(port),
377 		__entry->r_xprt, __entry->status,
378 		__entry->connected == 1 ? "still " : "dis"
379 	)
380 );
381 
/* Transport-scoped trace points built on the xprtrdma_rxprt class. */
382 DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
383 DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
384 DEFINE_RXPRT_EVENT(xprtrdma_create);
385 DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
386 DEFINE_RXPRT_EVENT(xprtrdma_remove);
387 DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
388 DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
389 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
390 DEFINE_RXPRT_EVENT(xprtrdma_op_close);
391 DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
392 
/*
 * Fires on an IB queue-pair event: records the event code, the device
 * name it arrived on, and the peer address/port of the transport.
 */
393 TRACE_EVENT(xprtrdma_qp_event,
394 	TP_PROTO(
395 		const struct rpcrdma_xprt *r_xprt,
396 		const struct ib_event *event
397 	),
398 
399 	TP_ARGS(r_xprt, event),
400 
401 	TP_STRUCT__entry(
402 		__field(const void *, r_xprt)
403 		__field(unsigned int, event)
404 		__string(name, event->device->name)
405 		__string(addr, rpcrdma_addrstr(r_xprt))
406 		__string(port, rpcrdma_portstr(r_xprt))
407 	),
408 
409 	TP_fast_assign(
410 		__entry->r_xprt = r_xprt;
411 		__entry->event = event->event;
412 		__assign_str(name, event->device->name);
413 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
414 		__assign_str(port, rpcrdma_portstr(r_xprt));
415 	),
416 
417 	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
418 		__get_str(addr), __get_str(port), __entry->r_xprt,
419 		__get_str(name), rdma_show_ib_event(__entry->event),
420 		__entry->event
421 	)
422 );
423 
424 /**
425  ** Call events
426  **/
427 
/* Fires after MR allocation: records how many MRs were created. */
428 TRACE_EVENT(xprtrdma_createmrs,
429 	TP_PROTO(
430 		const struct rpcrdma_xprt *r_xprt,
431 		unsigned int count
432 	),
433 
434 	TP_ARGS(r_xprt, count),
435 
436 	TP_STRUCT__entry(
437 		__field(const void *, r_xprt)
438 		__field(unsigned int, count)
439 	),
440 
441 	TP_fast_assign(
442 		__entry->r_xprt = r_xprt;
443 		__entry->count = count;
444 	),
445 
446 	TP_printk("r_xprt=%p: created %u MRs",
447 		__entry->r_xprt, __entry->count
448 	)
449 );
450 
451 DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
452 
/* Chunk registration trace points (Read, Write, Reply). */
453 DEFINE_RDCH_EVENT(read);
454 DEFINE_WRCH_EVENT(write);
455 DEFINE_WRCH_EVENT(reply);
456 
/* Export chunk-type enum values for user-space decoding. */
457 TRACE_DEFINE_ENUM(rpcrdma_noch);
458 TRACE_DEFINE_ENUM(rpcrdma_readch);
459 TRACE_DEFINE_ENUM(rpcrdma_areadch);
460 TRACE_DEFINE_ENUM(rpcrdma_writech);
461 TRACE_DEFINE_ENUM(rpcrdma_replych);
462 
/* Map a chunk-type value to a human-readable string in trace output. */
463 #define xprtrdma_show_chunktype(x)					\
464 		__print_symbolic(x,					\
465 				{ rpcrdma_noch, "inline" },		\
466 				{ rpcrdma_readch, "read list" },	\
467 				{ rpcrdma_areadch, "*read list" },	\
468 				{ rpcrdma_writech, "write list" },	\
469 				{ rpcrdma_replych, "reply chunk" })
470 
/*
 * Fires after a request header is marshaled: records the transport
 * header length, the send buffer's head/page/tail lengths, and the
 * chosen read/write chunk types.
 */
471 TRACE_EVENT(xprtrdma_marshal,
472 	TP_PROTO(
473 		const struct rpc_rqst *rqst,
474 		unsigned int hdrlen,
475 		unsigned int rtype,
476 		unsigned int wtype
477 	),
478 
479 	TP_ARGS(rqst, hdrlen, rtype, wtype),
480 
481 	TP_STRUCT__entry(
482 		__field(unsigned int, task_id)
483 		__field(unsigned int, client_id)
484 		__field(u32, xid)
485 		__field(unsigned int, hdrlen)
486 		__field(unsigned int, headlen)
487 		__field(unsigned int, pagelen)
488 		__field(unsigned int, taillen)
489 		__field(unsigned int, rtype)
490 		__field(unsigned int, wtype)
491 	),
492 
493 	TP_fast_assign(
494 		__entry->task_id = rqst->rq_task->tk_pid;
495 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
496 		__entry->xid = be32_to_cpu(rqst->rq_xid);
497 		__entry->hdrlen = hdrlen;
498 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
499 		__entry->pagelen = rqst->rq_snd_buf.page_len;
500 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
501 		__entry->rtype = rtype;
502 		__entry->wtype = wtype;
503 	),
504 
505 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
506 		__entry->task_id, __entry->client_id, __entry->xid,
507 		__entry->hdrlen,
508 		__entry->headlen, __entry->pagelen, __entry->taillen,
509 		xprtrdma_show_chunktype(__entry->rtype),
510 		xprtrdma_show_chunktype(__entry->wtype)
511 	)
512 );
513 
/*
 * Fires when a Send WR is posted: records the SGE count, whether the
 * WR requested a signaled completion (IB_SEND_SIGNALED), and the
 * post_send return status.
 */
514 TRACE_EVENT(xprtrdma_post_send,
515 	TP_PROTO(
516 		const struct rpcrdma_req *req,
517 		int status
518 	),
519 
520 	TP_ARGS(req, status),
521 
522 	TP_STRUCT__entry(
523 		__field(const void *, req)
524 		__field(unsigned int, task_id)
525 		__field(unsigned int, client_id)
526 		__field(int, num_sge)
527 		__field(int, signaled)
528 		__field(int, status)
529 	),
530 
531 	TP_fast_assign(
532 		const struct rpc_rqst *rqst = &req->rl_slot;
533 
534 		__entry->task_id = rqst->rq_task->tk_pid;
535 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
536 		__entry->req = req;
537 		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
538 		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
539 				    IB_SEND_SIGNALED;
540 		__entry->status = status;
541 	),
542 
543 	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
544 		__entry->task_id, __entry->client_id,
545 		__entry->req, __entry->num_sge,
546 		(__entry->num_sge == 1 ? "" : "s"),
547 		(__entry->signaled ? "signaled " : ""),
548 		__entry->status
549 	)
550 );
551 
/* Fires when a single Receive WR is posted: records only its cqe. */
552 TRACE_EVENT(xprtrdma_post_recv,
553 	TP_PROTO(
554 		const struct ib_cqe *cqe
555 	),
556 
557 	TP_ARGS(cqe),
558 
559 	TP_STRUCT__entry(
560 		__field(const void *, cqe)
561 	),
562 
563 	TP_fast_assign(
564 		__entry->cqe = cqe;
565 	),
566 
567 	TP_printk("cqe=%p",
568 		__entry->cqe
569 	)
570 );
571 
/*
 * Fires after a batch of Receives is posted: records how many new
 * Receives were added, the endpoint's current receive count, and the
 * posting status.
 */
572 TRACE_EVENT(xprtrdma_post_recvs,
573 	TP_PROTO(
574 		const struct rpcrdma_xprt *r_xprt,
575 		unsigned int count,
576 		int status
577 	),
578 
579 	TP_ARGS(r_xprt, count, status),
580 
581 	TP_STRUCT__entry(
582 		__field(const void *, r_xprt)
583 		__field(unsigned int, count)
584 		__field(int, status)
585 		__field(int, posted)
586 		__string(addr, rpcrdma_addrstr(r_xprt))
587 		__string(port, rpcrdma_portstr(r_xprt))
588 	),
589 
590 	TP_fast_assign(
591 		__entry->r_xprt = r_xprt;
592 		__entry->count = count;
593 		__entry->status = status;
594 		__entry->posted = r_xprt->rx_ep.rep_receive_count;
595 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
596 		__assign_str(port, rpcrdma_portstr(r_xprt));
597 	),
598 
599 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
600 		__get_str(addr), __get_str(port), __entry->r_xprt,
601 		__entry->count, __entry->posted, __entry->status
602 	)
603 );
604 
605 /**
606  ** Completion events
607  **/
608 
/*
 * Fires on a Send completion: records the originating req, how many
 * pages the send context had DMA-mapped, and the wc status.
 * vendor_err is captured only on failure.
 */
609 TRACE_EVENT(xprtrdma_wc_send,
610 	TP_PROTO(
611 		const struct rpcrdma_sendctx *sc,
612 		const struct ib_wc *wc
613 	),
614 
615 	TP_ARGS(sc, wc),
616 
617 	TP_STRUCT__entry(
618 		__field(const void *, req)
619 		__field(unsigned int, unmap_count)
620 		__field(unsigned int, status)
621 		__field(unsigned int, vendor_err)
622 	),
623 
624 	TP_fast_assign(
625 		__entry->req = sc->sc_req;
626 		__entry->unmap_count = sc->sc_unmap_count;
627 		__entry->status = wc->status;
628 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
629 	),
630 
631 	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
632 		__entry->req, __entry->unmap_count,
633 		rdma_show_wc_status(__entry->status),
634 		__entry->status, __entry->vendor_err
635 	)
636 );
637 
/*
 * Fires on a Receive completion: byte_len is valid only on success;
 * vendor_err only on failure (the other is zeroed).
 */
638 TRACE_EVENT(xprtrdma_wc_receive,
639 	TP_PROTO(
640 		const struct ib_wc *wc
641 	),
642 
643 	TP_ARGS(wc),
644 
645 	TP_STRUCT__entry(
646 		__field(const void *, cqe)
647 		__field(u32, byte_len)
648 		__field(unsigned int, status)
649 		__field(u32, vendor_err)
650 	),
651 
652 	TP_fast_assign(
653 		__entry->cqe = wc->wr_cqe;
654 		__entry->status = wc->status;
655 		if (wc->status) {
656 			__entry->byte_len = 0;
657 			__entry->vendor_err = wc->vendor_err;
658 		} else {
659 			__entry->byte_len = wc->byte_len;
660 			__entry->vendor_err = 0;
661 		}
662 	),
663 
664 	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
665 		__entry->cqe, __entry->byte_len,
666 		rdma_show_wc_status(__entry->status),
667 		__entry->status, __entry->vendor_err
668 	)
669 );
670 
/* FRWR completion trace points (fast-register and local-invalidate). */
671 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
672 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
673 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
674 
/* Fires when FRWR resources are allocated for an MR: records the rc. */
675 TRACE_EVENT(xprtrdma_frwr_alloc,
676 	TP_PROTO(
677 		const struct rpcrdma_mr *mr,
678 		int rc
679 	),
680 
681 	TP_ARGS(mr, rc),
682 
683 	TP_STRUCT__entry(
684 		__field(const void *, mr)
685 		__field(int, rc)
686 	),
687 
688 	TP_fast_assign(
689 		__entry->mr = mr;
690 		__entry->rc	= rc;
691 	),
692 
693 	TP_printk("mr=%p: rc=%d",
694 		__entry->mr, __entry->rc
695 	)
696 );
697 
/*
 * Fires when an MR is deregistered: records the MR's handle, length,
 * offset, DMA direction, and the deregistration return code.
 */
698 TRACE_EVENT(xprtrdma_frwr_dereg,
699 	TP_PROTO(
700 		const struct rpcrdma_mr *mr,
701 		int rc
702 	),
703 
704 	TP_ARGS(mr, rc),
705 
706 	TP_STRUCT__entry(
707 		__field(const void *, mr)
708 		__field(u32, handle)
709 		__field(u32, length)
710 		__field(u64, offset)
711 		__field(u32, dir)
712 		__field(int, rc)
713 	),
714 
715 	TP_fast_assign(
716 		__entry->mr = mr;
717 		__entry->handle = mr->mr_handle;
718 		__entry->length = mr->mr_length;
719 		__entry->offset = mr->mr_offset;
720 		__entry->dir    = mr->mr_dir;
721 		__entry->rc	= rc;
722 	),
723 
724 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
725 		__entry->mr, __entry->length,
726 		(unsigned long long)__entry->offset, __entry->handle,
727 		xprtrdma_show_direction(__entry->dir),
728 		__entry->rc
729 	)
730 );
731 
/*
 * Fires on a scatterlist error during FRWR registration: records the
 * MR's first DMA address, its direction, and the failing sg_nents.
 */
732 TRACE_EVENT(xprtrdma_frwr_sgerr,
733 	TP_PROTO(
734 		const struct rpcrdma_mr *mr,
735 		int sg_nents
736 	),
737 
738 	TP_ARGS(mr, sg_nents),
739 
740 	TP_STRUCT__entry(
741 		__field(const void *, mr)
742 		__field(u64, addr)
743 		__field(u32, dir)
744 		__field(int, nents)
745 	),
746 
747 	TP_fast_assign(
748 		__entry->mr = mr;
749 		__entry->addr = mr->mr_sg->dma_address;
750 		__entry->dir = mr->mr_dir;
751 		__entry->nents = sg_nents;
752 	),
753 
754 	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
755 		__entry->mr, __entry->addr,
756 		xprtrdma_show_direction(__entry->dir),
757 		__entry->nents
758 	)
759 );
760 
/*
 * Fires when DMA mapping of an MR maps fewer segments than expected:
 * records num_mapped versus the MR's mr_nents.
 */
761 TRACE_EVENT(xprtrdma_frwr_maperr,
762 	TP_PROTO(
763 		const struct rpcrdma_mr *mr,
764 		int num_mapped
765 	),
766 
767 	TP_ARGS(mr, num_mapped),
768 
769 	TP_STRUCT__entry(
770 		__field(const void *, mr)
771 		__field(u64, addr)
772 		__field(u32, dir)
773 		__field(int, num_mapped)
774 		__field(int, nents)
775 	),
776 
777 	TP_fast_assign(
778 		__entry->mr = mr;
779 		__entry->addr = mr->mr_sg->dma_address;
780 		__entry->dir = mr->mr_dir;
781 		__entry->num_mapped = num_mapped;
782 		__entry->nents = mr->mr_nents;
783 	),
784 
785 	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
786 		__entry->mr, __entry->addr,
787 		xprtrdma_show_direction(__entry->dir),
788 		__entry->num_mapped, __entry->nents
789 	)
790 );
791 
/* MR life-cycle trace points built on the xprtrdma_mr class. */
792 DEFINE_MR_EVENT(localinv);
793 DEFINE_MR_EVENT(map);
794 DEFINE_MR_EVENT(unmap);
795 DEFINE_MR_EVENT(remoteinv);
796 DEFINE_MR_EVENT(recycle);
797 
/*
 * Fires when DMA-mapping an address fails: records the failing DMA
 * address.
 *
 * Fix: the TP_printk format string previously ended with "\n".  The
 * trace output layer appends its own line terminator, so an embedded
 * newline produces spurious blank lines in the trace; TP_printk format
 * strings must not contain one.
 */
798 TRACE_EVENT(xprtrdma_dma_maperr,
799 	TP_PROTO(
800 		u64 addr
801 	),
802 
803 	TP_ARGS(addr),
804 
805 	TP_STRUCT__entry(
806 		__field(u64, addr)
807 	),
808 
809 	TP_fast_assign(
810 		__entry->addr = addr;
811 	),
812 
813 	TP_printk("dma addr=0x%llx", __entry->addr)
814 );
815 
816 /**
817  ** Reply events
818  **/
819 
/*
 * Fires when a reply is matched to its request: records the task, the
 * rep/req pair, the XID, and the credit grant carried by the reply.
 */
820 TRACE_EVENT(xprtrdma_reply,
821 	TP_PROTO(
822 		const struct rpc_task *task,
823 		const struct rpcrdma_rep *rep,
824 		const struct rpcrdma_req *req,
825 		unsigned int credits
826 	),
827 
828 	TP_ARGS(task, rep, req, credits),
829 
830 	TP_STRUCT__entry(
831 		__field(unsigned int, task_id)
832 		__field(unsigned int, client_id)
833 		__field(const void *, rep)
834 		__field(const void *, req)
835 		__field(u32, xid)
836 		__field(unsigned int, credits)
837 	),
838 
839 	TP_fast_assign(
840 		__entry->task_id = task->tk_pid;
841 		__entry->client_id = task->tk_client->cl_clid;
842 		__entry->rep = rep;
843 		__entry->req = req;
844 		__entry->xid = be32_to_cpu(rep->rr_xid);
845 		__entry->credits = credits;
846 	),
847 
848 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
849 		__entry->task_id, __entry->client_id, __entry->xid,
850 		__entry->credits, __entry->rep, __entry->req
851 	)
852 );
853 
/*
 * Fires when reply completion is deferred; dereferences rep->rr_rqst,
 * so the rep must already be matched to a request at this point.
 */
854 TRACE_EVENT(xprtrdma_defer_cmp,
855 	TP_PROTO(
856 		const struct rpcrdma_rep *rep
857 	),
858 
859 	TP_ARGS(rep),
860 
861 	TP_STRUCT__entry(
862 		__field(unsigned int, task_id)
863 		__field(unsigned int, client_id)
864 		__field(const void *, rep)
865 		__field(u32, xid)
866 	),
867 
868 	TP_fast_assign(
869 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
870 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
871 		__entry->rep = rep;
872 		__entry->xid = be32_to_cpu(rep->rr_xid);
873 	),
874 
875 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
876 		__entry->task_id, __entry->client_id, __entry->xid,
877 		__entry->rep
878 	)
879 );
880 
/* Reply-header sanity-check trace points (xprtrdma_reply_event class). */
881 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
882 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
883 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
884 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
885 
/*
 * Fires when received data is copied into the receive buffer: records
 * the buffer's head iov base and the data/header lengths involved.
 */
886 TRACE_EVENT(xprtrdma_fixup,
887 	TP_PROTO(
888 		const struct rpc_rqst *rqst,
889 		int len,
890 		int hdrlen
891 	),
892 
893 	TP_ARGS(rqst, len, hdrlen),
894 
895 	TP_STRUCT__entry(
896 		__field(unsigned int, task_id)
897 		__field(unsigned int, client_id)
898 		__field(const void *, base)
899 		__field(int, len)
900 		__field(int, hdrlen)
901 	),
902 
903 	TP_fast_assign(
904 		__entry->task_id = rqst->rq_task->tk_pid;
905 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
906 		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
907 		__entry->len = len;
908 		__entry->hdrlen = hdrlen;
909 	),
910 
911 	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
912 		__entry->task_id, __entry->client_id,
913 		__entry->base, __entry->len, __entry->hdrlen
914 	)
915 );
916 
/*
 * Per-page variant of xprtrdma_fixup: records the page number, copy
 * position, and the remaining/current copy lengths.
 */
917 TRACE_EVENT(xprtrdma_fixup_pg,
918 	TP_PROTO(
919 		const struct rpc_rqst *rqst,
920 		int pageno,
921 		const void *pos,
922 		int len,
923 		int curlen
924 	),
925 
926 	TP_ARGS(rqst, pageno, pos, len, curlen),
927 
928 	TP_STRUCT__entry(
929 		__field(unsigned int, task_id)
930 		__field(unsigned int, client_id)
931 		__field(const void *, pos)
932 		__field(int, pageno)
933 		__field(int, len)
934 		__field(int, curlen)
935 	),
936 
937 	TP_fast_assign(
938 		__entry->task_id = rqst->rq_task->tk_pid;
939 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
940 		__entry->pos = pos;
941 		__entry->pageno = pageno;
942 		__entry->len = len;
943 		__entry->curlen = curlen;
944 	),
945 
946 	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
947 		__entry->task_id, __entry->client_id,
948 		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
949 	)
950 );
951 
/*
 * Fires when an RDMA segment is decoded from a reply: records the
 * segment's handle, length, and offset ("length@offset:handle").
 */
952 TRACE_EVENT(xprtrdma_decode_seg,
953 	TP_PROTO(
954 		u32 handle,
955 		u32 length,
956 		u64 offset
957 	),
958 
959 	TP_ARGS(handle, length, offset),
960 
961 	TP_STRUCT__entry(
962 		__field(u32, handle)
963 		__field(u32, length)
964 		__field(u64, offset)
965 	),
966 
967 	TP_fast_assign(
968 		__entry->handle = handle;
969 		__entry->length = length;
970 		__entry->offset = offset;
971 	),
972 
973 	TP_printk("%u@0x%016llx:0x%08x",
974 		__entry->length, (unsigned long long)__entry->offset,
975 		__entry->handle
976 	)
977 );
978 
979 /**
980  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
981  **/
982 
/*
 * Fires when transport buffers are allocated for a task: records the
 * req and the call/receive buffer sizes requested by the task.
 */
983 TRACE_EVENT(xprtrdma_op_allocate,
984 	TP_PROTO(
985 		const struct rpc_task *task,
986 		const struct rpcrdma_req *req
987 	),
988 
989 	TP_ARGS(task, req),
990 
991 	TP_STRUCT__entry(
992 		__field(unsigned int, task_id)
993 		__field(unsigned int, client_id)
994 		__field(const void *, req)
995 		__field(size_t, callsize)
996 		__field(size_t, rcvsize)
997 	),
998 
999 	TP_fast_assign(
1000 		__entry->task_id = task->tk_pid;
1001 		__entry->client_id = task->tk_client->cl_clid;
1002 		__entry->req = req;
1003 		__entry->callsize = task->tk_rqstp->rq_callsize;
1004 		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
1005 	),
1006 
1007 	TP_printk("task:%u@%u req=%p (%zu, %zu)",
1008 		__entry->task_id, __entry->client_id,
1009 		__entry->req, __entry->callsize, __entry->rcvsize
1010 	)
1011 );
1012 
/*
 * Fires when a task's transport buffers are released: records the req
 * and its currently attached reply (may be NULL per rl_reply).
 */
1013 TRACE_EVENT(xprtrdma_op_free,
1014 	TP_PROTO(
1015 		const struct rpc_task *task,
1016 		const struct rpcrdma_req *req
1017 	),
1018 
1019 	TP_ARGS(task, req),
1020 
1021 	TP_STRUCT__entry(
1022 		__field(unsigned int, task_id)
1023 		__field(unsigned int, client_id)
1024 		__field(const void *, req)
1025 		__field(const void *, rep)
1026 	),
1027 
1028 	TP_fast_assign(
1029 		__entry->task_id = task->tk_pid;
1030 		__entry->client_id = task->tk_client->cl_clid;
1031 		__entry->req = req;
1032 		__entry->rep = req->rl_reply;
1033 	),
1034 
1035 	TP_printk("task:%u@%u req=%p rep=%p",
1036 		__entry->task_id, __entry->client_id,
1037 		__entry->req, __entry->rep
1038 	)
1039 );
1040 
1041 /**
1042  ** Callback events
1043  **/
1044 
/*
 * Fires when backchannel resources are set up: records how many
 * backchannel requests were provisioned on the transport.
 */
1045 TRACE_EVENT(xprtrdma_cb_setup,
1046 	TP_PROTO(
1047 		const struct rpcrdma_xprt *r_xprt,
1048 		unsigned int reqs
1049 	),
1050 
1051 	TP_ARGS(r_xprt, reqs),
1052 
1053 	TP_STRUCT__entry(
1054 		__field(const void *, r_xprt)
1055 		__field(unsigned int, reqs)
1056 		__string(addr, rpcrdma_addrstr(r_xprt))
1057 		__string(port, rpcrdma_portstr(r_xprt))
1058 	),
1059 
1060 	TP_fast_assign(
1061 		__entry->r_xprt = r_xprt;
1062 		__entry->reqs = reqs;
1063 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1064 		__assign_str(port, rpcrdma_portstr(r_xprt));
1065 	),
1066 
1067 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1068 		__get_str(addr), __get_str(port),
1069 		__entry->r_xprt, __entry->reqs
1070 	)
1071 );
1072 
/* Backchannel call/reply trace points (xprtrdma_cb_event class). */
1073 DEFINE_CB_EVENT(xprtrdma_cb_call);
1074 DEFINE_CB_EVENT(xprtrdma_cb_reply);
1075 
/*
 * Fires when a rep cannot be returned because its request is still
 * using it: records the task, XID, and the leaked rep pointer.
 */
1076 TRACE_EVENT(xprtrdma_leaked_rep,
1077 	TP_PROTO(
1078 		const struct rpc_rqst *rqst,
1079 		const struct rpcrdma_rep *rep
1080 	),
1081 
1082 	TP_ARGS(rqst, rep),
1083 
1084 	TP_STRUCT__entry(
1085 		__field(unsigned int, task_id)
1086 		__field(unsigned int, client_id)
1087 		__field(u32, xid)
1088 		__field(const void *, rep)
1089 	),
1090 
1091 	TP_fast_assign(
1092 		__entry->task_id = rqst->rq_task->tk_pid;
1093 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1094 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1095 		__entry->rep = rep;
1096 	),
1097 
1098 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1099 		__entry->task_id, __entry->client_id, __entry->xid,
1100 		__entry->rep
1101 	)
1102 );
1103 
1104 /**
1105  ** Server-side RPC/RDMA events
1106  **/
1107 
/*
 * Event class for server-side transport events: records the svc_xprt
 * pointer and its remote peer address string.
 */
1108 DECLARE_EVENT_CLASS(svcrdma_xprt_event,
1109 	TP_PROTO(
1110 		const struct svc_xprt *xprt
1111 	),
1112 
1113 	TP_ARGS(xprt),
1114 
1115 	TP_STRUCT__entry(
1116 		__field(const void *, xprt)
1117 		__string(addr, xprt->xpt_remotebuf)
1118 	),
1119 
1120 	TP_fast_assign(
1121 		__entry->xprt = xprt;
1122 		__assign_str(addr, xprt->xpt_remotebuf);
1123 	),
1124 
1125 	TP_printk("xprt=%p addr=%s",
1126 		__entry->xprt, __get_str(addr)
1127 	)
1128 );
1129 
/* Instantiate a server transport trace point named svcrdma_xprt_<name>. */
1130 #define DEFINE_XPRT_EVENT(name)						\
1131 		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
1132 				TP_PROTO(				\
1133 					const struct svc_xprt *xprt	\
1134 				),					\
1135 				TP_ARGS(xprt))
1136 
1137 DEFINE_XPRT_EVENT(accept);
1138 DEFINE_XPRT_EVENT(fail);
1139 DEFINE_XPRT_EVENT(free);
1140 
/* Export RPC/RDMA procedure values for user-space decoding. */
1141 TRACE_DEFINE_ENUM(RDMA_MSG);
1142 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1143 TRACE_DEFINE_ENUM(RDMA_MSGP);
1144 TRACE_DEFINE_ENUM(RDMA_DONE);
1145 TRACE_DEFINE_ENUM(RDMA_ERROR);
1146 
/* Map an RPC/RDMA procedure value to its symbolic name in trace output. */
1147 #define show_rpcrdma_proc(x)						\
1148 		__print_symbolic(x,					\
1149 				{ RDMA_MSG, "RDMA_MSG" },		\
1150 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1151 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1152 				{ RDMA_DONE, "RDMA_DONE" },		\
1153 				{ RDMA_ERROR, "RDMA_ERROR" })
1154 
/*
 * Fires when the server decodes an incoming RPC/RDMA header: the four
 * header words are read in wire order (xid, vers, credits, proc), and
 * the total header length is recorded.
 */
1155 TRACE_EVENT(svcrdma_decode_rqst,
1156 	TP_PROTO(
1157 		__be32 *p,
1158 		unsigned int hdrlen
1159 	),
1160 
1161 	TP_ARGS(p, hdrlen),
1162 
1163 	TP_STRUCT__entry(
1164 		__field(u32, xid)
1165 		__field(u32, vers)
1166 		__field(u32, proc)
1167 		__field(u32, credits)
1168 		__field(unsigned int, hdrlen)
1169 	),
1170 
1171 	TP_fast_assign(
1172 		__entry->xid = be32_to_cpup(p++);
1173 		__entry->vers = be32_to_cpup(p++);
1174 		__entry->credits = be32_to_cpup(p++);
1175 		__entry->proc = be32_to_cpup(p);
1176 		__entry->hdrlen = hdrlen;
1177 	),
1178 
1179 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1180 		__entry->xid, __entry->vers, __entry->credits,
1181 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1182 );
1183 
/* Fires when an incoming message is too short to contain a header. */
1184 TRACE_EVENT(svcrdma_decode_short,
1185 	TP_PROTO(
1186 		unsigned int hdrlen
1187 	),
1188 
1189 	TP_ARGS(hdrlen),
1190 
1191 	TP_STRUCT__entry(
1192 		__field(unsigned int, hdrlen)
1193 	),
1194 
1195 	TP_fast_assign(
1196 		__entry->hdrlen = hdrlen;
1197 	),
1198 
1199 	TP_printk("hdrlen=%u", __entry->hdrlen)
1200 );
1201 
/*
 * Event class for malformed incoming requests: decodes the same four
 * header words as svcrdma_decode_rqst but prints proc numerically
 * (the value may not be a known procedure).
 */
1202 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1203 	TP_PROTO(
1204 		__be32 *p
1205 	),
1206 
1207 	TP_ARGS(p),
1208 
1209 	TP_STRUCT__entry(
1210 		__field(u32, xid)
1211 		__field(u32, vers)
1212 		__field(u32, proc)
1213 		__field(u32, credits)
1214 	),
1215 
1216 	TP_fast_assign(
1217 		__entry->xid = be32_to_cpup(p++);
1218 		__entry->vers = be32_to_cpup(p++);
1219 		__entry->credits = be32_to_cpup(p++);
1220 		__entry->proc = be32_to_cpup(p);
1221 	),
1222 
1223 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
1224 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1225 );
1226 
/* Instantiate a bad-request trace point named svcrdma_decode_<name>. */
1227 #define DEFINE_BADREQ_EVENT(name)					\
1228 		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
1229 				TP_PROTO(				\
1230 					__be32 *p			\
1231 				),					\
1232 				TP_ARGS(p))
1233 
1234 DEFINE_BADREQ_EVENT(badvers);
1235 DEFINE_BADREQ_EVENT(drop);
1236 DEFINE_BADREQ_EVENT(badproc);
1237 DEFINE_BADREQ_EVENT(parse);
1238 
/*
 * Event class recording one RDMA segment: an RDMA-eligible buffer
 * identified by its R_key (@handle), byte count (@length), and
 * registered offset (@offset). Displayed as length@offset:handle.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1265 
/*
 * Stamp out trace events named svcrdma_encode_<name> sharing the
 * svcrdma_segment_event record format: rseg (Read segment) and
 * wseg (Write segment).
 */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);
1277 
/*
 * Event class recording only the total byte count of an RDMA chunk
 * being encoded.
 */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);
1297 
/*
 * Stamp out trace events named svcrdma_encode_<name> sharing the
 * svcrdma_chunk_event record format: pzr (presumably the
 * position-zero Read chunk — confirm at call sites), write, and reply.
 */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);
1308 
/*
 * Records an encoded Read chunk: @length bytes at XDR stream
 * @position. Unlike the svcrdma_chunk_event class, Read chunks carry
 * a position field, hence the standalone event.
 */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1331 
/*
 * Event class recording the XID of a request for which the server
 * reports a transport-level error condition (see the
 * DEFINE_ERROR_EVENT instantiations). @xid is converted from
 * network byte order for display.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1351 
/*
 * Stamp out trace events named svcrdma_err_<name> sharing the
 * svcrdma_error_event record format: vers (bad protocol version)
 * and chunk (chunk-list processing error).
 */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1361 
1362 /**
1363  ** Server-side RDMA API events
1364  **/
1365 
1366 TRACE_EVENT(svcrdma_dma_map_page,
1367 	TP_PROTO(
1368 		const struct svcxprt_rdma *rdma,
1369 		const void *page
1370 	),
1371 
1372 	TP_ARGS(rdma, page),
1373 
1374 	TP_STRUCT__entry(
1375 		__field(const void *, page);
1376 		__string(device, rdma->sc_cm_id->device->name)
1377 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1378 	),
1379 
1380 	TP_fast_assign(
1381 		__entry->page = page;
1382 		__assign_str(device, rdma->sc_cm_id->device->name);
1383 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1384 	),
1385 
1386 	TP_printk("addr=%s device=%s page=%p",
1387 		__get_str(addr), __get_str(device), __entry->page
1388 	)
1389 );
1390 
/*
 * Records the @status returned while DMA-mapping an R/W context,
 * along with the device name and remote peer address of @rdma.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);
1415 
1416 TRACE_EVENT(svcrdma_send_failed,
1417 	TP_PROTO(
1418 		const struct svc_rqst *rqst,
1419 		int status
1420 	),
1421 
1422 	TP_ARGS(rqst, status),
1423 
1424 	TP_STRUCT__entry(
1425 		__field(int, status)
1426 		__field(u32, xid)
1427 		__field(const void *, xprt)
1428 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1429 	),
1430 
1431 	TP_fast_assign(
1432 		__entry->status = status;
1433 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1434 		__entry->xprt = rqst->rq_xprt;
1435 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1436 	),
1437 
1438 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1439 		__entry->xprt, __get_str(addr),
1440 		__entry->xid, __entry->status
1441 	)
1442 );
1443 
/*
 * Event class for Send-side completions. Captures the completion
 * queue entry pointer and completion status; @vendor_err is recorded
 * only when the completion did not succeed (it is undefined in @wc
 * otherwise).
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1471 
/*
 * Stamp out trace events named svcrdma_wc_<name> sharing the
 * svcrdma_sendcomp_event record format.
 */
#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))
1478 
/*
 * Records the result (@status) of posting Send WR @wr. The rkey to be
 * invalidated is recorded only for Send-with-Invalidate requests;
 * for plain Sends it is shown as 0.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		/* ex.invalidate_rkey is valid only for SEND_WITH_INV */
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);
1507 
/* Completion of a Send WR: svcrdma_wc_send */
DEFINE_SENDCOMP_EVENT(send);
1509 
/*
 * Records the result (@status) of posting Receive WR @wr, keyed by
 * its completion queue entry pointer.
 */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);
1532 
/*
 * Records a Receive completion. On success the received byte count is
 * captured; on failure byte_len is reported as 0 and the device's
 * vendor error code is captured instead (each field is defined in
 * @wc only for its respective case).
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1565 
/*
 * Records the result (@status) of posting an RDMA Read/Write chain:
 * @cqe identifies the completion context, @sqecount is the number of
 * Send Queue entries the chain consumed.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);
1591 
/* Completions of RDMA Read and Write chains: svcrdma_wc_read/_write */
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
1594 
1595 TRACE_EVENT(svcrdma_cm_event,
1596 	TP_PROTO(
1597 		const struct rdma_cm_event *event,
1598 		const struct sockaddr *sap
1599 	),
1600 
1601 	TP_ARGS(event, sap),
1602 
1603 	TP_STRUCT__entry(
1604 		__field(unsigned int, event)
1605 		__field(int, status)
1606 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1607 	),
1608 
1609 	TP_fast_assign(
1610 		__entry->event = event->event;
1611 		__entry->status = event->status;
1612 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1613 			 "%pISpc", sap);
1614 	),
1615 
1616 	TP_printk("addr=%s event=%s (%u/%d)",
1617 		__entry->addr,
1618 		rdma_show_cm_event(__entry->event),
1619 		__entry->event, __entry->status
1620 	)
1621 );
1622 
1623 TRACE_EVENT(svcrdma_qp_error,
1624 	TP_PROTO(
1625 		const struct ib_event *event,
1626 		const struct sockaddr *sap
1627 	),
1628 
1629 	TP_ARGS(event, sap),
1630 
1631 	TP_STRUCT__entry(
1632 		__field(unsigned int, event)
1633 		__string(device, event->device->name)
1634 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1635 	),
1636 
1637 	TP_fast_assign(
1638 		__entry->event = event->event;
1639 		__assign_str(device, event->device->name);
1640 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1641 			 "%pISpc", sap);
1642 	),
1643 
1644 	TP_printk("addr=%s dev=%s event=%s (%u)",
1645 		__entry->addr, __get_str(device),
1646 		rdma_show_ib_event(__entry->event), __entry->event
1647 	)
1648 );
1649 
/*
 * Event class snapshotting Send Queue occupancy on @rdma: currently
 * available SQ entries (read atomically) versus total SQ depth, plus
 * the remote peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1673 
/*
 * Stamp out trace events named svcrdma_sq_<name> sharing the
 * svcrdma_sendqueue_event record format: full (SQ exhausted) and
 * retry (posting retried).
 */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1683 
1684 #endif /* _TRACE_RPCRDMA_H */
1685 
1686 #include <trace/define_trace.h>
1687