/* xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 2169e6da) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
/*
 * Class for events that record the disposition of an incoming RPC/RDMA
 * reply: the rpcrdma_rep, its owning transport, and the XID, protocol
 * version, and procedure number already parsed into the rep.
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		/* header fields arrive big-endian; record them in CPU order */
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

/* Instantiate one tracepoint in the xprtrdma_reply_event class */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_event, name,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))

/*
 * Class for transport-scoped events: records the rpcrdma_xprt pointer
 * along with the peer's presentation address and port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate one tracepoint in the xprtrdma_rxprt class */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
87 
/*
 * Class for Read chunk registration events: records the owning RPC task,
 * the chunk's position in the RPC message, and the MR's nents, handle,
 * length, and offset, plus the remaining segment count.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	/* nents < nsegs means more segments follow this MR */
	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate a Read chunk tracepoint named xprtrdma_chunk_<name> */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))

/*
 * Class for Write/Reply chunk registration events: identical to
 * xprtrdma_rdch_event except that these chunks carry no position field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	/* nents < nsegs means more segments follow this MR */
	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate a Write/Reply chunk tracepoint named xprtrdma_chunk_<name> */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
183 
/*
 * Class for FRWR work-completion events: recovers the owning
 * rpcrdma_mr from the embedded frwr and records the WC status and
 * vendor error (vendor_err only when the WC failed).
 */
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		/* frwr is embedded in rpcrdma_mr; walk back to the MR */
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p: %s (%u/0x%x)",
		__entry->mr, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one tracepoint in the xprtrdma_frwr_done class */
#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))

/* Export DMA direction values so user space can decode them */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Render a dma_data_direction value symbolically in trace output */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
230 
/*
 * Class for MR state-change events: records the MR pointer and its
 * handle, length, offset, and DMA direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate an MR tracepoint named xprtrdma_mr_<name> */
#define DEFINE_MR_EVENT(name) \
		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
				TP_PROTO( \
					const struct rpcrdma_mr *mr \
				), \
				TP_ARGS(mr))

/*
 * Class for backchannel (callback) events: records the rpc_rqst, the
 * rpcrdma_req/rep derived from it, and the request XID.
 */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

/* Instantiate one tracepoint in the xprtrdma_cb_event class */
#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))
300 
301 /**
302  ** Connection events
303  **/
304 
/*
 * Fires on each RDMA CM event delivered to the transport's CM handler;
 * records the event code and its status alongside the peer address.
 */
TRACE_EVENT(xprtrdma_cm_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);

/*
 * Fires when the transport disconnects; captures the disconnect status
 * and whether rx_ep.rep_connected still reads as connected (== 1).
 */
TRACE_EVENT(xprtrdma_disconnect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int status
	),

	TP_ARGS(r_xprt, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, status)
		__field(int, connected)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->status = status;
		__entry->connected = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->status,
		__entry->connected == 1 ? "still " : "dis"
	)
);

/* Transport lifetime and connection state-change tracepoints */
DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
377 
/*
 * Fires when connect/reconnect timeouts are set on a transport; the
 * values are stored in jiffies and displayed in seconds (divided by HZ).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);

/*
 * Fires on an IB queue pair event; records the event code and the name
 * of the device that raised it.
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);
439 
440 /**
441  ** Call events
442  **/
443 
/*
 * Fires after a batch of MRs is allocated for a transport; records how
 * many were created.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
	),

	TP_printk("r_xprt=%p: created %u MRs",
		__entry->r_xprt, __entry->count
	)
);

/* Fires when the transport runs out of MRs */
DEFINE_RXPRT_EVENT(xprtrdma_nomrs);

/* Chunk registration tracepoints: xprtrdma_chunk_{read,write,reply} */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export chunk-type values so user space can decode them */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Render an rpcrdma chunk type symbolically in trace output */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
486 
/*
 * Fires after an RPC call is marshaled into an RPC/RDMA message;
 * records the transport header length, the three send-buffer section
 * lengths, and the chosen read/write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		/* the rpc_rqst is embedded in the rpcrdma_req */
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);

/*
 * Fires when marshaling an RPC call fails; records the error code
 * returned by the marshaling path.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);

/*
 * Fires when preparing a Send WR fails; same shape as
 * xprtrdma_marshal_failed but raised from the send-preparation path.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
584 
/*
 * Fires when a Send WR is posted; records the SGE count, whether the
 * Send was signaled, and the return status of the post operation.
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		/* tk_client can be NULL here, hence the guard */
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->req = req;
		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
				    IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->num_sge,
		(__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled " : ""),
		__entry->status
	)
);

/*
 * Fires when a single Receive WR is posted; records only the CQE that
 * identifies the Receive.
 */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct ib_cqe *cqe
	),

	TP_ARGS(cqe),

	TP_STRUCT__entry(
		__field(const void *, cqe)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
	),

	TP_printk("cqe=%p",
		__entry->cqe
	)
);

/*
 * Fires after a batch of Receives is posted; records the batch size,
 * the transport's active Receive count, and the posting status.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep.rep_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
676 
677 /**
678  ** Completion events
679  **/
680 
/*
 * Fires on Send completion; records the owning req, how many pages the
 * send context unmapped, and the WC status/vendor error.
 */
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		/* vendor_err is meaningful only on failure */
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/*
 * Fires on Receive completion; byte_len is valid only when the WC
 * succeeded, vendor_err only when it failed.
 */
TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* FRWR completion tracepoints: fast-register and local-invalidate */
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
747 
/*
 * Fires when FRWR MR allocation completes; records the MR pointer and
 * the allocation return code.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->rc	= rc;
	),

	TP_printk("mr=%p: rc=%d",
		__entry->mr, __entry->rc
	)
);

/*
 * Fires when an FRWR MR is deregistered; records the MR's RDMA segment
 * coordinates, its DMA direction, and the deregistration return code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);

/*
 * Fires when building the MR's scatterlist fails; records the first
 * segment's DMA address, the DMA direction, and the failing nents.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);

/*
 * Fires when fewer pages were fast-registered than the MR holds;
 * records how many were mapped out of mr_nents.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);

/* MR lifecycle tracepoints: xprtrdma_mr_<name> */
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(remoteinv);
DEFINE_MR_EVENT(recycle);
870 
871 TRACE_EVENT(xprtrdma_dma_maperr,
872 	TP_PROTO(
873 		u64 addr
874 	),
875 
876 	TP_ARGS(addr),
877 
878 	TP_STRUCT__entry(
879 		__field(u64, addr)
880 	),
881 
882 	TP_fast_assign(
883 		__entry->addr = addr;
884 	),
885 
886 	TP_printk("dma addr=0x%llx\n", __entry->addr)
887 );
888 
889 /**
890  ** Reply events
891  **/
892 
/*
 * Fires when an incoming reply is matched to its outstanding request;
 * records the rep/req pair, the XID, and the credit grant carried in
 * the reply.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);

/*
 * Fires when reply completion is deferred; identifies the reply and
 * the task that owns its matched rqst.
 */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);

/* Reply sanity-check failure tracepoints */
DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
958 
/*
 * Fires when reply data is copied back into the receive buffer;
 * records the head iovec base and the data/header lengths involved.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int len,
		int hdrlen
	),

	TP_ARGS(rqst, len, hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, base)
		__field(int, len)
		__field(int, hdrlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
		__entry->len = len;
		__entry->hdrlen = hdrlen;
	),

	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->base, __entry->len, __entry->hdrlen
	)
);

/*
 * Per-page variant of xprtrdma_fixup: records the page number, copy
 * position, and remaining/current lengths for one page copy.
 */
TRACE_EVENT(xprtrdma_fixup_pg,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		int pageno,
		const void *pos,
		int len,
		int curlen
	),

	TP_ARGS(rqst, pageno, pos, len, curlen),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, pos)
		__field(int, pageno)
		__field(int, len)
		__field(int, curlen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->pageno = pageno;
		__entry->len = len;
		__entry->curlen = curlen;
	),

	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
		__entry->task_id, __entry->client_id,
		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
	)
);

/*
 * Fires for each RDMA segment decoded from a chunk list; records the
 * segment's handle, length, and offset.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1051 
1052 /**
1053  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1054  **/
1055 
/*
 * Fires when transport buffers are allocated for an RPC; records the
 * req and the rqst's call/receive buffer sizes.
 */
TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);

/*
 * Fires when an RPC's transport buffers are released; records the req
 * and its currently attached reply, if any.
 */
TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);
1113 
1114 /**
1115  ** Callback events
1116  **/
1117 
/*
 * Fires when backchannel resources are set up on a transport; records
 * how many backchannel requests were provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);

/* Backchannel call/reply tracepoints */
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);

/*
 * Fires when a rep is found still attached to a completing rqst;
 * identifies the leaked rep and its owner task.
 */
TRACE_EVENT(xprtrdma_leaked_rep,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rqst, rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->rep = rep;
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1176 
1177 /**
1178  ** Server-side RPC/RDMA events
1179  **/
1180 
/*
 * Class for server-side transport events: records the svc_xprt pointer
 * and the remote peer's address string.
 */
DECLARE_EVENT_CLASS(svcrdma_xprt_event,
	TP_PROTO(
		const struct svc_xprt *xprt
	),

	TP_ARGS(xprt),

	TP_STRUCT__entry(
		__field(const void *, xprt)
		__string(addr, xprt->xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->xprt = xprt;
		__assign_str(addr, xprt->xpt_remotebuf);
	),

	TP_printk("xprt=%p addr=%s",
		__entry->xprt, __get_str(addr)
	)
);

/* Instantiate a server transport tracepoint named svcrdma_xprt_<name> */
#define DEFINE_XPRT_EVENT(name)						\
		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
				TP_PROTO(				\
					const struct svc_xprt *xprt	\
				),					\
				TP_ARGS(xprt))

/* Server transport lifetime tracepoints */
DEFINE_XPRT_EVENT(accept);
DEFINE_XPRT_EVENT(fail);
DEFINE_XPRT_EVENT(free);

/* Export RPC/RDMA procedure values so user space can decode them */
TRACE_DEFINE_ENUM(RDMA_MSG);
TRACE_DEFINE_ENUM(RDMA_NOMSG);
TRACE_DEFINE_ENUM(RDMA_MSGP);
TRACE_DEFINE_ENUM(RDMA_DONE);
TRACE_DEFINE_ENUM(RDMA_ERROR);

/* Render an RPC/RDMA procedure value symbolically in trace output */
#define show_rpcrdma_proc(x)						\
		__print_symbolic(x,					\
				{ RDMA_MSG, "RDMA_MSG" },		\
				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
				{ RDMA_MSGP, "RDMA_MSGP" },		\
				{ RDMA_DONE, "RDMA_DONE" },		\
				{ RDMA_ERROR, "RDMA_ERROR" })
1227 
/*
 * Fired after an incoming RPC/RDMA transport header has been parsed.
 * @p points at the header; the four words are consumed in on-the-wire
 * order: xid, vers, credits, proc.
 */
TRACE_EVENT(svcrdma_decode_rqst,
	TP_PROTO(
		__be32 *p,
		unsigned int hdrlen
	),

	TP_ARGS(p, hdrlen),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
		__entry->hdrlen = hdrlen;
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
		__entry->xid, __entry->vers, __entry->credits,
		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
);
1256 
/*
 * Fired when an incoming message is too short to contain a complete
 * transport header; records only the received length.
 */
TRACE_EVENT(svcrdma_decode_short,
	TP_PROTO(
		unsigned int hdrlen
	),

	TP_ARGS(hdrlen),

	TP_STRUCT__entry(
		__field(unsigned int, hdrlen)
	),

	TP_fast_assign(
		__entry->hdrlen = hdrlen;
	),

	TP_printk("hdrlen=%u", __entry->hdrlen)
);
1274 
/*
 * Event class for rejected incoming requests. @p points at the
 * transport header; words are read in on-the-wire order (xid, vers,
 * credits, proc). proc is shown numerically here because a bad
 * request may carry an unrecognized procedure value.
 */
DECLARE_EVENT_CLASS(svcrdma_badreq_event,
	TP_PROTO(
		__be32 *p
	),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__field(u32, xid)
		__field(u32, vers)
		__field(u32, proc)
		__field(u32, credits)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpup(p++);
		__entry->vers = be32_to_cpup(p++);
		__entry->credits = be32_to_cpup(p++);
		__entry->proc = be32_to_cpup(p);
	),

	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
);
1299 
/* Instantiate a tracepoint named svcrdma_decode_<name> from the class above */
#define DEFINE_BADREQ_EVENT(name)					\
		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
				TP_PROTO(				\
					__be32 *p			\
				),					\
				TP_ARGS(p))

DEFINE_BADREQ_EVENT(badvers);
DEFINE_BADREQ_EVENT(drop);
DEFINE_BADREQ_EVENT(badproc);
DEFINE_BADREQ_EVENT(parse);
1311 
/*
 * Event class for one RDMA segment. Output format is
 * "length@offset:handle", matching segment notation used elsewhere
 * in the RPC/RDMA trace points.
 */
DECLARE_EVENT_CLASS(svcrdma_segment_event,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1338 
/* Instantiate a tracepoint named svcrdma_encode_<name> per segment type */
#define DEFINE_SEGMENT_EVENT(name)					\
		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 handle,			\
					u32 length,			\
					u64 offset			\
				),					\
				TP_ARGS(handle, length, offset))

DEFINE_SEGMENT_EVENT(rseg);
DEFINE_SEGMENT_EVENT(wseg);
1350 
/* Event class recording only the byte length of an encoded chunk */
DECLARE_EVENT_CLASS(svcrdma_chunk_event,
	TP_PROTO(
		u32 length
	),

	TP_ARGS(length),

	TP_STRUCT__entry(
		__field(u32, length)
	),

	TP_fast_assign(
		__entry->length = length;
	),

	TP_printk("length=%u",
		__entry->length
	)
);
1370 
/* Instantiate a tracepoint named svcrdma_encode_<name> per chunk type */
#define DEFINE_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
				TP_PROTO(				\
					u32 length			\
				),					\
				TP_ARGS(length))

DEFINE_CHUNK_EVENT(pzr);
DEFINE_CHUNK_EVENT(write);
DEFINE_CHUNK_EVENT(reply);
1381 
/*
 * Like the chunk events above, but Read chunks additionally carry a
 * position, so this one is a standalone TRACE_EVENT.
 */
TRACE_EVENT(svcrdma_encode_read,
	TP_PROTO(
		u32 length,
		u32 position
	),

	TP_ARGS(length, position),

	TP_STRUCT__entry(
		__field(u32, length)
		__field(u32, position)
	),

	TP_fast_assign(
		__entry->length = length;
		__entry->position = position;
	),

	TP_printk("length=%u position=%u",
		__entry->length, __entry->position
	)
);
1404 
/*
 * Event class for transport-level error replies; records only the XID
 * of the request being answered with an error.
 */
DECLARE_EVENT_CLASS(svcrdma_error_event,
	TP_PROTO(
		__be32 xid
	),

	TP_ARGS(xid),

	TP_STRUCT__entry(
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
	),

	TP_printk("xid=0x%08x",
		__entry->xid
	)
);
1424 
/* Instantiate a tracepoint named svcrdma_err_<name> from the class above */
#define DEFINE_ERROR_EVENT(name)					\
		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
				TP_PROTO(				\
					__be32 xid			\
				),					\
				TP_ARGS(xid))

DEFINE_ERROR_EVENT(vers);
DEFINE_ERROR_EVENT(chunk);
1434 
1435 /**
1436  ** Server-side RDMA API events
1437  **/
1438 
1439 TRACE_EVENT(svcrdma_dma_map_page,
1440 	TP_PROTO(
1441 		const struct svcxprt_rdma *rdma,
1442 		const void *page
1443 	),
1444 
1445 	TP_ARGS(rdma, page),
1446 
1447 	TP_STRUCT__entry(
1448 		__field(const void *, page);
1449 		__string(device, rdma->sc_cm_id->device->name)
1450 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1451 	),
1452 
1453 	TP_fast_assign(
1454 		__entry->page = page;
1455 		__assign_str(device, rdma->sc_cm_id->device->name);
1456 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1457 	),
1458 
1459 	TP_printk("addr=%s device=%s page=%p",
1460 		__get_str(addr), __get_str(device), __entry->page
1461 	)
1462 );
1463 
/*
 * Records a DMA-mapping status code for an RDMA R/W context, with the
 * device name and remote peer address.
 */
TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		int status
	),

	TP_ARGS(rdma, status),

	TP_STRUCT__entry(
		__field(int, status)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->status = status;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s status=%d",
		__get_str(addr), __get_str(device), __entry->status
	)
);
1488 
1489 TRACE_EVENT(svcrdma_send_failed,
1490 	TP_PROTO(
1491 		const struct svc_rqst *rqst,
1492 		int status
1493 	),
1494 
1495 	TP_ARGS(rqst, status),
1496 
1497 	TP_STRUCT__entry(
1498 		__field(int, status)
1499 		__field(u32, xid)
1500 		__field(const void *, xprt)
1501 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1502 	),
1503 
1504 	TP_fast_assign(
1505 		__entry->status = status;
1506 		__entry->xid = __be32_to_cpu(rqst->rq_xid);
1507 		__entry->xprt = rqst->rq_xprt;
1508 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1509 	),
1510 
1511 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1512 		__entry->xprt, __get_str(addr),
1513 		__entry->xid, __entry->status
1514 	)
1515 );
1516 
/*
 * Event class for send-side work completions. vendor_err is captured
 * only when the completion status is non-zero, since it is not
 * meaningful on success.
 */
DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status)
			__entry->vendor_err = wc->vendor_err;
		else
			__entry->vendor_err = 0;
	),

	TP_printk("cqe=%p status=%s (%u/0x%x)",
		__entry->cqe, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1544 
/* Instantiate a tracepoint named svcrdma_wc_<name> from the class above */
#define DEFINE_SENDCOMP_EVENT(name)					\
		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
				TP_PROTO(				\
					const struct ib_wc *wc		\
				),					\
				TP_ARGS(wc))
1551 
/*
 * Records the posting of a Send work request. inv_rkey is captured
 * only for IB_WR_SEND_WITH_INV; for all other opcodes it is 0.
 */
TRACE_EVENT(svcrdma_post_send,
	TP_PROTO(
		const struct ib_send_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(unsigned int, num_sge)
		__field(u32, inv_rkey)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->num_sge = wr->num_sge;
		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
					wr->ex.invalidate_rkey : 0;
		__entry->status = status;
	),

	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
		__entry->cqe, __entry->num_sge,
		__entry->inv_rkey, __entry->status
	)
);

/* Completion event for Send work requests: svcrdma_wc_send */
DEFINE_SENDCOMP_EVENT(send);
1582 
/* Records the posting of a Receive work request and its return status */
TRACE_EVENT(svcrdma_post_recv,
	TP_PROTO(
		const struct ib_recv_wr *wr,
		int status
	),

	TP_ARGS(wr, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = wr->wr_cqe;
		__entry->status = status;
	),

	TP_printk("cqe=%p status=%d",
		__entry->cqe, __entry->status
	)
);
1605 
/*
 * Records a Receive work completion. byte_len is valid only on
 * success; vendor_err only on failure — the unused one is zeroed.
 */
TRACE_EVENT(svcrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->cqe = wc->wr_cqe;
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
		__entry->cqe, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
1638 
/*
 * Records the posting of an RDMA Read/Write chain: the first cqe,
 * the number of SQ entries consumed, and the post status.
 */
TRACE_EVENT(svcrdma_post_rw,
	TP_PROTO(
		const void *cqe,
		int sqecount,
		int status
	),

	TP_ARGS(cqe, sqecount, status),

	TP_STRUCT__entry(
		__field(const void *, cqe)
		__field(int, sqecount)
		__field(int, status)
	),

	TP_fast_assign(
		__entry->cqe = cqe;
		__entry->sqecount = sqecount;
		__entry->status = status;
	),

	TP_printk("cqe=%p sqecount=%d status=%d",
		__entry->cqe, __entry->sqecount, __entry->status
	)
);
1664 
/* Completion events for Read/Write chains: svcrdma_wc_read, svcrdma_wc_write */
DEFINE_SENDCOMP_EVENT(read);
DEFINE_SENDCOMP_EVENT(write);
1667 
1668 TRACE_EVENT(svcrdma_cm_event,
1669 	TP_PROTO(
1670 		const struct rdma_cm_event *event,
1671 		const struct sockaddr *sap
1672 	),
1673 
1674 	TP_ARGS(event, sap),
1675 
1676 	TP_STRUCT__entry(
1677 		__field(unsigned int, event)
1678 		__field(int, status)
1679 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1680 	),
1681 
1682 	TP_fast_assign(
1683 		__entry->event = event->event;
1684 		__entry->status = event->status;
1685 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1686 			 "%pISpc", sap);
1687 	),
1688 
1689 	TP_printk("addr=%s event=%s (%u/%d)",
1690 		__entry->addr,
1691 		rdma_show_cm_event(__entry->event),
1692 		__entry->event, __entry->status
1693 	)
1694 );
1695 
1696 TRACE_EVENT(svcrdma_qp_error,
1697 	TP_PROTO(
1698 		const struct ib_event *event,
1699 		const struct sockaddr *sap
1700 	),
1701 
1702 	TP_ARGS(event, sap),
1703 
1704 	TP_STRUCT__entry(
1705 		__field(unsigned int, event)
1706 		__string(device, event->device->name)
1707 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1708 	),
1709 
1710 	TP_fast_assign(
1711 		__entry->event = event->event;
1712 		__assign_str(device, event->device->name);
1713 		snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1714 			 "%pISpc", sap);
1715 	),
1716 
1717 	TP_printk("addr=%s dev=%s event=%s (%u)",
1718 		__entry->addr, __get_str(device),
1719 		rdma_show_ib_event(__entry->event), __entry->event
1720 	)
1721 );
1722 
/*
 * Event class for Send Queue accounting: samples sc_sq_avail
 * (atomically) against the fixed sc_sq_depth, plus the peer address.
 */
DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
	TP_PROTO(
		const struct svcxprt_rdma *rdma
	),

	TP_ARGS(rdma),

	TP_STRUCT__entry(
		__field(int, avail)
		__field(int, depth)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->avail = atomic_read(&rdma->sc_sq_avail);
		__entry->depth = rdma->sc_sq_depth;
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s sc_sq_avail=%d/%d",
		__get_str(addr), __entry->avail, __entry->depth
	)
);
1746 
/* Instantiate a tracepoint named svcrdma_sq_<name> from the class above */
#define DEFINE_SQ_EVENT(name)						\
		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma \
				),					\
				TP_ARGS(rdma))

DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
1756 
1757 #endif /* _TRACE_RPCRDMA_H */
1758 
1759 #include <trace/define_trace.h>
1760