xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision d4957f01)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
/*
 * Reply event class: records the rpcrdma_rep, its owning transport,
 * and the XID, RPC/RDMA version, and procedure decoded from the
 * transport header (converted from on-the-wire big-endian).
 */
DECLARE_EVENT_CLASS(xprtrdma_reply_event,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(const void *, r_xprt)
		__field(u32, xid)
		__field(u32, version)
		__field(u32, proc)
	),

	TP_fast_assign(
		__entry->rep = rep;
		__entry->r_xprt = rep->rr_rxprt;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->version = be32_to_cpu(rep->rr_vers);
		__entry->proc = be32_to_cpu(rep->rr_proc);
	),

	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
		__entry->r_xprt, __entry->xid, __entry->rep,
		__entry->version, __entry->proc
	)
);

/* Instantiate one trace event from the xprtrdma_reply_event class */
#define DEFINE_REPLY_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_reply_event, name,		\
				TP_PROTO(				\
					const struct rpcrdma_rep *rep	\
				),					\
				TP_ARGS(rep))
56 
/*
 * Transport event class: records the rpcrdma_xprt pointer plus its
 * printable peer address and port strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_rxprt,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt
	),

	TP_ARGS(r_xprt),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p",
		__get_str(addr), __get_str(port), __entry->r_xprt
	)
);

/* Instantiate one trace event from the xprtrdma_rxprt class */
#define DEFINE_RXPRT_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_rxprt, name,			\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt \
				),					\
				TP_ARGS(r_xprt))
87 
/*
 * Connection event class: records a result code along with the
 * transport's current rep_connected state and peer address strings.
 */
DECLARE_EVENT_CLASS(xprtrdma_connect_class,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		int rc
	),

	TP_ARGS(r_xprt, rc),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(int, rc)
		__field(int, connect_status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->rc = rc;
		/* snapshot of connection state at the time of the event */
		__entry->connect_status = r_xprt->rx_ep.rep_connected;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connect status=%d",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->rc, __entry->connect_status
	)
);

/* Instantiate xprtrdma_<name> from the connect class */
#define DEFINE_CONN_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name,	\
				TP_PROTO(				\
					const struct rpcrdma_xprt *r_xprt, \
					int rc				\
				),					\
				TP_ARGS(r_xprt, rc))
125 
/*
 * Read chunk event class: records the owning RPC task, the chunk
 * position, and the MR's handle/length/offset. The "(more)"/"(last)"
 * annotation compares mr_nents against the remaining segment count.
 */
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
	TP_PROTO(
		const struct rpc_task *task,
		unsigned int pos,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, pos, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned int, pos)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->pos = pos;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->pos, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate xprtrdma_chunk_<name> from the read chunk class */
#define DEFINE_RDCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					unsigned int pos,		\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, pos, mr, nsegs))
175 
/*
 * Write/reply chunk event class: like the read chunk class but
 * without a chunk position field.
 */
DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
	TP_PROTO(
		const struct rpc_task *task,
		struct rpcrdma_mr *mr,
		int nsegs
	),

	TP_ARGS(task, mr, nsegs),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, nents)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(int, nsegs)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->nents = mr->mr_nents;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->nsegs = nsegs;
	),

	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
		__entry->task_id, __entry->client_id,
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle,
		__entry->nents < __entry->nsegs ? "more" : "last"
	)
);

/* Instantiate xprtrdma_chunk_<name> from the write chunk class */
#define DEFINE_WRCH_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
				TP_PROTO(				\
					const struct rpc_task *task,	\
					struct rpcrdma_mr *mr,		\
					int nsegs			\
				),					\
				TP_ARGS(task, mr, nsegs))
221 
/*
 * FRWR completion event class: maps the completed frwr back to its
 * containing rpcrdma_mr and records the WC status. The vendor error
 * is captured only when the completion status is non-zero.
 */
DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
	TP_PROTO(
		const struct ib_wc *wc,
		const struct rpcrdma_frwr *frwr
	),

	TP_ARGS(wc, frwr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk(
		"mr=%p: %s (%u/0x%x)",
		__entry->mr, rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);

/* Instantiate one trace event from the FRWR completion class */
#define DEFINE_FRWR_DONE_EVENT(name)					\
		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
				TP_PROTO(				\
					const struct ib_wc *wc,		\
					const struct rpcrdma_frwr *frwr	\
				),					\
				TP_ARGS(wc, frwr))
256 
/* Export DMA direction values so user-space tools can decode them */
TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
TRACE_DEFINE_ENUM(DMA_NONE);

/* Map a dma_data_direction value to a display string */
#define xprtrdma_show_direction(x)					\
		__print_symbolic(x,					\
				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
				{ DMA_NONE, "NONE" })
268 
/*
 * MR event class: records an MR's handle, length, offset, and DMA
 * mapping direction.
 */
DECLARE_EVENT_CLASS(xprtrdma_mr,
	TP_PROTO(
		const struct rpcrdma_mr *mr
	),

	TP_ARGS(mr),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir)
	)
);

/* Instantiate xprtrdma_mr_<name> from the MR class */
#define DEFINE_MR_EVENT(name) \
		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
				TP_PROTO( \
					const struct rpcrdma_mr *mr \
				), \
				TP_ARGS(mr))
305 
/*
 * Backchannel event class: records the rpc_rqst, its rpcrdma_req,
 * the attached reply, and the request's XID.
 */
DECLARE_EVENT_CLASS(xprtrdma_cb_event,
	TP_PROTO(
		const struct rpc_rqst *rqst
	),

	TP_ARGS(rqst),

	TP_STRUCT__entry(
		__field(const void *, rqst)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->rqst = rqst;
		__entry->req = rpcr_to_rdmar(rqst);
		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
		__entry->xid, __entry->rqst, __entry->req, __entry->rep
	)
);

/* Instantiate one trace event from the backchannel class */
#define DEFINE_CB_EVENT(name)						\
		DEFINE_EVENT(xprtrdma_cb_event, name,			\
				TP_PROTO(				\
					const struct rpc_rqst *rqst	\
				),					\
				TP_ARGS(rqst))
338 
339 /**
340  ** Connection events
341  **/
342 
/*
 * Records an RDMA CM event's type and status for a transport,
 * together with the peer address and port strings.
 */
TRACE_EVENT(xprtrdma_cm_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		struct rdma_cm_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__field(int, status)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__entry->status = event->status;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, rdma_show_cm_event(__entry->event),
		__entry->event, __entry->status
	)
);
373 
/* Connection state transitions */
DEFINE_CONN_EVENT(connect);
DEFINE_CONN_EVENT(disconnect);

/* Transport lifetime and administrative operations */
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
383 
/*
 * Records a connect operation and its (re)connect delay, in jiffies.
 */
TRACE_EVENT(xprtrdma_op_connect,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long delay
	),

	TP_ARGS(r_xprt, delay),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, delay)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->delay = delay;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->delay
	)
);
411 
412 
/*
 * Records new connect/reconnect timeout values. Stored in jiffies;
 * displayed in seconds (divided by HZ at print time).
 */
TRACE_EVENT(xprtrdma_op_set_cto,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned long connect,
		unsigned long reconnect
	),

	TP_ARGS(r_xprt, connect, reconnect),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned long, connect)
		__field(unsigned long, reconnect)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->connect = connect;
		__entry->reconnect = reconnect;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->connect / HZ, __entry->reconnect / HZ
	)
);
443 
/*
 * Records an IB queue pair event, including the device name and the
 * symbolic event type.
 */
TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		const struct ib_event *event
	),

	TP_ARGS(r_xprt, event),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, event)
		__string(name, event->device->name)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->event = event->event;
		__assign_str(name, event->device->name);
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__get_str(name), rdma_show_ib_event(__entry->event),
		__entry->event
	)
);
474 
475 /**
476  ** Call events
477  **/
478 
/*
 * Records how many MRs were created for a transport.
 */
TRACE_EVENT(xprtrdma_createmrs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count
	),

	TP_ARGS(r_xprt, count),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
		__field(unsigned int, count)
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count
	)
);
506 
/*
 * Records a request acquiring an MR; identifies the task, client,
 * and XID via the req's embedded rpc_rqst slot.
 */
TRACE_EVENT(xprtrdma_mr_get,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);
535 
/*
 * Records that no MR was available for a request; captures the same
 * task/client/XID identification as xprtrdma_mr_get.
 */
TRACE_EVENT(xprtrdma_nomrs,
	TP_PROTO(
		const struct rpcrdma_req *req
	),

	TP_ARGS(req),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->req = req;
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->req
	)
);
564 
/* Chunk-list construction events */
DEFINE_RDCH_EVENT(read);
DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);

/* Export chunk type enum values for user-space decoding */
TRACE_DEFINE_ENUM(rpcrdma_noch);
TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
TRACE_DEFINE_ENUM(rpcrdma_replych);

/* Map a chunk type to a display string */
#define xprtrdma_show_chunktype(x)					\
		__print_symbolic(x,					\
				{ rpcrdma_noch, "inline" },		\
				{ rpcrdma_noch_pullup, "pullup" },	\
				{ rpcrdma_noch_mapped, "mapped" },	\
				{ rpcrdma_readch, "read list" },	\
				{ rpcrdma_areadch, "*read list" },	\
				{ rpcrdma_writech, "write list" },	\
				{ rpcrdma_replych, "reply chunk" })
586 
/*
 * Records the marshaled request layout: transport header length, the
 * three xdr_buf section lengths of the send buffer, and the chosen
 * read/write chunk types.
 */
TRACE_EVENT(xprtrdma_marshal,
	TP_PROTO(
		const struct rpcrdma_req *req,
		unsigned int rtype,
		unsigned int wtype
	),

	TP_ARGS(req, rtype, wtype),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(unsigned int, hdrlen)
		__field(unsigned int, headlen)
		__field(unsigned int, pagelen)
		__field(unsigned int, taillen)
		__field(unsigned int, rtype)
		__field(unsigned int, wtype)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->hdrlen = req->rl_hdrbuf.len;
		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_snd_buf.page_len;
		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
		__entry->rtype = rtype;
		__entry->wtype = wtype;
	),

	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->hdrlen,
		__entry->headlen, __entry->pagelen, __entry->taillen,
		xprtrdma_show_chunktype(__entry->rtype),
		xprtrdma_show_chunktype(__entry->wtype)
	)
);
630 
/*
 * Records a marshaling failure and its return code.
 */
TRACE_EVENT(xprtrdma_marshal_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
657 
/*
 * Records a failure while preparing Send SGEs, with the return code.
 */
TRACE_EVENT(xprtrdma_prepsend_failed,
	TP_PROTO(const struct rpc_rqst *rqst,
		 int ret
	),

	TP_ARGS(rqst, ret),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(u32, xid)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->xid = be32_to_cpu(rqst->rq_xid);
		__entry->ret = ret;
	),

	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->ret
	)
);
684 
/*
 * Records posting a Send WR: SGE count, whether the WR requested a
 * signaled completion, and the post status. The client id falls back
 * to -1 when the task has no rpc_clnt (e.g. backchannel requests).
 */
TRACE_EVENT(xprtrdma_post_send,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(int, num_sge)
		__field(int, signaled)
		__field(int, status)
	),

	TP_fast_assign(
		const struct rpc_rqst *rqst = &req->rl_slot;

		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client ?
				     rqst->rq_task->tk_client->cl_clid : -1;
		__entry->req = req;
		__entry->num_sge = req->rl_wr.num_sge;
		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
		__entry->status = status;
	),

	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->num_sge,
		(__entry->num_sge == 1 ? "" : "s"),
		(__entry->signaled ? "signaled " : ""),
		__entry->status
	)
);
722 
/*
 * Records posting a single Receive buffer.
 */
TRACE_EVENT(xprtrdma_post_recv,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->rep = rep;
	),

	TP_printk("rep=%p",
		__entry->rep
	)
);
742 
/*
 * Records a batch Receive post: how many were requested, the post
 * status, and the transport's active receive count afterward.
 */
TRACE_EVENT(xprtrdma_post_recvs,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int count,
		int status
	),

	TP_ARGS(r_xprt, count, status),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, count)
		__field(int, status)
		__field(int, posted)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->count = count;
		__entry->status = status;
		__entry->posted = r_xprt->rx_ep.rep_receive_count;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
		__get_str(addr), __get_str(port), __entry->r_xprt,
		__entry->count, __entry->posted, __entry->status
	)
);
775 
/*
 * Records posting LocalInv WRs for a request, with the post status.
 */
TRACE_EVENT(xprtrdma_post_linv,
	TP_PROTO(
		const struct rpcrdma_req *req,
		int status
	),

	TP_ARGS(req, status),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(int, status)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->req = req;
		__entry->status = status;
		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
	),

	TP_printk("req=%p xid=0x%08x status=%d",
		__entry->req, __entry->xid, __entry->status
	)
);
800 
801 /**
802  ** Completion events
803  **/
804 
/*
 * Records a Send completion: the owning req, the sendctx's unmapped
 * page count, and the WC status (vendor error only on failure).
 */
TRACE_EVENT(xprtrdma_wc_send,
	TP_PROTO(
		const struct rpcrdma_sendctx *sc,
		const struct ib_wc *wc
	),

	TP_ARGS(sc, wc),

	TP_STRUCT__entry(
		__field(const void *, req)
		__field(unsigned int, unmap_count)
		__field(unsigned int, status)
		__field(unsigned int, vendor_err)
	),

	TP_fast_assign(
		__entry->req = sc->sc_req;
		__entry->unmap_count = sc->sc_unmap_count;
		__entry->status = wc->status;
		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
	),

	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
		__entry->req, __entry->unmap_count,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
833 
/*
 * Records a Receive completion. The rep is recovered from the WC's
 * CQE. On failure, byte_len is zeroed and the vendor error captured;
 * on success, the received byte count is recorded instead.
 */
TRACE_EVENT(xprtrdma_wc_receive,
	TP_PROTO(
		const struct ib_wc *wc
	),

	TP_ARGS(wc),

	TP_STRUCT__entry(
		__field(const void *, rep)
		__field(u32, byte_len)
		__field(unsigned int, status)
		__field(u32, vendor_err)
	),

	TP_fast_assign(
		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
					    rr_cqe);
		__entry->status = wc->status;
		if (wc->status) {
			__entry->byte_len = 0;
			__entry->vendor_err = wc->vendor_err;
		} else {
			__entry->byte_len = wc->byte_len;
			__entry->vendor_err = 0;
		}
	),

	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
		__entry->rep, __entry->byte_len,
		rdma_show_wc_status(__entry->status),
		__entry->status, __entry->vendor_err
	)
);
867 
/* FRWR completion events: FastReg and LocalInv variants */
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
872 
/*
 * Records the result of allocating FRWR resources for an MR.
 */
TRACE_EVENT(xprtrdma_frwr_alloc,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->rc	= rc;
	),

	TP_printk("mr=%p: rc=%d",
		__entry->mr, __entry->rc
	)
);
895 
/*
 * Records deregistering an FRWR MR: its handle/length/offset, DMA
 * direction, and the deregistration result code.
 */
TRACE_EVENT(xprtrdma_frwr_dereg,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int rc
	),

	TP_ARGS(mr, rc),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
		__field(u32, dir)
		__field(int, rc)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->handle = mr->mr_handle;
		__entry->length = mr->mr_length;
		__entry->offset = mr->mr_offset;
		__entry->dir    = mr->mr_dir;
		__entry->rc	= rc;
	),

	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
		__entry->mr, __entry->length,
		(unsigned long long)__entry->offset, __entry->handle,
		xprtrdma_show_direction(__entry->dir),
		__entry->rc
	)
);
929 
/*
 * Records a scatterlist error during FRWR registration: the MR's
 * first DMA address, direction, and the failing sg_nents value.
 */
TRACE_EVENT(xprtrdma_frwr_sgerr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int sg_nents
	),

	TP_ARGS(mr, sg_nents),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->nents = sg_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->nents
	)
);
958 
/*
 * Records an FRWR map failure: how many pages were actually mapped
 * versus the MR's expected nents.
 */
TRACE_EVENT(xprtrdma_frwr_maperr,
	TP_PROTO(
		const struct rpcrdma_mr *mr,
		int num_mapped
	),

	TP_ARGS(mr, num_mapped),

	TP_STRUCT__entry(
		__field(const void *, mr)
		__field(u64, addr)
		__field(u32, dir)
		__field(int, num_mapped)
		__field(int, nents)
	),

	TP_fast_assign(
		__entry->mr = mr;
		__entry->addr = mr->mr_sg->dma_address;
		__entry->dir = mr->mr_dir;
		__entry->num_mapped = num_mapped;
		__entry->nents = mr->mr_nents;
	),

	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
		__entry->mr, __entry->addr,
		xprtrdma_show_direction(__entry->dir),
		__entry->num_mapped, __entry->nents
	)
);
989 
/* MR lifecycle events */
DEFINE_MR_EVENT(localinv);
DEFINE_MR_EVENT(map);
DEFINE_MR_EVENT(unmap);
DEFINE_MR_EVENT(remoteinv);
DEFINE_MR_EVENT(recycle);
995 
996 TRACE_EVENT(xprtrdma_dma_maperr,
997 	TP_PROTO(
998 		u64 addr
999 	),
1000 
1001 	TP_ARGS(addr),
1002 
1003 	TP_STRUCT__entry(
1004 		__field(u64, addr)
1005 	),
1006 
1007 	TP_fast_assign(
1008 		__entry->addr = addr;
1009 	),
1010 
1011 	TP_printk("dma addr=0x%llx\n", __entry->addr)
1012 );
1013 
1014 /**
1015  ** Reply events
1016  **/
1017 
/*
 * Records matching an incoming reply to its request, including the
 * server-granted credit count.
 */
TRACE_EVENT(xprtrdma_reply,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_rep *rep,
		const struct rpcrdma_req *req,
		unsigned int credits
	),

	TP_ARGS(task, rep, req, credits),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(const void *, req)
		__field(u32, xid)
		__field(unsigned int, credits)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->req = req;
		__entry->xid = be32_to_cpu(rep->rr_xid);
		__entry->credits = credits;
	),

	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->credits, __entry->rep, __entry->req
	)
);
1051 
/*
 * Records deferring reply completion; identifies the task and client
 * through the rep's already-matched rr_rqst.
 */
TRACE_EVENT(xprtrdma_defer_cmp,
	TP_PROTO(
		const struct rpcrdma_rep *rep
	),

	TP_ARGS(rep),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, rep)
		__field(u32, xid)
	),

	TP_fast_assign(
		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
		__entry->rep = rep;
		__entry->xid = be32_to_cpu(rep->rr_xid);
	),

	TP_printk("task:%u@%u xid=0x%08x rep=%p",
		__entry->task_id, __entry->client_id, __entry->xid,
		__entry->rep
	)
);
1078 
/* Reply parsing failure events */
DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
DEFINE_REPLY_EVENT(xprtrdma_reply_short);
DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1083 
/*
 * Records data-copy fixup of an inline reply into the receive
 * xdr_buf, with the buffer's head/page/tail section lengths.
 */
TRACE_EVENT(xprtrdma_fixup,
	TP_PROTO(
		const struct rpc_rqst *rqst,
		unsigned long fixup
	),

	TP_ARGS(rqst, fixup),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(unsigned long, fixup)
		__field(size_t, headlen)
		__field(unsigned int, pagelen)
		__field(size_t, taillen)
	),

	TP_fast_assign(
		__entry->task_id = rqst->rq_task->tk_pid;
		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
		__entry->fixup = fixup;
		__entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
		__entry->pagelen = rqst->rq_rcv_buf.page_len;
		__entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
	),

	TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
		__entry->task_id, __entry->client_id, __entry->fixup,
		__entry->headlen, __entry->pagelen, __entry->taillen
	)
);
1115 
/*
 * Records one decoded RDMA segment: handle, length, and offset.
 */
TRACE_EVENT(xprtrdma_decode_seg,
	TP_PROTO(
		u32 handle,
		u32 length,
		u64 offset
	),

	TP_ARGS(handle, length, offset),

	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, length)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->handle = handle;
		__entry->length = length;
		__entry->offset = offset;
	),

	TP_printk("%u@0x%016llx:0x%08x",
		__entry->length, (unsigned long long)__entry->offset,
		__entry->handle
	)
);
1142 
1143 /**
1144  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1145  **/
1146 
/*
 * Records buffer allocation for a request, with the task's call and
 * receive buffer sizes.
 */
TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(size_t, callsize)
		__field(size_t, rcvsize)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->callsize = task->tk_rqstp->rq_callsize;
		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
	),

	TP_printk("task:%u@%u req=%p (%zu, %zu)",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->callsize, __entry->rcvsize
	)
);
1176 
/*
 * Records freeing a request and its attached reply (if any).
 */
TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
	),

	TP_ARGS(task, req),

	TP_STRUCT__entry(
		__field(unsigned int, task_id)
		__field(unsigned int, client_id)
		__field(const void *, req)
		__field(const void *, rep)
	),

	TP_fast_assign(
		__entry->task_id = task->tk_pid;
		__entry->client_id = task->tk_client->cl_clid;
		__entry->req = req;
		__entry->rep = req->rl_reply;
	),

	TP_printk("task:%u@%u req=%p rep=%p",
		__entry->task_id, __entry->client_id,
		__entry->req, __entry->rep
	)
);
1204 
1205 /**
1206  ** Callback events
1207  **/
1208 
/*
 * Records backchannel setup: how many requests were provisioned.
 */
TRACE_EVENT(xprtrdma_cb_setup,
	TP_PROTO(
		const struct rpcrdma_xprt *r_xprt,
		unsigned int reqs
	),

	TP_ARGS(r_xprt, reqs),

	TP_STRUCT__entry(
		__field(const void *, r_xprt)
		__field(unsigned int, reqs)
		__string(addr, rpcrdma_addrstr(r_xprt))
		__string(port, rpcrdma_portstr(r_xprt))
	),

	TP_fast_assign(
		__entry->r_xprt = r_xprt;
		__entry->reqs = reqs;
		__assign_str(addr, rpcrdma_addrstr(r_xprt));
		__assign_str(port, rpcrdma_portstr(r_xprt));
	),

	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
		__get_str(addr), __get_str(port),
		__entry->r_xprt, __entry->reqs
	)
);
1236 
/* Backchannel call/reply events */
DEFINE_CB_EVENT(xprtrdma_cb_call);
DEFINE_CB_EVENT(xprtrdma_cb_reply);
1239 
/*
 * Reports an rpcrdma_rep that leaked: records the task/client IDs and
 * XID of the rqst it was associated with, plus the rep pointer itself.
 */
1240 TRACE_EVENT(xprtrdma_leaked_rep,
1241 	TP_PROTO(
1242 		const struct rpc_rqst *rqst,
1243 		const struct rpcrdma_rep *rep
1244 	),
1245 
1246 	TP_ARGS(rqst, rep),
1247 
1248 	TP_STRUCT__entry(
1249 		__field(unsigned int, task_id)
1250 		__field(unsigned int, client_id)
1251 		__field(u32, xid)
1252 		__field(const void *, rep)
1253 	),
1254 
1255 	TP_fast_assign(
1256 		__entry->task_id = rqst->rq_task->tk_pid;
1257 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1258 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1259 		__entry->rep = rep;
1260 	),
1261 
1262 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1263 		__entry->task_id, __entry->client_id, __entry->xid,
1264 		__entry->rep
1265 	)
1266 );
1267 
1268 /**
1269  ** Server-side RPC/RDMA events
1270  **/
1271 
/*
 * Event class: captures a server-side svc_xprt pointer and a snapshot
 * of its remote peer address string (xpt_remotebuf).
 */
1272 DECLARE_EVENT_CLASS(svcrdma_xprt_event,
1273 	TP_PROTO(
1274 		const struct svc_xprt *xprt
1275 	),
1276 
1277 	TP_ARGS(xprt),
1278 
1279 	TP_STRUCT__entry(
1280 		__field(const void *, xprt)
1281 		__string(addr, xprt->xpt_remotebuf)
1282 	),
1283 
1284 	TP_fast_assign(
1285 		__entry->xprt = xprt;
1286 		__assign_str(addr, xprt->xpt_remotebuf);
1287 	),
1288 
1289 	TP_printk("xprt=%p addr=%s",
1290 		__entry->xprt, __get_str(addr)
1291 	)
1292 );
1293 
/* Instantiate an svcrdma_xprt_event named svcrdma_xprt_<name>. */
1294 #define DEFINE_XPRT_EVENT(name)						\
1295 		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
1296 				TP_PROTO(				\
1297 					const struct svc_xprt *xprt	\
1298 				),					\
1299 				TP_ARGS(xprt))
1300 
1301 DEFINE_XPRT_EVENT(accept);
1302 DEFINE_XPRT_EVENT(fail);
1303 DEFINE_XPRT_EVENT(free);
1304 
/* Export the RPC/RDMA procedure values to user space so trace tools
 * can resolve them, and map them to symbolic names for TP_printk.
 */
1305 TRACE_DEFINE_ENUM(RDMA_MSG);
1306 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1307 TRACE_DEFINE_ENUM(RDMA_MSGP);
1308 TRACE_DEFINE_ENUM(RDMA_DONE);
1309 TRACE_DEFINE_ENUM(RDMA_ERROR);
1310 
1311 #define show_rpcrdma_proc(x)						\
1312 		__print_symbolic(x,					\
1313 				{ RDMA_MSG, "RDMA_MSG" },		\
1314 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1315 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1316 				{ RDMA_DONE, "RDMA_DONE" },		\
1317 				{ RDMA_ERROR, "RDMA_ERROR" })
1318 
/*
 * Decodes the four fixed words of a received RPC/RDMA header -- XID,
 * version, credits, procedure -- in wire order, plus the total header
 * length in bytes.
 */
1319 TRACE_EVENT(svcrdma_decode_rqst,
1320 	TP_PROTO(
1321 		__be32 *p,
1322 		unsigned int hdrlen
1323 	),
1324 
1325 	TP_ARGS(p, hdrlen),
1326 
1327 	TP_STRUCT__entry(
1328 		__field(u32, xid)
1329 		__field(u32, vers)
1330 		__field(u32, proc)
1331 		__field(u32, credits)
1332 		__field(unsigned int, hdrlen)
1333 	),
1334 
1335 	TP_fast_assign(
1336 		__entry->xid = be32_to_cpup(p++);
1337 		__entry->vers = be32_to_cpup(p++);
1338 		__entry->credits = be32_to_cpup(p++);
1339 		__entry->proc = be32_to_cpup(p);
1340 		__entry->hdrlen = hdrlen;
1341 	),
1342 
1343 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1344 		__entry->xid, __entry->vers, __entry->credits,
1345 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1346 );
1347 
/*
 * Records only the header length; presumably fired when an incoming
 * message is too short to hold a complete RPC/RDMA header.
 */
1348 TRACE_EVENT(svcrdma_decode_short,
1349 	TP_PROTO(
1350 		unsigned int hdrlen
1351 	),
1352 
1353 	TP_ARGS(hdrlen),
1354 
1355 	TP_STRUCT__entry(
1356 		__field(unsigned int, hdrlen)
1357 	),
1358 
1359 	TP_fast_assign(
1360 		__entry->hdrlen = hdrlen;
1361 	),
1362 
1363 	TP_printk("hdrlen=%u", __entry->hdrlen)
1364 );
1365 
/*
 * Event class for requests that fail header decoding: captures the
 * four fixed RPC/RDMA header words (XID, version, credits, procedure)
 * in wire order. proc is printed numerically since it may be invalid.
 */
1366 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1367 	TP_PROTO(
1368 		__be32 *p
1369 	),
1370 
1371 	TP_ARGS(p),
1372 
1373 	TP_STRUCT__entry(
1374 		__field(u32, xid)
1375 		__field(u32, vers)
1376 		__field(u32, proc)
1377 		__field(u32, credits)
1378 	),
1379 
1380 	TP_fast_assign(
1381 		__entry->xid = be32_to_cpup(p++);
1382 		__entry->vers = be32_to_cpup(p++);
1383 		__entry->credits = be32_to_cpup(p++);
1384 		__entry->proc = be32_to_cpup(p);
1385 	),
1386 
1387 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
1388 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1389 );
1390 
/* Instantiate an svcrdma_badreq_event named svcrdma_decode_<name>. */
1391 #define DEFINE_BADREQ_EVENT(name)					\
1392 		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
1393 				TP_PROTO(				\
1394 					__be32 *p			\
1395 				),					\
1396 				TP_ARGS(p))
1397 
1398 DEFINE_BADREQ_EVENT(badvers);
1399 DEFINE_BADREQ_EVENT(drop);
1400 DEFINE_BADREQ_EVENT(badproc);
1401 DEFINE_BADREQ_EVENT(parse);
1402 
/*
 * Event class for a single RDMA segment, printed as
 * length@offset:handle.
 */
1403 DECLARE_EVENT_CLASS(svcrdma_segment_event,
1404 	TP_PROTO(
1405 		u32 handle,
1406 		u32 length,
1407 		u64 offset
1408 	),
1409 
1410 	TP_ARGS(handle, length, offset),
1411 
1412 	TP_STRUCT__entry(
1413 		__field(u32, handle)
1414 		__field(u32, length)
1415 		__field(u64, offset)
1416 	),
1417 
1418 	TP_fast_assign(
1419 		__entry->handle = handle;
1420 		__entry->length = length;
1421 		__entry->offset = offset;
1422 	),
1423 
1424 	TP_printk("%u@0x%016llx:0x%08x",
1425 		__entry->length, (unsigned long long)__entry->offset,
1426 		__entry->handle
1427 	)
1428 );
1429 
/* Instantiate an svcrdma_segment_event named svcrdma_encode_<name>. */
1430 #define DEFINE_SEGMENT_EVENT(name)					\
1431 		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
1432 				TP_PROTO(				\
1433 					u32 handle,			\
1434 					u32 length,			\
1435 					u64 offset			\
1436 				),					\
1437 				TP_ARGS(handle, length, offset))
1438 
1439 DEFINE_SEGMENT_EVENT(rseg);
1440 DEFINE_SEGMENT_EVENT(wseg);
1441 
/* Event class recording the byte length of one encoded chunk. */
1442 DECLARE_EVENT_CLASS(svcrdma_chunk_event,
1443 	TP_PROTO(
1444 		u32 length
1445 	),
1446 
1447 	TP_ARGS(length),
1448 
1449 	TP_STRUCT__entry(
1450 		__field(u32, length)
1451 	),
1452 
1453 	TP_fast_assign(
1454 		__entry->length = length;
1455 	),
1456 
1457 	TP_printk("length=%u",
1458 		__entry->length
1459 	)
1460 );
1461 
/* Instantiate an svcrdma_chunk_event named svcrdma_encode_<name>. */
1462 #define DEFINE_CHUNK_EVENT(name)					\
1463 		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
1464 				TP_PROTO(				\
1465 					u32 length			\
1466 				),					\
1467 				TP_ARGS(length))
1468 
1469 DEFINE_CHUNK_EVENT(pzr);
1470 DEFINE_CHUNK_EVENT(write);
1471 DEFINE_CHUNK_EVENT(reply);
1472 
/*
 * Records the length and position of an encoded Read chunk. Unlike
 * the generic chunk events above, Read chunks also carry a position.
 */
1473 TRACE_EVENT(svcrdma_encode_read,
1474 	TP_PROTO(
1475 		u32 length,
1476 		u32 position
1477 	),
1478 
1479 	TP_ARGS(length, position),
1480 
1481 	TP_STRUCT__entry(
1482 		__field(u32, length)
1483 		__field(u32, position)
1484 	),
1485 
1486 	TP_fast_assign(
1487 		__entry->length = length;
1488 		__entry->position = position;
1489 	),
1490 
1491 	TP_printk("length=%u position=%u",
1492 		__entry->length, __entry->position
1493 	)
1494 );
1495 
/*
 * Event class recording the XID (converted to host byte order) of a
 * request for which an error reply is being generated.
 */
1496 DECLARE_EVENT_CLASS(svcrdma_error_event,
1497 	TP_PROTO(
1498 		__be32 xid
1499 	),
1500 
1501 	TP_ARGS(xid),
1502 
1503 	TP_STRUCT__entry(
1504 		__field(u32, xid)
1505 	),
1506 
1507 	TP_fast_assign(
1508 		__entry->xid = be32_to_cpu(xid);
1509 	),
1510 
1511 	TP_printk("xid=0x%08x",
1512 		__entry->xid
1513 	)
1514 );
1515 
/* Instantiate an svcrdma_error_event named svcrdma_err_<name>. */
1516 #define DEFINE_ERROR_EVENT(name)					\
1517 		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1518 				TP_PROTO(				\
1519 					__be32 xid			\
1520 				),					\
1521 				TP_ARGS(xid))
1522 
1523 DEFINE_ERROR_EVENT(vers);
1524 DEFINE_ERROR_EVENT(chunk);
1525 
1526 /**
1527  ** Server-side RDMA API events
1528  **/
1529 
/*
 * Associates a page pointer with the RDMA device and remote peer it is
 * being DMA-mapped for. NOTE(review): likely fired on a mapping
 * failure -- confirm at the call site.
 */
1530 TRACE_EVENT(svcrdma_dma_map_page,
1531 	TP_PROTO(
1532 		const struct svcxprt_rdma *rdma,
1533 		const void *page
1534 	),
1535 
1536 	TP_ARGS(rdma, page),
1537 
1538 	TP_STRUCT__entry(
1539 		__field(const void *, page);
1540 		__string(device, rdma->sc_cm_id->device->name)
1541 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1542 	),
1543 
1544 	TP_fast_assign(
1545 		__entry->page = page;
1546 		__assign_str(device, rdma->sc_cm_id->device->name);
1547 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1548 	),
1549 
1550 	TP_printk("addr=%s device=%s page=%p",
1551 		__get_str(addr), __get_str(device), __entry->page
1552 	)
1553 );
1554 
/*
 * Records a status code for a DMA-mapped R/W context on the given
 * device and peer.
 */
1555 TRACE_EVENT(svcrdma_dma_map_rwctx,
1556 	TP_PROTO(
1557 		const struct svcxprt_rdma *rdma,
1558 		int status
1559 	),
1560 
1561 	TP_ARGS(rdma, status),
1562 
1563 	TP_STRUCT__entry(
1564 		__field(int, status)
1565 		__string(device, rdma->sc_cm_id->device->name)
1566 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1567 	),
1568 
1569 	TP_fast_assign(
1570 		__entry->status = status;
1571 		__assign_str(device, rdma->sc_cm_id->device->name);
1572 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1573 	),
1574 
1575 	TP_printk("addr=%s device=%s status=%d",
1576 		__get_str(addr), __get_str(device), __entry->status
1577 	)
1578 );
1579 
/*
 * Reports a failed send: records the error status, the request's XID
 * in host byte order, and the transport's identity and peer address.
 */
1580 TRACE_EVENT(svcrdma_send_failed,
1581 	TP_PROTO(
1582 		const struct svc_rqst *rqst,
1583 		int status
1584 	),
1585 
1586 	TP_ARGS(rqst, status),
1587 
1588 	TP_STRUCT__entry(
1589 		__field(int, status)
1590 		__field(u32, xid)
1591 		__field(const void *, xprt)
1592 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1593 	),
1594 
1595 	TP_fast_assign(
1596 		__entry->status = status;
		/* Use be32_to_cpu() like every other event in this file,
		 * rather than the internal __be32_to_cpu() helper.
		 */
1597 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1598 		__entry->xprt = rqst->rq_xprt;
1599 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1600 	),
1601 
1602 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1603 		__entry->xprt, __get_str(addr),
1604 		__entry->xid, __entry->status
1605 	)
1606 );
1607 
/*
 * Event class for Send-side completions: records the cqe, the
 * completion status, and the vendor error (captured only when the
 * completion did not succeed).
 */
1608 DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
1609 	TP_PROTO(
1610 		const struct ib_wc *wc
1611 	),
1612 
1613 	TP_ARGS(wc),
1614 
1615 	TP_STRUCT__entry(
1616 		__field(const void *, cqe)
1617 		__field(unsigned int, status)
1618 		__field(unsigned int, vendor_err)
1619 	),
1620 
1621 	TP_fast_assign(
1622 		__entry->cqe = wc->wr_cqe;
1623 		__entry->status = wc->status;
1624 		if (wc->status)
1625 			__entry->vendor_err = wc->vendor_err;
1626 		else
1627 			__entry->vendor_err = 0;
1628 	),
1629 
1630 	TP_printk("cqe=%p status=%s (%u/0x%x)",
1631 		__entry->cqe, rdma_show_wc_status(__entry->status),
1632 		__entry->status, __entry->vendor_err
1633 	)
1634 );
1635 
/* Instantiate an svcrdma_sendcomp_event named svcrdma_wc_<name>. */
1636 #define DEFINE_SENDCOMP_EVENT(name)					\
1637 		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
1638 				TP_PROTO(				\
1639 					const struct ib_wc *wc		\
1640 				),					\
1641 				TP_ARGS(wc))
1642 
/*
 * Records a posted Send WR: its cqe, SGE count, the rkey to invalidate
 * (zero unless the opcode is IB_WR_SEND_WITH_INV), and the posting
 * status returned by the provider.
 */
1643 TRACE_EVENT(svcrdma_post_send,
1644 	TP_PROTO(
1645 		const struct ib_send_wr *wr,
1646 		int status
1647 	),
1648 
1649 	TP_ARGS(wr, status),
1650 
1651 	TP_STRUCT__entry(
1652 		__field(const void *, cqe)
1653 		__field(unsigned int, num_sge)
1654 		__field(u32, inv_rkey)
1655 		__field(int, status)
1656 	),
1657 
1658 	TP_fast_assign(
1659 		__entry->cqe = wr->wr_cqe;
1660 		__entry->num_sge = wr->num_sge;
1661 		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1662 					wr->ex.invalidate_rkey : 0;
1663 		__entry->status = status;
1664 	),
1665 
1666 	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
1667 		__entry->cqe, __entry->num_sge,
1668 		__entry->inv_rkey, __entry->status
1669 	)
1670 );
1671 
/* Completion event for Send WRs: svcrdma_wc_send */
1672 DEFINE_SENDCOMP_EVENT(send);
1673 
/* Records a posted Receive WR's cqe and the posting status. */
1674 TRACE_EVENT(svcrdma_post_recv,
1675 	TP_PROTO(
1676 		const struct ib_recv_wr *wr,
1677 		int status
1678 	),
1679 
1680 	TP_ARGS(wr, status),
1681 
1682 	TP_STRUCT__entry(
1683 		__field(const void *, cqe)
1684 		__field(int, status)
1685 	),
1686 
1687 	TP_fast_assign(
1688 		__entry->cqe = wr->wr_cqe;
1689 		__entry->status = status;
1690 	),
1691 
1692 	TP_printk("cqe=%p status=%d",
1693 		__entry->cqe, __entry->status
1694 	)
1695 );
1696 
/*
 * Receive completion: records the cqe and status; byte_len is captured
 * only on success, and vendor_err only on failure (the other field is
 * zeroed in each case).
 */
1697 TRACE_EVENT(svcrdma_wc_receive,
1698 	TP_PROTO(
1699 		const struct ib_wc *wc
1700 	),
1701 
1702 	TP_ARGS(wc),
1703 
1704 	TP_STRUCT__entry(
1705 		__field(const void *, cqe)
1706 		__field(u32, byte_len)
1707 		__field(unsigned int, status)
1708 		__field(u32, vendor_err)
1709 	),
1710 
1711 	TP_fast_assign(
1712 		__entry->cqe = wc->wr_cqe;
1713 		__entry->status = wc->status;
1714 		if (wc->status) {
1715 			__entry->byte_len = 0;
1716 			__entry->vendor_err = wc->vendor_err;
1717 		} else {
1718 			__entry->byte_len = wc->byte_len;
1719 			__entry->vendor_err = 0;
1720 		}
1721 	),
1722 
1723 	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
1724 		__entry->cqe, __entry->byte_len,
1725 		rdma_show_wc_status(__entry->status),
1726 		__entry->status, __entry->vendor_err
1727 	)
1728 );
1729 
/*
 * Records a posted RDMA Read/Write chain: its cqe, the number of send
 * queue entries consumed, and the posting status.
 */
1730 TRACE_EVENT(svcrdma_post_rw,
1731 	TP_PROTO(
1732 		const void *cqe,
1733 		int sqecount,
1734 		int status
1735 	),
1736 
1737 	TP_ARGS(cqe, sqecount, status),
1738 
1739 	TP_STRUCT__entry(
1740 		__field(const void *, cqe)
1741 		__field(int, sqecount)
1742 		__field(int, status)
1743 	),
1744 
1745 	TP_fast_assign(
1746 		__entry->cqe = cqe;
1747 		__entry->sqecount = sqecount;
1748 		__entry->status = status;
1749 	),
1750 
1751 	TP_printk("cqe=%p sqecount=%d status=%d",
1752 		__entry->cqe, __entry->sqecount, __entry->status
1753 	)
1754 );
1755 
/* Completion events for RDMA Read and Write WRs */
1756 DEFINE_SENDCOMP_EVENT(read);
1757 DEFINE_SENDCOMP_EVENT(write);
1758 
/*
 * Reports an RDMA CM event: the event code, its status, and the
 * presentation form of the peer's socket address.
 */
1759 TRACE_EVENT(svcrdma_cm_event,
1760 	TP_PROTO(
1761 		const struct rdma_cm_event *event,
1762 		const struct sockaddr *sap
1763 	),
1764 
1765 	TP_ARGS(event, sap),
1766 
1767 	TP_STRUCT__entry(
1768 		__field(unsigned int, event)
1769 		__field(int, status)
1770 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1771 	),
1772 
1773 	TP_fast_assign(
1774 		__entry->event = event->event;
1775 		__entry->status = event->status;
		/* snprintf() NUL-terminates within the size it is given,
		 * so pass the full buffer size; "- 1" wasted a byte.
		 */
1776 		snprintf(__entry->addr, sizeof(__entry->addr),
1777 			 "%pISpc", sap);
1778 	),
1779 
1780 	TP_printk("addr=%s event=%s (%u/%d)",
1781 		__entry->addr,
1782 		rdma_show_cm_event(__entry->event),
1783 		__entry->event, __entry->status
1784 	)
1785 );
1786 
/*
 * Reports an IB asynchronous event raised against a server transport:
 * the event code, the device name, and the peer's socket address.
 */
1787 TRACE_EVENT(svcrdma_qp_error,
1788 	TP_PROTO(
1789 		const struct ib_event *event,
1790 		const struct sockaddr *sap
1791 	),
1792 
1793 	TP_ARGS(event, sap),
1794 
1795 	TP_STRUCT__entry(
1796 		__field(unsigned int, event)
1797 		__string(device, event->device->name)
1798 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1799 	),
1800 
1801 	TP_fast_assign(
1802 		__entry->event = event->event;
1803 		__assign_str(device, event->device->name);
		/* snprintf() NUL-terminates within the size it is given,
		 * so pass the full buffer size; "- 1" wasted a byte.
		 */
1804 		snprintf(__entry->addr, sizeof(__entry->addr),
1805 			 "%pISpc", sap);
1806 	),
1807 
1808 	TP_printk("addr=%s dev=%s event=%s (%u)",
1809 		__entry->addr, __get_str(device),
1810 		rdma_show_ib_event(__entry->event), __entry->event
1811 	)
1812 );
1813 
/*
 * Event class sampling the transport's send queue occupancy:
 * sc_sq_avail (atomic snapshot) against sc_sq_depth.
 */
1814 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1815 	TP_PROTO(
1816 		const struct svcxprt_rdma *rdma
1817 	),
1818 
1819 	TP_ARGS(rdma),
1820 
1821 	TP_STRUCT__entry(
1822 		__field(int, avail)
1823 		__field(int, depth)
1824 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1825 	),
1826 
1827 	TP_fast_assign(
1828 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1829 		__entry->depth = rdma->sc_sq_depth;
1830 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1831 	),
1832 
1833 	TP_printk("addr=%s sc_sq_avail=%d/%d",
1834 		__get_str(addr), __entry->avail, __entry->depth
1835 	)
1836 );
1837 
/* Instantiate an svcrdma_sendqueue_event named svcrdma_sq_<name>. */
1838 #define DEFINE_SQ_EVENT(name)						\
1839 		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1840 				TP_PROTO(				\
1841 					const struct svcxprt_rdma *rdma \
1842 				),					\
1843 				TP_ARGS(rdma))
1844 
1845 DEFINE_SQ_EVENT(full);
1846 DEFINE_SQ_EVENT(retry);
1847 
1848 #endif /* _TRACE_RPCRDMA_H */
1849 
1850 #include <trace/define_trace.h>
1851