xref: /openbmc/linux/include/trace/events/rpcrdma.h (revision 08283d30)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2017, 2018 Oracle.  All rights reserved.
4  *
5  * Trace point definitions for the "rpcrdma" subsystem.
6  */
7 #undef TRACE_SYSTEM
8 #define TRACE_SYSTEM rpcrdma
9 
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
12 
13 #include <linux/scatterlist.h>
14 #include <linux/tracepoint.h>
15 #include <trace/events/rdma.h>
16 
17 /**
18  ** Event classes
19  **/
20 
/* Event class for reply-header sanity events: captures the rep, its
 * owning transport, and the xid/version/procedure words decoded from
 * a received RPC/RDMA reply header.
 */
21 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
22 	TP_PROTO(
23 		const struct rpcrdma_rep *rep
24 	),
25 
26 	TP_ARGS(rep),
27 
28 	TP_STRUCT__entry(
29 		__field(const void *, rep)
30 		__field(const void *, r_xprt)
31 		__field(u32, xid)
32 		__field(u32, version)
33 		__field(u32, proc)
34 	),
35 
36 	TP_fast_assign(
37 		__entry->rep = rep;
38 		__entry->r_xprt = rep->rr_rxprt;
		/* Header fields are wire (big-endian) values; store
		 * CPU-endian copies so TP_printk can format them. */
39 		__entry->xid = be32_to_cpu(rep->rr_xid);
40 		__entry->version = be32_to_cpu(rep->rr_vers);
41 		__entry->proc = be32_to_cpu(rep->rr_proc);
42 	),
43 
44 	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
45 		__entry->r_xprt, __entry->xid, __entry->rep,
46 		__entry->version, __entry->proc
47 	)
48 );
49 
/* Instantiate one trace event in the xprtrdma_reply_event class. */
50 #define DEFINE_REPLY_EVENT(name)					\
51 		DEFINE_EVENT(xprtrdma_reply_event, name,		\
52 				TP_PROTO(				\
53 					const struct rpcrdma_rep *rep	\
54 				),					\
55 				TP_ARGS(rep))
56 
/* Event class for transport-scope events: records the transport pointer
 * plus the peer's presentation address and port strings.
 */
57 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
58 	TP_PROTO(
59 		const struct rpcrdma_xprt *r_xprt
60 	),
61 
62 	TP_ARGS(r_xprt),
63 
64 	TP_STRUCT__entry(
65 		__field(const void *, r_xprt)
		/* Copy the address strings into the ring buffer: the
		 * transport may be freed before the event is read. */
66 		__string(addr, rpcrdma_addrstr(r_xprt))
67 		__string(port, rpcrdma_portstr(r_xprt))
68 	),
69 
70 	TP_fast_assign(
71 		__entry->r_xprt = r_xprt;
72 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
73 		__assign_str(port, rpcrdma_portstr(r_xprt));
74 	),
75 
76 	TP_printk("peer=[%s]:%s r_xprt=%p",
77 		__get_str(addr), __get_str(port), __entry->r_xprt
78 	)
79 );
80 
/* Instantiate one trace event in the xprtrdma_rxprt class. */
81 #define DEFINE_RXPRT_EVENT(name)					\
82 		DEFINE_EVENT(xprtrdma_rxprt, name,			\
83 				TP_PROTO(				\
84 					const struct rpcrdma_xprt *r_xprt \
85 				),					\
86 				TP_ARGS(r_xprt))
87 
/* Event class for Read-chunk registration: one event per MR contributed
 * to a Read chunk, recording the chunk position and the MR's
 * handle/length/offset triplet.
 */
88 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
89 	TP_PROTO(
90 		const struct rpc_task *task,
91 		unsigned int pos,
92 		struct rpcrdma_mr *mr,
93 		int nsegs
94 	),
95 
96 	TP_ARGS(task, pos, mr, nsegs),
97 
98 	TP_STRUCT__entry(
99 		__field(unsigned int, task_id)
100 		__field(unsigned int, client_id)
101 		__field(unsigned int, pos)
102 		__field(int, nents)
103 		__field(u32, handle)
104 		__field(u32, length)
105 		__field(u64, offset)
106 		__field(int, nsegs)
107 	),
108 
109 	TP_fast_assign(
110 		__entry->task_id = task->tk_pid;
111 		__entry->client_id = task->tk_client->cl_clid;
112 		__entry->pos = pos;
113 		__entry->nents = mr->mr_nents;
114 		__entry->handle = mr->mr_handle;
115 		__entry->length = mr->mr_length;
116 		__entry->offset = mr->mr_offset;
117 		__entry->nsegs = nsegs;
118 	),
119 
120 	TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
121 		__entry->task_id, __entry->client_id,
122 		__entry->pos, __entry->length,
123 		(unsigned long long)__entry->offset, __entry->handle,
		/* "more" when this MR did not consume all remaining
		 * segments, "last" when it finishes the chunk. */
124 		__entry->nents < __entry->nsegs ? "more" : "last"
125 	)
126 );
127 
/* Instantiate a Read-chunk event named xprtrdma_chunk_<name>. */
128 #define DEFINE_RDCH_EVENT(name)						\
129 		DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
130 				TP_PROTO(				\
131 					const struct rpc_task *task,	\
132 					unsigned int pos,		\
133 					struct rpcrdma_mr *mr,		\
134 					int nsegs			\
135 				),					\
136 				TP_ARGS(task, pos, mr, nsegs))
137 
/* Event class for Write-direction chunk registration (Write list and
 * Reply chunk): like xprtrdma_rdch_event but without a chunk position.
 */
138 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
139 	TP_PROTO(
140 		const struct rpc_task *task,
141 		struct rpcrdma_mr *mr,
142 		int nsegs
143 	),
144 
145 	TP_ARGS(task, mr, nsegs),
146 
147 	TP_STRUCT__entry(
148 		__field(unsigned int, task_id)
149 		__field(unsigned int, client_id)
150 		__field(int, nents)
151 		__field(u32, handle)
152 		__field(u32, length)
153 		__field(u64, offset)
154 		__field(int, nsegs)
155 	),
156 
157 	TP_fast_assign(
158 		__entry->task_id = task->tk_pid;
159 		__entry->client_id = task->tk_client->cl_clid;
160 		__entry->nents = mr->mr_nents;
161 		__entry->handle = mr->mr_handle;
162 		__entry->length = mr->mr_length;
163 		__entry->offset = mr->mr_offset;
164 		__entry->nsegs = nsegs;
165 	),
166 
167 	TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
168 		__entry->task_id, __entry->client_id,
169 		__entry->length, (unsigned long long)__entry->offset,
170 		__entry->handle,
		/* "more" while segments remain, "last" at chunk end. */
171 		__entry->nents < __entry->nsegs ? "more" : "last"
172 	)
173 );
174 
/* Instantiate a Write/Reply-chunk event named xprtrdma_chunk_<name>. */
175 #define DEFINE_WRCH_EVENT(name)						\
176 		DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
177 				TP_PROTO(				\
178 					const struct rpc_task *task,	\
179 					struct rpcrdma_mr *mr,		\
180 					int nsegs			\
181 				),					\
182 				TP_ARGS(task, mr, nsegs))
183 
/* Event class for FRWR completion handlers: records which MR the
 * completed WR belongs to and its completion status.
 */
184 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
185 	TP_PROTO(
186 		const struct ib_wc *wc,
187 		const struct rpcrdma_frwr *frwr
188 	),
189 
190 	TP_ARGS(wc, frwr),
191 
192 	TP_STRUCT__entry(
193 		__field(const void *, mr)
194 		__field(unsigned int, status)
195 		__field(unsigned int, vendor_err)
196 	),
197 
198 	TP_fast_assign(
		/* The frwr is embedded in its rpcrdma_mr; recover the
		 * containing MR for identification. */
199 		__entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
200 		__entry->status = wc->status;
		/* vendor_err is meaningful only on failure. */
201 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
202 	),
203 
204 	TP_printk(
205 		"mr=%p: %s (%u/0x%x)",
206 		__entry->mr, rdma_show_wc_status(__entry->status),
207 		__entry->status, __entry->vendor_err
208 	)
209 );
210 
/* Instantiate one trace event in the xprtrdma_frwr_done class. */
211 #define DEFINE_FRWR_DONE_EVENT(name)					\
212 		DEFINE_EVENT(xprtrdma_frwr_done, name,			\
213 				TP_PROTO(				\
214 					const struct ib_wc *wc,		\
215 					const struct rpcrdma_frwr *frwr	\
216 				),					\
217 				TP_ARGS(wc, frwr))
218 
/* Export DMA direction values so user-space trace tools can resolve
 * the symbolic names used by xprtrdma_show_direction(). */
219 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
220 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
221 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
222 TRACE_DEFINE_ENUM(DMA_NONE);
223 
224 #define xprtrdma_show_direction(x)					\
225 		__print_symbolic(x,					\
226 				{ DMA_BIDIRECTIONAL, "BIDIR" },		\
227 				{ DMA_TO_DEVICE, "TO_DEVICE" },		\
228 				{ DMA_FROM_DEVICE, "FROM_DEVICE" },	\
229 				{ DMA_NONE, "NONE" })
230 
/* Event class for MR lifecycle events: records the MR pointer, its
 * handle/length/offset, and its DMA mapping direction.
 */
231 DECLARE_EVENT_CLASS(xprtrdma_mr,
232 	TP_PROTO(
233 		const struct rpcrdma_mr *mr
234 	),
235 
236 	TP_ARGS(mr),
237 
238 	TP_STRUCT__entry(
239 		__field(const void *, mr)
240 		__field(u32, handle)
241 		__field(u32, length)
242 		__field(u64, offset)
243 		__field(u32, dir)
244 	),
245 
246 	TP_fast_assign(
247 		__entry->mr = mr;
248 		__entry->handle = mr->mr_handle;
249 		__entry->length = mr->mr_length;
250 		__entry->offset = mr->mr_offset;
251 		__entry->dir    = mr->mr_dir;
252 	),
253 
254 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s)",
255 		__entry->mr, __entry->length,
256 		(unsigned long long)__entry->offset, __entry->handle,
257 		xprtrdma_show_direction(__entry->dir)
258 	)
259 );
260 
/* Instantiate an MR event named xprtrdma_mr_<name>. */
261 #define DEFINE_MR_EVENT(name) \
262 		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
263 				TP_PROTO( \
264 					const struct rpcrdma_mr *mr \
265 				), \
266 				TP_ARGS(mr))
267 
/* Event class for backchannel (callback) RPC events: records the rqst,
 * its rpcrdma_req/rep, and the XID.
 */
268 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
269 	TP_PROTO(
270 		const struct rpc_rqst *rqst
271 	),
272 
273 	TP_ARGS(rqst),
274 
275 	TP_STRUCT__entry(
276 		__field(const void *, rqst)
277 		__field(const void *, rep)
278 		__field(const void *, req)
279 		__field(u32, xid)
280 	),
281 
282 	TP_fast_assign(
283 		__entry->rqst = rqst;
284 		__entry->req = rpcr_to_rdmar(rqst);
285 		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
286 		__entry->xid = be32_to_cpu(rqst->rq_xid);
287 	),
288 
289 	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
290 		__entry->xid, __entry->rqst, __entry->req, __entry->rep
291 	)
292 );
293 
/* Instantiate one trace event in the xprtrdma_cb_event class. */
294 #define DEFINE_CB_EVENT(name)						\
295 		DEFINE_EVENT(xprtrdma_cb_event, name,			\
296 				TP_PROTO(				\
297 					const struct rpc_rqst *rqst	\
298 				),					\
299 				TP_ARGS(rqst))
300 
301 /**
302  ** Connection events
303  **/
304 
/* Fires on an RDMA connection manager upcall: records the CM event
 * code and status along with the peer address.
 */
305 TRACE_EVENT(xprtrdma_cm_event,
306 	TP_PROTO(
307 		const struct rpcrdma_xprt *r_xprt,
308 		struct rdma_cm_event *event
309 	),
310 
311 	TP_ARGS(r_xprt, event),
312 
313 	TP_STRUCT__entry(
314 		__field(const void *, r_xprt)
315 		__field(unsigned int, event)
316 		__field(int, status)
317 		__string(addr, rpcrdma_addrstr(r_xprt))
318 		__string(port, rpcrdma_portstr(r_xprt))
319 	),
320 
321 	TP_fast_assign(
322 		__entry->r_xprt = r_xprt;
323 		__entry->event = event->event;
324 		__entry->status = event->status;
325 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
326 		__assign_str(port, rpcrdma_portstr(r_xprt));
327 	),
328 
329 	TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
330 		__get_str(addr), __get_str(port),
331 		__entry->r_xprt, rdma_show_cm_event(__entry->event),
332 		__entry->event, __entry->status
333 	)
334 );
335 
/* Fires when the transport disconnects: records the disconnect status
 * and the endpoint's connected state at that moment.
 */
336 TRACE_EVENT(xprtrdma_disconnect,
337 	TP_PROTO(
338 		const struct rpcrdma_xprt *r_xprt,
339 		int status
340 	),
341 
342 	TP_ARGS(r_xprt, status),
343 
344 	TP_STRUCT__entry(
345 		__field(const void *, r_xprt)
346 		__field(int, status)
347 		__field(int, connected)
348 		__string(addr, rpcrdma_addrstr(r_xprt))
349 		__string(port, rpcrdma_portstr(r_xprt))
350 	),
351 
352 	TP_fast_assign(
353 		__entry->r_xprt = r_xprt;
354 		__entry->status = status;
355 		__entry->connected = r_xprt->rx_ep.rep_connected;
356 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
357 		__assign_str(port, rpcrdma_portstr(r_xprt));
358 	),
359 
360 	TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
361 		__get_str(addr), __get_str(port),
362 		__entry->r_xprt, __entry->status,
		/* rep_connected == 1 means still connected; anything
		 * else prints as "disconnected". */
363 		__entry->connected == 1 ? "still " : "dis"
364 	)
365 );
366 
/* Transport-scope events instantiated from the xprtrdma_rxprt class:
 * connection setup/teardown and xprt operation entry points. */
367 DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
368 DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
369 DEFINE_RXPRT_EVENT(xprtrdma_create);
370 DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
371 DEFINE_RXPRT_EVENT(xprtrdma_remove);
372 DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
373 DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
374 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
375 DEFINE_RXPRT_EVENT(xprtrdma_op_close);
376 DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
377 
/* Fires when connect/reconnect timeouts are set on a transport; the
 * raw values are in jiffies and are printed in seconds.
 */
378 TRACE_EVENT(xprtrdma_op_set_cto,
379 	TP_PROTO(
380 		const struct rpcrdma_xprt *r_xprt,
381 		unsigned long connect,
382 		unsigned long reconnect
383 	),
384 
385 	TP_ARGS(r_xprt, connect, reconnect),
386 
387 	TP_STRUCT__entry(
388 		__field(const void *, r_xprt)
389 		__field(unsigned long, connect)
390 		__field(unsigned long, reconnect)
391 		__string(addr, rpcrdma_addrstr(r_xprt))
392 		__string(port, rpcrdma_portstr(r_xprt))
393 	),
394 
395 	TP_fast_assign(
396 		__entry->r_xprt = r_xprt;
397 		__entry->connect = connect;
398 		__entry->reconnect = reconnect;
399 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
400 		__assign_str(port, rpcrdma_portstr(r_xprt));
401 	),
402 
403 	TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
404 		__get_str(addr), __get_str(port), __entry->r_xprt,
		/* Convert jiffies to seconds for display. */
405 		__entry->connect / HZ, __entry->reconnect / HZ
406 	)
407 );
408 
/* Fires on an IB queue-pair asynchronous event: records the event code
 * and the device name it arrived on.
 */
409 TRACE_EVENT(xprtrdma_qp_event,
410 	TP_PROTO(
411 		const struct rpcrdma_xprt *r_xprt,
412 		const struct ib_event *event
413 	),
414 
415 	TP_ARGS(r_xprt, event),
416 
417 	TP_STRUCT__entry(
418 		__field(const void *, r_xprt)
419 		__field(unsigned int, event)
420 		__string(name, event->device->name)
421 		__string(addr, rpcrdma_addrstr(r_xprt))
422 		__string(port, rpcrdma_portstr(r_xprt))
423 	),
424 
425 	TP_fast_assign(
426 		__entry->r_xprt = r_xprt;
427 		__entry->event = event->event;
428 		__assign_str(name, event->device->name);
429 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
430 		__assign_str(port, rpcrdma_portstr(r_xprt));
431 	),
432 
433 	TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
434 		__get_str(addr), __get_str(port), __entry->r_xprt,
435 		__get_str(name), rdma_show_ib_event(__entry->event),
436 		__entry->event
437 	)
438 );
439 
440 /**
441  ** Call events
442  **/
443 
/* Fires when a batch of MRs is created for a transport: records how
 * many were created.
 */
444 TRACE_EVENT(xprtrdma_createmrs,
445 	TP_PROTO(
446 		const struct rpcrdma_xprt *r_xprt,
447 		unsigned int count
448 	),
449 
450 	TP_ARGS(r_xprt, count),
451 
452 	TP_STRUCT__entry(
453 		__field(const void *, r_xprt)
454 		__string(addr, rpcrdma_addrstr(r_xprt))
455 		__string(port, rpcrdma_portstr(r_xprt))
456 		__field(unsigned int, count)
457 	),
458 
459 	TP_fast_assign(
460 		__entry->r_xprt = r_xprt;
461 		__entry->count = count;
462 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
463 		__assign_str(port, rpcrdma_portstr(r_xprt));
464 	),
465 
466 	TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
467 		__get_str(addr), __get_str(port), __entry->r_xprt,
468 		__entry->count
469 	)
470 );
471 
/* Fires when an MR is taken from the free list for a request. */
472 TRACE_EVENT(xprtrdma_mr_get,
473 	TP_PROTO(
474 		const struct rpcrdma_req *req
475 	),
476 
477 	TP_ARGS(req),
478 
479 	TP_STRUCT__entry(
480 		__field(const void *, req)
481 		__field(unsigned int, task_id)
482 		__field(unsigned int, client_id)
483 		__field(u32, xid)
484 	),
485 
486 	TP_fast_assign(
		/* The rpc_rqst is embedded in the rpcrdma_req. */
487 		const struct rpc_rqst *rqst = &req->rl_slot;
488 
489 		__entry->req = req;
490 		__entry->task_id = rqst->rq_task->tk_pid;
491 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
492 		__entry->xid = be32_to_cpu(rqst->rq_xid);
493 	),
494 
495 	TP_printk("task:%u@%u xid=0x%08x req=%p",
496 		__entry->task_id, __entry->client_id, __entry->xid,
497 		__entry->req
498 	)
499 );
500 
/* Fires when no MR is available for a request (free list exhausted). */
501 TRACE_EVENT(xprtrdma_nomrs,
502 	TP_PROTO(
503 		const struct rpcrdma_req *req
504 	),
505 
506 	TP_ARGS(req),
507 
508 	TP_STRUCT__entry(
509 		__field(const void *, req)
510 		__field(unsigned int, task_id)
511 		__field(unsigned int, client_id)
512 		__field(u32, xid)
513 	),
514 
515 	TP_fast_assign(
516 		const struct rpc_rqst *rqst = &req->rl_slot;
517 
518 		__entry->req = req;
519 		__entry->task_id = rqst->rq_task->tk_pid;
520 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
521 		__entry->xid = be32_to_cpu(rqst->rq_xid);
522 	),
523 
524 	TP_printk("task:%u@%u xid=0x%08x req=%p",
525 		__entry->task_id, __entry->client_id, __entry->xid,
526 		__entry->req
527 	)
528 );
529 
/* Chunk-registration events: xprtrdma_chunk_read, xprtrdma_chunk_write,
 * and xprtrdma_chunk_reply. */
530 DEFINE_RDCH_EVENT(read);
531 DEFINE_WRCH_EVENT(write);
532 DEFINE_WRCH_EVENT(reply);
533 
/* Export the chunk-type enum values so user-space trace tools can
 * resolve the symbolic names used by xprtrdma_show_chunktype(). */
534 TRACE_DEFINE_ENUM(rpcrdma_noch);
535 TRACE_DEFINE_ENUM(rpcrdma_readch);
536 TRACE_DEFINE_ENUM(rpcrdma_areadch);
537 TRACE_DEFINE_ENUM(rpcrdma_writech);
538 TRACE_DEFINE_ENUM(rpcrdma_replych);
539 
540 #define xprtrdma_show_chunktype(x)					\
541 		__print_symbolic(x,					\
542 				{ rpcrdma_noch, "inline" },		\
543 				{ rpcrdma_readch, "read list" },	\
544 				{ rpcrdma_areadch, "*read list" },	\
545 				{ rpcrdma_writech, "write list" },	\
546 				{ rpcrdma_replych, "reply chunk" })
547 
/* Fires after a request header is marshaled: records the transport
 * header length, the send buffer's head/page/tail lengths, and the
 * chosen Read- and Write-direction chunk types.
 */
548 TRACE_EVENT(xprtrdma_marshal,
549 	TP_PROTO(
550 		const struct rpcrdma_req *req,
551 		unsigned int rtype,
552 		unsigned int wtype
553 	),
554 
555 	TP_ARGS(req, rtype, wtype),
556 
557 	TP_STRUCT__entry(
558 		__field(unsigned int, task_id)
559 		__field(unsigned int, client_id)
560 		__field(u32, xid)
561 		__field(unsigned int, hdrlen)
562 		__field(unsigned int, headlen)
563 		__field(unsigned int, pagelen)
564 		__field(unsigned int, taillen)
565 		__field(unsigned int, rtype)
566 		__field(unsigned int, wtype)
567 	),
568 
569 	TP_fast_assign(
570 		const struct rpc_rqst *rqst = &req->rl_slot;
571 
572 		__entry->task_id = rqst->rq_task->tk_pid;
573 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
574 		__entry->xid = be32_to_cpu(rqst->rq_xid);
575 		__entry->hdrlen = req->rl_hdrbuf.len;
576 		__entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
577 		__entry->pagelen = rqst->rq_snd_buf.page_len;
578 		__entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
579 		__entry->rtype = rtype;
580 		__entry->wtype = wtype;
581 	),
582 
583 	TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
584 		__entry->task_id, __entry->client_id, __entry->xid,
585 		__entry->hdrlen,
586 		__entry->headlen, __entry->pagelen, __entry->taillen,
587 		xprtrdma_show_chunktype(__entry->rtype),
588 		xprtrdma_show_chunktype(__entry->wtype)
589 	)
590 );
591 
/* Fires when marshaling a request header fails: records the errno-style
 * return code.
 */
592 TRACE_EVENT(xprtrdma_marshal_failed,
593 	TP_PROTO(const struct rpc_rqst *rqst,
594 		 int ret
595 	),
596 
597 	TP_ARGS(rqst, ret),
598 
599 	TP_STRUCT__entry(
600 		__field(unsigned int, task_id)
601 		__field(unsigned int, client_id)
602 		__field(u32, xid)
603 		__field(int, ret)
604 	),
605 
606 	TP_fast_assign(
607 		__entry->task_id = rqst->rq_task->tk_pid;
608 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
609 		__entry->xid = be32_to_cpu(rqst->rq_xid);
610 		__entry->ret = ret;
611 	),
612 
613 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
614 		__entry->task_id, __entry->client_id, __entry->xid,
615 		__entry->ret
616 	)
617 );
618 
/* Fires when preparing the Send WR for a request fails: records the
 * errno-style return code.
 */
619 TRACE_EVENT(xprtrdma_prepsend_failed,
620 	TP_PROTO(const struct rpc_rqst *rqst,
621 		 int ret
622 	),
623 
624 	TP_ARGS(rqst, ret),
625 
626 	TP_STRUCT__entry(
627 		__field(unsigned int, task_id)
628 		__field(unsigned int, client_id)
629 		__field(u32, xid)
630 		__field(int, ret)
631 	),
632 
633 	TP_fast_assign(
634 		__entry->task_id = rqst->rq_task->tk_pid;
635 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
636 		__entry->xid = be32_to_cpu(rqst->rq_xid);
637 		__entry->ret = ret;
638 	),
639 
640 	TP_printk("task:%u@%u xid=0x%08x: ret=%d",
641 		__entry->task_id, __entry->client_id, __entry->xid,
642 		__entry->ret
643 	)
644 );
645 
/* Fires when a Send WR is posted: records the SGE count, whether the
 * WR was signaled, and the post status.
 */
646 TRACE_EVENT(xprtrdma_post_send,
647 	TP_PROTO(
648 		const struct rpcrdma_req *req,
649 		int status
650 	),
651 
652 	TP_ARGS(req, status),
653 
654 	TP_STRUCT__entry(
655 		__field(const void *, req)
656 		__field(unsigned int, task_id)
657 		__field(unsigned int, client_id)
658 		__field(int, num_sge)
659 		__field(int, signaled)
660 		__field(int, status)
661 	),
662 
663 	TP_fast_assign(
664 		const struct rpc_rqst *rqst = &req->rl_slot;
665 
666 		__entry->task_id = rqst->rq_task->tk_pid;
		/* Backchannel sends may have no rpc_clnt; use -1 then. */
667 		__entry->client_id = rqst->rq_task->tk_client ?
668 				     rqst->rq_task->tk_client->cl_clid : -1;
669 		__entry->req = req;
670 		__entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
671 		__entry->signaled = req->rl_sendctx->sc_wr.send_flags &
672 				    IB_SEND_SIGNALED;
673 		__entry->status = status;
674 	),
675 
676 	TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
677 		__entry->task_id, __entry->client_id,
678 		__entry->req, __entry->num_sge,
679 		(__entry->num_sge == 1 ? "" : "s"),
680 		(__entry->signaled ? "signaled " : ""),
681 		__entry->status
682 	)
683 );
684 
/* Fires for each Receive buffer posted to the receive queue. */
685 TRACE_EVENT(xprtrdma_post_recv,
686 	TP_PROTO(
687 		const struct rpcrdma_rep *rep
688 	),
689 
690 	TP_ARGS(rep),
691 
692 	TP_STRUCT__entry(
693 		__field(const void *, rep)
694 	),
695 
696 	TP_fast_assign(
697 		__entry->rep = rep;
698 	),
699 
700 	TP_printk("rep=%p",
701 		__entry->rep
702 	)
703 );
704 
/* Fires after a batch of Receives is posted: records how many were
 * requested, the endpoint's active receive count, and the post status.
 */
705 TRACE_EVENT(xprtrdma_post_recvs,
706 	TP_PROTO(
707 		const struct rpcrdma_xprt *r_xprt,
708 		unsigned int count,
709 		int status
710 	),
711 
712 	TP_ARGS(r_xprt, count, status),
713 
714 	TP_STRUCT__entry(
715 		__field(const void *, r_xprt)
716 		__field(unsigned int, count)
717 		__field(int, status)
718 		__field(int, posted)
719 		__string(addr, rpcrdma_addrstr(r_xprt))
720 		__string(port, rpcrdma_portstr(r_xprt))
721 	),
722 
723 	TP_fast_assign(
724 		__entry->r_xprt = r_xprt;
725 		__entry->count = count;
726 		__entry->status = status;
727 		__entry->posted = r_xprt->rx_ep.rep_receive_count;
728 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
729 		__assign_str(port, rpcrdma_portstr(r_xprt));
730 	),
731 
732 	TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
733 		__get_str(addr), __get_str(port), __entry->r_xprt,
734 		__entry->count, __entry->posted, __entry->status
735 	)
736 );
737 
738 /**
739  ** Completion events
740  **/
741 
/* Fires on Send completion: records the owning req, how many pages the
 * send context had mapped, and the completion status.
 */
742 TRACE_EVENT(xprtrdma_wc_send,
743 	TP_PROTO(
744 		const struct rpcrdma_sendctx *sc,
745 		const struct ib_wc *wc
746 	),
747 
748 	TP_ARGS(sc, wc),
749 
750 	TP_STRUCT__entry(
751 		__field(const void *, req)
752 		__field(unsigned int, unmap_count)
753 		__field(unsigned int, status)
754 		__field(unsigned int, vendor_err)
755 	),
756 
757 	TP_fast_assign(
758 		__entry->req = sc->sc_req;
759 		__entry->unmap_count = sc->sc_unmap_count;
760 		__entry->status = wc->status;
		/* vendor_err is meaningful only on failure. */
761 		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
762 	),
763 
764 	TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
765 		__entry->req, __entry->unmap_count,
766 		rdma_show_wc_status(__entry->status),
767 		__entry->status, __entry->vendor_err
768 	)
769 );
770 
/* Fires on Receive completion: records the rep (recovered from the
 * CQE), the number of bytes received, and the completion status.
 */
771 TRACE_EVENT(xprtrdma_wc_receive,
772 	TP_PROTO(
773 		const struct ib_wc *wc
774 	),
775 
776 	TP_ARGS(wc),
777 
778 	TP_STRUCT__entry(
779 		__field(const void *, rep)
780 		__field(u32, byte_len)
781 		__field(unsigned int, status)
782 		__field(u32, vendor_err)
783 	),
784 
785 	TP_fast_assign(
786 		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
787 					    rr_cqe);
788 		__entry->status = wc->status;
		/* byte_len is valid only on success; vendor_err only on
		 * failure. Record whichever applies, zero the other. */
789 		if (wc->status) {
790 			__entry->byte_len = 0;
791 			__entry->vendor_err = wc->vendor_err;
792 		} else {
793 			__entry->byte_len = wc->byte_len;
794 			__entry->vendor_err = 0;
795 		}
796 	),
797 
798 	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
799 		__entry->rep, __entry->byte_len,
800 		rdma_show_wc_status(__entry->status),
801 		__entry->status, __entry->vendor_err
802 	)
803 );
804 
/* FRWR completion events: FastReg, LocalInv, and the LocalInv variants
 * that wake or complete a waiting request. */
805 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
806 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
807 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
808 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
809 
/* Fires when allocating resources for an FRWR MR: records the result
 * code of the allocation.
 */
810 TRACE_EVENT(xprtrdma_frwr_alloc,
811 	TP_PROTO(
812 		const struct rpcrdma_mr *mr,
813 		int rc
814 	),
815 
816 	TP_ARGS(mr, rc),
817 
818 	TP_STRUCT__entry(
819 		__field(const void *, mr)
820 		__field(int, rc)
821 	),
822 
823 	TP_fast_assign(
824 		__entry->mr = mr;
825 		__entry->rc	= rc;
826 	),
827 
828 	TP_printk("mr=%p: rc=%d",
829 		__entry->mr, __entry->rc
830 	)
831 );
832 
/* Fires when an FRWR MR is deregistered: records the MR's sge triplet,
 * DMA direction, and the deregistration result code.
 */
833 TRACE_EVENT(xprtrdma_frwr_dereg,
834 	TP_PROTO(
835 		const struct rpcrdma_mr *mr,
836 		int rc
837 	),
838 
839 	TP_ARGS(mr, rc),
840 
841 	TP_STRUCT__entry(
842 		__field(const void *, mr)
843 		__field(u32, handle)
844 		__field(u32, length)
845 		__field(u64, offset)
846 		__field(u32, dir)
847 		__field(int, rc)
848 	),
849 
850 	TP_fast_assign(
851 		__entry->mr = mr;
852 		__entry->handle = mr->mr_handle;
853 		__entry->length = mr->mr_length;
854 		__entry->offset = mr->mr_offset;
855 		__entry->dir    = mr->mr_dir;
856 		__entry->rc	= rc;
857 	),
858 
859 	TP_printk("mr=%p %u@0x%016llx:0x%08x (%s): rc=%d",
860 		__entry->mr, __entry->length,
861 		(unsigned long long)__entry->offset, __entry->handle,
862 		xprtrdma_show_direction(__entry->dir),
863 		__entry->rc
864 	)
865 );
866 
/* Fires when building the MR's scatterlist fails: records the first
 * sg entry's DMA address and the offending sg_nents value.
 */
867 TRACE_EVENT(xprtrdma_frwr_sgerr,
868 	TP_PROTO(
869 		const struct rpcrdma_mr *mr,
870 		int sg_nents
871 	),
872 
873 	TP_ARGS(mr, sg_nents),
874 
875 	TP_STRUCT__entry(
876 		__field(const void *, mr)
877 		__field(u64, addr)
878 		__field(u32, dir)
879 		__field(int, nents)
880 	),
881 
882 	TP_fast_assign(
883 		__entry->mr = mr;
884 		__entry->addr = mr->mr_sg->dma_address;
885 		__entry->dir = mr->mr_dir;
886 		__entry->nents = sg_nents;
887 	),
888 
889 	TP_printk("mr=%p dma addr=0x%llx (%s) sg_nents=%d",
890 		__entry->mr, __entry->addr,
891 		xprtrdma_show_direction(__entry->dir),
892 		__entry->nents
893 	)
894 );
895 
/* Fires when ib_map_mr_sg maps fewer entries than requested: records
 * how many were mapped versus the MR's nents.
 */
896 TRACE_EVENT(xprtrdma_frwr_maperr,
897 	TP_PROTO(
898 		const struct rpcrdma_mr *mr,
899 		int num_mapped
900 	),
901 
902 	TP_ARGS(mr, num_mapped),
903 
904 	TP_STRUCT__entry(
905 		__field(const void *, mr)
906 		__field(u64, addr)
907 		__field(u32, dir)
908 		__field(int, num_mapped)
909 		__field(int, nents)
910 	),
911 
912 	TP_fast_assign(
913 		__entry->mr = mr;
914 		__entry->addr = mr->mr_sg->dma_address;
915 		__entry->dir = mr->mr_dir;
916 		__entry->num_mapped = num_mapped;
917 		__entry->nents = mr->mr_nents;
918 	),
919 
920 	TP_printk("mr=%p dma addr=0x%llx (%s) nents=%d of %d",
921 		__entry->mr, __entry->addr,
922 		xprtrdma_show_direction(__entry->dir),
923 		__entry->num_mapped, __entry->nents
924 	)
925 );
926 
/* MR lifecycle events: local/remote invalidation, DMA map/unmap, and
 * recycling of an MR back to the allocator. */
927 DEFINE_MR_EVENT(localinv);
928 DEFINE_MR_EVENT(map);
929 DEFINE_MR_EVENT(unmap);
930 DEFINE_MR_EVENT(remoteinv);
931 DEFINE_MR_EVENT(recycle);
932 
/* Fires when DMA-mapping a buffer fails: records the DMA address. */
933 TRACE_EVENT(xprtrdma_dma_maperr,
934 	TP_PROTO(
935 		u64 addr
936 	),
937 
938 	TP_ARGS(addr),
939 
940 	TP_STRUCT__entry(
941 		__field(u64, addr)
942 	),
943 
944 	TP_fast_assign(
945 		__entry->addr = addr;
946 	),
947 
	/* No trailing "\n": the trace output layer terminates each
	 * record itself, so a newline here emitted blank lines. */
948 	TP_printk("dma addr=0x%llx", __entry->addr)
949 );
950 
951 /**
952  ** Reply events
953  **/
954 
/* Fires when a received reply is matched to its request: records the
 * XID, the credit grant from the reply, and the rep/req pair.
 */
955 TRACE_EVENT(xprtrdma_reply,
956 	TP_PROTO(
957 		const struct rpc_task *task,
958 		const struct rpcrdma_rep *rep,
959 		const struct rpcrdma_req *req,
960 		unsigned int credits
961 	),
962 
963 	TP_ARGS(task, rep, req, credits),
964 
965 	TP_STRUCT__entry(
966 		__field(unsigned int, task_id)
967 		__field(unsigned int, client_id)
968 		__field(const void *, rep)
969 		__field(const void *, req)
970 		__field(u32, xid)
971 		__field(unsigned int, credits)
972 	),
973 
974 	TP_fast_assign(
975 		__entry->task_id = task->tk_pid;
976 		__entry->client_id = task->tk_client->cl_clid;
977 		__entry->rep = rep;
978 		__entry->req = req;
979 		__entry->xid = be32_to_cpu(rep->rr_xid);
980 		__entry->credits = credits;
981 	),
982 
983 	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
984 		__entry->task_id, __entry->client_id, __entry->xid,
985 		__entry->credits, __entry->rep, __entry->req
986 	)
987 );
988 
/* Fires when reply completion is deferred (handled outside the
 * completion handler): identifies the rep and its task.
 */
989 TRACE_EVENT(xprtrdma_defer_cmp,
990 	TP_PROTO(
991 		const struct rpcrdma_rep *rep
992 	),
993 
994 	TP_ARGS(rep),
995 
996 	TP_STRUCT__entry(
997 		__field(unsigned int, task_id)
998 		__field(unsigned int, client_id)
999 		__field(const void *, rep)
1000 		__field(u32, xid)
1001 	),
1002 
1003 	TP_fast_assign(
1004 		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
1005 		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
1006 		__entry->rep = rep;
1007 		__entry->xid = be32_to_cpu(rep->rr_xid);
1008 	),
1009 
1010 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1011 		__entry->task_id, __entry->client_id, __entry->xid,
1012 		__entry->rep
1013 	)
1014 );
1015 
/* Reply-header sanity events: bad version, no matching request, reply
 * too short, and malformed header. */
1016 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
1017 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
1018 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
1019 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
1020 
/* Fires when reply data is copied into the receive buffer: records the
 * receive buffer's head base, the total length, and the header length.
 */
1021 TRACE_EVENT(xprtrdma_fixup,
1022 	TP_PROTO(
1023 		const struct rpc_rqst *rqst,
1024 		int len,
1025 		int hdrlen
1026 	),
1027 
1028 	TP_ARGS(rqst, len, hdrlen),
1029 
1030 	TP_STRUCT__entry(
1031 		__field(unsigned int, task_id)
1032 		__field(unsigned int, client_id)
1033 		__field(const void *, base)
1034 		__field(int, len)
1035 		__field(int, hdrlen)
1036 	),
1037 
1038 	TP_fast_assign(
1039 		__entry->task_id = rqst->rq_task->tk_pid;
1040 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1041 		__entry->base = rqst->rq_rcv_buf.head[0].iov_base;
1042 		__entry->len = len;
1043 		__entry->hdrlen = hdrlen;
1044 	),
1045 
1046 	TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
1047 		__entry->task_id, __entry->client_id,
1048 		__entry->base, __entry->len, __entry->hdrlen
1049 	)
1050 );
1051 
/* Fires per page while copying reply data into the receive buffer's
 * page list: records page number, destination, and copy lengths.
 */
1052 TRACE_EVENT(xprtrdma_fixup_pg,
1053 	TP_PROTO(
1054 		const struct rpc_rqst *rqst,
1055 		int pageno,
1056 		const void *pos,
1057 		int len,
1058 		int curlen
1059 	),
1060 
1061 	TP_ARGS(rqst, pageno, pos, len, curlen),
1062 
1063 	TP_STRUCT__entry(
1064 		__field(unsigned int, task_id)
1065 		__field(unsigned int, client_id)
1066 		__field(const void *, pos)
1067 		__field(int, pageno)
1068 		__field(int, len)
1069 		__field(int, curlen)
1070 	),
1071 
1072 	TP_fast_assign(
1073 		__entry->task_id = rqst->rq_task->tk_pid;
1074 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1075 		__entry->pos = pos;
1076 		__entry->pageno = pageno;
1077 		__entry->len = len;
1078 		__entry->curlen = curlen;
1079 	),
1080 
1081 	TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
1082 		__entry->task_id, __entry->client_id,
1083 		__entry->pageno, __entry->pos, __entry->len, __entry->curlen
1084 	)
1085 );
1086 
/* Fires for each RDMA segment decoded from a chunk list: records the
 * segment's handle, length, and offset.
 */
1087 TRACE_EVENT(xprtrdma_decode_seg,
1088 	TP_PROTO(
1089 		u32 handle,
1090 		u32 length,
1091 		u64 offset
1092 	),
1093 
1094 	TP_ARGS(handle, length, offset),
1095 
1096 	TP_STRUCT__entry(
1097 		__field(u32, handle)
1098 		__field(u32, length)
1099 		__field(u64, offset)
1100 	),
1101 
1102 	TP_fast_assign(
1103 		__entry->handle = handle;
1104 		__entry->length = length;
1105 		__entry->offset = offset;
1106 	),
1107 
1108 	TP_printk("%u@0x%016llx:0x%08x",
1109 		__entry->length, (unsigned long long)__entry->offset,
1110 		__entry->handle
1111 	)
1112 );
1113 
1114 /**
1115  ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
1116  **/
1117 
/* Fires when transport buffers are allocated for a task: records the
 * req and the requested call/receive buffer sizes.
 */
1118 TRACE_EVENT(xprtrdma_op_allocate,
1119 	TP_PROTO(
1120 		const struct rpc_task *task,
1121 		const struct rpcrdma_req *req
1122 	),
1123 
1124 	TP_ARGS(task, req),
1125 
1126 	TP_STRUCT__entry(
1127 		__field(unsigned int, task_id)
1128 		__field(unsigned int, client_id)
1129 		__field(const void *, req)
1130 		__field(size_t, callsize)
1131 		__field(size_t, rcvsize)
1132 	),
1133 
1134 	TP_fast_assign(
1135 		__entry->task_id = task->tk_pid;
1136 		__entry->client_id = task->tk_client->cl_clid;
1137 		__entry->req = req;
1138 		__entry->callsize = task->tk_rqstp->rq_callsize;
1139 		__entry->rcvsize = task->tk_rqstp->rq_rcvsize;
1140 	),
1141 
1142 	TP_printk("task:%u@%u req=%p (%zu, %zu)",
1143 		__entry->task_id, __entry->client_id,
1144 		__entry->req, __entry->callsize, __entry->rcvsize
1145 	)
1146 );
1147 
/* Fires when a task's transport buffers are released: records the req
 * and its attached reply, if any.
 */
1148 TRACE_EVENT(xprtrdma_op_free,
1149 	TP_PROTO(
1150 		const struct rpc_task *task,
1151 		const struct rpcrdma_req *req
1152 	),
1153 
1154 	TP_ARGS(task, req),
1155 
1156 	TP_STRUCT__entry(
1157 		__field(unsigned int, task_id)
1158 		__field(unsigned int, client_id)
1159 		__field(const void *, req)
1160 		__field(const void *, rep)
1161 	),
1162 
1163 	TP_fast_assign(
1164 		__entry->task_id = task->tk_pid;
1165 		__entry->client_id = task->tk_client->cl_clid;
1166 		__entry->req = req;
1167 		__entry->rep = req->rl_reply;
1168 	),
1169 
1170 	TP_printk("task:%u@%u req=%p rep=%p",
1171 		__entry->task_id, __entry->client_id,
1172 		__entry->req, __entry->rep
1173 	)
1174 );
1175 
1176 /**
1177  ** Callback events
1178  **/
1179 
/* Fires when backchannel resources are set up on a transport: records
 * the number of backchannel requests provisioned.
 */
1180 TRACE_EVENT(xprtrdma_cb_setup,
1181 	TP_PROTO(
1182 		const struct rpcrdma_xprt *r_xprt,
1183 		unsigned int reqs
1184 	),
1185 
1186 	TP_ARGS(r_xprt, reqs),
1187 
1188 	TP_STRUCT__entry(
1189 		__field(const void *, r_xprt)
1190 		__field(unsigned int, reqs)
1191 		__string(addr, rpcrdma_addrstr(r_xprt))
1192 		__string(port, rpcrdma_portstr(r_xprt))
1193 	),
1194 
1195 	TP_fast_assign(
1196 		__entry->r_xprt = r_xprt;
1197 		__entry->reqs = reqs;
1198 		__assign_str(addr, rpcrdma_addrstr(r_xprt));
1199 		__assign_str(port, rpcrdma_portstr(r_xprt));
1200 	),
1201 
1202 	TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1203 		__get_str(addr), __get_str(port),
1204 		__entry->r_xprt, __entry->reqs
1205 	)
1206 );
1207 
/* Backchannel call and reply events. */
1208 DEFINE_CB_EVENT(xprtrdma_cb_call);
1209 DEFINE_CB_EVENT(xprtrdma_cb_reply);
1210 
/* Fires when a rep was not released back to the buffer pool (leak
 * detection): identifies the rep and the rqst it served.
 */
1211 TRACE_EVENT(xprtrdma_leaked_rep,
1212 	TP_PROTO(
1213 		const struct rpc_rqst *rqst,
1214 		const struct rpcrdma_rep *rep
1215 	),
1216 
1217 	TP_ARGS(rqst, rep),
1218 
1219 	TP_STRUCT__entry(
1220 		__field(unsigned int, task_id)
1221 		__field(unsigned int, client_id)
1222 		__field(u32, xid)
1223 		__field(const void *, rep)
1224 	),
1225 
1226 	TP_fast_assign(
1227 		__entry->task_id = rqst->rq_task->tk_pid;
1228 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
1229 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1230 		__entry->rep = rep;
1231 	),
1232 
1233 	TP_printk("task:%u@%u xid=0x%08x rep=%p",
1234 		__entry->task_id, __entry->client_id, __entry->xid,
1235 		__entry->rep
1236 	)
1237 );
1238 
1239 /**
1240  ** Server-side RPC/RDMA events
1241  **/
1242 
/* Event class for server-side transport lifetime events: captures the
 * svc_xprt pointer and the remote peer's presentation address string.
 */
1243 DECLARE_EVENT_CLASS(svcrdma_xprt_event,
1244 	TP_PROTO(
1245 		const struct svc_xprt *xprt
1246 	),
1247 
1248 	TP_ARGS(xprt),
1249 
1250 	TP_STRUCT__entry(
1251 		__field(const void *, xprt)
1252 		__string(addr, xprt->xpt_remotebuf)
1253 	),
1254 
1255 	TP_fast_assign(
1256 		__entry->xprt = xprt;
1257 		__assign_str(addr, xprt->xpt_remotebuf);
1258 	),
1259 
1260 	TP_printk("xprt=%p addr=%s",
1261 		__entry->xprt, __get_str(addr)
1262 	)
1263 );
1264 
/* Stamps out svcrdma_xprt_<name> events from the class above. */
1265 #define DEFINE_XPRT_EVENT(name)						\
1266 		DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name,	\
1267 				TP_PROTO(				\
1268 					const struct svc_xprt *xprt	\
1269 				),					\
1270 				TP_ARGS(xprt))
1271 
/* Transport accepted, accept failed, and transport freed. */
1272 DEFINE_XPRT_EVENT(accept);
1273 DEFINE_XPRT_EVENT(fail);
1274 DEFINE_XPRT_EVENT(free);
1275 
/* Export the RPC/RDMA procedure values so user space trace tools can
 * resolve the symbolic names printed by show_rpcrdma_proc().
 */
1276 TRACE_DEFINE_ENUM(RDMA_MSG);
1277 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1278 TRACE_DEFINE_ENUM(RDMA_MSGP);
1279 TRACE_DEFINE_ENUM(RDMA_DONE);
1280 TRACE_DEFINE_ENUM(RDMA_ERROR);
1281 
/* Map an rdma_proc value to its printable name in trace output. */
1282 #define show_rpcrdma_proc(x)						\
1283 		__print_symbolic(x,					\
1284 				{ RDMA_MSG, "RDMA_MSG" },		\
1285 				{ RDMA_NOMSG, "RDMA_NOMSG" },		\
1286 				{ RDMA_MSGP, "RDMA_MSGP" },		\
1287 				{ RDMA_DONE, "RDMA_DONE" },		\
1288 				{ RDMA_ERROR, "RDMA_ERROR" })
1289 
/* Fires after a received RPC/RDMA transport header is successfully
 * parsed. @p points at the header: four consecutive 32-bit words are
 * read (xid, vers, credits, proc); @hdrlen is the parsed header length.
 */
1290 TRACE_EVENT(svcrdma_decode_rqst,
1291 	TP_PROTO(
1292 		__be32 *p,
1293 		unsigned int hdrlen
1294 	),
1295 
1296 	TP_ARGS(p, hdrlen),
1297 
1298 	TP_STRUCT__entry(
1299 		__field(u32, xid)
1300 		__field(u32, vers)
1301 		__field(u32, proc)
1302 		__field(u32, credits)
1303 		__field(unsigned int, hdrlen)
1304 	),
1305 
1306 	TP_fast_assign(
1307 		__entry->xid = be32_to_cpup(p++);
1308 		__entry->vers = be32_to_cpup(p++);
1309 		__entry->credits = be32_to_cpup(p++);
1310 		__entry->proc = be32_to_cpup(p);
1311 		__entry->hdrlen = hdrlen;
1312 	),
1313 
1314 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1315 		__entry->xid, __entry->vers, __entry->credits,
1316 		show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1317 );
1318 
/* Fires when an incoming message is too short to contain a complete
 * RPC/RDMA transport header; records only the observed length.
 */
1319 TRACE_EVENT(svcrdma_decode_short,
1320 	TP_PROTO(
1321 		unsigned int hdrlen
1322 	),
1323 
1324 	TP_ARGS(hdrlen),
1325 
1326 	TP_STRUCT__entry(
1327 		__field(unsigned int, hdrlen)
1328 	),
1329 
1330 	TP_fast_assign(
1331 		__entry->hdrlen = hdrlen;
1332 	),
1333 
1334 	TP_printk("hdrlen=%u", __entry->hdrlen)
1335 );
1336 
/* Event class for requests whose transport header parsed far enough to
 * expose the four fixed words (xid, vers, credits, proc) but were then
 * rejected; proc is printed numerically since it may be garbage.
 */
1337 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1338 	TP_PROTO(
1339 		__be32 *p
1340 	),
1341 
1342 	TP_ARGS(p),
1343 
1344 	TP_STRUCT__entry(
1345 		__field(u32, xid)
1346 		__field(u32, vers)
1347 		__field(u32, proc)
1348 		__field(u32, credits)
1349 	),
1350 
1351 	TP_fast_assign(
1352 		__entry->xid = be32_to_cpup(p++);
1353 		__entry->vers = be32_to_cpup(p++);
1354 		__entry->credits = be32_to_cpup(p++);
1355 		__entry->proc = be32_to_cpup(p);
1356 	),
1357 
1358 	TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
1359 		__entry->xid, __entry->vers, __entry->credits, __entry->proc)
1360 );
1361 
/* Stamps out svcrdma_decode_<name> events from the badreq class. */
1362 #define DEFINE_BADREQ_EVENT(name)					\
1363 		DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
1364 				TP_PROTO(				\
1365 					__be32 *p			\
1366 				),					\
1367 				TP_ARGS(p))
1368 
/* Rejection reasons: bad version, dropped, bad procedure, parse error. */
1369 DEFINE_BADREQ_EVENT(badvers);
1370 DEFINE_BADREQ_EVENT(drop);
1371 DEFINE_BADREQ_EVENT(badproc);
1372 DEFINE_BADREQ_EVENT(parse);
1373 
/* Event class for one RDMA segment: a (handle, length, offset) triple.
 * Printed as length@offset:handle.
 */
1374 DECLARE_EVENT_CLASS(svcrdma_segment_event,
1375 	TP_PROTO(
1376 		u32 handle,
1377 		u32 length,
1378 		u64 offset
1379 	),
1380 
1381 	TP_ARGS(handle, length, offset),
1382 
1383 	TP_STRUCT__entry(
1384 		__field(u32, handle)
1385 		__field(u32, length)
1386 		__field(u64, offset)
1387 	),
1388 
1389 	TP_fast_assign(
1390 		__entry->handle = handle;
1391 		__entry->length = length;
1392 		__entry->offset = offset;
1393 	),
1394 
1395 	TP_printk("%u@0x%016llx:0x%08x",
1396 		__entry->length, (unsigned long long)__entry->offset,
1397 		__entry->handle
1398 	)
1399 );
1400 
/* Stamps out svcrdma_encode_<name> per-segment events. */
1401 #define DEFINE_SEGMENT_EVENT(name)					\
1402 		DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
1403 				TP_PROTO(				\
1404 					u32 handle,			\
1405 					u32 length,			\
1406 					u64 offset			\
1407 				),					\
1408 				TP_ARGS(handle, length, offset))
1409 
/* Read segment and write segment encoding. */
1410 DEFINE_SEGMENT_EVENT(rseg);
1411 DEFINE_SEGMENT_EVENT(wseg);
1412 
/* Event class for a whole chunk: records only its total length. */
1413 DECLARE_EVENT_CLASS(svcrdma_chunk_event,
1414 	TP_PROTO(
1415 		u32 length
1416 	),
1417 
1418 	TP_ARGS(length),
1419 
1420 	TP_STRUCT__entry(
1421 		__field(u32, length)
1422 	),
1423 
1424 	TP_fast_assign(
1425 		__entry->length = length;
1426 	),
1427 
1428 	TP_printk("length=%u",
1429 		__entry->length
1430 	)
1431 );
1432 
/* Stamps out svcrdma_encode_<name> per-chunk events. */
1433 #define DEFINE_CHUNK_EVENT(name)					\
1434 		DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
1435 				TP_PROTO(				\
1436 					u32 length			\
1437 				),					\
1438 				TP_ARGS(length))
1439 
/* Position-zero read chunk, write chunk, and reply chunk encoding. */
1440 DEFINE_CHUNK_EVENT(pzr);
1441 DEFINE_CHUNK_EVENT(write);
1442 DEFINE_CHUNK_EVENT(reply);
1443 
/* Like the chunk events, but for read chunks, which additionally carry
 * the XDR position at which the chunk's payload is inserted.
 */
1444 TRACE_EVENT(svcrdma_encode_read,
1445 	TP_PROTO(
1446 		u32 length,
1447 		u32 position
1448 	),
1449 
1450 	TP_ARGS(length, position),
1451 
1452 	TP_STRUCT__entry(
1453 		__field(u32, length)
1454 		__field(u32, position)
1455 	),
1456 
1457 	TP_fast_assign(
1458 		__entry->length = length;
1459 		__entry->position = position;
1460 	),
1461 
1462 	TP_printk("length=%u position=%u",
1463 		__entry->length, __entry->position
1464 	)
1465 );
1466 
/* Event class for sending an RPC/RDMA error reply; records only the
 * XID of the failing request (converted from wire byte order).
 */
1467 DECLARE_EVENT_CLASS(svcrdma_error_event,
1468 	TP_PROTO(
1469 		__be32 xid
1470 	),
1471 
1472 	TP_ARGS(xid),
1473 
1474 	TP_STRUCT__entry(
1475 		__field(u32, xid)
1476 	),
1477 
1478 	TP_fast_assign(
1479 		__entry->xid = be32_to_cpu(xid);
1480 	),
1481 
1482 	TP_printk("xid=0x%08x",
1483 		__entry->xid
1484 	)
1485 );
1486 
/* Stamps out svcrdma_err_<name> error-reply events. */
1487 #define DEFINE_ERROR_EVENT(name)					\
1488 		DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name,	\
1489 				TP_PROTO(				\
1490 					__be32 xid			\
1491 				),					\
1492 				TP_ARGS(xid))
1493 
/* ERR_VERS and ERR_CHUNK error replies. */
1494 DEFINE_ERROR_EVENT(vers);
1495 DEFINE_ERROR_EVENT(chunk);
1496 
1497 /**
1498  ** Server-side RDMA API events
1499  **/
1500 
/* Records a page being DMA-mapped for this transport, along with the
 * device name and the remote peer address.
 *
 * NOTE(review): dropped a stray ';' after the __field() entry below —
 * every other TP_STRUCT__entry in this file omits it, and the __field
 * macro supplies its own terminator when expanded.
 */
1501 TRACE_EVENT(svcrdma_dma_map_page,
1502 	TP_PROTO(
1503 		const struct svcxprt_rdma *rdma,
1504 		const void *page
1505 	),
1506 
1507 	TP_ARGS(rdma, page),
1508 
1509 	TP_STRUCT__entry(
1510 		__field(const void *, page)
1511 		__string(device, rdma->sc_cm_id->device->name)
1512 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1513 	),
1514 
1515 	TP_fast_assign(
1516 		__entry->page = page;
1517 		__assign_str(device, rdma->sc_cm_id->device->name);
1518 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1519 	),
1520 
1521 	TP_printk("addr=%s device=%s page=%p",
1522 		__get_str(addr), __get_str(device), __entry->page
1523 	)
1524 );
1525 
/* Records the status of a DMA mapping operation for an R/W context on
 * this transport, with device name and remote peer address.
 */
1526 TRACE_EVENT(svcrdma_dma_map_rwctx,
1527 	TP_PROTO(
1528 		const struct svcxprt_rdma *rdma,
1529 		int status
1530 	),
1531 
1532 	TP_ARGS(rdma, status),
1533 
1534 	TP_STRUCT__entry(
1535 		__field(int, status)
1536 		__string(device, rdma->sc_cm_id->device->name)
1537 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1538 	),
1539 
1540 	TP_fast_assign(
1541 		__entry->status = status;
1542 		__assign_str(device, rdma->sc_cm_id->device->name);
1543 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1544 	),
1545 
1546 	TP_printk("addr=%s device=%s status=%d",
1547 		__get_str(addr), __get_str(device), __entry->status
1548 	)
1549 );
1550 
/* Fires when sending an RPC reply fails; ties the status to the
 * request's XID and the transport it arrived on.
 *
 * NOTE(review): switched the reserved-prefix __be32_to_cpu() to the
 * public be32_to_cpu() used everywhere else in this file — identical
 * conversion, consistent spelling.
 */
1551 TRACE_EVENT(svcrdma_send_failed,
1552 	TP_PROTO(
1553 		const struct svc_rqst *rqst,
1554 		int status
1555 	),
1556 
1557 	TP_ARGS(rqst, status),
1558 
1559 	TP_STRUCT__entry(
1560 		__field(int, status)
1561 		__field(u32, xid)
1562 		__field(const void *, xprt)
1563 		__string(addr, rqst->rq_xprt->xpt_remotebuf)
1564 	),
1565 
1566 	TP_fast_assign(
1567 		__entry->status = status;
1568 		__entry->xid = be32_to_cpu(rqst->rq_xid);
1569 		__entry->xprt = rqst->rq_xprt;
1570 		__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1571 	),
1572 
1573 	TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1574 		__entry->xprt, __get_str(addr),
1575 		__entry->xid, __entry->status
1576 	)
1577 );
1578 
/* Event class for Send-side work completions: records the CQE pointer
 * and completion status; vendor_err is sampled only on failure (the
 * field is undefined on success, so it is forced to zero).
 */
1579 DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
1580 	TP_PROTO(
1581 		const struct ib_wc *wc
1582 	),
1583 
1584 	TP_ARGS(wc),
1585 
1586 	TP_STRUCT__entry(
1587 		__field(const void *, cqe)
1588 		__field(unsigned int, status)
1589 		__field(unsigned int, vendor_err)
1590 	),
1591 
1592 	TP_fast_assign(
1593 		__entry->cqe = wc->wr_cqe;
1594 		__entry->status = wc->status;
1595 		if (wc->status)
1596 			__entry->vendor_err = wc->vendor_err;
1597 		else
1598 			__entry->vendor_err = 0;
1599 	),
1600 
1601 	TP_printk("cqe=%p status=%s (%u/0x%x)",
1602 		__entry->cqe, rdma_show_wc_status(__entry->status),
1603 		__entry->status, __entry->vendor_err
1604 	)
1605 );
1606 
/* Stamps out svcrdma_wc_<name> completion events from the class above. */
1607 #define DEFINE_SENDCOMP_EVENT(name)					\
1608 		DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name,	\
1609 				TP_PROTO(				\
1610 					const struct ib_wc *wc		\
1611 				),					\
1612 				TP_ARGS(wc))
1613 
/* Records posting a Send WR: its CQE, SGE count, the rkey being
 * remotely invalidated (zero unless the opcode is SEND_WITH_INV),
 * and the return status of the post operation.
 */
1614 TRACE_EVENT(svcrdma_post_send,
1615 	TP_PROTO(
1616 		const struct ib_send_wr *wr,
1617 		int status
1618 	),
1619 
1620 	TP_ARGS(wr, status),
1621 
1622 	TP_STRUCT__entry(
1623 		__field(const void *, cqe)
1624 		__field(unsigned int, num_sge)
1625 		__field(u32, inv_rkey)
1626 		__field(int, status)
1627 	),
1628 
1629 	TP_fast_assign(
1630 		__entry->cqe = wr->wr_cqe;
1631 		__entry->num_sge = wr->num_sge;
1632 		__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1633 					wr->ex.invalidate_rkey : 0;
1634 		__entry->status = status;
1635 	),
1636 
1637 	TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
1638 		__entry->cqe, __entry->num_sge,
1639 		__entry->inv_rkey, __entry->status
1640 	)
1641 );
1642 
/* Completion of a Send WR. */
1643 DEFINE_SENDCOMP_EVENT(send);
1644 
/* Records posting a Receive WR: its CQE and the post return status. */
1645 TRACE_EVENT(svcrdma_post_recv,
1646 	TP_PROTO(
1647 		const struct ib_recv_wr *wr,
1648 		int status
1649 	),
1650 
1651 	TP_ARGS(wr, status),
1652 
1653 	TP_STRUCT__entry(
1654 		__field(const void *, cqe)
1655 		__field(int, status)
1656 	),
1657 
1658 	TP_fast_assign(
1659 		__entry->cqe = wr->wr_cqe;
1660 		__entry->status = status;
1661 	),
1662 
1663 	TP_printk("cqe=%p status=%d",
1664 		__entry->cqe, __entry->status
1665 	)
1666 );
1667 
/* Records a Receive completion. On failure, byte_len is forced to
 * zero and vendor_err sampled; on success, byte_len is valid and
 * vendor_err (undefined by the hardware) is zeroed.
 */
1668 TRACE_EVENT(svcrdma_wc_receive,
1669 	TP_PROTO(
1670 		const struct ib_wc *wc
1671 	),
1672 
1673 	TP_ARGS(wc),
1674 
1675 	TP_STRUCT__entry(
1676 		__field(const void *, cqe)
1677 		__field(u32, byte_len)
1678 		__field(unsigned int, status)
1679 		__field(u32, vendor_err)
1680 	),
1681 
1682 	TP_fast_assign(
1683 		__entry->cqe = wc->wr_cqe;
1684 		__entry->status = wc->status;
1685 		if (wc->status) {
1686 			__entry->byte_len = 0;
1687 			__entry->vendor_err = wc->vendor_err;
1688 		} else {
1689 			__entry->byte_len = wc->byte_len;
1690 			__entry->vendor_err = 0;
1691 		}
1692 	),
1693 
1694 	TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
1695 		__entry->cqe, __entry->byte_len,
1696 		rdma_show_wc_status(__entry->status),
1697 		__entry->status, __entry->vendor_err
1698 	)
1699 );
1700 
/* Records posting an RDMA Read/Write chain: the CQE, how many SQ
 * entries the chain consumes, and the post return status.
 */
1701 TRACE_EVENT(svcrdma_post_rw,
1702 	TP_PROTO(
1703 		const void *cqe,
1704 		int sqecount,
1705 		int status
1706 	),
1707 
1708 	TP_ARGS(cqe, sqecount, status),
1709 
1710 	TP_STRUCT__entry(
1711 		__field(const void *, cqe)
1712 		__field(int, sqecount)
1713 		__field(int, status)
1714 	),
1715 
1716 	TP_fast_assign(
1717 		__entry->cqe = cqe;
1718 		__entry->sqecount = sqecount;
1719 		__entry->status = status;
1720 	),
1721 
1722 	TP_printk("cqe=%p sqecount=%d status=%d",
1723 		__entry->cqe, __entry->sqecount, __entry->status
1724 	)
1725 );
1726 
/* Completions for RDMA Read and RDMA Write chains. */
1727 DEFINE_SENDCOMP_EVENT(read);
1728 DEFINE_SENDCOMP_EVENT(write);
1729 
/* Records an RDMA CM event for this listener/transport: the event
 * code, its status, and the peer's formatted socket address.
 *
 * NOTE(review): pass the full buffer size to snprintf() — it always
 * NUL-terminates within the given size, so the previous "- 1" only
 * forfeited one byte of address-string capacity.
 */
1730 TRACE_EVENT(svcrdma_cm_event,
1731 	TP_PROTO(
1732 		const struct rdma_cm_event *event,
1733 		const struct sockaddr *sap
1734 	),
1735 
1736 	TP_ARGS(event, sap),
1737 
1738 	TP_STRUCT__entry(
1739 		__field(unsigned int, event)
1740 		__field(int, status)
1741 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1742 	),
1743 
1744 	TP_fast_assign(
1745 		__entry->event = event->event;
1746 		__entry->status = event->status;
1747 		snprintf(__entry->addr, sizeof(__entry->addr),
1748 			 "%pISpc", sap);
1749 	),
1750 
1751 	TP_printk("addr=%s event=%s (%u/%d)",
1752 		__entry->addr,
1753 		rdma_show_cm_event(__entry->event),
1754 		__entry->event, __entry->status
1755 	)
1756 );
1757 
/* Records an IB QP asynchronous error event: the event code, the
 * device name, and the peer's formatted socket address.
 *
 * NOTE(review): pass the full buffer size to snprintf() — it always
 * NUL-terminates within the given size, so the previous "- 1" only
 * forfeited one byte of address-string capacity.
 */
1758 TRACE_EVENT(svcrdma_qp_error,
1759 	TP_PROTO(
1760 		const struct ib_event *event,
1761 		const struct sockaddr *sap
1762 	),
1763 
1764 	TP_ARGS(event, sap),
1765 
1766 	TP_STRUCT__entry(
1767 		__field(unsigned int, event)
1768 		__string(device, event->device->name)
1769 		__array(__u8, addr, INET6_ADDRSTRLEN + 10)
1770 	),
1771 
1772 	TP_fast_assign(
1773 		__entry->event = event->event;
1774 		__assign_str(device, event->device->name);
1775 		snprintf(__entry->addr, sizeof(__entry->addr),
1776 			 "%pISpc", sap);
1777 	),
1778 
1779 	TP_printk("addr=%s dev=%s event=%s (%u)",
1780 		__entry->addr, __get_str(device),
1781 		rdma_show_ib_event(__entry->event), __entry->event
1782 	)
1783 );
1784 
/* Event class for Send Queue accounting: snapshots available vs total
 * SQ entries (sc_sq_avail is read atomically) plus the peer address.
 */
1785 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1786 	TP_PROTO(
1787 		const struct svcxprt_rdma *rdma
1788 	),
1789 
1790 	TP_ARGS(rdma),
1791 
1792 	TP_STRUCT__entry(
1793 		__field(int, avail)
1794 		__field(int, depth)
1795 		__string(addr, rdma->sc_xprt.xpt_remotebuf)
1796 	),
1797 
1798 	TP_fast_assign(
1799 		__entry->avail = atomic_read(&rdma->sc_sq_avail);
1800 		__entry->depth = rdma->sc_sq_depth;
1801 		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1802 	),
1803 
1804 	TP_printk("addr=%s sc_sq_avail=%d/%d",
1805 		__get_str(addr), __entry->avail, __entry->depth
1806 	)
1807 );
1808 
/* Stamps out svcrdma_sq_<name> Send Queue pressure events. */
1809 #define DEFINE_SQ_EVENT(name)						\
1810 		DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1811 				TP_PROTO(				\
1812 					const struct svcxprt_rdma *rdma \
1813 				),					\
1814 				TP_ARGS(rdma))
1815 
/* SQ exhausted; retrying after SQ space became available. */
1816 DEFINE_SQ_EVENT(full);
1817 DEFINE_SQ_EVENT(retry);
1818 
1819 #endif /* _TRACE_RPCRDMA_H */
1820 
1821 #include <trace/define_trace.h>
1822