/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
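/*
 * Tracepoint definitions for the hfi1 transmit (PIO/SDMA) path.  The
 * guard below intentionally allows re-reading when TRACE_HEADER_MULTI_READ
 * is defined, since define_trace.h re-includes this header to generate
 * the tracepoint code.
 */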
#if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_TX_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"
#include "mad.h"
#include "sdma.h"

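/*
 * parse_sdma_flags() (defined out of line in trace.c) decodes the flag
 * bits of an SDMA descriptor pair into a printable string.  The
 * __parse_sdma_flags() wrapper supplies the per-event trace_seq buffer
 * 'p' that is available inside TP_printk().
 */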
const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);

#define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_tx

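/*
 * PIO send-context credit tracepoints.  Both identify the send context
 * by its software index and hardware context number; hfi1_wantpiointr
 * additionally records the credit_ctrl value.
 */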
TRACE_EVENT(hfi1_piofree,
	    TP_PROTO(struct send_context *sc, int extra),
	    TP_ARGS(sc, extra),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
	    __field(u32, sw_index)
	    __field(u32, hw_context)
	    __field(int, extra)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
	    __entry->sw_index = sc->sw_index;
	    __entry->hw_context = sc->hw_context;
	    __entry->extra = extra;
	    ),
	    TP_printk("[%s] ctxt %u(%u) extra %d",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->extra
	    )
);

TRACE_EVENT(hfi1_wantpiointr,
	    TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
	    TP_ARGS(sc, needint, credit_ctrl),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
			__field(u32, sw_index)
			__field(u32, hw_context)
			__field(u32, needint)
			__field(u64, credit_ctrl)
			),
	    TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
			__entry->sw_index = sc->sw_index;
			__entry->hw_context = sc->hw_context;
			__entry->needint = needint;
			__entry->credit_ctrl = credit_ctrl;
			),
	    TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
		      __get_str(dev),
		      __entry->sw_index,
		      __entry->hw_context,
		      __entry->needint,
		      (unsigned long long)__entry->credit_ctrl
		      )
);

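/*
 * QP sleep/wakeup events.  Besides the flags passed by the caller, the
 * entry snapshots the rvt_qp s_flags together with the hfi1-private
 * s_flags and iowait flags so the blocked/unblocked state can be seen
 * in a single line.
 */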
DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
		    TP_PROTO(struct rvt_qp *qp, u32 flags),
		    TP_ARGS(qp, flags),
		    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		    __field(u32, qpn)
		    __field(u32, flags)
		    __field(u32, s_flags)
		    __field(u32, ps_flags)
		    __field(unsigned long, iow_flags)
		    ),
		    TP_fast_assign(
		    DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		    __entry->flags = flags;
		    __entry->qpn = qp->ibqp.qp_num;
		    __entry->s_flags = qp->s_flags;
		    __entry->ps_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_flags;
		    __entry->iow_flags =
			((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
		    ),
		    TP_printk(
		    "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
		    __get_str(dev),
		    __entry->qpn,
		    __entry->flags,
		    __entry->s_flags,
		    __entry->ps_flags,
		    __entry->iow_flags
		    )
);

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
	     TP_PROTO(struct rvt_qp *qp, u32 flags),
	     TP_ARGS(qp, flags));

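/*
 * Per-descriptor SDMA trace.  The raw desc0/desc1 qwords are stored and
 * TP_printk() decodes the physical address, generation and byte count
 * with the SDMA_DESC* shift/mask macros from sdma.h.
 */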
TRACE_EVENT(hfi1_sdma_descriptor,
	    TP_PROTO(struct sdma_engine *sde,
		     u64 desc0,
		     u64 desc1,
		     u16 e,
		     void *descp),
		     TP_ARGS(sde, desc0, desc1, e, descp),
		     TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		     __field(void *, descp)
		     __field(u64, desc0)
		     __field(u64, desc1)
		     __field(u16, e)
		     __field(u8, idx)
		     ),
		     TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		     __entry->desc0 = desc0;
		     __entry->desc1 = desc1;
		     __entry->idx = sde->this_idx;
		     __entry->descp = descp;
		     __entry->e = e;
		     ),
	    TP_printk(
	    "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
	    __get_str(dev),
	    __entry->idx,
	    __parse_sdma_flags(__entry->desc0, __entry->desc1),
	    (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
	    SDMA_DESC0_PHY_ADDR_MASK,
	    (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
	    SDMA_DESC1_GENERATION_MASK),
	    (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
	    SDMA_DESC0_BYTE_COUNT_MASK),
	    __entry->desc0,
	    __entry->desc1,
	    __entry->descp,
	    __entry->e
	    )
);

TRACE_EVENT(hfi1_sdma_engine_select,
	    TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
	    TP_ARGS(dd, sel, vl, idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
	    __field(u32, sel)
	    __field(u8, vl)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
	    __entry->sel = sel;
	    __entry->vl = vl;
	    __entry->idx = idx;
	    ),
	    TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
		      __get_str(dev),
		      __entry->idx,
		      __entry->sel,
		      __entry->vl
		      )
);

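/*
 * User SDMA request-path events, keyed by user context and subcontext
 * (and, where present, the request's completion-ring index).
 */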
TRACE_EVENT(hfi1_sdma_user_free_queues,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
	    TP_ARGS(dd, ctxt, subctxt),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt
		      )
);

TRACE_EVENT(hfi1_sdma_user_process_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   ),
	    TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx
		      )
);

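/*
 * Shared template for user SDMA events that log a single u32 value for
 * a given context/subcontext/completion-entry triple.
 */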
DECLARE_EVENT_CLASS(
	hfi1_sdma_value_template,
	TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
		 u32 value),
	TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
	TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			 __field(u16, ctxt)
			 __field(u16, subctxt)
			 __field(u16, comp_idx)
			 __field(u32, value)
		),
	TP_fast_assign(DD_DEV_ASSIGN(dd);
		       __entry->ctxt = ctxt;
		       __entry->subctxt = subctxt;
		       __entry->comp_idx = comp_idx;
		       __entry->value = value;
		),
	TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
		  __get_str(dev),
		  __entry->ctxt,
		  __entry->subctxt,
		  __entry->comp_idx,
		  __entry->value
		)
);

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 tidoffset),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
	     TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		      u16 comp_idx, u32 data_len),
	     TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));

TRACE_EVENT(hfi1_sdma_user_tid_info,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
	    TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(u16, comp_idx)
			     __field(u32, tidoffset)
			     __field(u32, units)
			     __field(u8, shift)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->comp_idx = comp_idx;
			   __entry->tidoffset = tidoffset;
			   __entry->units = units;
			   __entry->shift = shift;
			   ),
	    TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->comp_idx,
		      __entry->tidoffset,
		      __entry->units,
		      __entry->shift
		      )
);

TRACE_EVENT(hfi1_sdma_request,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
		     unsigned long dim),
	    TP_ARGS(dd, ctxt, subctxt, dim),
	    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
			     __field(u16, ctxt)
			     __field(u16, subctxt)
			     __field(unsigned long, dim)
			     ),
	    TP_fast_assign(DD_DEV_ASSIGN(dd);
			   __entry->ctxt = ctxt;
			   __entry->subctxt = subctxt;
			   __entry->dim = dim;
			   ),
	    TP_printk("[%s] SDMA from %u:%u (%lu)",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->dim
		      )
);

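/*
 * Per-engine tracepoints: the interrupt/progress pair logs the engine
 * index with its 64-bit status word, and the AHG allocate/deallocate
 * pair logs the index of the AHG entry in use.
 */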
DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
		    TP_PROTO(struct sdma_engine *sde, u64 status),
		    TP_ARGS(sde, status),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u64, status)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->status = status;
		    __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) status %llx",
			      __get_str(dev),
			      __entry->idx,
			      (unsigned long long)__entry->status
			      )
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
	     TP_PROTO(struct sdma_engine *sde, u64 status),
	     TP_ARGS(sde, status)
);

DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
		    TP_PROTO(struct sdma_engine *sde, int aidx),
		    TP_ARGS(sde, aidx),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(int, aidx)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->idx = sde->this_idx;
		    __entry->aidx = aidx;
		    ),
		    TP_printk("[%s] SDE(%u) aidx %d",
			      __get_str(dev),
			      __entry->idx,
			      __entry->aidx
			      )
);

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
	     TP_PROTO(struct sdma_engine *sde, int aidx),
	     TP_ARGS(sde, aidx));

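/*
 * Two builds of hfi1_sdma_progress: when CONFIG_HFI1_DEBUG_SDMA_ORDER
 * is set the tx serial number (sn) is recorded as well, otherwise only
 * the head/tail indices are traced.
 */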
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead,
		     u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
	    __field(u64, sn)
	    __field(u16, hwhead)
	    __field(u16, swhead)
	    __field(u16, txnext)
	    __field(u16, tx_tail)
	    __field(u16, tx_head)
	    __field(u8, idx)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
	    __entry->hwhead = hwhead;
	    __entry->swhead = swhead;
	    __entry->tx_tail = sde->tx_tail;
	    __entry->tx_head = sde->tx_head;
	    __entry->txnext = txp ? txp->next_descq_idx : ~0;
	    __entry->idx = sde->this_idx;
	    __entry->sn = txp ? txp->sn : ~0;
	    ),
	    TP_printk(
	    "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
	    __get_str(dev),
	    __entry->idx,
	    __entry->sn,
	    __entry->hwhead,
	    __entry->swhead,
	    __entry->txnext,
	    __entry->tx_head,
	    __entry->tx_tail
	    )
);
#else
TRACE_EVENT(hfi1_sdma_progress,
	    TP_PROTO(struct sdma_engine *sde,
		     u16 hwhead, u16 swhead,
		     struct sdma_txreq *txp
		     ),
	    TP_ARGS(sde, hwhead, swhead, txp),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u16, hwhead)
		    __field(u16, swhead)
		    __field(u16, txnext)
		    __field(u16, tx_tail)
		    __field(u16, tx_head)
		    __field(u8, idx)
		    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->hwhead = hwhead;
		    __entry->swhead = swhead;
		    __entry->tx_tail = sde->tx_tail;
		    __entry->tx_head = sde->tx_head;
		    __entry->txnext = txp ? txp->next_descq_idx : ~0;
		    __entry->idx = sde->this_idx;
		    ),
	    TP_printk(
		    "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
		    __get_str(dev),
		    __entry->idx,
		    __entry->hwhead,
		    __entry->swhead,
		    __entry->txnext,
		    __entry->tx_head,
		    __entry->tx_tail
	    )
);
#endif

DECLARE_EVENT_CLASS(hfi1_sdma_sn,
		    TP_PROTO(struct sdma_engine *sde, u64 sn),
		    TP_ARGS(sde, sn),
		    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		    __field(u64, sn)
		    __field(u8, idx)
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		    __entry->sn = sn;
		    __entry->idx = sde->this_idx;
		    ),
		    TP_printk("[%s] SDE(%u) sn %llu",
			      __get_str(dev),
			      __entry->idx,
			      __entry->sn
			      )
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
	     TP_PROTO(
	     struct sdma_engine *sde,
	     u64 sn
	     ),
	     TP_ARGS(sde, sn)
);

DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
	     TP_PROTO(struct sdma_engine *sde, u64 sn),
	     TP_ARGS(sde, sn)
);

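/*
 * Dump of a user SDMA packet header: PBC, LRH, BTH and KDETH words plus
 * the TID value, converted from their on-the-wire byte order.
 */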
#define USDMA_HDR_FORMAT \
	"[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"

TRACE_EVENT(hfi1_sdma_user_header,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     struct hfi1_pkt_header *hdr, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u16, req)
		    __field(u32, pbc0)
		    __field(u32, pbc1)
		    __field(u32, lrh0)
		    __field(u32, lrh1)
		    __field(u32, bth0)
		    __field(u32, bth1)
		    __field(u32, bth2)
		    __field(u32, kdeth0)
		    __field(u32, kdeth1)
		    __field(u32, kdeth2)
		    __field(u32, kdeth3)
		    __field(u32, kdeth4)
		    __field(u32, kdeth5)
		    __field(u32, kdeth6)
		    __field(u32, kdeth7)
		    __field(u32, kdeth8)
		    __field(u32, tidval)
		    ),
		    TP_fast_assign(
		    __le32 *pbc = (__le32 *)hdr->pbc;
		    __be32 *lrh = (__be32 *)hdr->lrh;
		    __be32 *bth = (__be32 *)hdr->bth;
		    __le32 *kdeth = (__le32 *)&hdr->kdeth;

		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->req = req;
		    __entry->pbc0 = le32_to_cpu(pbc[0]);
		    __entry->pbc1 = le32_to_cpu(pbc[1]);
		    __entry->lrh0 = be32_to_cpu(lrh[0]);
		    __entry->lrh1 = be32_to_cpu(lrh[1]);
		    __entry->bth0 = be32_to_cpu(bth[0]);
		    __entry->bth1 = be32_to_cpu(bth[1]);
		    __entry->bth2 = be32_to_cpu(bth[2]);
		    __entry->kdeth0 = le32_to_cpu(kdeth[0]);
		    __entry->kdeth1 = le32_to_cpu(kdeth[1]);
		    __entry->kdeth2 = le32_to_cpu(kdeth[2]);
		    __entry->kdeth3 = le32_to_cpu(kdeth[3]);
		    __entry->kdeth4 = le32_to_cpu(kdeth[4]);
		    __entry->kdeth5 = le32_to_cpu(kdeth[5]);
		    __entry->kdeth6 = le32_to_cpu(kdeth[6]);
		    __entry->kdeth7 = le32_to_cpu(kdeth[7]);
		    __entry->kdeth8 = le32_to_cpu(kdeth[8]);
		    __entry->tidval = tidval;
	    ),
	    TP_printk(USDMA_HDR_FORMAT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->pbc1,
		      __entry->pbc0,
		      __entry->lrh0,
		      __entry->lrh1,
		      __entry->bth0,
		      __entry->bth1,
		      __entry->bth2,
		      __entry->kdeth0,
		      __entry->kdeth1,
		      __entry->kdeth2,
		      __entry->kdeth3,
		      __entry->kdeth4,
		      __entry->kdeth5,
		      __entry->kdeth6,
		      __entry->kdeth7,
		      __entry->kdeth8,
		      __entry->tidval
	    )
);

#define SDMA_UREQ_FMT \
	"[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
TRACE_EVENT(hfi1_sdma_user_reqinfo,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
	    TP_ARGS(dd, ctxt, subctxt, i),
	    TP_STRUCT__entry(
		    DD_DEV_ENTRY(dd)
		    __field(u16, ctxt)
		    __field(u8, subctxt)
		    __field(u8, ver_opcode)
		    __field(u8, iovcnt)
		    __field(u16, npkts)
		    __field(u16, fragsize)
		    __field(u16, comp_idx)
	    ),
	    TP_fast_assign(
		    DD_DEV_ASSIGN(dd);
		    __entry->ctxt = ctxt;
		    __entry->subctxt = subctxt;
		    __entry->ver_opcode = i[0] & 0xff;
		    __entry->iovcnt = (i[0] >> 8) & 0xff;
		    __entry->npkts = i[1];
		    __entry->fragsize = i[2];
		    __entry->comp_idx = i[3];
	    ),
	    TP_printk(SDMA_UREQ_FMT,
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->ver_opcode,
		      __entry->iovcnt,
		      __entry->npkts,
		      __entry->fragsize,
		      __entry->comp_idx
		      )
);

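/*
 * Map the numeric user SDMA completion state to its name with
 * __print_symbolic() so the completion event prints FREE/QUEUED/
 * COMPLETE/ERROR instead of a raw number.
 */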
#define usdma_complete_name(st) { st, #st }
#define show_usdma_complete_state(st)			\
	__print_symbolic(st,				\
			usdma_complete_name(FREE),	\
			usdma_complete_name(QUEUED),	\
			usdma_complete_name(COMPLETE), \
			usdma_complete_name(ERROR))

TRACE_EVENT(hfi1_sdma_user_completion,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
		     u8 state, int code),
	    TP_ARGS(dd, ctxt, subctxt, idx, state, code),
	    TP_STRUCT__entry(
	    DD_DEV_ENTRY(dd)
	    __field(u16, ctxt)
	    __field(u8, subctxt)
	    __field(u16, idx)
	    __field(u8, state)
	    __field(int, code)
	    ),
	    TP_fast_assign(
	    DD_DEV_ASSIGN(dd);
	    __entry->ctxt = ctxt;
	    __entry->subctxt = subctxt;
	    __entry->idx = idx;
	    __entry->state = state;
	    __entry->code = code;
	    ),
	    TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
		      __get_str(dev), __entry->ctxt, __entry->subctxt,
		      __entry->idx, show_usdma_complete_state(__entry->state),
		      __entry->code)
);

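/*
 * print_u32_array() (defined out of line in trace.c) renders an array
 * of u32 values as hex into the trace_seq buffer; it is used below to
 * show the AHG descriptor words.
 */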
const char *print_u32_array(struct trace_seq *, u32 *, int);
#define __print_u32_hex(arr, len) print_u32_array(p, arr, len)

TRACE_EVENT(hfi1_sdma_user_header_ahg,
	    TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
		     u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
	    TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
	    TP_STRUCT__entry(
	    DD_DEV_ENTRY(dd)
	    __field(u16, ctxt)
	    __field(u8, subctxt)
	    __field(u16, req)
	    __field(u8, sde)
	    __field(u8, idx)
	    __field(int, len)
	    __field(u32, tidval)
	    __array(u32, ahg, 10)
	    ),
	    TP_fast_assign(
	    DD_DEV_ASSIGN(dd);
	    __entry->ctxt = ctxt;
	    __entry->subctxt = subctxt;
	    __entry->req = req;
	    __entry->sde = sde;
	    __entry->idx = ahgidx;
	    __entry->len = len;
	    __entry->tidval = tidval;
	    memcpy(__entry->ahg, ahg, len * sizeof(u32));
	    ),
	    TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
		      __get_str(dev),
		      __entry->ctxt,
		      __entry->subctxt,
		      __entry->req,
		      __entry->sde,
		      __entry->idx,
		      __entry->len - 1,
		      __print_u32_hex(__entry->ahg, __entry->len),
		      __entry->tidval
		      )
);

TRACE_EVENT(hfi1_sdma_state,
	    TP_PROTO(struct sdma_engine *sde,
		     const char *cstate,
		     const char *nstate
		     ),
	    TP_ARGS(sde, cstate, nstate),
	    TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
		__string(curstate, cstate)
		__string(newstate, nstate)
	    ),
	    TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
		__assign_str(curstate, cstate);
		__assign_str(newstate, nstate);
	    ),
	    TP_printk("[%s] current state %s new state %s",
		      __get_str(dev),
		      __get_str(curstate),
		      __get_str(newstate)
	    )
);

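/*
 * Buffer control table events.  The whole buffer_control structure is
 * copied into a dynamic array and the BCT() helper reads individual
 * big-endian fields back out for printing.
 */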
#define BCT_FORMAT \
	"shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"

#define BCT(field) \
	be16_to_cpu( \
	((struct buffer_control *)__get_dynamic_array(bct))->field \
	)

DECLARE_EVENT_CLASS(hfi1_bct_template,
		    TP_PROTO(struct hfi1_devdata *dd,
			     struct buffer_control *bc),
		    TP_ARGS(dd, bc),
		    TP_STRUCT__entry(DD_DEV_ENTRY(dd)
		    __dynamic_array(u8, bct, sizeof(*bc))
		    ),
		    TP_fast_assign(DD_DEV_ASSIGN(dd);
				   memcpy(__get_dynamic_array(bct), bc,
					  sizeof(*bc));
		    ),
		    TP_printk(BCT_FORMAT,
			      BCT(overall_shared_limit),

			      BCT(vl[0].dedicated),
			      BCT(vl[0].shared),

			      BCT(vl[1].dedicated),
			      BCT(vl[1].shared),

			      BCT(vl[2].dedicated),
			      BCT(vl[2].shared),

			      BCT(vl[3].dedicated),
			      BCT(vl[3].shared),

			      BCT(vl[4].dedicated),
			      BCT(vl[4].shared),

			      BCT(vl[5].dedicated),
			      BCT(vl[5].shared),

			      BCT(vl[6].dedicated),
			      BCT(vl[6].shared),

			      BCT(vl[7].dedicated),
			      BCT(vl[7].shared),

			      BCT(vl[15].dedicated),
			      BCT(vl[15].shared)
		    )
);

DEFINE_EVENT(hfi1_bct_template, bct_set,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

DEFINE_EVENT(hfi1_bct_template, bct_get,
	     TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
	     TP_ARGS(dd, bc));

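/*
 * Completion of a send WQE: records the QP, the WQE pointer and index,
 * and the work request's id, length, ssn, opcode and send flags.
 */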
TRACE_EVENT(
	hfi1_qp_send_completion,
	TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
	TP_ARGS(qp, wqe, idx),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(struct rvt_swqe *, wqe)
		__field(u64, wr_id)
		__field(u32, qpn)
		__field(u32, qpt)
		__field(u32, length)
		__field(u32, idx)
		__field(u32, ssn)
		__field(enum ib_wr_opcode, opcode)
		__field(int, send_flags)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->wqe = wqe;
		__entry->wr_id = wqe->wr.wr_id;
		__entry->qpn = qp->ibqp.qp_num;
		__entry->qpt = qp->ibqp.qp_type;
		__entry->length = wqe->length;
		__entry->idx = idx;
		__entry->ssn = wqe->ssn;
		__entry->opcode = wqe->wr.opcode;
		__entry->send_flags = wqe->wr.send_flags;
	),
	TP_printk(
		"[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
		__get_str(dev),
		__entry->qpn,
		__entry->qpt,
		__entry->wqe,
		__entry->idx,
		__entry->wr_id,
		__entry->length,
		__entry->ssn,
		__entry->opcode,
		__entry->send_flags
	)
);

DECLARE_EVENT_CLASS(
	hfi1_do_send_template,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag),
	TP_STRUCT__entry(
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(bool, flag)
	),
	TP_fast_assign(
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->qpn = qp->ibqp.qp_num;
		__entry->flag = flag;
	),
	TP_printk(
		"[%s] qpn %x flag %d",
		__get_str(dev),
		__entry->qpn,
		__entry->flag
	)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_do_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(/* event */
	hfi1_do_send_template, hfi1_rc_do_tid_send,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

DEFINE_EVENT(
	hfi1_do_send_template, hfi1_rc_expired_time_slice,
	TP_PROTO(struct rvt_qp *qp, bool flag),
	TP_ARGS(qp, flag)
);

#endif /* __HFI1_TRACE_TX_H */

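/*
 * Tell define_trace.h where to find this header so the tracepoint
 * bodies can be generated from it.
 */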
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tx
#include <trace/define_trace.h>