1 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ 2 /* 3 * Copyright(c) 2018 Intel Corporation. 4 * 5 */ 6 #if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ) 7 #define __HFI1_TRACE_TID_H 8 9 #include <linux/tracepoint.h> 10 #include <linux/trace_seq.h> 11 12 #include "hfi.h" 13 14 #define tidtype_name(type) { PT_##type, #type } 15 #define show_tidtype(type) \ 16 __print_symbolic(type, \ 17 tidtype_name(EXPECTED), \ 18 tidtype_name(EAGER), \ 19 tidtype_name(INVALID)) \ 20 21 #undef TRACE_SYSTEM 22 #define TRACE_SYSTEM hfi1_tid 23 24 u8 hfi1_trace_get_tid_ctrl(u32 ent); 25 u16 hfi1_trace_get_tid_len(u32 ent); 26 u16 hfi1_trace_get_tid_idx(u32 ent); 27 28 #define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \ 29 "max write %u, max length %u, jkey 0x%x timeout %u " \ 30 "urg %u" 31 32 #define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \ 33 "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \ 34 "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \ 35 "tidcnt %u tid_idx %u tid_offset %u length %u sent %u" 36 37 #define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \ 38 "used %u cnt %u" 39 40 #define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \ 41 "r_psn 0x%x r_state 0x%x r_flags 0x%x " \ 42 "r_head_ack_queue %u s_tail_ack_queue %u " \ 43 "s_acked_ack_queue %u s_ack_state 0x%x " \ 44 "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \ 45 "iow_flags 0x%lx" 46 47 #define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \ 48 "s_head %u s_acked %u s_last %u s_psn 0x%x " \ 49 "s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \ 50 "iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u" 51 52 #define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \ 53 "tid_r_comp %u pending_tid_r_segs %u " \ 54 "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \ 55 "s_state 0x%x hw_flow_index %u generation 0x%x " \ 56 "fpsn 0x%x" 57 58 #define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 
0x%x psn 0x%x lpsn 0x%x " \ 59 "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \ 60 "total_segs %u setup_head %u clear_tail %u flow_idx %u " \ 61 "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \ 62 "r_last_ackd 0x%x s_next_psn 0x%x" 63 64 #define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \ 65 "s_acked_ack_queue %u s_tail_ack_queue %u " \ 66 "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \ 67 " diff %d" 68 69 #define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \ 70 "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \ 71 "pending_tid_w_segs %u sync_pt %s " \ 72 "ps_nak_psn 0x%x ps_nak_state 0x%x " \ 73 "prnr_nak_state 0x%x hw_flow_index %u generation "\ 74 "0x%x fpsn 0x%x resync %s" \ 75 "r_next_psn_kdeth 0x%x" 76 77 #define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \ 78 "s_tid_tail %u s_tid_head %u " \ 79 "pending_tid_w_resp %u n_requests %u " \ 80 "n_tid_requests %u s_flags 0x%x ps_flags 0x%x "\ 81 "iow_flags 0x%lx s_state 0x%x s_retry %u" 82 83 #define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x " \ 84 "RcvTypeError 0x%x PSN 0x%x" 85 86 DECLARE_EVENT_CLASS(/* class */ 87 hfi1_exp_tid_reg_unreg, 88 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, 89 unsigned long va, unsigned long pa, dma_addr_t dma), 90 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), 91 TP_STRUCT__entry(/* entry */ 92 __field(unsigned int, ctxt) 93 __field(u16, subctxt) 94 __field(u32, rarr) 95 __field(u32, npages) 96 __field(unsigned long, va) 97 __field(unsigned long, pa) 98 __field(dma_addr_t, dma) 99 ), 100 TP_fast_assign(/* assign */ 101 __entry->ctxt = ctxt; 102 __entry->subctxt = subctxt; 103 __entry->rarr = rarr; 104 __entry->npages = npages; 105 __entry->va = va; 106 __entry->pa = pa; 107 __entry->dma = dma; 108 ), 109 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx", 110 __entry->ctxt, 111 __entry->subctxt, 112 __entry->rarr, 113 __entry->npages, 114 __entry->pa, 115 
__entry->va, 116 __entry->dma 117 ) 118 ); 119 120 DEFINE_EVENT(/* exp_tid_unreg */ 121 hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg, 122 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, 123 unsigned long va, unsigned long pa, dma_addr_t dma), 124 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) 125 ); 126 127 DEFINE_EVENT(/* exp_tid_reg */ 128 hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg, 129 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, 130 unsigned long va, unsigned long pa, dma_addr_t dma), 131 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) 132 ); 133 134 TRACE_EVENT(/* put_tid */ 135 hfi1_put_tid, 136 TP_PROTO(struct hfi1_devdata *dd, 137 u32 index, u32 type, unsigned long pa, u16 order), 138 TP_ARGS(dd, index, type, pa, order), 139 TP_STRUCT__entry(/* entry */ 140 DD_DEV_ENTRY(dd) 141 __field(unsigned long, pa); 142 __field(u32, index); 143 __field(u32, type); 144 __field(u16, order); 145 ), 146 TP_fast_assign(/* assign */ 147 DD_DEV_ASSIGN(dd); 148 __entry->pa = pa; 149 __entry->index = index; 150 __entry->type = type; 151 __entry->order = order; 152 ), 153 TP_printk("[%s] type %s pa %lx index %u order %u", 154 __get_str(dev), 155 show_tidtype(__entry->type), 156 __entry->pa, 157 __entry->index, 158 __entry->order 159 ) 160 ); 161 162 TRACE_EVENT(/* exp_tid_inval */ 163 hfi1_exp_tid_inval, 164 TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr, 165 u32 npages, dma_addr_t dma), 166 TP_ARGS(ctxt, subctxt, va, rarr, npages, dma), 167 TP_STRUCT__entry(/* entry */ 168 __field(unsigned int, ctxt) 169 __field(u16, subctxt) 170 __field(unsigned long, va) 171 __field(u32, rarr) 172 __field(u32, npages) 173 __field(dma_addr_t, dma) 174 ), 175 TP_fast_assign(/* assign */ 176 __entry->ctxt = ctxt; 177 __entry->subctxt = subctxt; 178 __entry->va = va; 179 __entry->rarr = rarr; 180 __entry->npages = npages; 181 __entry->dma = dma; 182 ), 183 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx", 184 __entry->ctxt, 185 
__entry->subctxt, 186 __entry->rarr, 187 __entry->npages, 188 __entry->va, 189 __entry->dma 190 ) 191 ); 192 193 DECLARE_EVENT_CLASS(/* opfn_state */ 194 hfi1_opfn_state_template, 195 TP_PROTO(struct rvt_qp *qp), 196 TP_ARGS(qp), 197 TP_STRUCT__entry(/* entry */ 198 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 199 __field(u32, qpn) 200 __field(u16, requested) 201 __field(u16, completed) 202 __field(u8, curr) 203 ), 204 TP_fast_assign(/* assign */ 205 struct hfi1_qp_priv *priv = qp->priv; 206 207 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 208 __entry->qpn = qp->ibqp.qp_num; 209 __entry->requested = priv->opfn.requested; 210 __entry->completed = priv->opfn.completed; 211 __entry->curr = priv->opfn.curr; 212 ), 213 TP_printk(/* print */ 214 "[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x", 215 __get_str(dev), 216 __entry->qpn, 217 __entry->requested, 218 __entry->completed, 219 __entry->curr 220 ) 221 ); 222 223 DEFINE_EVENT(/* event */ 224 hfi1_opfn_state_template, hfi1_opfn_state_conn_request, 225 TP_PROTO(struct rvt_qp *qp), 226 TP_ARGS(qp) 227 ); 228 229 DEFINE_EVENT(/* event */ 230 hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request, 231 TP_PROTO(struct rvt_qp *qp), 232 TP_ARGS(qp) 233 ); 234 235 DEFINE_EVENT(/* event */ 236 hfi1_opfn_state_template, hfi1_opfn_state_conn_response, 237 TP_PROTO(struct rvt_qp *qp), 238 TP_ARGS(qp) 239 ); 240 241 DEFINE_EVENT(/* event */ 242 hfi1_opfn_state_template, hfi1_opfn_state_conn_reply, 243 TP_PROTO(struct rvt_qp *qp), 244 TP_ARGS(qp) 245 ); 246 247 DEFINE_EVENT(/* event */ 248 hfi1_opfn_state_template, hfi1_opfn_state_conn_error, 249 TP_PROTO(struct rvt_qp *qp), 250 TP_ARGS(qp) 251 ); 252 253 DECLARE_EVENT_CLASS(/* opfn_data */ 254 hfi1_opfn_data_template, 255 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), 256 TP_ARGS(qp, capcode, data), 257 TP_STRUCT__entry(/* entry */ 258 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 259 __field(u32, qpn) 260 __field(u32, state) 261 __field(u8, capcode) 262 
__field(u64, data) 263 ), 264 TP_fast_assign(/* assign */ 265 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 266 __entry->qpn = qp->ibqp.qp_num; 267 __entry->state = qp->state; 268 __entry->capcode = capcode; 269 __entry->data = data; 270 ), 271 TP_printk(/* printk */ 272 "[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx", 273 __get_str(dev), 274 __entry->qpn, 275 __entry->state, 276 __entry->capcode, 277 __entry->data 278 ) 279 ); 280 281 DEFINE_EVENT(/* event */ 282 hfi1_opfn_data_template, hfi1_opfn_data_conn_request, 283 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), 284 TP_ARGS(qp, capcode, data) 285 ); 286 287 DEFINE_EVENT(/* event */ 288 hfi1_opfn_data_template, hfi1_opfn_data_conn_response, 289 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), 290 TP_ARGS(qp, capcode, data) 291 ); 292 293 DEFINE_EVENT(/* event */ 294 hfi1_opfn_data_template, hfi1_opfn_data_conn_reply, 295 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), 296 TP_ARGS(qp, capcode, data) 297 ); 298 299 DECLARE_EVENT_CLASS(/* opfn_param */ 300 hfi1_opfn_param_template, 301 TP_PROTO(struct rvt_qp *qp, char remote, 302 struct tid_rdma_params *param), 303 TP_ARGS(qp, remote, param), 304 TP_STRUCT__entry(/* entry */ 305 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 306 __field(u32, qpn) 307 __field(char, remote) 308 __field(u32, param_qp) 309 __field(u32, max_len) 310 __field(u16, jkey) 311 __field(u8, max_read) 312 __field(u8, max_write) 313 __field(u8, timeout) 314 __field(u8, urg) 315 ), 316 TP_fast_assign(/* assign */ 317 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 318 __entry->qpn = qp->ibqp.qp_num; 319 __entry->remote = remote; 320 __entry->param_qp = param->qp; 321 __entry->max_len = param->max_len; 322 __entry->jkey = param->jkey; 323 __entry->max_read = param->max_read; 324 __entry->max_write = param->max_write; 325 __entry->timeout = param->timeout; 326 __entry->urg = param->urg; 327 ), 328 TP_printk(/* print */ 329 OPFN_PARAM_PRN, 330 __get_str(dev), 331 __entry->qpn, 332 
__entry->remote ? "remote" : "local", 333 __entry->param_qp, 334 __entry->max_read, 335 __entry->max_write, 336 __entry->max_len, 337 __entry->jkey, 338 __entry->timeout, 339 __entry->urg 340 ) 341 ); 342 343 DEFINE_EVENT(/* event */ 344 hfi1_opfn_param_template, hfi1_opfn_param, 345 TP_PROTO(struct rvt_qp *qp, char remote, 346 struct tid_rdma_params *param), 347 TP_ARGS(qp, remote, param) 348 ); 349 350 DECLARE_EVENT_CLASS(/* msg */ 351 hfi1_msg_template, 352 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 353 TP_ARGS(qp, msg, more), 354 TP_STRUCT__entry(/* entry */ 355 __field(u32, qpn) 356 __string(msg, msg) 357 __field(u64, more) 358 ), 359 TP_fast_assign(/* assign */ 360 __entry->qpn = qp ? qp->ibqp.qp_num : 0; 361 __assign_str(msg, msg); 362 __entry->more = more; 363 ), 364 TP_printk(/* print */ 365 "qpn 0x%x %s 0x%llx", 366 __entry->qpn, 367 __get_str(msg), 368 __entry->more 369 ) 370 ); 371 372 DEFINE_EVENT(/* event */ 373 hfi1_msg_template, hfi1_msg_opfn_conn_request, 374 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 375 TP_ARGS(qp, msg, more) 376 ); 377 378 DEFINE_EVENT(/* event */ 379 hfi1_msg_template, hfi1_msg_opfn_conn_error, 380 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 381 TP_ARGS(qp, msg, more) 382 ); 383 384 DEFINE_EVENT(/* event */ 385 hfi1_msg_template, hfi1_msg_alloc_tids, 386 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 387 TP_ARGS(qp, msg, more) 388 ); 389 390 DEFINE_EVENT(/* event */ 391 hfi1_msg_template, hfi1_msg_tid_restart_req, 392 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 393 TP_ARGS(qp, msg, more) 394 ); 395 396 DEFINE_EVENT(/* event */ 397 hfi1_msg_template, hfi1_msg_handle_kdeth_eflags, 398 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 399 TP_ARGS(qp, msg, more) 400 ); 401 402 DEFINE_EVENT(/* event */ 403 hfi1_msg_template, hfi1_msg_tid_timeout, 404 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 405 TP_ARGS(qp, msg, more) 406 ); 407 408 DEFINE_EVENT(/* event 
*/ 409 hfi1_msg_template, hfi1_msg_tid_retry_timeout, 410 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), 411 TP_ARGS(qp, msg, more) 412 ); 413 414 DECLARE_EVENT_CLASS(/* tid_flow_page */ 415 hfi1_tid_flow_page_template, 416 TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index, 417 char mtu8k, char v1, void *vaddr), 418 TP_ARGS(qp, flow, index, mtu8k, v1, vaddr), 419 TP_STRUCT__entry(/* entry */ 420 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 421 __field(u32, qpn) 422 __field(char, mtu8k) 423 __field(char, v1) 424 __field(u32, index) 425 __field(u64, page) 426 __field(u64, vaddr) 427 ), 428 TP_fast_assign(/* assign */ 429 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 430 __entry->qpn = qp->ibqp.qp_num; 431 __entry->mtu8k = mtu8k; 432 __entry->v1 = v1; 433 __entry->index = index; 434 __entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL; 435 __entry->vaddr = (u64)vaddr; 436 ), 437 TP_printk(/* print */ 438 "[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx", 439 __get_str(dev), 440 __entry->qpn, 441 __entry->index, 442 __entry->page, 443 __entry->mtu8k ? (__entry->v1 ? 
"v1" : "v0") : "vaddr", 444 __entry->vaddr 445 ) 446 ); 447 448 DEFINE_EVENT(/* event */ 449 hfi1_tid_flow_page_template, hfi1_tid_flow_page, 450 TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index, 451 char mtu8k, char v1, void *vaddr), 452 TP_ARGS(qp, flow, index, mtu8k, v1, vaddr) 453 ); 454 455 DECLARE_EVENT_CLASS(/* tid_pageset */ 456 hfi1_tid_pageset_template, 457 TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count), 458 TP_ARGS(qp, index, idx, count), 459 TP_STRUCT__entry(/* entry */ 460 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 461 __field(u32, qpn) 462 __field(u32, index) 463 __field(u16, idx) 464 __field(u16, count) 465 ), 466 TP_fast_assign(/* assign */ 467 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 468 __entry->qpn = qp->ibqp.qp_num; 469 __entry->index = index; 470 __entry->idx = idx; 471 __entry->count = count; 472 ), 473 TP_printk(/* print */ 474 "[%s] qpn 0x%x list[%u]: idx %u count %u", 475 __get_str(dev), 476 __entry->qpn, 477 __entry->index, 478 __entry->idx, 479 __entry->count 480 ) 481 ); 482 483 DEFINE_EVENT(/* event */ 484 hfi1_tid_pageset_template, hfi1_tid_pageset, 485 TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count), 486 TP_ARGS(qp, index, idx, count) 487 ); 488 489 DECLARE_EVENT_CLASS(/* tid_fow */ 490 hfi1_tid_flow_template, 491 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 492 TP_ARGS(qp, index, flow), 493 TP_STRUCT__entry(/* entry */ 494 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 495 __field(u32, qpn) 496 __field(int, index) 497 __field(int, idx) 498 __field(u32, resp_ib_psn) 499 __field(u32, generation) 500 __field(u32, fspsn) 501 __field(u32, flpsn) 502 __field(u32, r_next_psn) 503 __field(u32, ib_spsn) 504 __field(u32, ib_lpsn) 505 __field(u32, npagesets) 506 __field(u32, tnode_cnt) 507 __field(u32, tidcnt) 508 __field(u32, tid_idx) 509 __field(u32, tid_offset) 510 __field(u32, length) 511 __field(u32, sent) 512 ), 513 TP_fast_assign(/* assign */ 514 
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 515 __entry->qpn = qp->ibqp.qp_num; 516 __entry->index = index; 517 __entry->idx = flow->idx; 518 __entry->resp_ib_psn = flow->flow_state.resp_ib_psn; 519 __entry->generation = flow->flow_state.generation; 520 __entry->fspsn = full_flow_psn(flow, 521 flow->flow_state.spsn); 522 __entry->flpsn = full_flow_psn(flow, 523 flow->flow_state.lpsn); 524 __entry->r_next_psn = flow->flow_state.r_next_psn; 525 __entry->ib_spsn = flow->flow_state.ib_spsn; 526 __entry->ib_lpsn = flow->flow_state.ib_lpsn; 527 __entry->npagesets = flow->npagesets; 528 __entry->tnode_cnt = flow->tnode_cnt; 529 __entry->tidcnt = flow->tidcnt; 530 __entry->tid_idx = flow->tid_idx; 531 __entry->tid_offset = flow->tid_offset; 532 __entry->length = flow->length; 533 __entry->sent = flow->sent; 534 ), 535 TP_printk(/* print */ 536 TID_FLOW_PRN, 537 __get_str(dev), 538 __entry->qpn, 539 __entry->index, 540 __entry->idx, 541 __entry->resp_ib_psn, 542 __entry->generation, 543 __entry->fspsn, 544 __entry->flpsn, 545 __entry->r_next_psn, 546 __entry->ib_spsn, 547 __entry->ib_lpsn, 548 __entry->npagesets, 549 __entry->tnode_cnt, 550 __entry->tidcnt, 551 __entry->tid_idx, 552 __entry->tid_offset, 553 __entry->length, 554 __entry->sent 555 ) 556 ); 557 558 DEFINE_EVENT(/* event */ 559 hfi1_tid_flow_template, hfi1_tid_flow_alloc, 560 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 561 TP_ARGS(qp, index, flow) 562 ); 563 564 DEFINE_EVENT(/* event */ 565 hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt, 566 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 567 TP_ARGS(qp, index, flow) 568 ); 569 570 DEFINE_EVENT(/* event */ 571 hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp, 572 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 573 TP_ARGS(qp, index, flow) 574 ); 575 576 DEFINE_EVENT(/* event */ 577 hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req, 578 TP_PROTO(struct rvt_qp *qp, int index, struct 
tid_rdma_flow *flow), 579 TP_ARGS(qp, index, flow) 580 ); 581 582 DEFINE_EVENT(/* event */ 583 hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp, 584 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 585 TP_ARGS(qp, index, flow) 586 ); 587 588 DEFINE_EVENT(/* event */ 589 hfi1_tid_flow_template, hfi1_tid_flow_restart_req, 590 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 591 TP_ARGS(qp, index, flow) 592 ); 593 594 DEFINE_EVENT(/* event */ 595 hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp, 596 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 597 TP_ARGS(qp, index, flow) 598 ); 599 600 DEFINE_EVENT(/* event */ 601 hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp, 602 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 603 TP_ARGS(qp, index, flow) 604 ); 605 606 DEFINE_EVENT(/* event */ 607 hfi1_tid_flow_template, hfi1_tid_flow_build_write_data, 608 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 609 TP_ARGS(qp, index, flow) 610 ); 611 612 DEFINE_EVENT(/* event */ 613 hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack, 614 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 615 TP_ARGS(qp, index, flow) 616 ); 617 618 DEFINE_EVENT(/* event */ 619 hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync, 620 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 621 TP_ARGS(qp, index, flow) 622 ); 623 624 DEFINE_EVENT(/* event */ 625 hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags, 626 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), 627 TP_ARGS(qp, index, flow) 628 ); 629 630 DECLARE_EVENT_CLASS(/* tid_node */ 631 hfi1_tid_node_template, 632 TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, 633 u8 map, u8 used, u8 cnt), 634 TP_ARGS(qp, msg, index, base, map, used, cnt), 635 TP_STRUCT__entry(/* entry */ 636 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 637 __field(u32, qpn) 638 __string(msg, msg) 
639 __field(u32, index) 640 __field(u32, base) 641 __field(u8, map) 642 __field(u8, used) 643 __field(u8, cnt) 644 ), 645 TP_fast_assign(/* assign */ 646 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 647 __entry->qpn = qp->ibqp.qp_num; 648 __assign_str(msg, msg); 649 __entry->index = index; 650 __entry->base = base; 651 __entry->map = map; 652 __entry->used = used; 653 __entry->cnt = cnt; 654 ), 655 TP_printk(/* print */ 656 TID_NODE_PRN, 657 __get_str(dev), 658 __entry->qpn, 659 __get_str(msg), 660 __entry->index, 661 __entry->base, 662 __entry->map, 663 __entry->used, 664 __entry->cnt 665 ) 666 ); 667 668 DEFINE_EVENT(/* event */ 669 hfi1_tid_node_template, hfi1_tid_node_add, 670 TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, 671 u8 map, u8 used, u8 cnt), 672 TP_ARGS(qp, msg, index, base, map, used, cnt) 673 ); 674 675 DECLARE_EVENT_CLASS(/* tid_entry */ 676 hfi1_tid_entry_template, 677 TP_PROTO(struct rvt_qp *qp, int index, u32 ent), 678 TP_ARGS(qp, index, ent), 679 TP_STRUCT__entry(/* entry */ 680 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 681 __field(u32, qpn) 682 __field(int, index) 683 __field(u8, ctrl) 684 __field(u16, idx) 685 __field(u16, len) 686 ), 687 TP_fast_assign(/* assign */ 688 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 689 __entry->qpn = qp->ibqp.qp_num; 690 __entry->index = index; 691 __entry->ctrl = hfi1_trace_get_tid_ctrl(ent); 692 __entry->idx = hfi1_trace_get_tid_idx(ent); 693 __entry->len = hfi1_trace_get_tid_len(ent); 694 ), 695 TP_printk(/* print */ 696 "[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x", 697 __get_str(dev), 698 __entry->qpn, 699 __entry->index, 700 __entry->idx, 701 __entry->len, 702 __entry->ctrl 703 ) 704 ); 705 706 DEFINE_EVENT(/* event */ 707 hfi1_tid_entry_template, hfi1_tid_entry_alloc, 708 TP_PROTO(struct rvt_qp *qp, int index, u32 entry), 709 TP_ARGS(qp, index, entry) 710 ); 711 712 DEFINE_EVENT(/* event */ 713 hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp, 714 
TP_PROTO(struct rvt_qp *qp, int index, u32 ent), 715 TP_ARGS(qp, index, ent) 716 ); 717 718 DEFINE_EVENT(/* event */ 719 hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req, 720 TP_PROTO(struct rvt_qp *qp, int index, u32 ent), 721 TP_ARGS(qp, index, ent) 722 ); 723 724 DEFINE_EVENT(/* event */ 725 hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp, 726 TP_PROTO(struct rvt_qp *qp, int index, u32 entry), 727 TP_ARGS(qp, index, entry) 728 ); 729 730 DEFINE_EVENT(/* event */ 731 hfi1_tid_entry_template, hfi1_tid_entry_build_write_data, 732 TP_PROTO(struct rvt_qp *qp, int index, u32 entry), 733 TP_ARGS(qp, index, entry) 734 ); 735 736 DECLARE_EVENT_CLASS(/* rsp_info */ 737 hfi1_responder_info_template, 738 TP_PROTO(struct rvt_qp *qp, u32 psn), 739 TP_ARGS(qp, psn), 740 TP_STRUCT__entry(/* entry */ 741 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 742 __field(u32, qpn) 743 __field(u8, state) 744 __field(u8, s_state) 745 __field(u32, psn) 746 __field(u32, r_psn) 747 __field(u8, r_state) 748 __field(u8, r_flags) 749 __field(u8, r_head_ack_queue) 750 __field(u8, s_tail_ack_queue) 751 __field(u8, s_acked_ack_queue) 752 __field(u8, s_ack_state) 753 __field(u8, s_nak_state) 754 __field(u8, r_nak_state) 755 __field(u32, s_flags) 756 __field(u32, ps_flags) 757 __field(unsigned long, iow_flags) 758 ), 759 TP_fast_assign(/* assign */ 760 struct hfi1_qp_priv *priv = qp->priv; 761 762 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 763 __entry->qpn = qp->ibqp.qp_num; 764 __entry->state = qp->state; 765 __entry->s_state = qp->s_state; 766 __entry->psn = psn; 767 __entry->r_psn = qp->r_psn; 768 __entry->r_state = qp->r_state; 769 __entry->r_flags = qp->r_flags; 770 __entry->r_head_ack_queue = qp->r_head_ack_queue; 771 __entry->s_tail_ack_queue = qp->s_tail_ack_queue; 772 __entry->s_acked_ack_queue = qp->s_acked_ack_queue; 773 __entry->s_ack_state = qp->s_ack_state; 774 __entry->s_nak_state = qp->s_nak_state; 775 __entry->s_flags = qp->s_flags; 776 __entry->ps_flags = priv->s_flags; 
777 __entry->iow_flags = priv->s_iowait.flags; 778 ), 779 TP_printk(/* print */ 780 RSP_INFO_PRN, 781 __get_str(dev), 782 __entry->qpn, 783 __entry->state, 784 __entry->s_state, 785 __entry->psn, 786 __entry->r_psn, 787 __entry->r_state, 788 __entry->r_flags, 789 __entry->r_head_ack_queue, 790 __entry->s_tail_ack_queue, 791 __entry->s_acked_ack_queue, 792 __entry->s_ack_state, 793 __entry->s_nak_state, 794 __entry->s_flags, 795 __entry->ps_flags, 796 __entry->iow_flags 797 ) 798 ); 799 800 DEFINE_EVENT(/* event */ 801 hfi1_responder_info_template, hfi1_rsp_make_rc_ack, 802 TP_PROTO(struct rvt_qp *qp, u32 psn), 803 TP_ARGS(qp, psn) 804 ); 805 806 DEFINE_EVENT(/* event */ 807 hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req, 808 TP_PROTO(struct rvt_qp *qp, u32 psn), 809 TP_ARGS(qp, psn) 810 ); 811 812 DEFINE_EVENT(/* event */ 813 hfi1_responder_info_template, hfi1_rsp_tid_rcv_error, 814 TP_PROTO(struct rvt_qp *qp, u32 psn), 815 TP_ARGS(qp, psn) 816 ); 817 818 DEFINE_EVENT(/* event */ 819 hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res, 820 TP_PROTO(struct rvt_qp *qp, u32 psn), 821 TP_ARGS(qp, psn) 822 ); 823 824 DEFINE_EVENT(/* event */ 825 hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req, 826 TP_PROTO(struct rvt_qp *qp, u32 psn), 827 TP_ARGS(qp, psn) 828 ); 829 830 DEFINE_EVENT(/* event */ 831 hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp, 832 TP_PROTO(struct rvt_qp *qp, u32 psn), 833 TP_ARGS(qp, psn) 834 ); 835 836 DEFINE_EVENT(/* event */ 837 hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data, 838 TP_PROTO(struct rvt_qp *qp, u32 psn), 839 TP_ARGS(qp, psn) 840 ); 841 842 DEFINE_EVENT(/* event */ 843 hfi1_responder_info_template, hfi1_rsp_make_tid_ack, 844 TP_PROTO(struct rvt_qp *qp, u32 psn), 845 TP_ARGS(qp, psn) 846 ); 847 848 DEFINE_EVENT(/* event */ 849 hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags, 850 TP_PROTO(struct rvt_qp *qp, u32 psn), 851 TP_ARGS(qp, psn) 852 ); 853 854 DECLARE_EVENT_CLASS(/* 
sender_info */ 855 hfi1_sender_info_template, 856 TP_PROTO(struct rvt_qp *qp), 857 TP_ARGS(qp), 858 TP_STRUCT__entry(/* entry */ 859 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 860 __field(u32, qpn) 861 __field(u8, state) 862 __field(u32, s_cur) 863 __field(u32, s_tail) 864 __field(u32, s_head) 865 __field(u32, s_acked) 866 __field(u32, s_last) 867 __field(u32, s_psn) 868 __field(u32, s_last_psn) 869 __field(u32, s_flags) 870 __field(u32, ps_flags) 871 __field(unsigned long, iow_flags) 872 __field(u8, s_state) 873 __field(u8, s_num_rd) 874 __field(u8, s_retry) 875 ), 876 TP_fast_assign(/* assign */ 877 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) 878 __entry->qpn = qp->ibqp.qp_num; 879 __entry->state = qp->state; 880 __entry->s_cur = qp->s_cur; 881 __entry->s_tail = qp->s_tail; 882 __entry->s_head = qp->s_head; 883 __entry->s_acked = qp->s_acked; 884 __entry->s_last = qp->s_last; 885 __entry->s_psn = qp->s_psn; 886 __entry->s_last_psn = qp->s_last_psn; 887 __entry->s_flags = qp->s_flags; 888 __entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags; 889 __entry->iow_flags = 890 ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags; 891 __entry->s_state = qp->s_state; 892 __entry->s_num_rd = qp->s_num_rd_atomic; 893 __entry->s_retry = qp->s_retry; 894 ), 895 TP_printk(/* print */ 896 SENDER_INFO_PRN, 897 __get_str(dev), 898 __entry->qpn, 899 __entry->state, 900 __entry->s_cur, 901 __entry->s_tail, 902 __entry->s_head, 903 __entry->s_acked, 904 __entry->s_last, 905 __entry->s_psn, 906 __entry->s_last_psn, 907 __entry->s_flags, 908 __entry->ps_flags, 909 __entry->iow_flags, 910 __entry->s_state, 911 __entry->s_num_rd, 912 __entry->s_retry 913 ) 914 ); 915 916 DEFINE_EVENT(/* event */ 917 hfi1_sender_info_template, hfi1_sender_make_rc_req, 918 TP_PROTO(struct rvt_qp *qp), 919 TP_ARGS(qp) 920 ); 921 922 DEFINE_EVENT(/* event */ 923 hfi1_sender_info_template, hfi1_sender_reset_psn, 924 TP_PROTO(struct rvt_qp *qp), 925 TP_ARGS(qp) 926 ); 927 928 DEFINE_EVENT(/* event 
*/ 929 hfi1_sender_info_template, hfi1_sender_restart_rc, 930 TP_PROTO(struct rvt_qp *qp), 931 TP_ARGS(qp) 932 ); 933 934 DEFINE_EVENT(/* event */ 935 hfi1_sender_info_template, hfi1_sender_do_rc_ack, 936 TP_PROTO(struct rvt_qp *qp), 937 TP_ARGS(qp) 938 ); 939 940 DEFINE_EVENT(/* event */ 941 hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp, 942 TP_PROTO(struct rvt_qp *qp), 943 TP_ARGS(qp) 944 ); 945 946 DEFINE_EVENT(/* event */ 947 hfi1_sender_info_template, hfi1_sender_rcv_tid_ack, 948 TP_PROTO(struct rvt_qp *qp), 949 TP_ARGS(qp) 950 ); 951 952 DEFINE_EVENT(/* event */ 953 hfi1_sender_info_template, hfi1_sender_make_tid_pkt, 954 TP_PROTO(struct rvt_qp *qp), 955 TP_ARGS(qp) 956 ); 957 958 DECLARE_EVENT_CLASS(/* tid_read_sender */ 959 hfi1_tid_read_sender_template, 960 TP_PROTO(struct rvt_qp *qp, char newreq), 961 TP_ARGS(qp, newreq), 962 TP_STRUCT__entry(/* entry */ 963 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 964 __field(u32, qpn) 965 __field(char, newreq) 966 __field(u32, tid_r_reqs) 967 __field(u32, tid_r_comp) 968 __field(u32, pending_tid_r_segs) 969 __field(u32, s_flags) 970 __field(u32, ps_flags) 971 __field(unsigned long, iow_flags) 972 __field(u8, s_state) 973 __field(u32, hw_flow_index) 974 __field(u32, generation) 975 __field(u32, fpsn) 976 ), 977 TP_fast_assign(/* assign */ 978 struct hfi1_qp_priv *priv = qp->priv; 979 980 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 981 __entry->qpn = qp->ibqp.qp_num; 982 __entry->newreq = newreq; 983 __entry->tid_r_reqs = priv->tid_r_reqs; 984 __entry->tid_r_comp = priv->tid_r_comp; 985 __entry->pending_tid_r_segs = priv->pending_tid_r_segs; 986 __entry->s_flags = qp->s_flags; 987 __entry->ps_flags = priv->s_flags; 988 __entry->iow_flags = priv->s_iowait.flags; 989 __entry->s_state = priv->s_state; 990 __entry->hw_flow_index = priv->flow_state.index; 991 __entry->generation = priv->flow_state.generation; 992 __entry->fpsn = priv->flow_state.psn; 993 ), 994 TP_printk(/* print */ 995 TID_READ_SENDER_PRN, 
		__get_str(dev),
		__entry->qpn,
		__entry->newreq,
		__entry->tid_r_reqs,
		__entry->tid_r_comp,
		__entry->pending_tid_r_segs,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags,
		__entry->s_state,
		__entry->hw_flow_index,
		__entry->generation,
		__entry->fpsn
	)
);

DEFINE_EVENT(/* event */
	hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

/*
 * Event class that snapshots a struct tid_rdma_request together with the
 * opcode and the PSN range (psn..lpsn) of the associated work request.
 * The __field() order fixes the trace record layout and the TP_printk
 * argument list must stay in step with the TID_REQ_PRN format string.
 */
DECLARE_EVENT_CLASS(/* tid_rdma_request */
	hfi1_tid_rdma_request_template,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, newreq)
		__field(u8, opcode)
		__field(u32, psn)
		__field(u32, lpsn)
		__field(u32, cur_seg)
		__field(u32, comp_seg)
		__field(u32, ack_seg)
		__field(u32, alloc_seg)
		__field(u32, total_segs)
		__field(u16, setup_head)
		__field(u16, clear_tail)
		__field(u16, flow_idx)
		__field(u16, acked_tail)
		__field(u32, state)
		__field(u32, r_ack_psn)
		__field(u32, r_flow_psn)
		__field(u32, r_last_acked)
		__field(u32, s_next_psn)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->newreq = newreq;
		__entry->opcode = opcode;
		__entry->psn = psn;
		__entry->lpsn = lpsn;
		/* remaining fields are copied straight from the request state */
		__entry->cur_seg = req->cur_seg;
		__entry->comp_seg = req->comp_seg;
		__entry->ack_seg = req->ack_seg;
		__entry->alloc_seg = req->alloc_seg;
		__entry->total_segs = req->total_segs;
		__entry->setup_head = req->setup_head;
		__entry->clear_tail = req->clear_tail;
		__entry->flow_idx = req->flow_idx;
		__entry->acked_tail = req->acked_tail;
		__entry->state = req->state;
		__entry->r_ack_psn = req->r_ack_psn;
		__entry->r_flow_psn = req->r_flow_psn;
		__entry->r_last_acked = req->r_last_acked;
		__entry->s_next_psn = req->s_next_psn;
	),
	TP_printk(/* print */
		TID_REQ_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->newreq,
		__entry->opcode,
		__entry->psn,
		__entry->lpsn,
		__entry->cur_seg,
		__entry->comp_seg,
		__entry->ack_seg,
		__entry->alloc_seg,
		__entry->total_segs,
		__entry->setup_head,
		__entry->clear_tail,
		__entry->flow_idx,
		__entry->acked_tail,
		__entry->state,
		__entry->r_ack_psn,
		__entry->r_flow_psn,
		__entry->r_last_acked,
		__entry->s_next_psn
	)
);

/*
 * Instances of hfi1_tid_rdma_request_template; they differ only in event
 * name, which identifies the call site in the TID RDMA code.
 */
DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */
	hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data,
	TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
		 struct tid_rdma_request *req),
	TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
);

DEFINE_EVENT(/* event */ 1177 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack, 1178 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1179 struct tid_rdma_request *req), 1180 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1181 ); 1182 1183 DEFINE_EVENT(/* event */ 1184 hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout, 1185 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1186 struct tid_rdma_request *req), 1187 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1188 ); 1189 1190 DEFINE_EVENT(/* event */ 1191 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync, 1192 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1193 struct tid_rdma_request *req), 1194 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1195 ); 1196 1197 DEFINE_EVENT(/* event */ 1198 hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt, 1199 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1200 struct tid_rdma_request *req), 1201 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1202 ); 1203 1204 DEFINE_EVENT(/* event */ 1205 hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack, 1206 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1207 struct tid_rdma_request *req), 1208 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1209 ); 1210 1211 DEFINE_EVENT(/* event */ 1212 hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags, 1213 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1214 struct tid_rdma_request *req), 1215 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1216 ); 1217 1218 DEFINE_EVENT(/* event */ 1219 hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write, 1220 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1221 struct tid_rdma_request *req), 1222 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1223 ); 1224 1225 DEFINE_EVENT(/* event */ 1226 hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write, 1227 
TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, 1228 struct tid_rdma_request *req), 1229 TP_ARGS(qp, newreq, opcode, psn, lpsn, req) 1230 ); 1231 1232 DECLARE_EVENT_CLASS(/* rc_rcv_err */ 1233 hfi1_rc_rcv_err_template, 1234 TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), 1235 TP_ARGS(qp, opcode, psn, diff), 1236 TP_STRUCT__entry(/* entry */ 1237 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 1238 __field(u32, qpn) 1239 __field(u32, s_flags) 1240 __field(u8, state) 1241 __field(u8, s_acked_ack_queue) 1242 __field(u8, s_tail_ack_queue) 1243 __field(u8, r_head_ack_queue) 1244 __field(u32, opcode) 1245 __field(u32, psn) 1246 __field(u32, r_psn) 1247 __field(int, diff) 1248 ), 1249 TP_fast_assign(/* assign */ 1250 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)) 1251 __entry->qpn = qp->ibqp.qp_num; 1252 __entry->s_flags = qp->s_flags; 1253 __entry->state = qp->state; 1254 __entry->s_acked_ack_queue = qp->s_acked_ack_queue; 1255 __entry->s_tail_ack_queue = qp->s_tail_ack_queue; 1256 __entry->r_head_ack_queue = qp->r_head_ack_queue; 1257 __entry->opcode = opcode; 1258 __entry->psn = psn; 1259 __entry->r_psn = qp->r_psn; 1260 __entry->diff = diff; 1261 ), 1262 TP_printk(/* print */ 1263 RCV_ERR_PRN, 1264 __get_str(dev), 1265 __entry->qpn, 1266 __entry->s_flags, 1267 __entry->state, 1268 __entry->s_acked_ack_queue, 1269 __entry->s_tail_ack_queue, 1270 __entry->r_head_ack_queue, 1271 __entry->opcode, 1272 __entry->psn, 1273 __entry->r_psn, 1274 __entry->diff 1275 ) 1276 ); 1277 1278 DEFINE_EVENT(/* event */ 1279 hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err, 1280 TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), 1281 TP_ARGS(qp, opcode, psn, diff) 1282 ); 1283 1284 DECLARE_EVENT_CLASS(/* sge */ 1285 hfi1_sge_template, 1286 TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge), 1287 TP_ARGS(qp, index, sge), 1288 TP_STRUCT__entry(/* entry */ 1289 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 1290 __field(u32, qpn) 1291 
__field(int, index) 1292 __field(u64, vaddr) 1293 __field(u32, sge_length) 1294 ), 1295 TP_fast_assign(/* assign */ 1296 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 1297 __entry->qpn = qp->ibqp.qp_num; 1298 __entry->index = index; 1299 __entry->vaddr = (u64)sge->vaddr; 1300 __entry->sge_length = sge->sge_length; 1301 ), 1302 TP_printk(/* print */ 1303 "[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u", 1304 __get_str(dev), 1305 __entry->qpn, 1306 __entry->index, 1307 __entry->vaddr, 1308 __entry->sge_length 1309 ) 1310 ); 1311 1312 DEFINE_EVENT(/* event */ 1313 hfi1_sge_template, hfi1_sge_check_align, 1314 TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge), 1315 TP_ARGS(qp, index, sge) 1316 ); 1317 1318 DECLARE_EVENT_CLASS(/* tid_write_sp */ 1319 hfi1_tid_write_rsp_template, 1320 TP_PROTO(struct rvt_qp *qp), 1321 TP_ARGS(qp), 1322 TP_STRUCT__entry(/* entry */ 1323 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) 1324 __field(u32, qpn) 1325 __field(u32, r_tid_head) 1326 __field(u32, r_tid_tail) 1327 __field(u32, r_tid_ack) 1328 __field(u32, r_tid_alloc) 1329 __field(u32, alloc_w_segs) 1330 __field(u32, pending_tid_w_segs) 1331 __field(bool, sync_pt) 1332 __field(u32, ps_nak_psn) 1333 __field(u8, ps_nak_state) 1334 __field(u8, prnr_nak_state) 1335 __field(u32, hw_flow_index) 1336 __field(u32, generation) 1337 __field(u32, fpsn) 1338 __field(bool, resync) 1339 __field(u32, r_next_psn_kdeth) 1340 ), 1341 TP_fast_assign(/* assign */ 1342 struct hfi1_qp_priv *priv = qp->priv; 1343 1344 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); 1345 __entry->qpn = qp->ibqp.qp_num; 1346 __entry->r_tid_head = priv->r_tid_head; 1347 __entry->r_tid_tail = priv->r_tid_tail; 1348 __entry->r_tid_ack = priv->r_tid_ack; 1349 __entry->r_tid_alloc = priv->r_tid_alloc; 1350 __entry->alloc_w_segs = priv->alloc_w_segs; 1351 __entry->pending_tid_w_segs = priv->pending_tid_w_segs; 1352 __entry->sync_pt = priv->sync_pt; 1353 __entry->ps_nak_psn = priv->s_nak_psn; 1354 __entry->ps_nak_state = 
priv->s_nak_state; 1355 __entry->prnr_nak_state = priv->rnr_nak_state; 1356 __entry->hw_flow_index = priv->flow_state.index; 1357 __entry->generation = priv->flow_state.generation; 1358 __entry->fpsn = priv->flow_state.psn; 1359 __entry->resync = priv->resync; 1360 __entry->r_next_psn_kdeth = priv->r_next_psn_kdeth; 1361 ), 1362 TP_printk(/* print */ 1363 TID_WRITE_RSPDR_PRN, 1364 __get_str(dev), 1365 __entry->qpn, 1366 __entry->r_tid_head, 1367 __entry->r_tid_tail, 1368 __entry->r_tid_ack, 1369 __entry->r_tid_alloc, 1370 __entry->alloc_w_segs, 1371 __entry->pending_tid_w_segs, 1372 __entry->sync_pt ? "yes" : "no", 1373 __entry->ps_nak_psn, 1374 __entry->ps_nak_state, 1375 __entry->prnr_nak_state, 1376 __entry->hw_flow_index, 1377 __entry->generation, 1378 __entry->fpsn, 1379 __entry->resync ? "yes" : "no", 1380 __entry->r_next_psn_kdeth 1381 ) 1382 ); 1383 1384 DEFINE_EVENT(/* event */ 1385 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res, 1386 TP_PROTO(struct rvt_qp *qp), 1387 TP_ARGS(qp) 1388 ); 1389 1390 DEFINE_EVENT(/* event */ 1391 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req, 1392 TP_PROTO(struct rvt_qp *qp), 1393 TP_ARGS(qp) 1394 ); 1395 1396 DEFINE_EVENT(/* event */ 1397 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp, 1398 TP_PROTO(struct rvt_qp *qp), 1399 TP_ARGS(qp) 1400 ); 1401 1402 DEFINE_EVENT(/* event */ 1403 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data, 1404 TP_PROTO(struct rvt_qp *qp), 1405 TP_ARGS(qp) 1406 ); 1407 1408 DEFINE_EVENT(/* event */ 1409 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync, 1410 TP_PROTO(struct rvt_qp *qp), 1411 TP_ARGS(qp) 1412 ); 1413 1414 DEFINE_EVENT(/* event */ 1415 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack, 1416 TP_PROTO(struct rvt_qp *qp), 1417 TP_ARGS(qp) 1418 ); 1419 1420 DEFINE_EVENT(/* event */ 1421 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags, 1422 TP_PROTO(struct rvt_qp *qp), 1423 TP_ARGS(qp) 1424 ); 1425 1426 
DEFINE_EVENT(/* event */
	hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack,
	TP_PROTO(struct rvt_qp *qp),
	TP_ARGS(qp)
);

/*
 * Event class snapshotting the TID RDMA WRITE sender state: s_tid_* ring
 * indices, pending response count, and the request counters (read with
 * atomic_read()), plus QP/priv flag words and retry count.
 */
DECLARE_EVENT_CLASS(/* tid_write_sender */
	hfi1_tid_write_sender_template,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(char, newreq)
		__field(u32, s_tid_cur)
		__field(u32, s_tid_tail)
		__field(u32, s_tid_head)
		__field(u32, pending_tid_w_resp)
		__field(u32, n_requests)
		__field(u32, n_tid_requests)
		__field(u32, s_flags)
		__field(u32, ps_flags)
		__field(unsigned long, iow_flags)
		__field(u8, s_state)
		__field(u8, s_retry)
	),
	TP_fast_assign(/* assign */
		struct hfi1_qp_priv *priv = qp->priv;

		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->newreq = newreq;
		__entry->s_tid_cur = priv->s_tid_cur;
		__entry->s_tid_tail = priv->s_tid_tail;
		__entry->s_tid_head = priv->s_tid_head;
		__entry->pending_tid_w_resp = priv->pending_tid_w_resp;
		__entry->n_requests = atomic_read(&priv->n_requests);
		__entry->n_tid_requests = atomic_read(&priv->n_tid_requests);
		__entry->s_flags = qp->s_flags;
		__entry->ps_flags = priv->s_flags;
		__entry->iow_flags = priv->s_iowait.flags;
		__entry->s_state = priv->s_state;
		__entry->s_retry = priv->s_retry;
	),
	TP_printk(/* print */
		TID_WRITE_SENDER_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->newreq,
		__entry->s_tid_cur,
		__entry->s_tid_tail,
		__entry->s_tid_head,
		__entry->pending_tid_w_resp,
		__entry->n_requests,
		__entry->n_tid_requests,
		__entry->s_flags,
		__entry->ps_flags,
		__entry->iow_flags,
		__entry->s_state,
		__entry->s_retry
	)
);

/* Per-call-site instances of the TID WRITE sender template. */
DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

DEFINE_EVENT(/* event */
	hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc,
	TP_PROTO(struct rvt_qp *qp, char newreq),
	TP_ARGS(qp, newreq)
);

/*
 * Event class for TID ACK handling: records the AETH, the packet PSN, the
 * request PSN, and the resync PSN as supplied by the caller.
 */
DECLARE_EVENT_CLASS(/* tid_ack */
	hfi1_tid_ack_template,
	TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
		 u32 req_psn, u32 resync_psn),
	TP_ARGS(qp, aeth, psn, req_psn, resync_psn),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, aeth)
		__field(u32, psn)
		__field(u32, req_psn)
		__field(u32, resync_psn)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
		__entry->qpn = qp->ibqp.qp_num;
		__entry->aeth = aeth;
		__entry->psn = psn;
		__entry->req_psn = req_psn;
		__entry->resync_psn = resync_psn;
	),
	TP_printk(/* print */
		"[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x",
		__get_str(dev),
		__entry->qpn,
		__entry->aeth,
		__entry->psn,
		__entry->req_psn,
		__entry->resync_psn
	)
);

DEFINE_EVENT(/* rcv_tid_ack */
	hfi1_tid_ack_template, hfi1_rcv_tid_ack,
	TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
		 u32 req_psn, u32 resync_psn),
	TP_ARGS(qp, aeth, psn, req_psn, resync_psn)
);

/*
 * Event class for KDETH EFLAGS errors: records the receive type, the
 * receive-type error code, and the PSN of the offending packet.
 */
DECLARE_EVENT_CLASS(/* kdeth_eflags_error */
	hfi1_kdeth_eflags_error_template,
	TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
	TP_ARGS(qp, rcv_type, rte, psn),
	TP_STRUCT__entry(/* entry */
		DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
		__field(u32, qpn)
		__field(u8, rcv_type)
		__field(u8, rte)
		__field(u32, psn)
	),
	TP_fast_assign(/* assign */
		DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->rcv_type = rcv_type;
		__entry->rte = rte;
		__entry->psn = psn;
	),
	TP_printk(/* print */
		KDETH_EFLAGS_ERR_PRN,
		__get_str(dev),
		__entry->qpn,
		__entry->rcv_type,
		__entry->rte,
		__entry->psn
	)
);

DEFINE_EVENT(/* event */
	hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write,
	TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
	TP_ARGS(qp, rcv_type, rte, psn)
);

#endif /* __HFI1_TRACE_TID_H */

/* Standard tracepoint trailer: name this header for define_trace.h. */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_tid
#include <trace/define_trace.h>