/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */

/*
 * Copyright (c) 2018 Intel Corporation. All rights reserved.
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM ib_mad

#if !defined(_TRACE_IB_MAD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IB_MAD_H

#include <linux/tracepoint.h>
#include <rdma/ib_mad.h>

#ifdef CONFIG_TRACEPOINTS
struct trace_event_raw_ib_mad_send_template;
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
				 struct ib_mad_qp_info *qp_info,
				 struct trace_event_raw_ib_mad_send_template *entry);
#endif

DECLARE_EVENT_CLASS(ib_mad_send_template,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info),

	TP_STRUCT__entry(
		__field(u8, base_version)
		__field(u8, mgmt_class)
		__field(u8, class_version)
		__field(u8, port_num)
		__field(u32, qp_num)
		__field(u8, method)
		__field(u8, sl)
		__field(u16, attr_id)
		__field(u32, attr_mod)
		__field(u64, wrtid)
		__field(u64, tid)
		__field(u16, status)
		__field(u16, class_specific)
		__field(u32, length)
		__field(u32, dlid)
		__field(u32, rqpn)
		__field(u32, rqkey)
		__field(u32, dev_index)
		__field(void *, agent_priv)
		__field(unsigned long, timeout)
		__field(int, retries_left)
		__field(int, max_retries)
		__field(int, retry)
		__field(u16, pkey)
	),

	TP_fast_assign(
		__entry->dev_index = wr->mad_agent_priv->agent.device->index;
		__entry->port_num = wr->mad_agent_priv->agent.port_num;
		__entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
		__entry->agent_priv = wr->mad_agent_priv;
		__entry->wrtid = wr->tid;
		__entry->max_retries = wr->max_retries;
		__entry->retries_left = wr->retries_left;
		__entry->retry = wr->retry;
		__entry->timeout = wr->timeout;
		__entry->length = wr->send_buf.hdr_len +
				  wr->send_buf.data_len;
		__entry->base_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
		__entry->mgmt_class =
			((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
		__entry->class_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
		__entry->method =
			((struct ib_mad_hdr *)wr->send_buf.mad)->method;
		__entry->status =
			((struct ib_mad_hdr *)wr->send_buf.mad)->status;
		__entry->class_specific =
			((struct ib_mad_hdr *)wr->send_buf.mad)->class_specific;
		__entry->tid = ((struct ib_mad_hdr *)wr->send_buf.mad)->tid;
		__entry->attr_id =
			((struct ib_mad_hdr *)wr->send_buf.mad)->attr_id;
		__entry->attr_mod =
			((struct ib_mad_hdr *)wr->send_buf.mad)->attr_mod;
		create_mad_addr_info(wr, qp_info, __entry);
	),

	TP_printk("%d:%d QP%d agent %p: " \
		  "wrtid 0x%llx; %d/%d retries(%d); timeout %lu length %d : " \
		  "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
		  "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \
		  "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d " \
		  "pkey 0x%x rqpn 0x%x rqkey 0x%x",
		  __entry->dev_index, __entry->port_num, __entry->qp_num,
		  __entry->agent_priv, be64_to_cpu(__entry->wrtid),
		  __entry->retries_left, __entry->max_retries,
		  __entry->retry, __entry->timeout, __entry->length,
		  __entry->base_version, __entry->mgmt_class,
		  __entry->class_version,
		  __entry->method, be16_to_cpu(__entry->status),
		  be16_to_cpu(__entry->class_specific),
		  be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
		  be32_to_cpu(__entry->attr_mod),
		  be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey,
		  __entry->rqpn, __entry->rqkey
	)
);

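/*
 * The DEFINE_EVENT() instances below reuse the ib_mad_send_template layout.
 * Illustrative call site (an assumption for documentation purposes, not part
 * of this header): the MAD send path would typically emit one of these as,
 * for example,
 *
 *	trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
 *
 * where mad_send_wr is the struct ib_mad_send_wr_private being posted and
 * qp_info identifies the MAD QP it is sent on.
 */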
DEFINE_EVENT(ib_mad_send_template, ib_mad_error_handler,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info));
DEFINE_EVENT(ib_mad_send_template, ib_mad_ib_send_mad,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info));
DEFINE_EVENT(ib_mad_send_template, ib_mad_send_done_resend,
	TP_PROTO(struct ib_mad_send_wr_private *wr,
		 struct ib_mad_qp_info *qp_info),
	TP_ARGS(wr, qp_info));

TRACE_EVENT(ib_mad_send_done_handler,
	TP_PROTO(struct ib_mad_send_wr_private *wr, struct ib_wc *wc),
	TP_ARGS(wr, wc),

	TP_STRUCT__entry(
		__field(u8, port_num)
		__field(u8, base_version)
		__field(u8, mgmt_class)
		__field(u8, class_version)
		__field(u32, qp_num)
		__field(u64, wrtid)
		__field(u16, status)
		__field(u16, wc_status)
		__field(u32, length)
		__field(void *, agent_priv)
		__field(unsigned long, timeout)
		__field(u32, dev_index)
		__field(int, retries_left)
		__field(int, max_retries)
		__field(int, retry)
		__field(u8, method)
	),

	TP_fast_assign(
		__entry->dev_index = wr->mad_agent_priv->agent.device->index;
		__entry->port_num = wr->mad_agent_priv->agent.port_num;
		__entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
		__entry->agent_priv = wr->mad_agent_priv;
		__entry->wrtid = wr->tid;
		__entry->max_retries = wr->max_retries;
		__entry->retries_left = wr->retries_left;
		__entry->retry = wr->retry;
		__entry->timeout = wr->timeout;
		__entry->base_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
		__entry->mgmt_class =
			((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
		__entry->class_version =
			((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
		__entry->method =
			((struct ib_mad_hdr *)wr->send_buf.mad)->method;
		__entry->status =
			((struct ib_mad_hdr *)wr->send_buf.mad)->status;
		__entry->wc_status = wc->status;
		__entry->length = wc->byte_len;
	),

	TP_printk("%d:%d QP%d : SEND WC Status %d : agent %p: " \
		  "wrtid 0x%llx %d/%d retries(%d) timeout %lu length %d: " \
		  "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
		  "method 0x%x status 0x%x",
		  __entry->dev_index, __entry->port_num, __entry->qp_num,
		  __entry->wc_status,
		  __entry->agent_priv, be64_to_cpu(__entry->wrtid),
		  __entry->retries_left, __entry->max_retries,
		  __entry->retry, __entry->timeout,
		  __entry->length,
		  __entry->base_version, __entry->mgmt_class,
		  __entry->class_version, __entry->method,
		  be16_to_cpu(__entry->status)
	)
);

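/*
 * Runtime usage sketch (standard tracefs paths, assuming tracefs is mounted
 * at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/ib_mad/ib_mad_send_done_handler/enable
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * Every event in this file is grouped under events/ib_mad/ because of the
 * TRACE_SYSTEM definition at the top of the header.
 */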
TRACE_EVENT(ib_mad_recv_done_handler,
	TP_PROTO(struct ib_mad_qp_info *qp_info, struct ib_wc *wc,
		 struct ib_mad_hdr *mad_hdr),
	TP_ARGS(qp_info, wc, mad_hdr),

	TP_STRUCT__entry(
		__field(u8, base_version)
		__field(u8, mgmt_class)
		__field(u8, class_version)
		__field(u8, port_num)
		__field(u32, qp_num)
		__field(u16, status)
		__field(u16, class_specific)
		__field(u32, length)
		__field(u64, tid)
		__field(u8, method)
		__field(u8, sl)
		__field(u16, attr_id)
		__field(u32, attr_mod)
		__field(u16, src_qp)
		__field(u16, wc_status)
		__field(u32, slid)
		__field(u32, dev_index)
		__field(u16, pkey)
	),

	TP_fast_assign(
		__entry->dev_index = qp_info->port_priv->device->index;
		__entry->port_num = qp_info->port_priv->port_num;
		__entry->qp_num = qp_info->qp->qp_num;
		__entry->length = wc->byte_len;
		__entry->base_version = mad_hdr->base_version;
		__entry->mgmt_class = mad_hdr->mgmt_class;
		__entry->class_version = mad_hdr->class_version;
		__entry->method = mad_hdr->method;
		__entry->status = mad_hdr->status;
		__entry->class_specific = mad_hdr->class_specific;
		__entry->tid = mad_hdr->tid;
		__entry->attr_id = mad_hdr->attr_id;
		__entry->attr_mod = mad_hdr->attr_mod;
		__entry->slid = wc->slid;
		__entry->src_qp = wc->src_qp;
		__entry->sl = wc->sl;
		ib_query_pkey(qp_info->port_priv->device,
			      qp_info->port_priv->port_num,
			      wc->pkey_index, &__entry->pkey);
		__entry->wc_status = wc->status;
	),

	TP_printk("%d:%d QP%d : RECV WC Status %d : length %d : hdr : " \
		  "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \
		  "method 0x%02x status 0x%04x class_specific 0x%04x " \
		  "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \
		  "slid 0x%08x src QP%d, sl %d pkey 0x%04x",
		  __entry->dev_index, __entry->port_num, __entry->qp_num,
		  __entry->wc_status,
		  __entry->length,
		  __entry->base_version, __entry->mgmt_class,
		  __entry->class_version, __entry->method,
		  be16_to_cpu(__entry->status),
		  be16_to_cpu(__entry->class_specific),
		  be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
		  be32_to_cpu(__entry->attr_mod),
		  __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey
	)
);

DECLARE_EVENT_CLASS(ib_mad_agent_template,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent),

	TP_STRUCT__entry(
		__field(u32, dev_index)
		__field(u32, hi_tid)
		__field(u8, port_num)
		__field(u8, mgmt_class)
		__field(u8, mgmt_class_version)
	),

	TP_fast_assign(
		__entry->dev_index = agent->agent.device->index;
		__entry->port_num = agent->agent.port_num;
		__entry->hi_tid = agent->agent.hi_tid;

		if (agent->reg_req) {
			__entry->mgmt_class = agent->reg_req->mgmt_class;
			__entry->mgmt_class_version =
				agent->reg_req->mgmt_class_version;
		} else {
			__entry->mgmt_class = 0;
			__entry->mgmt_class_version = 0;
		}
	),

	TP_printk("%d:%d mad agent : hi_tid 0x%08x class 0x%02x class_ver 0x%02x",
		  __entry->dev_index, __entry->port_num,
		  __entry->hi_tid, __entry->mgmt_class,
		  __entry->mgmt_class_version
	)
);
DEFINE_EVENT(ib_mad_agent_template, ib_mad_recv_done_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
DEFINE_EVENT(ib_mad_agent_template, ib_mad_send_done_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
DEFINE_EVENT(ib_mad_agent_template, ib_mad_create_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));
DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
	TP_PROTO(struct ib_mad_agent_private *agent),
	TP_ARGS(agent));

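/*
 * The two directed-route templates below snapshot SMP routing state: the
 * M_Key, the directed-route SLID/DLID, the hop pointer and hop count, and the
 * initial and return path arrays, for OPA SMPs (struct opa_smp) and IB SMPs
 * (struct ib_smp) respectively.
 */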
DECLARE_EVENT_CLASS(ib_mad_opa_smi_template,
	TP_PROTO(struct opa_smp *smp),
	TP_ARGS(smp),

	TP_STRUCT__entry(
		__field(u64, mkey)
		__field(u32, dr_slid)
		__field(u32, dr_dlid)
		__field(u8, hop_ptr)
		__field(u8, hop_cnt)
		__array(u8, initial_path, OPA_SMP_MAX_PATH_HOPS)
		__array(u8, return_path, OPA_SMP_MAX_PATH_HOPS)
	),

	TP_fast_assign(
		__entry->hop_ptr = smp->hop_ptr;
		__entry->hop_cnt = smp->hop_cnt;
		__entry->mkey = smp->mkey;
		__entry->dr_slid = smp->route.dr.dr_slid;
		__entry->dr_dlid = smp->route.dr.dr_dlid;
		memcpy(__entry->initial_path, smp->route.dr.initial_path,
		       OPA_SMP_MAX_PATH_HOPS);
		memcpy(__entry->return_path, smp->route.dr.return_path,
		       OPA_SMP_MAX_PATH_HOPS);
	),

	TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
		  "mkey 0x%016llx dr_slid 0x%08x dr_dlid 0x%08x " \
		  "initial_path %*ph return_path %*ph ",
		  __entry->hop_ptr, __entry->hop_cnt,
		  be64_to_cpu(__entry->mkey), be32_to_cpu(__entry->dr_slid),
		  be32_to_cpu(__entry->dr_dlid),
		  OPA_SMP_MAX_PATH_HOPS, __entry->initial_path,
		  OPA_SMP_MAX_PATH_HOPS, __entry->return_path
	)
);

DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_opa_smi,
	TP_PROTO(struct opa_smp *smp),
	TP_ARGS(smp));
DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_out_opa_smi,
	TP_PROTO(struct opa_smp *smp),
	TP_ARGS(smp));

DECLARE_EVENT_CLASS(ib_mad_opa_ib_template,
	TP_PROTO(struct ib_smp *smp),
	TP_ARGS(smp),

	TP_STRUCT__entry(
		__field(u64, mkey)
		__field(u32, dr_slid)
		__field(u32, dr_dlid)
		__field(u8, hop_ptr)
		__field(u8, hop_cnt)
		__array(u8, initial_path, IB_SMP_MAX_PATH_HOPS)
		__array(u8, return_path, IB_SMP_MAX_PATH_HOPS)
	),

	TP_fast_assign(
		__entry->hop_ptr = smp->hop_ptr;
		__entry->hop_cnt = smp->hop_cnt;
		__entry->mkey = smp->mkey;
		__entry->dr_slid = smp->dr_slid;
		__entry->dr_dlid = smp->dr_dlid;
		memcpy(__entry->initial_path, smp->initial_path,
		       IB_SMP_MAX_PATH_HOPS);
		memcpy(__entry->return_path, smp->return_path,
		       IB_SMP_MAX_PATH_HOPS);
	),

	TP_printk("IB SMP: hop_ptr %d hop_cnt %d " \
		  "mkey 0x%016llx dr_slid 0x%04x dr_dlid 0x%04x " \
		  "initial_path %*ph return_path %*ph ",
		  __entry->hop_ptr, __entry->hop_cnt,
		  be64_to_cpu(__entry->mkey), be16_to_cpu(__entry->dr_slid),
		  be16_to_cpu(__entry->dr_dlid),
		  IB_SMP_MAX_PATH_HOPS, __entry->initial_path,
		  IB_SMP_MAX_PATH_HOPS, __entry->return_path
	)
);

DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_ib_smi,
	TP_PROTO(struct ib_smp *smp),
	TP_ARGS(smp));
DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_out_ib_smi,
	TP_PROTO(struct ib_smp *smp),
	TP_ARGS(smp));

#endif /* _TRACE_IB_MAD_H */

#include <trace/define_trace.h>