/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

/* Chip IDs enum */
enum chip_ids {
	CHIP_RESERVED,
	CHIP_BB_B0,
	CHIP_K2,
	MAX_CHIP_IDS
};

/* Memory groups enum */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUPS_NUM
};

/* Memory groups names */
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"TM_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CONN_CFC_MEM",
	"TASK_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
};

/* Idle check conditions */
static u32 cond4(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}

static u32 cond6(const u32 *r, const u32 *imm)
{
	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}

static u32 cond5(const u32 *r, const u32 *imm)
{
	return (r[0] & imm[0]) != imm[1];
}

static u32 cond8(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) !=
	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}

static u32 cond9(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}

static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] & ~imm[0]) != imm[1];
}

static u32 cond0(const u32 *r, const u32 *imm)
{
	return r[0] != imm[0];
}

static u32 cond10(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] == imm[0];
}

static u32 cond11(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] > imm[0];
}

static u32 cond3(const u32 *r, const u32 *imm)
{
	return r[0] != r[1];
}

static u32 cond12(const u32 *r, const u32 *imm)
{
	return r[0] & imm[0];
}

static u32 cond7(const u32 *r, const u32 *imm)
{
	return r[0] < (r[1] - imm[0]);
}

static u32 cond2(const u32 *r, const u32 *imm)
{
	return r[0] > imm[0];
}

/* Array of Idle Check conditions */
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
};
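
/* Usage note (illustrative sketch only): each idle-check rule in the binary
 * debug data selects one of the conditions above by index, so a rule with
 * condition ID cond_id is evaluated roughly as
 *
 *	failed = cond_arr[cond_id](reg_values, rule_immediates);
 *
 * where reg_values / rule_immediates are hypothetical names for the register
 * values read for the rule and for its immediate operands. A non-zero return
 * value means the condition (and therefore the idle check) failed and the
 * rule is reported in the dump.
 */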

/******************************* Data Types **********************************/

enum platform_ids {
	PLATFORM_ASIC,
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	MAX_PLATFORM_IDS
};

struct dbg_array {
	const u32 *ptr;
	u32 size_in_dwords;
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;
	struct {
		u8 num_ports;
		u8 num_pfs;
	} per_platform[MAX_PLATFORM_IDS];
};

/* Platform constant definitions */
struct platform_defs {
	const char *name;
	u32 delay_factor;
};

/* Storm constant definitions */
struct storm_defs {
	char letter;
	enum block_id block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_slow_dbg_empty_addr;
	u32 cm_ctx_wr_addr;
	u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
	u32 cm_conn_ag_ctx_rd_addr;
	u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
	u32 cm_conn_st_ctx_rd_addr;
	u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
	u32 cm_task_ag_ctx_rd_addr;
	u32 cm_task_st_ctx_lid_size; /* In quad-regs */
	u32 cm_task_st_ctx_rd_addr;
};

/* Block constant definitions */
struct block_defs {
	const char *name;
	bool has_dbg_bus[MAX_CHIP_IDS];
	bool associated_to_storm;
	u32 storm_id; /* Valid only if associated_to_storm is true */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	u32 dbg_select_addr;
	u32 dbg_cycle_enable_addr;
	u32 dbg_shift_addr;
	u32 dbg_force_valid_addr;
	u32 dbg_force_frame_addr;
	bool has_reset_bit;
	bool unreset; /* If true, the block is taken out of reset before dump */
	enum dbg_reset_regs reset_reg;
	u8 reset_bit_offset; /* Bit offset in reset register */
};

/* Reset register definitions */
struct reset_reg_defs {
	u32 addr;
	u32 unreset_val;
	bool exists[MAX_CHIP_IDS];
};

struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];
	u32 min;
	u32 max;
	bool is_preset;
	u32 exclude_all_preset_val;
	u32 crash_preset_val;
};

struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr; /* In 128b units */
	u32 num_entries[MAX_CHIP_IDS];
	u32 entry_width[MAX_CHIP_IDS]; /* In bits */
};

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 num_of_blocks[MAX_CHIP_IDS];
};

struct phy_defs {
	const char *phy_name;
	u32 base_addr;
	u32 tbus_addr_lo_addr;
	u32 tbus_addr_hi_addr;
	u32 tbus_data_lo_addr;
	u32 tbus_data_hi_addr;
};

/******************************** Constants **********************************/

#define MAX_LCIDS			320
#define MAX_LTIDS			320
#define NUM_IOR_SETS			2
#define IORS_PER_SET			176
#define IOR_SET_OFFSET(set_id)		((set_id) * 256)
#define BYTES_IN_DWORD			sizeof(u32)

/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
			(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
			(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr, (arr)[i]); \
	} while (0)
#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			(arr)[i] = qed_rd(dev, ptt, addr); \
	} while (0)
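
/* Usage sketch (illustrative only): the FIELD_* / SET_VAR_FIELD macros pack a
 * bit-field into a dword array, given <type>_<field>_OFFSET and
 * <type>_<field>_SIZE constants. For example, assuming the VFC CAM command
 * constants defined below, writing the CAM row into a command buffer could
 * look like:
 *
 *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row_id);
 *
 * which clears bits 48..56 of the 64-bit command and then writes row_id there
 * (dword index 1, shift 16). The actual call sites are outside this excerpt.
 */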

#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256
#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
#define NUM_VFC_RAM_TYPES		4
#define VFC_CAM_NUM_ROWS		512
#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0
#define NUM_RSS_MEM_TYPES		5
#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_BLOCK_SIZE_BYTES	128
#define BIG_RAM_BLOCK_SIZE_DWORDS \
	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
#define RESET_REG_UNRESET_OFFSET	4
#define STALL_DELAY_MS			500
#define STATIC_DEBUG_LINE_DWORDS	9
#define NUM_DBG_BUS_LINES		256
#define NUM_COMMON_GLOBAL_PARAMS	8
#define FW_IMG_MAIN			1
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_ELEMENT_DWORDS		2
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_ELEMENT_DWORDS		4
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
#define MCP_TRACE_META_IMAGE_SIGNATURE	0x669955aa
#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"

/***************************** Constant Arrays *******************************/

/* Debug arrays */
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };

/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{ "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
	{ "bb_b0",
	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
	{ "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
};

/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
	  DBG_BUS_CLIENT_RBCT}, true,
	 TSEM_REG_FAST_MEMORY,
	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
	 TCM_REG_CTX_RBC_ACCS,
	 4, TCM_REG_AGG_CON_CTX,
	 16, TCM_REG_SM_CON_CTX,
	 2, TCM_REG_AGG_TASK_CTX,
	 4, TCM_REG_SM_TASK_CTX},
	/* Mstorm */
	{'M', BLOCK_MSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
	  DBG_BUS_CLIENT_RBCM}, false,
	 MSEM_REG_FAST_MEMORY,
	 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
	 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
	 MCM_REG_CTX_RBC_ACCS,
	 1, MCM_REG_AGG_CON_CTX,
	 10, MCM_REG_SM_CON_CTX,
	 2, MCM_REG_AGG_TASK_CTX,
	 7, MCM_REG_SM_TASK_CTX},
	/* Ustorm */
	{'U', BLOCK_USEM,
	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
	  DBG_BUS_CLIENT_RBCU}, false,
	 USEM_REG_FAST_MEMORY,
	 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
	 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
	 UCM_REG_CTX_RBC_ACCS,
	 2, UCM_REG_AGG_CON_CTX,
	 13, UCM_REG_SM_CON_CTX,
	 3, UCM_REG_AGG_TASK_CTX,
	 3, UCM_REG_SM_TASK_CTX},
	/* Xstorm */
	{'X', BLOCK_XSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
	  DBG_BUS_CLIENT_RBCX}, false,
	 XSEM_REG_FAST_MEMORY,
	 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
	 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
	 XCM_REG_CTX_RBC_ACCS,
	 9, XCM_REG_AGG_CON_CTX,
	 15, XCM_REG_SM_CON_CTX,
	 0, 0,
	 0, 0},
	/* Ystorm */
	{'Y', BLOCK_YSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
	  DBG_BUS_CLIENT_RBCY}, false,
	 YSEM_REG_FAST_MEMORY,
	 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
	 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
	 YCM_REG_CTX_RBC_ACCS,
	 2, YCM_REG_AGG_CON_CTX,
	 3, YCM_REG_SM_CON_CTX,
	 2, YCM_REG_AGG_TASK_CTX,
	 12, YCM_REG_SM_TASK_CTX},
	/* Pstorm */
	{'P', BLOCK_PSEM,
	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
	  DBG_BUS_CLIENT_RBCS}, true,
	 PSEM_REG_FAST_MEMORY,
	 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
	 PSEM_REG_SLOW_DBG_MODE,
	 PSEM_REG_DBG_MODE1_CFG,
	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
	 PCM_REG_CTX_RBC_ACCS,
	 0, 0,
	 10, PCM_REG_SM_CON_CTX,
	 0, 0,
	 0, 0}
};

/* Block definitions array */
static struct block_defs block_grc_defs = {
	"grc", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
	GRC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_UA, 1
};

static struct block_defs block_miscs_defs = {
	"miscs", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_misc_defs = {
	"misc", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_dbu_defs = {
	"dbu", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_pglue_b_defs = {
	"pglue_b", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
	PGLUE_B_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
};

static struct block_defs block_cnig_defs = {
	"cnig", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
	CNIG_REG_DBG_FORCE_FRAME_K2,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
};

static struct block_defs block_cpmu_defs = {
	"cpmu", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};

static struct block_defs block_ncsi_defs = {
	"ncsi", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
	NCSI_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
};

static struct block_defs block_opte_defs = {
	"opte", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};

static struct block_defs block_bmb_defs = {
	"bmb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
	BMB_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
};

static struct block_defs block_pcie_defs = {
	"pcie", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
	PCIE_REG_DBG_COMMON_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_mcp_defs = {
	"mcp", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_mcp2_defs = {
	"mcp2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
	MCP2_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_pswhst_defs = {
	"pswhst", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
	PSWHST_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0
};

static struct block_defs block_pswhst2_defs = {
	"pswhst2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
	PSWHST2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 0
};

static struct block_defs block_pswrd_defs = {
	"pswrd", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
	PSWRD_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2
};

static struct block_defs block_pswrd2_defs = {
	"pswrd2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
	PSWRD2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 2
};

static struct block_defs block_pswwr_defs = {
	"pswwr", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
	PSWWR_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3
};

static struct block_defs block_pswwr2_defs = {
	"pswwr2", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISC_PL_HV, 3
};

static struct block_defs block_pswrq_defs = {
	"pswrq", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
	PSWRQ_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1
};

static struct block_defs block_pswrq2_defs = {
	"pswrq2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
	PSWRQ2_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISC_PL_HV, 1
};

static struct block_defs block_pglcs_defs = {
	"pglcs", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
	PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
	PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
	PGLCS_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
};

static struct block_defs block_ptu_defs = {
	"ptu", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
	PTU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
};

static struct block_defs block_dmae_defs = {
	"dmae", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
	DMAE_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
};

static struct block_defs block_tcm_defs = {
	"tcm", {true, true, true}, true, DBG_TSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
	TCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
};

static struct block_defs block_mcm_defs = {
	"mcm", {true, true, true}, true, DBG_MSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
	MCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
};

static struct block_defs block_ucm_defs = {
	"ucm", {true, true, true}, true, DBG_USTORM_ID,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
	UCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
};

static struct block_defs block_xcm_defs = {
	"xcm", {true, true, true}, true, DBG_XSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
	XCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
};

static struct block_defs block_ycm_defs = {
	"ycm", {true, true, true}, true, DBG_YSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
	YCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
};

static struct block_defs block_pcm_defs = {
	"pcm", {true, true, true}, true, DBG_PSTORM_ID,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
	PCM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
};

static struct block_defs block_qm_defs = {
	"qm", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
	QM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
};

static struct block_defs block_tm_defs = {
	"tm", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
	TM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
};

static struct block_defs block_dorq_defs = {
	"dorq", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
	DORQ_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
};

static struct block_defs block_brb_defs = {
	"brb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
	BRB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
};

static struct block_defs block_src_defs = {
	"src", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
	SRC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
};

static struct block_defs block_prs_defs = {
	"prs", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
	PRS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
};

static struct block_defs block_tsdm_defs = {
	"tsdm", {true, true, true}, true, DBG_TSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
	TSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
};

static struct block_defs block_msdm_defs = {
	"msdm", {true, true, true}, true, DBG_MSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
	MSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
};

static struct block_defs block_usdm_defs = {
	"usdm", {true, true, true}, true, DBG_USTORM_ID,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
	USDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
};

static struct block_defs block_xsdm_defs = {
	"xsdm", {true, true, true}, true, DBG_XSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
	XSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
};

static struct block_defs block_ysdm_defs = {
	"ysdm", {true, true, true}, true, DBG_YSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
	YSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
};

static struct block_defs block_psdm_defs = {
	"psdm", {true, true, true}, true, DBG_PSTORM_ID,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
	PSDM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
};

static struct block_defs block_tsem_defs = {
	"tsem", {true, true, true}, true, DBG_TSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
	TSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
};

static struct block_defs block_msem_defs = {
	"msem", {true, true, true}, true, DBG_MSTORM_ID,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
	MSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
};

static struct block_defs block_usem_defs = {
	"usem", {true, true, true}, true, DBG_USTORM_ID,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
	USEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
};

static struct block_defs block_xsem_defs = {
	"xsem", {true, true, true}, true, DBG_XSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
	XSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
};

static struct block_defs block_ysem_defs = {
	"ysem", {true, true, true}, true, DBG_YSTORM_ID,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
	YSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
};

static struct block_defs block_psem_defs = {
	"psem", {true, true, true}, true, DBG_PSTORM_ID,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
	PSEM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
};

static struct block_defs block_rss_defs = {
	"rss", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
	RSS_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
};

static struct block_defs block_tmld_defs = {
	"tmld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
	TMLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
};

static struct block_defs block_muld_defs = {
	"muld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
	MULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
};

static struct block_defs block_yuld_defs = {
	"yuld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
	YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
	YULD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
};

static struct block_defs block_xyld_defs = {
	"xyld", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
	XYLD_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};

static struct block_defs block_prm_defs = {
	"prm", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
	PRM_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
};

static struct block_defs block_pbf_pb1_defs = {
	"pbf_pb1", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
	PBF_PB1_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
	11
};

static struct block_defs block_pbf_pb2_defs = {
	"pbf_pb2", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
	PBF_PB2_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
	12
};

static struct block_defs block_rpb_defs = {
	"rpb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
	RPB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
};

static struct block_defs block_btb_defs = {
	"btb", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
	BTB_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
};

static struct block_defs block_pbf_defs = {
	"pbf", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
	PBF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
};

static struct block_defs block_rdif_defs = {
	"rdif", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
	RDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
};

static struct block_defs block_tdif_defs = {
	"tdif", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
	TDIF_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
};

static struct block_defs block_cdu_defs = {
	"cdu", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
	CDU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
};

static struct block_defs block_ccfc_defs = {
	"ccfc", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
	CCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
};

static struct block_defs block_tcfc_defs = {
	"tcfc", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
	TCFC_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
};

static struct block_defs block_igu_defs = {
	"igu", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
	IGU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
};

static struct block_defs block_cau_defs = {
	"cau", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
	CAU_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};

static struct block_defs block_umac_defs = {
	"umac", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
	UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
	UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
	UMAC_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
};

static struct block_defs block_xmac_defs = {
	"xmac", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_dbg_defs = {
	"dbg", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};

static struct block_defs block_nig_defs = {
	"nig", {true, true, true}, false, 0,
	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
	NIG_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
};

static struct block_defs block_wol_defs = {
	"wol", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
	WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
	WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
	WOL_REG_DBG_FORCE_FRAME,
	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
};

static struct block_defs block_bmbn_defs = {
	"bmbn", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
	BMBN_REG_DBG_SELECT,
	BMBN_REG_DBG_DWORD_ENABLE,
	BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
	BMBN_REG_DBG_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_ipc_defs = {
	"ipc", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};

static struct block_defs block_nwm_defs = {
	"nwm", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
	NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
	NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
	NWM_REG_DBG_FORCE_FRAME,
	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
};

static struct block_defs block_nws_defs = {
	"nws", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};

static struct block_defs block_ms_defs = {
	"ms", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};

static struct block_defs block_phy_pcie_defs = {
	"phy_pcie", {false, false, true}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
	PCIE_REG_DBG_COMMON_FORCE_FRAME,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_led_defs = {
	"led", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	true, true, DBG_RESET_REG_MISCS_PL_HV, 14
};

static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs block_bar0_map_defs = {
	"bar0_map", {false, false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
	0, 0, 0, 0, 0,
	false, false, MAX_DBG_RESET_REGS, 0
};

static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
	&block_grc_defs,
	&block_miscs_defs,
	&block_misc_defs,
	&block_dbu_defs,
	&block_pglue_b_defs,
	&block_cnig_defs,
	&block_cpmu_defs,
	&block_ncsi_defs,
	&block_opte_defs,
	&block_bmb_defs,
	&block_pcie_defs,
	&block_mcp_defs,
	&block_mcp2_defs,
	&block_pswhst_defs,
	&block_pswhst2_defs,
	&block_pswrd_defs,
	&block_pswrd2_defs,
	&block_pswwr_defs,
	&block_pswwr2_defs,
	&block_pswrq_defs,
	&block_pswrq2_defs,
	&block_pglcs_defs,
	&block_dmae_defs,
	&block_ptu_defs,
	&block_tcm_defs,
	&block_mcm_defs,
	&block_ucm_defs,
	&block_xcm_defs,
	&block_ycm_defs,
	&block_pcm_defs,
	&block_qm_defs,
	&block_tm_defs,
	&block_dorq_defs,
	&block_brb_defs,
	&block_src_defs,
	&block_prs_defs,
	&block_tsdm_defs,
	&block_msdm_defs,
	&block_usdm_defs,
	&block_xsdm_defs,
	&block_ysdm_defs,
	&block_psdm_defs,
	&block_tsem_defs,
	&block_msem_defs,
	&block_usem_defs,
	&block_xsem_defs,
	&block_ysem_defs,
	&block_psem_defs,
	&block_rss_defs,
	&block_tmld_defs,
	&block_muld_defs,
	&block_yuld_defs,
	&block_xyld_defs,
	&block_prm_defs,
	&block_pbf_pb1_defs,
	&block_pbf_pb2_defs,
	&block_rpb_defs,
	&block_btb_defs,
	&block_pbf_defs,
	&block_rdif_defs,
	&block_tdif_defs,
	&block_cdu_defs,
	&block_ccfc_defs,
	&block_tcfc_defs,
	&block_igu_defs,
	&block_cau_defs,
	&block_umac_defs,
	&block_xmac_defs,
	&block_dbg_defs,
	&block_nig_defs,
	&block_wol_defs,
	&block_bmbn_defs,
	&block_ipc_defs,
	&block_nwm_defs,
	&block_nws_defs,
	&block_ms_defs,
	&block_phy_pcie_defs,
	&block_led_defs,
	&block_misc_aeu_defs,
	&block_bar0_map_defs,
};

static struct platform_defs s_platform_defs[] = {
	{"asic", 1},
	{"reserved", 0},
	{"reserved2", 0},
	{"reserved3", 0}
};

static struct grc_param_defs s_grc_param_defs[] = {
	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{0, 0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
	 MAX_LCIDS},			/* DBG_GRC_PARAM_NUM_LCIDS */
	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
	 MAX_LTIDS},			/* DBG_GRC_PARAM_NUM_LTIDS */
	{{0, 0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
	{{0, 0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1, 1}, 0, 1, false, 0, 1}	/* DBG_GRC_PARAM_DUMP_PHY */
};
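
/* Each row above corresponds to one enum dbg_grc_params value, in the order
 * noted in the trailing comments, and follows the struct grc_param_defs
 * layout: {default_val per chip}, min, max, is_preset,
 * exclude_all_preset_val, crash_preset_val.
 */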

static struct rss_mem_defs s_rss_mem_defs[] = {
	{ "rss_mem_cid", "rss_cid", 0,
	  {256, 256, 320},
	  {32, 32, 32} },
	{ "rss_mem_key_msb", "rss_key", 1024,
	  {128, 128, 208},
	  {256, 256, 256} },
	{ "rss_mem_key_lsb", "rss_key", 2048,
	  {128, 128, 208},
	  {64, 64, 64} },
	{ "rss_mem_info", "rss_info", 3072,
	  {128, 128, 208},
	  {16, 16, 16} },
	{ "rss_mem_ind", "rss_ind", 4096,
	  {(128 * 128), (128 * 128), (128 * 208)},
	  {16, 16, 16} }
};

static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};

static struct big_ram_defs s_big_ram_defs[] = {
	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	  {4800, 4800, 5632} },
	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	  {2880, 2880, 3680} },
	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	  {1152, 1152, 1152} }
};

static struct reset_reg_defs s_reset_regs_defs[] = {
	{ MISCS_REG_RESET_PL_UA, 0x0,
	  {true, true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
	{ MISCS_REG_RESET_PL_HV, 0x0,
	  {true, true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
	{ MISCS_REG_RESET_PL_HV_2, 0x0,
	  {false, false, true} },	/* DBG_RESET_REG_MISCS_PL_HV_2 */
	{ MISC_REG_RESET_PL_UA, 0x0,
	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
	{ MISC_REG_RESET_PL_HV, 0x0,
	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
};

static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
	{"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
	{"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
	{"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
};

/**************************** Private Functions ******************************/

/* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
	u32 dword;

	memcpy((u8 *)&dword, buf, sizeof(dword));
	return dword;
}

/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	if (QED_IS_K2(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_BB_B0;
		dev_data->mode_enable[MODE_BB_B0] = 1;
	} else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

	dev_data->platform_id = PLATFORM_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;
	dev_data->initialized = true;
	return DBG_STATUS_OK;
}

/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 */
static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     u8 storm_id, struct fw_info *fw_info)
{
	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
		   SEM_FAST_REG_INT_RAM +
		   DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
		   sizeof(struct fw_info_location);
	struct fw_info_location fw_info_location;
	u32 *dest = (u32 *)&fw_info_location;
	u32 i;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));
	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
	     i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
	if (fw_info_location.size > 0 && fw_info_location.size <=
	    sizeof(*fw_info)) {
		/* Read FW version info from Storm RAM */
		addr = fw_info_location.grc_addr;
		dest = (u32 *)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
		     i++, addr += BYTES_IN_DWORD)
			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
	}
}

/* Dumps the specified string to the specified buffer. Returns the dumped size
 * in bytes (actual length + 1 for the null character termination).
 */
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
	if (dump)
		strcpy(dump_buf, str);
	return (u32)strlen(str) + 1;
}

/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
 * in bytes.
 */
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
	u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;

	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;

	if (dump && align_size)
		memset(dump_buf, 0, align_size);
	return align_size;
}

/* Writes the specified string param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_str_param(u32 *dump_buf,
			      bool dump,
			      const char *param_name, const char *param_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;

	/* Dump param name */
	offset += qed_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a string param value */
	if (dump)
		*(char_buf + offset) = 1;
	offset++;

	/* Dump param value */
	offset += qed_dump_str(char_buf + offset, dump, param_val);

	/* Align buffer to next dword */
	offset += qed_dump_align(char_buf + offset, dump, offset);
	return BYTES_TO_DWORDS(offset);
}

/* Writes the specified numeric param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_num_param(u32 *dump_buf,
			      bool dump, const char *param_name, u32 param_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;

	/* Dump param name */
	offset += qed_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a numeric param value */
	if (dump)
		*(char_buf + offset) = 0;
	offset++;

	/* Align buffer to next dword */
	offset += qed_dump_align(char_buf + offset, dump, offset);

	/* Dump param value (and change offset from bytes to dwords) */
	offset = BYTES_TO_DWORDS(offset);
	if (dump)
		*(dump_buf + offset) = param_val;
	offset++;
	return offset;
}
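
/* Layout note (illustrative): qed_dump_str_param() and qed_dump_num_param()
 * emit each param as a null-terminated name string, a single type byte
 * (1 = string, 0 = numeric) and then the value. String values are emitted as
 * a second null-terminated string; numeric values as one dword. In both cases
 * the byte stream is zero-padded to the next dword boundary (before the value
 * dword in the numeric case). For example, the numeric param "pci-func" = 2
 * occupies 9 name/terminator bytes + 1 type byte + 2 padding bytes + 1 value
 * dword, i.e. 4 dwords in total.
 */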

/* Reads the FW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };
	int printed_chars;
	u32 offset = 0;

	if (dump) {
		/* Read FW image/version from PRAM in a non-reset SEMI */
		bool found = false;
		u8 storm_id;

		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
		     storm_id++) {
			/* Read FW version/image */
			if (!dev_data->block_in_reset
			    [s_storm_defs[storm_id].block_id]) {
				/* read FW info for the current Storm */
				qed_read_fw_info(p_hwfn,
						 p_ptt, storm_id, &fw_info);

				/* Create FW version/image strings */
				printed_chars =
				    snprintf(fw_ver_str,
					     sizeof(fw_ver_str),
					     "%d_%d_%d_%d",
					     fw_info.ver.num.major,
					     fw_info.ver.num.minor,
					     fw_info.ver.num.rev,
					     fw_info.ver.num.eng);
				if (printed_chars < 0 || printed_chars >=
				    sizeof(fw_ver_str))
					DP_NOTICE(p_hwfn,
						  "Unexpected debug error: invalid FW version string\n");
				switch (fw_info.ver.image_id) {
				case FW_IMG_MAIN:
					strcpy(fw_img_str, "main");
					break;
				default:
					strcpy(fw_img_str, "unknown");
					break;
				}

				found = true;
			}
		}
	}

	/* Dump FW version, image and timestamp */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "fw-timestamp", fw_info.ver.timestamp);
	return offset;
}

/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;
		int printed_chars;

		/* Find MCP public data GRC address.
		 * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = qed_rd(p_hwfn, p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = qed_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		global_section_addr = MCP_REG_SCRATCH +
				      (global_section_offsize &
				       OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = qed_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Dump MFW version param */
		printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
					 "%d_%d_%d_%d",
					 (u8) (mfw_ver >> 24),
					 (u8) (mfw_ver >> 16),
					 (u8) (mfw_ver >> 8),
					 (u8) mfw_ver);
		if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid MFW version string\n");
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}

/* Writes a section header to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}

/* Writes the common global params to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump,
					 u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;

	/* Find platform string and dump global params section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump,
				       "global_params",
				       NUM_COMMON_GLOBAL_PARAMS +
				       num_specific_global_params);

	/* Store params */
	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += qed_dump_mfw_ver_param(p_hwfn,
					 p_ptt, dump_buf + offset, dump);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "tools-version", TOOLS_VERSION);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "chip",
				     s_chip_defs[dev_data->chip_id].name);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "platform",
				     s_platform_defs[dev_data->platform_id].
				     name);
	offset +=
	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
			       p_hwfn->abs_pf_id);
	return offset;
}
1719 */ 1720 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) 1721 { 1722 u32 start_offset = offset, crc = ~0; 1723 1724 /* Dump CRC section header */ 1725 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); 1726 1727 /* Calculate CRC32 and add it to the dword following the "last" section. 1728 */ 1729 if (dump) 1730 *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf, 1731 DWORDS_TO_BYTES(offset)); 1732 offset++; 1733 return offset - start_offset; 1734 } 1735 1736 /* Update blocks reset state */ 1737 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn, 1738 struct qed_ptt *p_ptt) 1739 { 1740 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 1741 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 }; 1742 u32 i; 1743 1744 /* Read reset registers */ 1745 for (i = 0; i < MAX_DBG_RESET_REGS; i++) 1746 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) 1747 reg_val[i] = qed_rd(p_hwfn, 1748 p_ptt, s_reset_regs_defs[i].addr); 1749 1750 /* Check if blocks are in reset */ 1751 for (i = 0; i < MAX_BLOCK_ID; i++) 1752 dev_data->block_in_reset[i] = 1753 s_block_defs[i]->has_reset_bit && 1754 !(reg_val[s_block_defs[i]->reset_reg] & 1755 BIT(s_block_defs[i]->reset_bit_offset)); 1756 } 1757 1758 /* Enable / disable the Debug block */ 1759 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn, 1760 struct qed_ptt *p_ptt, bool enable) 1761 { 1762 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0); 1763 } 1764 1765 /* Resets the Debug block */ 1766 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn, 1767 struct qed_ptt *p_ptt) 1768 { 1769 u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val; 1770 1771 dbg_reset_reg_addr = 1772 s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr; 1773 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr); 1774 new_reset_reg_val = old_reset_reg_val & 1775 ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset); 1776 1777 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val); 1778 qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val); 1779 } 1780 1781 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn, 1782 struct qed_ptt *p_ptt, 1783 enum dbg_bus_frame_modes mode) 1784 { 1785 qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode); 1786 } 1787 1788 /* Enable / disable Debug Bus clients according to the specified mask. 1789 * (1 = enable, 0 = disable) 1790 */ 1791 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, 1792 struct qed_ptt *p_ptt, u32 client_mask) 1793 { 1794 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask); 1795 } 1796 1797 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) 1798 { 1799 const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; 1800 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 1801 u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++]; 1802 bool arg1, arg2; 1803 1804 switch (tree_val) { 1805 case INIT_MODE_OP_NOT: 1806 return !qed_is_mode_match(p_hwfn, modes_buf_offset); 1807 case INIT_MODE_OP_OR: 1808 case INIT_MODE_OP_AND: 1809 arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset); 1810 arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset); 1811 return (tree_val == INIT_MODE_OP_OR) ? 
(arg1 || 1812 arg2) : (arg1 && arg2);
1813 default:
1814 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1815 }
1816 }
1817
1818 /* Returns the value of the specified GRC param */
1819 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1820 enum dbg_grc_params grc_param)
1821 {
1822 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1823
1824 return dev_data->grc.param_val[grc_param];
1825 }
1826
1827 /* Clear all GRC params */
1828 static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
1829 {
1830 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1831 u32 i;
1832
1833 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
1834 dev_data->grc.param_set_by_user[i] = 0;
1835 }
1836
1837 /* Assign default GRC param values */
1838 static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
1839 {
1840 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1841 u32 i;
1842
1843 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
1844 if (!dev_data->grc.param_set_by_user[i])
1845 dev_data->grc.param_val[i] =
1846 s_grc_param_defs[i].default_val[dev_data->chip_id];
1847 }
1848
1849 /* Returns true if the specified entity (indicated by GRC param) should be
1850 * included in the dump, false otherwise.
1851 */
1852 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1853 enum dbg_grc_params grc_param)
1854 {
1855 return qed_grc_get_param(p_hwfn, grc_param) > 0;
1856 }
1857
1858 /* Returns true if the specified Storm should be included in the dump, false
1859 * otherwise.
1860 */
1861 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1862 enum dbg_storms storm)
1863 {
1864 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1865 }
1866
1867 /* Returns true if the specified memory should be included in the dump, false
1868 * otherwise.
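 *
 * Memory groups are matched against their controlling GRC params: e.g.
 * MEM_GROUP_RAM is dumped only if DBG_GRC_PARAM_DUMP_RAM is set, and the
 * Big RAM groups follow s_big_ram_defs[].grc_param. Groups with no explicit
 * match (the final "return true" below) are always included.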
1869 */ 1870 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, 1871 enum block_id block_id, u8 mem_group_id) 1872 { 1873 u8 i; 1874 1875 /* Check Storm match */ 1876 if (s_block_defs[block_id]->associated_to_storm && 1877 !qed_grc_is_storm_included(p_hwfn, 1878 (enum dbg_storms)s_block_defs[block_id]->storm_id)) 1879 return false; 1880 1881 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) 1882 if (mem_group_id == s_big_ram_defs[i].mem_group_id || 1883 mem_group_id == s_big_ram_defs[i].ram_mem_group_id) 1884 return qed_grc_is_included(p_hwfn, 1885 s_big_ram_defs[i].grc_param); 1886 if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id == 1887 MEM_GROUP_PXP_MEM) 1888 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP); 1889 if (mem_group_id == MEM_GROUP_RAM) 1890 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM); 1891 if (mem_group_id == MEM_GROUP_PBUF) 1892 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF); 1893 if (mem_group_id == MEM_GROUP_CAU_MEM || 1894 mem_group_id == MEM_GROUP_CAU_SB || 1895 mem_group_id == MEM_GROUP_CAU_PI) 1896 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU); 1897 if (mem_group_id == MEM_GROUP_QM_MEM) 1898 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM); 1899 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM || 1900 mem_group_id == MEM_GROUP_TASK_CFC_MEM) 1901 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC); 1902 if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id == 1903 MEM_GROUP_IGU_MSIX) 1904 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); 1905 if (mem_group_id == MEM_GROUP_MULD_MEM) 1906 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD); 1907 if (mem_group_id == MEM_GROUP_PRS_MEM) 1908 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS); 1909 if (mem_group_id == MEM_GROUP_DMAE_MEM) 1910 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE); 1911 if (mem_group_id == MEM_GROUP_TM_MEM) 1912 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM); 1913 if (mem_group_id == MEM_GROUP_SDM_MEM) 1914 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM); 1915 if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id == 1916 MEM_GROUP_RDIF_CTX) 1917 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF); 1918 if (mem_group_id == MEM_GROUP_CM_MEM) 1919 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM); 1920 if (mem_group_id == MEM_GROUP_IOR) 1921 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR); 1922 1923 return true; 1924 } 1925 1926 /* Stalls all Storms */ 1927 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, 1928 struct qed_ptt *p_ptt, bool stall) 1929 { 1930 u8 reg_val = stall ? 
1 : 0; 1931 u8 storm_id; 1932 1933 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 1934 if (qed_grc_is_storm_included(p_hwfn, 1935 (enum dbg_storms)storm_id)) { 1936 u32 reg_addr = 1937 s_storm_defs[storm_id].sem_fast_mem_addr + 1938 SEM_FAST_REG_STALL_0; 1939 1940 qed_wr(p_hwfn, p_ptt, reg_addr, reg_val); 1941 } 1942 } 1943 1944 msleep(STALL_DELAY_MS); 1945 } 1946 1947 /* Takes all blocks out of reset */ 1948 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, 1949 struct qed_ptt *p_ptt) 1950 { 1951 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 1952 u32 reg_val[MAX_DBG_RESET_REGS] = { 0 }; 1953 u32 i; 1954 1955 /* Fill reset regs values */ 1956 for (i = 0; i < MAX_BLOCK_ID; i++) 1957 if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset) 1958 reg_val[s_block_defs[i]->reset_reg] |= 1959 BIT(s_block_defs[i]->reset_bit_offset); 1960 1961 /* Write reset registers */ 1962 for (i = 0; i < MAX_DBG_RESET_REGS; i++) { 1963 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { 1964 reg_val[i] |= s_reset_regs_defs[i].unreset_val; 1965 if (reg_val[i]) 1966 qed_wr(p_hwfn, 1967 p_ptt, 1968 s_reset_regs_defs[i].addr + 1969 RESET_REG_UNRESET_OFFSET, reg_val[i]); 1970 } 1971 } 1972 } 1973 1974 /* Returns the attention name offsets of the specified block */ 1975 static const struct dbg_attn_block_type_data * 1976 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type) 1977 { 1978 const struct dbg_attn_block *base_attn_block_arr = 1979 (const struct dbg_attn_block *) 1980 s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr; 1981 1982 return &base_attn_block_arr[block_id].per_type_data[attn_type]; 1983 } 1984 1985 /* Returns the attention registers of the specified block */ 1986 static const struct dbg_attn_reg * 1987 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type, 1988 u8 *num_attn_regs) 1989 { 1990 const struct dbg_attn_block_type_data *block_type_data = 1991 qed_get_block_attn_data(block_id, attn_type); 1992 1993 *num_attn_regs = block_type_data->num_regs; 1994 return &((const struct dbg_attn_reg *) 1995 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data-> 1996 regs_offset]; 1997 } 1998 1999 /* For each block, clear the status of all parities */ 2000 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, 2001 struct qed_ptt *p_ptt) 2002 { 2003 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2004 u8 reg_idx, num_attn_regs; 2005 u32 block_id; 2006 2007 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { 2008 const struct dbg_attn_reg *attn_reg_arr; 2009 2010 if (dev_data->block_in_reset[block_id]) 2011 continue; 2012 2013 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, 2014 ATTN_TYPE_PARITY, 2015 &num_attn_regs); 2016 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { 2017 const struct dbg_attn_reg *reg_data = 2018 &attn_reg_arr[reg_idx]; 2019 2020 /* Check mode */ 2021 bool eval_mode = GET_FIELD(reg_data->mode.data, 2022 DBG_MODE_HDR_EVAL_MODE) > 0; 2023 u16 modes_buf_offset = 2024 GET_FIELD(reg_data->mode.data, 2025 DBG_MODE_HDR_MODES_BUF_OFFSET); 2026 2027 if (!eval_mode || 2028 qed_is_mode_match(p_hwfn, &modes_buf_offset)) 2029 /* Mode match - read parity status read-clear 2030 * register. 2031 */ 2032 qed_rd(p_hwfn, p_ptt, 2033 DWORDS_TO_BYTES(reg_data-> 2034 sts_clr_address)); 2035 } 2036 } 2037 } 2038 2039 /* Dumps GRC registers section header. Returns the dumped size in dwords. 
2040 * The following parameters are dumped:
2041 * - 'count' = num_dumped_entries
2042 * - 'split' = split_type
2043 * - 'id' = split_id (dumped only if split_id >= 0)
2044 * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
2045 * param_val != NULL)
2046 */
2047 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2048 bool dump,
2049 u32 num_reg_entries,
2050 const char *split_type,
2051 int split_id,
2052 const char *param_name, const char *param_val)
2053 {
2054 u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2055 u32 offset = 0;
2056
2057 offset += qed_dump_section_hdr(dump_buf + offset,
2058 dump, "grc_regs", num_params);
2059 offset += qed_dump_num_param(dump_buf + offset,
2060 dump, "count", num_reg_entries);
2061 offset += qed_dump_str_param(dump_buf + offset,
2062 dump, "split", split_type);
2063 if (split_id >= 0)
2064 offset += qed_dump_num_param(dump_buf + offset,
2065 dump, "id", split_id);
2066 if (param_name && param_val)
2067 offset += qed_dump_str_param(dump_buf + offset,
2068 dump, param_name, param_val);
2069 return offset;
2070 }
2071
2072 /* Dumps GRC register/memory. Returns the dumped size in dwords. */
2073 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2074 struct qed_ptt *p_ptt, u32 *dump_buf,
2075 bool dump, u32 addr, u32 len)
2076 {
2077 u32 offset = 0, i;
2078
2079 if (dump) {
2080 *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
2081 for (i = 0; i < len; i++, addr++, offset++)
2082 *(dump_buf + offset) = qed_rd(p_hwfn,
2083 p_ptt,
2084 DWORDS_TO_BYTES(addr));
2085 } else {
2086 offset += len + 1;
2087 }
2088
2089 return offset;
2090 }
2091
2092 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2093 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2094 struct qed_ptt *p_ptt,
2095 struct dbg_array input_regs_arr,
2096 u32 *dump_buf,
2097 bool dump,
2098 bool block_enable[MAX_BLOCK_ID],
2099 u32 *num_dumped_reg_entries)
2100 {
2101 u32 i, offset = 0, input_offset = 0;
2102 bool mode_match = true;
2103
2104 *num_dumped_reg_entries = 0;
2105 while (input_offset < input_regs_arr.size_in_dwords) {
2106 const struct dbg_dump_cond_hdr *cond_hdr =
2107 (const struct dbg_dump_cond_hdr *)
2108 &input_regs_arr.ptr[input_offset++];
2109 bool eval_mode = GET_FIELD(cond_hdr->mode.data,
2110 DBG_MODE_HDR_EVAL_MODE) > 0;
2111
2112 /* Check mode/block */
2113 if (eval_mode) {
2114 u16 modes_buf_offset =
2115 GET_FIELD(cond_hdr->mode.data,
2116 DBG_MODE_HDR_MODES_BUF_OFFSET);
2117 mode_match = qed_is_mode_match(p_hwfn,
2118 &modes_buf_offset);
2119 }
2120
2121 if (mode_match && block_enable[cond_hdr->block_id]) {
2122 for (i = 0; i < cond_hdr->data_size;
2123 i++, input_offset++) {
2124 const struct dbg_dump_reg *reg =
2125 (const struct dbg_dump_reg *)
2126 &input_regs_arr.ptr[input_offset];
2127
2128 offset +=
2129 qed_grc_dump_reg_entry(p_hwfn, p_ptt,
2130 dump_buf + offset, dump,
2131 GET_FIELD(reg->data,
2132 DBG_DUMP_REG_ADDRESS),
2133 GET_FIELD(reg->data,
2134 DBG_DUMP_REG_LENGTH));
2135 (*num_dumped_reg_entries)++;
2136 }
2137 } else {
2138 input_offset += cond_hdr->data_size;
2139 }
2140 }
2141
2142 return offset;
2143 }
2144
2145 /* Dumps GRC registers entries. Returns the dumped size in dwords.
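 *
 * The registers header is first sized with dump set to false, the register
 * entries are then written after it, and the header is finally re-written
 * with the actual number of dumped entries. If no entry matched the mode and
 * block filters, the whole split section is dropped by returning 0.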
*/ 2146 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, 2147 struct qed_ptt *p_ptt, 2148 struct dbg_array input_regs_arr, 2149 u32 *dump_buf, 2150 bool dump, 2151 bool block_enable[MAX_BLOCK_ID], 2152 const char *split_type_name, 2153 u32 split_id, 2154 const char *param_name, 2155 const char *param_val) 2156 { 2157 u32 num_dumped_reg_entries, offset; 2158 2159 /* Calculate register dump header size (and skip it for now) */ 2160 offset = qed_grc_dump_regs_hdr(dump_buf, 2161 false, 2162 0, 2163 split_type_name, 2164 split_id, param_name, param_val); 2165 2166 /* Dump registers */ 2167 offset += qed_grc_dump_regs_entries(p_hwfn, 2168 p_ptt, 2169 input_regs_arr, 2170 dump_buf + offset, 2171 dump, 2172 block_enable, 2173 &num_dumped_reg_entries); 2174 2175 /* Write register dump header */ 2176 if (dump && num_dumped_reg_entries > 0) 2177 qed_grc_dump_regs_hdr(dump_buf, 2178 dump, 2179 num_dumped_reg_entries, 2180 split_type_name, 2181 split_id, param_name, param_val); 2182 2183 return num_dumped_reg_entries > 0 ? offset : 0; 2184 } 2185 2186 /* Dumps registers according to the input registers array. 2187 * Returns the dumped size in dwords. 2188 */ 2189 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, 2190 struct qed_ptt *p_ptt, 2191 u32 *dump_buf, 2192 bool dump, 2193 bool block_enable[MAX_BLOCK_ID], 2194 const char *param_name, const char *param_val) 2195 { 2196 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2197 u32 offset = 0, input_offset = 0; 2198 u8 port_id, pf_id; 2199 2200 if (dump) 2201 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); 2202 while (input_offset < 2203 s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { 2204 const struct dbg_dump_split_hdr *split_hdr = 2205 (const struct dbg_dump_split_hdr *) 2206 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; 2207 u8 split_type_id = GET_FIELD(split_hdr->hdr, 2208 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); 2209 u32 split_data_size = GET_FIELD(split_hdr->hdr, 2210 DBG_DUMP_SPLIT_HDR_DATA_SIZE); 2211 struct dbg_array curr_input_regs_arr = { 2212 &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset], 2213 split_data_size}; 2214 2215 switch (split_type_id) { 2216 case SPLIT_TYPE_NONE: 2217 case SPLIT_TYPE_VF: 2218 offset += qed_grc_dump_split_data(p_hwfn, 2219 p_ptt, 2220 curr_input_regs_arr, 2221 dump_buf + offset, 2222 dump, 2223 block_enable, 2224 "eng", 2225 (u32)(-1), 2226 param_name, 2227 param_val); 2228 break; 2229 case SPLIT_TYPE_PORT: 2230 for (port_id = 0; 2231 port_id < 2232 s_chip_defs[dev_data->chip_id]. 2233 per_platform[dev_data->platform_id].num_ports; 2234 port_id++) { 2235 if (dump) 2236 qed_port_pretend(p_hwfn, p_ptt, 2237 port_id); 2238 offset += 2239 qed_grc_dump_split_data(p_hwfn, p_ptt, 2240 curr_input_regs_arr, 2241 dump_buf + offset, 2242 dump, block_enable, 2243 "port", port_id, 2244 param_name, 2245 param_val); 2246 } 2247 break; 2248 case SPLIT_TYPE_PF: 2249 case SPLIT_TYPE_PORT_PF: 2250 for (pf_id = 0; 2251 pf_id < 2252 s_chip_defs[dev_data->chip_id]. 
2253 per_platform[dev_data->platform_id].num_pfs; 2254 pf_id++) { 2255 if (dump) 2256 qed_fid_pretend(p_hwfn, p_ptt, pf_id); 2257 offset += qed_grc_dump_split_data(p_hwfn, 2258 p_ptt, 2259 curr_input_regs_arr, 2260 dump_buf + offset, 2261 dump, block_enable, 2262 "pf", pf_id, param_name, 2263 param_val); 2264 } 2265 break; 2266 default: 2267 break; 2268 } 2269 2270 input_offset += split_data_size; 2271 } 2272 2273 /* Pretend to original PF */ 2274 if (dump) 2275 qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); 2276 return offset; 2277 } 2278 2279 /* Dump reset registers. Returns the dumped size in dwords. */ 2280 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, 2281 struct qed_ptt *p_ptt, 2282 u32 *dump_buf, bool dump) 2283 { 2284 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2285 u32 i, offset = 0, num_regs = 0; 2286 2287 /* Calculate header size */ 2288 offset += qed_grc_dump_regs_hdr(dump_buf, 2289 false, 0, "eng", -1, NULL, NULL); 2290 2291 /* Write reset registers */ 2292 for (i = 0; i < MAX_DBG_RESET_REGS; i++) { 2293 if (s_reset_regs_defs[i].exists[dev_data->chip_id]) { 2294 offset += qed_grc_dump_reg_entry(p_hwfn, 2295 p_ptt, 2296 dump_buf + offset, 2297 dump, 2298 BYTES_TO_DWORDS 2299 (s_reset_regs_defs 2300 [i].addr), 1); 2301 num_regs++; 2302 } 2303 } 2304 2305 /* Write header */ 2306 if (dump) 2307 qed_grc_dump_regs_hdr(dump_buf, 2308 true, num_regs, "eng", -1, NULL, NULL); 2309 return offset; 2310 } 2311 2312 /* Dump registers that are modified during GRC Dump and therefore must be dumped 2313 * first. Returns the dumped size in dwords. 2314 */ 2315 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, 2316 struct qed_ptt *p_ptt, 2317 u32 *dump_buf, bool dump) 2318 { 2319 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2320 u32 offset = 0, num_reg_entries = 0, block_id; 2321 u8 storm_id, reg_idx, num_attn_regs; 2322 2323 /* Calculate header size */ 2324 offset += qed_grc_dump_regs_hdr(dump_buf, 2325 false, 0, "eng", -1, NULL, NULL); 2326 2327 /* Write parity registers */ 2328 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { 2329 const struct dbg_attn_reg *attn_reg_arr; 2330 2331 if (dev_data->block_in_reset[block_id] && dump) 2332 continue; 2333 2334 attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id, 2335 ATTN_TYPE_PARITY, 2336 &num_attn_regs); 2337 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { 2338 const struct dbg_attn_reg *reg_data = 2339 &attn_reg_arr[reg_idx]; 2340 u16 modes_buf_offset; 2341 bool eval_mode; 2342 2343 /* Check mode */ 2344 eval_mode = GET_FIELD(reg_data->mode.data, 2345 DBG_MODE_HDR_EVAL_MODE) > 0; 2346 modes_buf_offset = 2347 GET_FIELD(reg_data->mode.data, 2348 DBG_MODE_HDR_MODES_BUF_OFFSET); 2349 if (!eval_mode || 2350 qed_is_mode_match(p_hwfn, &modes_buf_offset)) { 2351 /* Mode match - read and dump registers */ 2352 offset += qed_grc_dump_reg_entry(p_hwfn, 2353 p_ptt, 2354 dump_buf + offset, 2355 dump, 2356 reg_data->mask_address, 2357 1); 2358 offset += qed_grc_dump_reg_entry(p_hwfn, 2359 p_ptt, 2360 dump_buf + offset, 2361 dump, 2362 GET_FIELD(reg_data->data, 2363 DBG_ATTN_REG_STS_ADDRESS), 2364 1); 2365 num_reg_entries += 2; 2366 } 2367 } 2368 } 2369 2370 /* Write storm stall status registers */ 2371 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 2372 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] && 2373 dump) 2374 continue; 2375 2376 offset += qed_grc_dump_reg_entry(p_hwfn, 2377 p_ptt, 2378 dump_buf + offset, 2379 dump, 2380 
BYTES_TO_DWORDS(s_storm_defs[storm_id]. 2381 sem_fast_mem_addr + 2382 SEM_FAST_REG_STALLED), 2383 1); 2384 num_reg_entries++; 2385 } 2386 2387 /* Write header */ 2388 if (dump) 2389 qed_grc_dump_regs_hdr(dump_buf, 2390 true, 2391 num_reg_entries, "eng", -1, NULL, NULL); 2392 return offset; 2393 } 2394 2395 /* Dumps a GRC memory header (section and params). 2396 * The following parameters are dumped: 2397 * name - name is dumped only if it's not NULL. 2398 * addr - byte_addr is dumped only if name is NULL. 2399 * len - dword_len is always dumped. 2400 * width - bit_width is dumped if it's not zero. 2401 * packed - packed=1 is dumped if it's not false. 2402 * mem_group - mem_group is always dumped. 2403 * is_storm - true only if the memory is related to a Storm. 2404 * storm_letter - storm letter (valid only if is_storm is true). 2405 * Returns the dumped size in dwords. 2406 */ 2407 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, 2408 u32 *dump_buf, 2409 bool dump, 2410 const char *name, 2411 u32 byte_addr, 2412 u32 dword_len, 2413 u32 bit_width, 2414 bool packed, 2415 const char *mem_group, 2416 bool is_storm, char storm_letter) 2417 { 2418 u8 num_params = 3; 2419 u32 offset = 0; 2420 char buf[64]; 2421 2422 if (!dword_len) 2423 DP_NOTICE(p_hwfn, 2424 "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); 2425 if (bit_width) 2426 num_params++; 2427 if (packed) 2428 num_params++; 2429 2430 /* Dump section header */ 2431 offset += qed_dump_section_hdr(dump_buf + offset, 2432 dump, "grc_mem", num_params); 2433 if (name) { 2434 /* Dump name */ 2435 if (is_storm) { 2436 strcpy(buf, "?STORM_"); 2437 buf[0] = storm_letter; 2438 strcpy(buf + strlen(buf), name); 2439 } else { 2440 strcpy(buf, name); 2441 } 2442 2443 offset += qed_dump_str_param(dump_buf + offset, 2444 dump, "name", buf); 2445 if (dump) 2446 DP_VERBOSE(p_hwfn, 2447 QED_MSG_DEBUG, 2448 "Dumping %d registers from %s...\n", 2449 dword_len, buf); 2450 } else { 2451 /* Dump address */ 2452 offset += qed_dump_num_param(dump_buf + offset, 2453 dump, "addr", byte_addr); 2454 if (dump && dword_len > 64) 2455 DP_VERBOSE(p_hwfn, 2456 QED_MSG_DEBUG, 2457 "Dumping %d registers from address 0x%x...\n", 2458 dword_len, byte_addr); 2459 } 2460 2461 /* Dump len */ 2462 offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len); 2463 2464 /* Dump bit width */ 2465 if (bit_width) 2466 offset += qed_dump_num_param(dump_buf + offset, 2467 dump, "width", bit_width); 2468 2469 /* Dump packed */ 2470 if (packed) 2471 offset += qed_dump_num_param(dump_buf + offset, 2472 dump, "packed", 1); 2473 2474 /* Dump reg type */ 2475 if (is_storm) { 2476 strcpy(buf, "?STORM_"); 2477 buf[0] = storm_letter; 2478 strcpy(buf + strlen(buf), mem_group); 2479 } else { 2480 strcpy(buf, mem_group); 2481 } 2482 2483 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf); 2484 return offset; 2485 } 2486 2487 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address. 2488 * Returns the dumped size in dwords. 
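 *
 * The output is a "grc_mem" section header (name or addr, len, optional
 * width and packed params, plus the mem_group type), immediately followed by
 * dword_len data dwords read one dword at a time starting at byte_addr.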
2489 */ 2490 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, 2491 struct qed_ptt *p_ptt, 2492 u32 *dump_buf, 2493 bool dump, 2494 const char *name, 2495 u32 byte_addr, 2496 u32 dword_len, 2497 u32 bit_width, 2498 bool packed, 2499 const char *mem_group, 2500 bool is_storm, char storm_letter) 2501 { 2502 u32 offset = 0; 2503 2504 offset += qed_grc_dump_mem_hdr(p_hwfn, 2505 dump_buf + offset, 2506 dump, 2507 name, 2508 byte_addr, 2509 dword_len, 2510 bit_width, 2511 packed, 2512 mem_group, is_storm, storm_letter); 2513 if (dump) { 2514 u32 i; 2515 2516 for (i = 0; i < dword_len; 2517 i++, byte_addr += BYTES_IN_DWORD, offset++) 2518 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); 2519 } else { 2520 offset += dword_len; 2521 } 2522 2523 return offset; 2524 } 2525 2526 /* Dumps GRC memories entries. Returns the dumped size in dwords. */ 2527 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, 2528 struct qed_ptt *p_ptt, 2529 struct dbg_array input_mems_arr, 2530 u32 *dump_buf, bool dump) 2531 { 2532 u32 i, offset = 0, input_offset = 0; 2533 bool mode_match = true; 2534 2535 while (input_offset < input_mems_arr.size_in_dwords) { 2536 const struct dbg_dump_cond_hdr *cond_hdr; 2537 u32 num_entries; 2538 bool eval_mode; 2539 2540 cond_hdr = (const struct dbg_dump_cond_hdr *) 2541 &input_mems_arr.ptr[input_offset++]; 2542 eval_mode = GET_FIELD(cond_hdr->mode.data, 2543 DBG_MODE_HDR_EVAL_MODE) > 0; 2544 2545 /* Check required mode */ 2546 if (eval_mode) { 2547 u16 modes_buf_offset = 2548 GET_FIELD(cond_hdr->mode.data, 2549 DBG_MODE_HDR_MODES_BUF_OFFSET); 2550 2551 mode_match = qed_is_mode_match(p_hwfn, 2552 &modes_buf_offset); 2553 } 2554 2555 if (!mode_match) { 2556 input_offset += cond_hdr->data_size; 2557 continue; 2558 } 2559 2560 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; 2561 for (i = 0; i < num_entries; 2562 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) { 2563 const struct dbg_dump_mem *mem = 2564 (const struct dbg_dump_mem *) 2565 &input_mems_arr.ptr[input_offset]; 2566 u8 mem_group_id; 2567 2568 mem_group_id = GET_FIELD(mem->dword0, 2569 DBG_DUMP_MEM_MEM_GROUP_ID); 2570 if (mem_group_id >= MEM_GROUPS_NUM) { 2571 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n"); 2572 return 0; 2573 } 2574 2575 if (qed_grc_is_mem_included(p_hwfn, 2576 (enum block_id)cond_hdr->block_id, 2577 mem_group_id)) { 2578 u32 mem_byte_addr = 2579 DWORDS_TO_BYTES(GET_FIELD(mem->dword0, 2580 DBG_DUMP_MEM_ADDRESS)); 2581 u32 mem_len = GET_FIELD(mem->dword1, 2582 DBG_DUMP_MEM_LENGTH); 2583 char storm_letter = 'a'; 2584 bool is_storm = false; 2585 2586 /* Update memory length for CCFC/TCFC memories 2587 * according to number of LCIDs/LTIDs. 2588 */ 2589 if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) 2590 mem_len = qed_grc_get_param(p_hwfn, 2591 DBG_GRC_PARAM_NUM_LCIDS) 2592 * (mem_len / MAX_LCIDS); 2593 else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) 2594 mem_len = qed_grc_get_param(p_hwfn, 2595 DBG_GRC_PARAM_NUM_LTIDS) 2596 * (mem_len / MAX_LTIDS); 2597 2598 /* If memory is associated with Storm, update 2599 * Storm details. 
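 * In that case the Storm letter is prepended to the memory group name in
 * the dumped "type" param, e.g. for a Tstorm-associated block (letter 'T')
 * the "RAM" group is reported as "TSTORM_RAM".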
2600 */ 2601 if (s_block_defs[cond_hdr->block_id]-> 2602 associated_to_storm) { 2603 is_storm = true; 2604 storm_letter = 2605 s_storm_defs[s_block_defs[ 2606 cond_hdr->block_id]-> 2607 storm_id].letter; 2608 } 2609 2610 /* Dump memory */ 2611 offset += qed_grc_dump_mem(p_hwfn, p_ptt, 2612 dump_buf + offset, dump, NULL, 2613 mem_byte_addr, mem_len, 0, 2614 false, 2615 s_mem_group_names[mem_group_id], 2616 is_storm, storm_letter); 2617 } 2618 } 2619 } 2620 2621 return offset; 2622 } 2623 2624 /* Dumps GRC memories according to the input array dump_mem. 2625 * Returns the dumped size in dwords. 2626 */ 2627 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, 2628 struct qed_ptt *p_ptt, 2629 u32 *dump_buf, bool dump) 2630 { 2631 u32 offset = 0, input_offset = 0; 2632 2633 while (input_offset < 2634 s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { 2635 const struct dbg_dump_split_hdr *split_hdr = 2636 (const struct dbg_dump_split_hdr *) 2637 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; 2638 u8 split_type_id = GET_FIELD(split_hdr->hdr, 2639 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); 2640 u32 split_data_size = GET_FIELD(split_hdr->hdr, 2641 DBG_DUMP_SPLIT_HDR_DATA_SIZE); 2642 struct dbg_array curr_input_mems_arr = { 2643 &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset], 2644 split_data_size}; 2645 2646 switch (split_type_id) { 2647 case SPLIT_TYPE_NONE: 2648 offset += qed_grc_dump_mem_entries(p_hwfn, 2649 p_ptt, 2650 curr_input_mems_arr, 2651 dump_buf + offset, 2652 dump); 2653 break; 2654 default: 2655 DP_NOTICE(p_hwfn, 2656 "Dumping split memories is currently not supported\n"); 2657 break; 2658 } 2659 2660 input_offset += split_data_size; 2661 } 2662 2663 return offset; 2664 } 2665 2666 /* Dumps GRC context data for the specified Storm. 2667 * Returns the dumped size in dwords. 2668 */ 2669 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, 2670 struct qed_ptt *p_ptt, 2671 u32 *dump_buf, 2672 bool dump, 2673 const char *name, 2674 u32 num_lids, 2675 u32 lid_size, 2676 u32 rd_reg_addr, 2677 u8 storm_id) 2678 { 2679 u32 i, lid, total_size; 2680 u32 offset = 0; 2681 2682 if (!lid_size) 2683 return 0; 2684 lid_size *= BYTES_IN_DWORD; 2685 total_size = num_lids * lid_size; 2686 offset += qed_grc_dump_mem_hdr(p_hwfn, 2687 dump_buf + offset, 2688 dump, 2689 name, 2690 0, 2691 total_size, 2692 lid_size * 32, 2693 false, 2694 name, 2695 true, s_storm_defs[storm_id].letter); 2696 2697 /* Dump context data */ 2698 if (dump) { 2699 for (lid = 0; lid < num_lids; lid++) { 2700 for (i = 0; i < lid_size; i++, offset++) { 2701 qed_wr(p_hwfn, 2702 p_ptt, 2703 s_storm_defs[storm_id].cm_ctx_wr_addr, 2704 BIT(9) | lid); 2705 *(dump_buf + offset) = qed_rd(p_hwfn, 2706 p_ptt, 2707 rd_reg_addr); 2708 } 2709 } 2710 } else { 2711 offset += total_size; 2712 } 2713 2714 return offset; 2715 } 2716 2717 /* Dumps GRC contexts. Returns the dumped size in dwords. */ 2718 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, 2719 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 2720 { 2721 u32 offset = 0; 2722 u8 storm_id; 2723 2724 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 2725 if (!qed_grc_is_storm_included(p_hwfn, 2726 (enum dbg_storms)storm_id)) 2727 continue; 2728 2729 /* Dump Conn AG context size */ 2730 offset += 2731 qed_grc_dump_ctx_data(p_hwfn, 2732 p_ptt, 2733 dump_buf + offset, 2734 dump, 2735 "CONN_AG_CTX", 2736 qed_grc_get_param(p_hwfn, 2737 DBG_GRC_PARAM_NUM_LCIDS), 2738 s_storm_defs[storm_id]. 2739 cm_conn_ag_ctx_lid_size, 2740 s_storm_defs[storm_id]. 
2741 cm_conn_ag_ctx_rd_addr, 2742 storm_id); 2743 2744 /* Dump Conn ST context size */ 2745 offset += 2746 qed_grc_dump_ctx_data(p_hwfn, 2747 p_ptt, 2748 dump_buf + offset, 2749 dump, 2750 "CONN_ST_CTX", 2751 qed_grc_get_param(p_hwfn, 2752 DBG_GRC_PARAM_NUM_LCIDS), 2753 s_storm_defs[storm_id]. 2754 cm_conn_st_ctx_lid_size, 2755 s_storm_defs[storm_id]. 2756 cm_conn_st_ctx_rd_addr, 2757 storm_id); 2758 2759 /* Dump Task AG context size */ 2760 offset += 2761 qed_grc_dump_ctx_data(p_hwfn, 2762 p_ptt, 2763 dump_buf + offset, 2764 dump, 2765 "TASK_AG_CTX", 2766 qed_grc_get_param(p_hwfn, 2767 DBG_GRC_PARAM_NUM_LTIDS), 2768 s_storm_defs[storm_id]. 2769 cm_task_ag_ctx_lid_size, 2770 s_storm_defs[storm_id]. 2771 cm_task_ag_ctx_rd_addr, 2772 storm_id); 2773 2774 /* Dump Task ST context size */ 2775 offset += 2776 qed_grc_dump_ctx_data(p_hwfn, 2777 p_ptt, 2778 dump_buf + offset, 2779 dump, 2780 "TASK_ST_CTX", 2781 qed_grc_get_param(p_hwfn, 2782 DBG_GRC_PARAM_NUM_LTIDS), 2783 s_storm_defs[storm_id]. 2784 cm_task_st_ctx_lid_size, 2785 s_storm_defs[storm_id]. 2786 cm_task_st_ctx_rd_addr, 2787 storm_id); 2788 } 2789 2790 return offset; 2791 } 2792 2793 /* Dumps GRC IORs data. Returns the dumped size in dwords. */ 2794 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, 2795 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 2796 { 2797 char buf[10] = "IOR_SET_?"; 2798 u8 storm_id, set_id; 2799 u32 offset = 0; 2800 2801 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 2802 if (qed_grc_is_storm_included(p_hwfn, 2803 (enum dbg_storms)storm_id)) { 2804 for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) { 2805 u32 addr = 2806 s_storm_defs[storm_id].sem_fast_mem_addr + 2807 SEM_FAST_REG_STORM_REG_FILE + 2808 DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id)); 2809 2810 buf[strlen(buf) - 1] = '0' + set_id; 2811 offset += qed_grc_dump_mem(p_hwfn, 2812 p_ptt, 2813 dump_buf + offset, 2814 dump, 2815 buf, 2816 addr, 2817 IORS_PER_SET, 2818 32, 2819 false, 2820 "ior", 2821 true, 2822 s_storm_defs 2823 [storm_id].letter); 2824 } 2825 } 2826 } 2827 2828 return offset; 2829 } 2830 2831 /* Dump VFC CAM. Returns the dumped size in dwords. 
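 *
 * Each CAM row is read indirectly through the Storm's SEM_FAST interface:
 * the read command is written to SEM_FAST_REG_VFC_DATA_WR, the row address
 * to SEM_FAST_REG_VFC_ADDR, and VFC_CAM_RESP_DWORDS of response data are
 * then read back from SEM_FAST_REG_VFC_DATA_RD.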
*/ 2832 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, 2833 struct qed_ptt *p_ptt, 2834 u32 *dump_buf, bool dump, u8 storm_id) 2835 { 2836 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS; 2837 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 }; 2838 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 }; 2839 u32 offset = 0; 2840 u32 row, i; 2841 2842 offset += qed_grc_dump_mem_hdr(p_hwfn, 2843 dump_buf + offset, 2844 dump, 2845 "vfc_cam", 2846 0, 2847 total_size, 2848 256, 2849 false, 2850 "vfc_cam", 2851 true, s_storm_defs[storm_id].letter); 2852 if (dump) { 2853 /* Prepare CAM address */ 2854 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); 2855 for (row = 0; row < VFC_CAM_NUM_ROWS; 2856 row++, offset += VFC_CAM_RESP_DWORDS) { 2857 /* Write VFC CAM command */ 2858 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); 2859 ARR_REG_WR(p_hwfn, 2860 p_ptt, 2861 s_storm_defs[storm_id].sem_fast_mem_addr + 2862 SEM_FAST_REG_VFC_DATA_WR, 2863 cam_cmd, VFC_CAM_CMD_DWORDS); 2864 2865 /* Write VFC CAM address */ 2866 ARR_REG_WR(p_hwfn, 2867 p_ptt, 2868 s_storm_defs[storm_id].sem_fast_mem_addr + 2869 SEM_FAST_REG_VFC_ADDR, 2870 cam_addr, VFC_CAM_ADDR_DWORDS); 2871 2872 /* Read VFC CAM read response */ 2873 ARR_REG_RD(p_hwfn, 2874 p_ptt, 2875 s_storm_defs[storm_id].sem_fast_mem_addr + 2876 SEM_FAST_REG_VFC_DATA_RD, 2877 dump_buf + offset, VFC_CAM_RESP_DWORDS); 2878 } 2879 } else { 2880 offset += total_size; 2881 } 2882 2883 return offset; 2884 } 2885 2886 /* Dump VFC RAM. Returns the dumped size in dwords. */ 2887 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, 2888 struct qed_ptt *p_ptt, 2889 u32 *dump_buf, 2890 bool dump, 2891 u8 storm_id, struct vfc_ram_defs *ram_defs) 2892 { 2893 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS; 2894 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 }; 2895 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 }; 2896 u32 offset = 0; 2897 u32 row, i; 2898 2899 offset += qed_grc_dump_mem_hdr(p_hwfn, 2900 dump_buf + offset, 2901 dump, 2902 ram_defs->mem_name, 2903 0, 2904 total_size, 2905 256, 2906 false, 2907 ram_defs->type_name, 2908 true, s_storm_defs[storm_id].letter); 2909 2910 /* Prepare RAM address */ 2911 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); 2912 2913 if (!dump) 2914 return offset + total_size; 2915 2916 for (row = ram_defs->base_row; 2917 row < ram_defs->base_row + ram_defs->num_rows; 2918 row++, offset += VFC_RAM_RESP_DWORDS) { 2919 /* Write VFC RAM command */ 2920 ARR_REG_WR(p_hwfn, 2921 p_ptt, 2922 s_storm_defs[storm_id].sem_fast_mem_addr + 2923 SEM_FAST_REG_VFC_DATA_WR, 2924 ram_cmd, VFC_RAM_CMD_DWORDS); 2925 2926 /* Write VFC RAM address */ 2927 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row); 2928 ARR_REG_WR(p_hwfn, 2929 p_ptt, 2930 s_storm_defs[storm_id].sem_fast_mem_addr + 2931 SEM_FAST_REG_VFC_ADDR, 2932 ram_addr, VFC_RAM_ADDR_DWORDS); 2933 2934 /* Read VFC RAM read response */ 2935 ARR_REG_RD(p_hwfn, 2936 p_ptt, 2937 s_storm_defs[storm_id].sem_fast_mem_addr + 2938 SEM_FAST_REG_VFC_DATA_RD, 2939 dump_buf + offset, VFC_RAM_RESP_DWORDS); 2940 } 2941 2942 return offset; 2943 } 2944 2945 /* Dumps GRC VFC data. Returns the dumped size in dwords. 
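 *
 * For every included Storm that has a VFC, the CAM is dumped followed by all
 * VFC RAM types. The Pstorm VFC is dumped only on the ASIC platform.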
*/ 2946 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn, 2947 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 2948 { 2949 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2950 u8 storm_id, i; 2951 u32 offset = 0; 2952 2953 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 2954 if (qed_grc_is_storm_included(p_hwfn, 2955 (enum dbg_storms)storm_id) && 2956 s_storm_defs[storm_id].has_vfc && 2957 (storm_id != DBG_PSTORM_ID || 2958 dev_data->platform_id == PLATFORM_ASIC)) { 2959 /* Read CAM */ 2960 offset += qed_grc_dump_vfc_cam(p_hwfn, 2961 p_ptt, 2962 dump_buf + offset, 2963 dump, storm_id); 2964 2965 /* Read RAM */ 2966 for (i = 0; i < NUM_VFC_RAM_TYPES; i++) 2967 offset += qed_grc_dump_vfc_ram(p_hwfn, 2968 p_ptt, 2969 dump_buf + 2970 offset, 2971 dump, 2972 storm_id, 2973 &s_vfc_ram_defs 2974 [i]); 2975 } 2976 } 2977 2978 return offset; 2979 } 2980 2981 /* Dumps GRC RSS data. Returns the dumped size in dwords. */ 2982 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, 2983 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 2984 { 2985 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 2986 u32 offset = 0; 2987 u8 rss_mem_id; 2988 2989 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { 2990 struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id]; 2991 u32 num_entries = rss_defs->num_entries[dev_data->chip_id]; 2992 u32 entry_width = rss_defs->entry_width[dev_data->chip_id]; 2993 u32 total_size = (num_entries * entry_width) / 32; 2994 bool packed = (entry_width == 16); 2995 u32 addr = rss_defs->addr; 2996 u32 i, j; 2997 2998 offset += qed_grc_dump_mem_hdr(p_hwfn, 2999 dump_buf + offset, 3000 dump, 3001 rss_defs->mem_name, 3002 addr, 3003 total_size, 3004 entry_width, 3005 packed, 3006 rss_defs->type_name, false, 0); 3007 3008 if (!dump) { 3009 offset += total_size; 3010 continue; 3011 } 3012 3013 /* Dump RSS data */ 3014 for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) { 3015 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr); 3016 for (j = 0; j < BYTES_IN_DWORD; j++, offset++) 3017 *(dump_buf + offset) = 3018 qed_rd(p_hwfn, p_ptt, 3019 RSS_REG_RSS_RAM_DATA + 3020 DWORDS_TO_BYTES(j)); 3021 } 3022 } 3023 3024 return offset; 3025 } 3026 3027 /* Dumps GRC Big RAM. Returns the dumped size in dwords. 
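 *
 * The RAM is read two blocks per iteration: the block-pair index is written
 * to the Big RAM address register, and 2 * BIG_RAM_BLOCK_SIZE_DWORDS dwords
 * are then read back through the corresponding data register.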
*/ 3028 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, 3029 struct qed_ptt *p_ptt, 3030 u32 *dump_buf, bool dump, u8 big_ram_id) 3031 { 3032 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3033 char mem_name[12] = "???_BIG_RAM"; 3034 char type_name[8] = "???_RAM"; 3035 u32 ram_size, total_blocks; 3036 u32 offset = 0, i, j; 3037 3038 total_blocks = 3039 s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id]; 3040 ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS; 3041 3042 strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name, 3043 strlen(s_big_ram_defs[big_ram_id].instance_name)); 3044 strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name, 3045 strlen(s_big_ram_defs[big_ram_id].instance_name)); 3046 3047 /* Dump memory header */ 3048 offset += qed_grc_dump_mem_hdr(p_hwfn, 3049 dump_buf + offset, 3050 dump, 3051 mem_name, 3052 0, 3053 ram_size, 3054 BIG_RAM_BLOCK_SIZE_BYTES * 8, 3055 false, type_name, false, 0); 3056 3057 if (!dump) 3058 return offset + ram_size; 3059 3060 /* Read and dump Big RAM data */ 3061 for (i = 0; i < total_blocks / 2; i++) { 3062 qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr, 3063 i); 3064 for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++) 3065 *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, 3066 s_big_ram_defs[big_ram_id]. 3067 data_reg_addr + 3068 DWORDS_TO_BYTES(j)); 3069 } 3070 3071 return offset; 3072 } 3073 3074 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, 3075 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 3076 { 3077 bool block_enable[MAX_BLOCK_ID] = { 0 }; 3078 bool halted = false; 3079 u32 offset = 0; 3080 3081 /* Halt MCP */ 3082 if (dump) { 3083 halted = !qed_mcp_halt(p_hwfn, p_ptt); 3084 if (!halted) 3085 DP_NOTICE(p_hwfn, "MCP halt failed!\n"); 3086 } 3087 3088 /* Dump MCP scratchpad */ 3089 offset += qed_grc_dump_mem(p_hwfn, 3090 p_ptt, 3091 dump_buf + offset, 3092 dump, 3093 NULL, 3094 MCP_REG_SCRATCH, 3095 MCP_REG_SCRATCH_SIZE, 3096 0, false, "MCP", false, 0); 3097 3098 /* Dump MCP cpu_reg_file */ 3099 offset += qed_grc_dump_mem(p_hwfn, 3100 p_ptt, 3101 dump_buf + offset, 3102 dump, 3103 NULL, 3104 MCP_REG_CPU_REG_FILE, 3105 MCP_REG_CPU_REG_FILE_SIZE, 3106 0, false, "MCP", false, 0); 3107 3108 /* Dump MCP registers */ 3109 block_enable[BLOCK_MCP] = true; 3110 offset += qed_grc_dump_registers(p_hwfn, 3111 p_ptt, 3112 dump_buf + offset, 3113 dump, block_enable, "block", "MCP"); 3114 3115 /* Dump required non-MCP registers */ 3116 offset += qed_grc_dump_regs_hdr(dump_buf + offset, 3117 dump, 1, "eng", -1, "block", "MCP"); 3118 offset += qed_grc_dump_reg_entry(p_hwfn, 3119 p_ptt, 3120 dump_buf + offset, 3121 dump, 3122 BYTES_TO_DWORDS 3123 (MISC_REG_SHARED_MEM_ADDR), 1); 3124 3125 /* Release MCP */ 3126 if (halted && qed_mcp_resume(p_hwfn, p_ptt)) 3127 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); 3128 return offset; 3129 } 3130 3131 /* Dumps the tbus indirect memory for all PHYs. 
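 *
 * For each PHY, the tbus address is driven through separate low/high address
 * registers, and one low data byte plus one high data byte are read back per
 * address, so the dumped memory is declared 16 bits wide and packed.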
*/ 3132 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, 3133 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 3134 { 3135 u32 offset = 0, tbus_lo_offset, tbus_hi_offset; 3136 char mem_name[32]; 3137 u8 phy_id; 3138 3139 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) { 3140 struct phy_defs *phy_defs = &s_phy_defs[phy_id]; 3141 int printed_chars; 3142 3143 printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s", 3144 phy_defs->phy_name); 3145 if (printed_chars < 0 || printed_chars >= sizeof(mem_name)) 3146 DP_NOTICE(p_hwfn, 3147 "Unexpected debug error: invalid PHY memory name\n"); 3148 offset += qed_grc_dump_mem_hdr(p_hwfn, 3149 dump_buf + offset, 3150 dump, 3151 mem_name, 3152 0, 3153 PHY_DUMP_SIZE_DWORDS, 3154 16, true, mem_name, false, 0); 3155 if (dump) { 3156 u32 addr_lo_addr = phy_defs->base_addr + 3157 phy_defs->tbus_addr_lo_addr; 3158 u32 addr_hi_addr = phy_defs->base_addr + 3159 phy_defs->tbus_addr_hi_addr; 3160 u32 data_lo_addr = phy_defs->base_addr + 3161 phy_defs->tbus_data_lo_addr; 3162 u32 data_hi_addr = phy_defs->base_addr + 3163 phy_defs->tbus_data_hi_addr; 3164 u8 *bytes_buf = (u8 *)(dump_buf + offset); 3165 3166 for (tbus_hi_offset = 0; 3167 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); 3168 tbus_hi_offset++) { 3169 qed_wr(p_hwfn, 3170 p_ptt, addr_hi_addr, tbus_hi_offset); 3171 for (tbus_lo_offset = 0; tbus_lo_offset < 256; 3172 tbus_lo_offset++) { 3173 qed_wr(p_hwfn, 3174 p_ptt, 3175 addr_lo_addr, tbus_lo_offset); 3176 *(bytes_buf++) = 3177 (u8)qed_rd(p_hwfn, p_ptt, 3178 data_lo_addr); 3179 *(bytes_buf++) = 3180 (u8)qed_rd(p_hwfn, p_ptt, 3181 data_hi_addr); 3182 } 3183 } 3184 } 3185 3186 offset += PHY_DUMP_SIZE_DWORDS; 3187 } 3188 3189 return offset; 3190 } 3191 3192 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn, 3193 struct qed_ptt *p_ptt, 3194 enum block_id block_id, 3195 u8 line_id, 3196 u8 cycle_en, 3197 u8 right_shift, u8 force_valid, u8 force_frame) 3198 { 3199 struct block_defs *p_block_defs = s_block_defs[block_id]; 3200 3201 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id); 3202 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en); 3203 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift); 3204 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid); 3205 qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame); 3206 } 3207 3208 /* Dumps Static Debug data. Returns the dumped size in dwords. 
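 *
 * For every block that has a debug bus on the current chip, the block's
 * debug client is enabled, each of the NUM_DBG_BUS_LINES lines is selected
 * in turn, and STATIC_DEBUG_LINE_DWORDS dwords are read from the calendar
 * output registers. Blocks that are in reset are dumped as zeros instead.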
*/ 3209 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, 3210 struct qed_ptt *p_ptt, 3211 u32 *dump_buf, bool dump) 3212 { 3213 u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS; 3214 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3215 u32 offset = 0, block_id, line_id, addr, i; 3216 struct block_defs *p_block_defs; 3217 3218 if (dump) { 3219 DP_VERBOSE(p_hwfn, 3220 QED_MSG_DEBUG, "Dumping static debug data...\n"); 3221 3222 /* Disable all blocks debug output */ 3223 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { 3224 p_block_defs = s_block_defs[block_id]; 3225 3226 if (p_block_defs->has_dbg_bus[dev_data->chip_id]) 3227 qed_wr(p_hwfn, p_ptt, 3228 p_block_defs->dbg_cycle_enable_addr, 0); 3229 } 3230 3231 qed_bus_reset_dbg_block(p_hwfn, p_ptt); 3232 qed_bus_set_framing_mode(p_hwfn, 3233 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST); 3234 qed_wr(p_hwfn, 3235 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF); 3236 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1); 3237 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true); 3238 } 3239 3240 /* Dump all static debug lines for each relevant block */ 3241 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { 3242 p_block_defs = s_block_defs[block_id]; 3243 3244 if (!p_block_defs->has_dbg_bus[dev_data->chip_id]) 3245 continue; 3246 3247 /* Dump static section params */ 3248 offset += qed_grc_dump_mem_hdr(p_hwfn, 3249 dump_buf + offset, 3250 dump, 3251 p_block_defs->name, 0, 3252 block_dwords, 32, false, 3253 "STATIC", false, 0); 3254 3255 if (dump && !dev_data->block_in_reset[block_id]) { 3256 u8 dbg_client_id = 3257 p_block_defs->dbg_client_id[dev_data->chip_id]; 3258 3259 /* Enable block's client */ 3260 qed_bus_enable_clients(p_hwfn, p_ptt, 3261 BIT(dbg_client_id)); 3262 3263 for (line_id = 0; line_id < NUM_DBG_BUS_LINES; 3264 line_id++) { 3265 /* Configure debug line ID */ 3266 qed_config_dbg_line(p_hwfn, 3267 p_ptt, 3268 (enum block_id)block_id, 3269 (u8)line_id, 3270 0xf, 0, 0, 0); 3271 3272 /* Read debug line info */ 3273 for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA; 3274 i < STATIC_DEBUG_LINE_DWORDS; 3275 i++, offset++, addr += BYTES_IN_DWORD) 3276 dump_buf[offset] = qed_rd(p_hwfn, p_ptt, 3277 addr); 3278 } 3279 3280 /* Disable block's client and debug output */ 3281 qed_bus_enable_clients(p_hwfn, p_ptt, 0); 3282 qed_wr(p_hwfn, p_ptt, 3283 p_block_defs->dbg_cycle_enable_addr, 0); 3284 } else { 3285 /* All lines are invalid - dump zeros */ 3286 if (dump) 3287 memset(dump_buf + offset, 0, 3288 DWORDS_TO_BYTES(block_dwords)); 3289 offset += block_dwords; 3290 } 3291 } 3292 3293 if (dump) { 3294 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false); 3295 qed_bus_enable_clients(p_hwfn, p_ptt, 0); 3296 } 3297 3298 return offset; 3299 } 3300 3301 /* Performs GRC Dump to the specified buffer. 3302 * Returns the dumped size in dwords. 3303 */ 3304 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, 3305 struct qed_ptt *p_ptt, 3306 u32 *dump_buf, 3307 bool dump, u32 *num_dumped_dwords) 3308 { 3309 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3310 bool parities_masked = false; 3311 u8 i, port_mode = 0; 3312 u32 offset = 0; 3313 3314 /* Check if emulation platform */ 3315 *num_dumped_dwords = 0; 3316 3317 /* Fill GRC parameters that were not set by the user with their default 3318 * value. 
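 * (per-chip defaults are taken from s_grc_param_defs[].default_val)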
3319 */ 3320 qed_dbg_grc_set_params_default(p_hwfn); 3321 3322 /* Find port mode */ 3323 if (dump) { 3324 switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { 3325 case 0: 3326 port_mode = 1; 3327 break; 3328 case 1: 3329 port_mode = 2; 3330 break; 3331 case 2: 3332 port_mode = 4; 3333 break; 3334 } 3335 } 3336 3337 /* Update reset state */ 3338 if (dump) 3339 qed_update_blocks_reset_state(p_hwfn, p_ptt); 3340 3341 /* Dump global params */ 3342 offset += qed_dump_common_global_params(p_hwfn, 3343 p_ptt, 3344 dump_buf + offset, dump, 4); 3345 offset += qed_dump_str_param(dump_buf + offset, 3346 dump, "dump-type", "grc-dump"); 3347 offset += qed_dump_num_param(dump_buf + offset, 3348 dump, 3349 "num-lcids", 3350 qed_grc_get_param(p_hwfn, 3351 DBG_GRC_PARAM_NUM_LCIDS)); 3352 offset += qed_dump_num_param(dump_buf + offset, 3353 dump, 3354 "num-ltids", 3355 qed_grc_get_param(p_hwfn, 3356 DBG_GRC_PARAM_NUM_LTIDS)); 3357 offset += qed_dump_num_param(dump_buf + offset, 3358 dump, "num-ports", port_mode); 3359 3360 /* Dump reset registers (dumped before taking blocks out of reset ) */ 3361 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) 3362 offset += qed_grc_dump_reset_regs(p_hwfn, 3363 p_ptt, 3364 dump_buf + offset, dump); 3365 3366 /* Take all blocks out of reset (using reset registers) */ 3367 if (dump) { 3368 qed_grc_unreset_blocks(p_hwfn, p_ptt); 3369 qed_update_blocks_reset_state(p_hwfn, p_ptt); 3370 } 3371 3372 /* Disable all parities using MFW command */ 3373 if (dump) { 3374 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); 3375 if (!parities_masked) { 3376 if (qed_grc_get_param 3377 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE)) 3378 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY; 3379 else 3380 DP_NOTICE(p_hwfn, 3381 "Failed to mask parities using MFW\n"); 3382 } 3383 } 3384 3385 /* Dump modified registers (dumped before modifying them) */ 3386 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) 3387 offset += qed_grc_dump_modified_regs(p_hwfn, 3388 p_ptt, 3389 dump_buf + offset, dump); 3390 3391 /* Stall storms */ 3392 if (dump && 3393 (qed_grc_is_included(p_hwfn, 3394 DBG_GRC_PARAM_DUMP_IOR) || 3395 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))) 3396 qed_grc_stall_storms(p_hwfn, p_ptt, true); 3397 3398 /* Dump all regs */ 3399 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) { 3400 /* Dump all blocks except MCP */ 3401 bool block_enable[MAX_BLOCK_ID]; 3402 3403 for (i = 0; i < MAX_BLOCK_ID; i++) 3404 block_enable[i] = true; 3405 block_enable[BLOCK_MCP] = false; 3406 offset += qed_grc_dump_registers(p_hwfn, 3407 p_ptt, 3408 dump_buf + 3409 offset, 3410 dump, 3411 block_enable, NULL, NULL); 3412 } 3413 3414 /* Dump memories */ 3415 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump); 3416 3417 /* Dump MCP */ 3418 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP)) 3419 offset += qed_grc_dump_mcp(p_hwfn, 3420 p_ptt, dump_buf + offset, dump); 3421 3422 /* Dump context */ 3423 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX)) 3424 offset += qed_grc_dump_ctx(p_hwfn, 3425 p_ptt, dump_buf + offset, dump); 3426 3427 /* Dump RSS memories */ 3428 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS)) 3429 offset += qed_grc_dump_rss(p_hwfn, 3430 p_ptt, dump_buf + offset, dump); 3431 3432 /* Dump Big RAM */ 3433 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) 3434 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param)) 3435 offset += qed_grc_dump_big_ram(p_hwfn, 3436 p_ptt, 3437 dump_buf + offset, 3438 dump, i); 3439 3440 /* Dump IORs 
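 * (one section per included Storm and IOR set, see qed_grc_dump_iors())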
*/ 3441 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR)) 3442 offset += qed_grc_dump_iors(p_hwfn, 3443 p_ptt, dump_buf + offset, dump); 3444 3445 /* Dump VFC */ 3446 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) 3447 offset += qed_grc_dump_vfc(p_hwfn, 3448 p_ptt, dump_buf + offset, dump); 3449 3450 /* Dump PHY tbus */ 3451 if (qed_grc_is_included(p_hwfn, 3452 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == 3453 CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC) 3454 offset += qed_grc_dump_phy(p_hwfn, 3455 p_ptt, dump_buf + offset, dump); 3456 3457 /* Dump static debug data */ 3458 if (qed_grc_is_included(p_hwfn, 3459 DBG_GRC_PARAM_DUMP_STATIC) && 3460 dev_data->bus.state == DBG_BUS_STATE_IDLE) 3461 offset += qed_grc_dump_static_debug(p_hwfn, 3462 p_ptt, 3463 dump_buf + offset, dump); 3464 3465 /* Dump last section */ 3466 offset += qed_dump_last_section(dump_buf, offset, dump); 3467 if (dump) { 3468 /* Unstall storms */ 3469 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL)) 3470 qed_grc_stall_storms(p_hwfn, p_ptt, false); 3471 3472 /* Clear parity status */ 3473 qed_grc_clear_all_prty(p_hwfn, p_ptt); 3474 3475 /* Enable all parities using MFW command */ 3476 if (parities_masked) 3477 qed_mcp_mask_parities(p_hwfn, p_ptt, 0); 3478 } 3479 3480 *num_dumped_dwords = offset; 3481 3482 return DBG_STATUS_OK; 3483 } 3484 3485 /* Writes the specified failing Idle Check rule to the specified buffer. 3486 * Returns the dumped size in dwords. 3487 */ 3488 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, 3489 struct qed_ptt *p_ptt, 3490 u32 * 3491 dump_buf, 3492 bool dump, 3493 u16 rule_id, 3494 const struct dbg_idle_chk_rule *rule, 3495 u16 fail_entry_id, u32 *cond_reg_values) 3496 { 3497 const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *) 3498 s_dbg_arrays 3499 [BIN_BUF_DBG_IDLE_CHK_REGS]. 3500 ptr)[rule->reg_offset]; 3501 const struct dbg_idle_chk_cond_reg *cond_regs = ®s[0].cond_reg; 3502 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3503 struct dbg_idle_chk_result_hdr *hdr = 3504 (struct dbg_idle_chk_result_hdr *)dump_buf; 3505 const struct dbg_idle_chk_info_reg *info_regs = 3506 ®s[rule->num_cond_regs].info_reg; 3507 u32 next_reg_offset = 0, i, offset = 0; 3508 u8 reg_id; 3509 3510 /* Dump rule data */ 3511 if (dump) { 3512 memset(hdr, 0, sizeof(*hdr)); 3513 hdr->rule_id = rule_id; 3514 hdr->mem_entry_id = fail_entry_id; 3515 hdr->severity = rule->severity; 3516 hdr->num_dumped_cond_regs = rule->num_cond_regs; 3517 } 3518 3519 offset += IDLE_CHK_RESULT_HDR_DWORDS; 3520 3521 /* Dump condition register values */ 3522 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { 3523 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; 3524 3525 /* Write register header */ 3526 if (dump) { 3527 struct dbg_idle_chk_result_reg_hdr *reg_hdr = 3528 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf 3529 + offset); 3530 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; 3531 memset(reg_hdr, 0, 3532 sizeof(struct dbg_idle_chk_result_reg_hdr)); 3533 reg_hdr->start_entry = reg->start_entry; 3534 reg_hdr->size = reg->entry_size; 3535 SET_FIELD(reg_hdr->data, 3536 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, 3537 reg->num_entries > 1 || reg->start_entry > 0 3538 ? 
1 : 0); 3539 SET_FIELD(reg_hdr->data, 3540 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); 3541 3542 /* Write register values */ 3543 for (i = 0; i < reg_hdr->size; 3544 i++, next_reg_offset++, offset++) 3545 dump_buf[offset] = 3546 cond_reg_values[next_reg_offset]; 3547 } else { 3548 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + 3549 reg->entry_size; 3550 } 3551 } 3552 3553 /* Dump info register values */ 3554 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) { 3555 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id]; 3556 u32 block_id; 3557 3558 if (!dump) { 3559 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size; 3560 continue; 3561 } 3562 3563 /* Check if register's block is in reset */ 3564 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID); 3565 if (block_id >= MAX_BLOCK_ID) { 3566 DP_NOTICE(p_hwfn, "Invalid block_id\n"); 3567 return 0; 3568 } 3569 3570 if (!dev_data->block_in_reset[block_id]) { 3571 bool eval_mode = GET_FIELD(reg->mode.data, 3572 DBG_MODE_HDR_EVAL_MODE) > 0; 3573 bool mode_match = true; 3574 3575 /* Check mode */ 3576 if (eval_mode) { 3577 u16 modes_buf_offset = 3578 GET_FIELD(reg->mode.data, 3579 DBG_MODE_HDR_MODES_BUF_OFFSET); 3580 mode_match = 3581 qed_is_mode_match(p_hwfn, 3582 &modes_buf_offset); 3583 } 3584 3585 if (mode_match) { 3586 u32 grc_addr = 3587 DWORDS_TO_BYTES(GET_FIELD(reg->data, 3588 DBG_IDLE_CHK_INFO_REG_ADDRESS)); 3589 3590 /* Write register header */ 3591 struct dbg_idle_chk_result_reg_hdr *reg_hdr = 3592 (struct dbg_idle_chk_result_reg_hdr *) 3593 (dump_buf + offset); 3594 3595 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; 3596 hdr->num_dumped_info_regs++; 3597 memset(reg_hdr, 0, sizeof(*reg_hdr)); 3598 reg_hdr->size = reg->size; 3599 SET_FIELD(reg_hdr->data, 3600 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, 3601 rule->num_cond_regs + reg_id); 3602 3603 /* Write register values */ 3604 for (i = 0; i < reg->size; 3605 i++, offset++, grc_addr += 4) 3606 dump_buf[offset] = 3607 qed_rd(p_hwfn, p_ptt, grc_addr); 3608 } 3609 } 3610 } 3611 3612 return offset; 3613 } 3614 3615 /* Dumps idle check rule entries. Returns the dumped size in dwords. */ 3616 static u32 3617 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 3618 u32 *dump_buf, bool dump, 3619 const struct dbg_idle_chk_rule *input_rules, 3620 u32 num_input_rules, u32 *num_failing_rules) 3621 { 3622 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 3623 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE]; 3624 u32 i, j, offset = 0; 3625 u16 entry_id; 3626 u8 reg_id; 3627 3628 *num_failing_rules = 0; 3629 for (i = 0; i < num_input_rules; i++) { 3630 const struct dbg_idle_chk_cond_reg *cond_regs; 3631 const struct dbg_idle_chk_rule *rule; 3632 const union dbg_idle_chk_reg *regs; 3633 u16 num_reg_entries = 1; 3634 bool check_rule = true; 3635 const u32 *imm_values; 3636 3637 rule = &input_rules[i]; 3638 regs = &((const union dbg_idle_chk_reg *) 3639 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr) 3640 [rule->reg_offset]; 3641 cond_regs = ®s[0].cond_reg; 3642 imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr 3643 [rule->imm_offset]; 3644 3645 /* Check if all condition register blocks are out of reset, and 3646 * find maximal number of entries (all condition registers that 3647 * are memories must have the same size, which is > 1). 
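 * Memory-type condition registers address each entry using an entry size
 * padded to a power of two, which is how the per-entry GRC address is
 * computed further below.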
3648 */ 3649 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; 3650 reg_id++) { 3651 u32 block_id = GET_FIELD(cond_regs[reg_id].data, 3652 DBG_IDLE_CHK_COND_REG_BLOCK_ID); 3653 3654 if (block_id >= MAX_BLOCK_ID) { 3655 DP_NOTICE(p_hwfn, "Invalid block_id\n"); 3656 return 0; 3657 } 3658 3659 check_rule = !dev_data->block_in_reset[block_id]; 3660 if (cond_regs[reg_id].num_entries > num_reg_entries) 3661 num_reg_entries = cond_regs[reg_id].num_entries; 3662 } 3663 3664 if (!check_rule && dump) 3665 continue; 3666 3667 /* Go over all register entries (number of entries is the same 3668 * for all condition registers). 3669 */ 3670 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { 3671 /* Read current entry of all condition registers */ 3672 if (dump) { 3673 u32 next_reg_offset = 0; 3674 3675 for (reg_id = 0; 3676 reg_id < rule->num_cond_regs; 3677 reg_id++) { 3678 const struct dbg_idle_chk_cond_reg 3679 *reg = &cond_regs[reg_id]; 3680 3681 /* Find GRC address (if it's a memory, 3682 * the address of the specific entry is 3683 * calculated). 3684 */ 3685 u32 grc_addr = 3686 DWORDS_TO_BYTES( 3687 GET_FIELD(reg->data, 3688 DBG_IDLE_CHK_COND_REG_ADDRESS)); 3689 3690 if (reg->num_entries > 1 || 3691 reg->start_entry > 0) { 3692 u32 padded_entry_size = 3693 reg->entry_size > 1 ? 3694 roundup_pow_of_two 3695 (reg->entry_size) : 1; 3696 3697 grc_addr += 3698 DWORDS_TO_BYTES( 3699 (reg->start_entry + 3700 entry_id) 3701 * padded_entry_size); 3702 } 3703 3704 /* Read registers */ 3705 if (next_reg_offset + reg->entry_size >= 3706 IDLE_CHK_MAX_ENTRIES_SIZE) { 3707 DP_NOTICE(p_hwfn, 3708 "idle check registers entry is too large\n"); 3709 return 0; 3710 } 3711 3712 for (j = 0; j < reg->entry_size; 3713 j++, next_reg_offset++, 3714 grc_addr += 4) 3715 cond_reg_values[next_reg_offset] = 3716 qed_rd(p_hwfn, p_ptt, grc_addr); 3717 } 3718 } 3719 3720 /* Call rule's condition function - a return value of 3721 * true indicates failure. 3722 */ 3723 if ((*cond_arr[rule->cond_id])(cond_reg_values, 3724 imm_values) || !dump) { 3725 offset += 3726 qed_idle_chk_dump_failure(p_hwfn, 3727 p_ptt, 3728 dump_buf + offset, 3729 dump, 3730 rule->rule_id, 3731 rule, 3732 entry_id, 3733 cond_reg_values); 3734 (*num_failing_rules)++; 3735 break; 3736 } 3737 } 3738 } 3739 3740 return offset; 3741 } 3742 3743 /* Performs Idle Check Dump to the specified buffer. 3744 * Returns the dumped size in dwords. 
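* When dump is false, nothing is written to the buffer; only the required buffer size is calculated.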
3745 */ 3746 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, 3747 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 3748 { 3749 u32 offset = 0, input_offset = 0, num_failing_rules = 0; 3750 u32 num_failing_rules_offset; 3751 3752 /* Dump global params */ 3753 offset += qed_dump_common_global_params(p_hwfn, 3754 p_ptt, 3755 dump_buf + offset, dump, 1); 3756 offset += qed_dump_str_param(dump_buf + offset, 3757 dump, "dump-type", "idle-chk"); 3758 3759 /* Dump idle check section header with a single parameter */ 3760 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1); 3761 num_failing_rules_offset = offset; 3762 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0); 3763 while (input_offset < 3764 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) { 3765 const struct dbg_idle_chk_cond_hdr *cond_hdr = 3766 (const struct dbg_idle_chk_cond_hdr *) 3767 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr 3768 [input_offset++]; 3769 bool eval_mode = GET_FIELD(cond_hdr->mode.data, 3770 DBG_MODE_HDR_EVAL_MODE) > 0; 3771 bool mode_match = true; 3772 3773 /* Check mode */ 3774 if (eval_mode) { 3775 u16 modes_buf_offset = 3776 GET_FIELD(cond_hdr->mode.data, 3777 DBG_MODE_HDR_MODES_BUF_OFFSET); 3778 3779 mode_match = qed_is_mode_match(p_hwfn, 3780 &modes_buf_offset); 3781 } 3782 3783 if (mode_match) { 3784 u32 curr_failing_rules; 3785 3786 offset += 3787 qed_idle_chk_dump_rule_entries(p_hwfn, 3788 p_ptt, 3789 dump_buf + offset, 3790 dump, 3791 (const struct dbg_idle_chk_rule *) 3792 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES]. 3793 ptr[input_offset], 3794 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS, 3795 &curr_failing_rules); 3796 num_failing_rules += curr_failing_rules; 3797 } 3798 3799 input_offset += cond_hdr->data_size; 3800 } 3801 3802 /* Overwrite num_rules parameter */ 3803 if (dump) 3804 qed_dump_num_param(dump_buf + num_failing_rules_offset, 3805 dump, "num_rules", num_failing_rules); 3806 3807 return offset; 3808 } 3809 3810 /* Finds the meta data image in NVRAM. 
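* On success, the image offset and size (in bytes) are returned through nvram_offset_bytes and nvram_size_bytes; the size is expected to be dword-aligned.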
*/ 3811 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, 3812 struct qed_ptt *p_ptt, 3813 u32 image_type, 3814 u32 *nvram_offset_bytes, 3815 u32 *nvram_size_bytes) 3816 { 3817 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; 3818 struct mcp_file_att file_att; 3819 3820 /* Call NVRAM get file command */ 3821 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT, 3822 image_type, &ret_mcp_resp, &ret_mcp_param, 3823 &ret_txn_size, (u32 *)&file_att) != 0) 3824 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; 3825 3826 /* Check response */ 3827 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) 3828 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; 3829 3830 /* Update return values */ 3831 *nvram_offset_bytes = file_att.nvm_start_addr; 3832 *nvram_size_bytes = file_att.len; 3833 DP_VERBOSE(p_hwfn, 3834 QED_MSG_DEBUG, 3835 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", 3836 image_type, *nvram_offset_bytes, *nvram_size_bytes); 3837 3838 /* Check alignment */ 3839 if (*nvram_size_bytes & 0x3) 3840 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; 3841 return DBG_STATUS_OK; 3842 } 3843 3844 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, 3845 struct qed_ptt *p_ptt, 3846 u32 nvram_offset_bytes, 3847 u32 nvram_size_bytes, u32 *ret_buf) 3848 { 3849 u32 ret_mcp_resp, ret_mcp_param, ret_read_size; 3850 u32 bytes_to_copy, read_offset = 0; 3851 s32 bytes_left = nvram_size_bytes; 3852 3853 DP_VERBOSE(p_hwfn, 3854 QED_MSG_DEBUG, 3855 "nvram_read: reading image of size %d bytes from NVRAM\n", 3856 nvram_size_bytes); 3857 do { 3858 bytes_to_copy = 3859 (bytes_left > 3860 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; 3861 3862 /* Call NVRAM read command */ 3863 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, 3864 DRV_MSG_CODE_NVM_READ_NVRAM, 3865 (nvram_offset_bytes + 3866 read_offset) | 3867 (bytes_to_copy << 3868 DRV_MB_PARAM_NVM_LEN_SHIFT), 3869 &ret_mcp_resp, &ret_mcp_param, 3870 &ret_read_size, 3871 (u32 *)((u8 *)ret_buf + 3872 read_offset)) != 0) 3873 return DBG_STATUS_NVRAM_READ_FAILED; 3874 3875 /* Check response */ 3876 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) 3877 return DBG_STATUS_NVRAM_READ_FAILED; 3878 3879 /* Update read offset */ 3880 read_offset += ret_read_size; 3881 bytes_left -= ret_read_size; 3882 } while (bytes_left > 0); 3883 3884 return DBG_STATUS_OK; 3885 } 3886 3887 /* Get info on the MCP Trace data in the scratchpad: 3888 * - trace_data_grc_addr - the GRC address of the trace data 3889 * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without 3890 * the header) 3891 */ 3892 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, 3893 struct qed_ptt *p_ptt, 3894 u32 *trace_data_grc_addr, 3895 u32 *trace_data_size_bytes) 3896 { 3897 /* Read MCP trace section offsize structure from MCP scratchpad */ 3898 u32 spad_trace_offsize = qed_rd(p_hwfn, 3899 p_ptt, 3900 MCP_SPAD_TRACE_OFFSIZE_ADDR); 3901 u32 signature; 3902 3903 /* Extract MCP trace section GRC address from offsize structure (within 3904 * scratchpad). 
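* The offsize dword encodes both the offset and the size of the trace section relative to the scratchpad.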
3905 */ 3906 *trace_data_grc_addr = 3907 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize); 3908 3909 /* Read signature from MCP trace section */ 3910 signature = qed_rd(p_hwfn, p_ptt, 3911 *trace_data_grc_addr + 3912 offsetof(struct mcp_trace, signature)); 3913 if (signature != MFW_TRACE_SIGNATURE) 3914 return DBG_STATUS_INVALID_TRACE_SIGNATURE; 3915 3916 /* Read trace size from MCP trace section */ 3917 *trace_data_size_bytes = qed_rd(p_hwfn, 3918 p_ptt, 3919 *trace_data_grc_addr + 3920 offsetof(struct mcp_trace, size)); 3921 return DBG_STATUS_OK; 3922 } 3923 3924 /* Reads MCP trace meta data image from NVRAM. 3925 * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from 3926 * file) 3927 * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP 3928 * Trace meta data starts (invalid when loaded from file) 3929 * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data 3930 */ 3931 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, 3932 struct qed_ptt *p_ptt, 3933 u32 trace_data_size_bytes, 3934 u32 *running_bundle_id, 3935 u32 *trace_meta_offset_bytes, 3936 u32 *trace_meta_size_bytes) 3937 { 3938 /* Read MCP trace section offsize structure from MCP scratchpad */ 3939 u32 spad_trace_offsize = qed_rd(p_hwfn, 3940 p_ptt, 3941 MCP_SPAD_TRACE_OFFSIZE_ADDR); 3942 3943 /* Find running bundle ID */ 3944 u32 running_mfw_addr = 3945 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + 3946 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; 3947 enum dbg_status status; 3948 u32 nvram_image_type; 3949 3950 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); 3951 if (*running_bundle_id > 1) 3952 return DBG_STATUS_INVALID_NVRAM_BUNDLE; 3953 3954 /* Find image in NVRAM */ 3955 nvram_image_type = 3956 (*running_bundle_id == 3957 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; 3958 status = qed_find_nvram_image(p_hwfn, 3959 p_ptt, 3960 nvram_image_type, 3961 trace_meta_offset_bytes, 3962 trace_meta_size_bytes); 3963 3964 return status; 3965 } 3966 3967 /* Reads the MCP Trace data from the specified GRC address into the specified 3968 * buffer. 3969 */ 3970 static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn, 3971 struct qed_ptt *p_ptt, 3972 u32 grc_addr, u32 size_in_dwords, u32 *buf) 3973 { 3974 u32 i; 3975 3976 DP_VERBOSE(p_hwfn, 3977 QED_MSG_DEBUG, 3978 "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n", 3979 size_in_dwords, grc_addr); 3980 for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD) 3981 buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr); 3982 } 3983 3984 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified 3985 * buffer. 
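* The image is validated by checking the signatures that precede and follow the list of module names.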
3986 */ 3987 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, 3988 struct qed_ptt *p_ptt, 3989 u32 nvram_offset_in_bytes, 3990 u32 size_in_bytes, u32 *buf) 3991 { 3992 u8 *byte_buf = (u8 *)buf; 3993 u8 modules_num, i; 3994 u32 signature; 3995 3996 /* Read meta data from NVRAM */ 3997 enum dbg_status status = qed_nvram_read(p_hwfn, 3998 p_ptt, 3999 nvram_offset_in_bytes, 4000 size_in_bytes, 4001 buf); 4002 4003 if (status != DBG_STATUS_OK) 4004 return status; 4005 4006 /* Extract and check first signature */ 4007 signature = qed_read_unaligned_dword(byte_buf); 4008 byte_buf += sizeof(u32); 4009 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) 4010 return DBG_STATUS_INVALID_TRACE_SIGNATURE; 4011 4012 /* Extract number of modules */ 4013 modules_num = *(byte_buf++); 4014 4015 /* Skip all modules */ 4016 for (i = 0; i < modules_num; i++) { 4017 u8 module_len = *(byte_buf++); 4018 4019 byte_buf += module_len; 4020 } 4021 4022 /* Extract and check second signature */ 4023 signature = qed_read_unaligned_dword(byte_buf); 4024 byte_buf += sizeof(u32); 4025 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) 4026 return DBG_STATUS_INVALID_TRACE_SIGNATURE; 4027 return DBG_STATUS_OK; 4028 } 4029 4030 /* Dump MCP Trace */ 4031 enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, 4032 struct qed_ptt *p_ptt, 4033 u32 *dump_buf, 4034 bool dump, u32 *num_dumped_dwords) 4035 { 4036 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; 4037 u32 trace_meta_size_dwords, running_bundle_id, offset = 0; 4038 u32 trace_meta_offset_bytes, trace_meta_size_bytes; 4039 enum dbg_status status; 4040 int halted = 0; 4041 4042 *num_dumped_dwords = 0; 4043 4044 /* Get trace data info */ 4045 status = qed_mcp_trace_get_data_info(p_hwfn, 4046 p_ptt, 4047 &trace_data_grc_addr, 4048 &trace_data_size_bytes); 4049 if (status != DBG_STATUS_OK) 4050 return status; 4051 4052 /* Dump global params */ 4053 offset += qed_dump_common_global_params(p_hwfn, 4054 p_ptt, 4055 dump_buf + offset, dump, 1); 4056 offset += qed_dump_str_param(dump_buf + offset, 4057 dump, "dump-type", "mcp-trace"); 4058 4059 /* Halt MCP while reading from scratchpad so the read data will be 4060 * consistent. If halt fails, MCP trace is taken anyway, with a small 4061 * risk that it may be corrupt.
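* If the halt succeeded, the MCP is resumed right after the trace data has been read.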
4062 */ 4063 if (dump) { 4064 halted = !qed_mcp_halt(p_hwfn, p_ptt); 4065 if (!halted) 4066 DP_NOTICE(p_hwfn, "MCP halt failed!\n"); 4067 } 4068 4069 /* Find trace data size */ 4070 trace_data_size_dwords = 4071 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), 4072 BYTES_IN_DWORD); 4073 4074 /* Dump trace data section header and param */ 4075 offset += qed_dump_section_hdr(dump_buf + offset, 4076 dump, "mcp_trace_data", 1); 4077 offset += qed_dump_num_param(dump_buf + offset, 4078 dump, "size", trace_data_size_dwords); 4079 4080 /* Read trace data from scratchpad into dump buffer */ 4081 if (dump) 4082 qed_mcp_trace_read_data(p_hwfn, 4083 p_ptt, 4084 trace_data_grc_addr, 4085 trace_data_size_dwords, 4086 dump_buf + offset); 4087 offset += trace_data_size_dwords; 4088 4089 /* Resume MCP (only if halt succeeded) */ 4090 if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0) 4091 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); 4092 4093 /* Dump trace meta section header */ 4094 offset += qed_dump_section_hdr(dump_buf + offset, 4095 dump, "mcp_trace_meta", 1); 4096 4097 /* Read trace meta info */ 4098 status = qed_mcp_trace_get_meta_info(p_hwfn, 4099 p_ptt, 4100 trace_data_size_bytes, 4101 &running_bundle_id, 4102 &trace_meta_offset_bytes, 4103 &trace_meta_size_bytes); 4104 if (status != DBG_STATUS_OK) 4105 return status; 4106 4107 /* Dump trace meta size param (trace_meta_size_bytes is always 4108 * dword-aligned). 4109 */ 4110 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes); 4111 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 4112 trace_meta_size_dwords); 4113 4114 /* Read trace meta image into dump buffer */ 4115 if (dump) { 4116 status = qed_mcp_trace_read_meta(p_hwfn, 4117 p_ptt, 4118 trace_meta_offset_bytes, 4119 trace_meta_size_bytes, 4120 dump_buf + offset); 4121 if (status != DBG_STATUS_OK) 4122 return status; 4123 } 4124 4125 offset += trace_meta_size_dwords; 4126 4127 *num_dumped_dwords = offset; 4128 4129 return DBG_STATUS_OK; 4130 } 4131 4132 /* Dump GRC FIFO */ 4133 enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, 4134 struct qed_ptt *p_ptt, 4135 u32 *dump_buf, 4136 bool dump, u32 *num_dumped_dwords) 4137 { 4138 u32 offset = 0, dwords_read, size_param_offset; 4139 bool fifo_has_data; 4140 4141 *num_dumped_dwords = 0; 4142 4143 /* Dump global params */ 4144 offset += qed_dump_common_global_params(p_hwfn, 4145 p_ptt, 4146 dump_buf + offset, dump, 1); 4147 offset += qed_dump_str_param(dump_buf + offset, 4148 dump, "dump-type", "reg-fifo"); 4149 4150 /* Dump fifo data section header and param. The size param is 0 for now, 4151 * and is overwritten after reading the FIFO. 4152 */ 4153 offset += qed_dump_section_hdr(dump_buf + offset, 4154 dump, "reg_fifo_data", 1); 4155 size_param_offset = offset; 4156 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); 4157 4158 if (!dump) { 4159 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to 4160 * test how much data is available, except for reading it. 4161 */ 4162 offset += REG_FIFO_DEPTH_DWORDS; 4163 *num_dumped_dwords = offset; 4164 return DBG_STATUS_OK; 4165 } 4166 4167 fifo_has_data = qed_rd(p_hwfn, p_ptt, 4168 GRC_REG_TRACE_FIFO_VALID_DATA) > 0; 4169 4170 /* Pull available data from fifo. Use DMAE since this is widebus memory 4171 * and must be accessed atomically. Test for dwords_read not passing 4172 * buffer size since more entries could be added to the buffer as we are 4173 * emptying it. 
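* Reading stops once the FIFO reports no more valid data or once REG_FIFO_DEPTH_DWORDS dwords have been read.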
4174 */ 4175 for (dwords_read = 0; 4176 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; 4177 dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += 4178 REG_FIFO_ELEMENT_DWORDS) { 4179 if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, 4180 (u64)(uintptr_t)(&dump_buf[offset]), 4181 REG_FIFO_ELEMENT_DWORDS, 0)) 4182 return DBG_STATUS_DMAE_FAILED; 4183 fifo_has_data = qed_rd(p_hwfn, p_ptt, 4184 GRC_REG_TRACE_FIFO_VALID_DATA) > 0; 4185 } 4186 4187 qed_dump_num_param(dump_buf + size_param_offset, dump, "size", 4188 dwords_read); 4189 4190 *num_dumped_dwords = offset; 4191 return DBG_STATUS_OK; 4192 } 4193 4194 /* Dump IGU FIFO */ 4195 enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, 4196 struct qed_ptt *p_ptt, 4197 u32 *dump_buf, 4198 bool dump, u32 *num_dumped_dwords) 4199 { 4200 u32 offset = 0, dwords_read, size_param_offset; 4201 bool fifo_has_data; 4202 4203 *num_dumped_dwords = 0; 4204 4205 /* Dump global params */ 4206 offset += qed_dump_common_global_params(p_hwfn, 4207 p_ptt, 4208 dump_buf + offset, dump, 1); 4209 offset += qed_dump_str_param(dump_buf + offset, 4210 dump, "dump-type", "igu-fifo"); 4211 4212 /* Dump fifo data section header and param. The size param is 0 for now, 4213 * and is overwritten after reading the FIFO. 4214 */ 4215 offset += qed_dump_section_hdr(dump_buf + offset, 4216 dump, "igu_fifo_data", 1); 4217 size_param_offset = offset; 4218 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); 4219 4220 if (!dump) { 4221 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to 4222 * test how much data is available, except for reading it. 4223 */ 4224 offset += IGU_FIFO_DEPTH_DWORDS; 4225 *num_dumped_dwords = offset; 4226 return DBG_STATUS_OK; 4227 } 4228 4229 fifo_has_data = qed_rd(p_hwfn, p_ptt, 4230 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; 4231 4232 /* Pull available data from fifo. Use DMAE since this is widebus memory 4233 * and must be accessed atomically. Test for dwords_read not passing 4234 * buffer size since more entries could be added to the buffer as we are 4235 * emptying it. 4236 */ 4237 for (dwords_read = 0; 4238 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; 4239 dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += 4240 IGU_FIFO_ELEMENT_DWORDS) { 4241 if (qed_dmae_grc2host(p_hwfn, p_ptt, 4242 IGU_REG_ERROR_HANDLING_MEMORY, 4243 (u64)(uintptr_t)(&dump_buf[offset]), 4244 IGU_FIFO_ELEMENT_DWORDS, 0)) 4245 return DBG_STATUS_DMAE_FAILED; 4246 fifo_has_data = qed_rd(p_hwfn, p_ptt, 4247 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; 4248 } 4249 4250 qed_dump_num_param(dump_buf + size_param_offset, dump, "size", 4251 dwords_read); 4252 4253 *num_dumped_dwords = offset; 4254 return DBG_STATUS_OK; 4255 } 4256 4257 /* Protection Override dump */ 4258 enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, 4259 struct qed_ptt *p_ptt, 4260 u32 *dump_buf, 4261 bool dump, u32 *num_dumped_dwords) 4262 { 4263 u32 offset = 0, size_param_offset, override_window_dwords; 4264 4265 *num_dumped_dwords = 0; 4266 4267 /* Dump global params */ 4268 offset += qed_dump_common_global_params(p_hwfn, 4269 p_ptt, 4270 dump_buf + offset, dump, 1); 4271 offset += qed_dump_str_param(dump_buf + offset, 4272 dump, "dump-type", "protection-override"); 4273 4274 /* Dump data section header and param. The size param is 0 for now, and 4275 * is overwritten after reading the data. 
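* The actual size is written back once the number of valid override windows has been read from GRC.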
4276 */ 4277 offset += qed_dump_section_hdr(dump_buf + offset, 4278 dump, "protection_override_data", 1); 4279 size_param_offset = offset; 4280 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); 4281 4282 if (!dump) { 4283 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS; 4284 *num_dumped_dwords = offset; 4285 return DBG_STATUS_OK; 4286 } 4287 4288 /* Add override window info to buffer */ 4289 override_window_dwords = 4290 qed_rd(p_hwfn, p_ptt, 4291 GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * 4292 PROTECTION_OVERRIDE_ELEMENT_DWORDS; 4293 if (qed_dmae_grc2host(p_hwfn, p_ptt, 4294 GRC_REG_PROTECTION_OVERRIDE_WINDOW, 4295 (u64)(uintptr_t)(dump_buf + offset), 4296 override_window_dwords, 0)) 4297 return DBG_STATUS_DMAE_FAILED; 4298 offset += override_window_dwords; 4299 qed_dump_num_param(dump_buf + size_param_offset, dump, "size", 4300 override_window_dwords); 4301 4302 *num_dumped_dwords = offset; 4303 return DBG_STATUS_OK; 4304 } 4305 4306 /* Performs FW Asserts Dump to the specified buffer. 4307 * Returns the dumped size in dwords. 4308 */ 4309 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, 4310 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) 4311 { 4312 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 4313 char storm_letter_str[2] = "?"; 4314 struct fw_info fw_info; 4315 u32 offset = 0, i; 4316 u8 storm_id; 4317 4318 /* Dump global params */ 4319 offset += qed_dump_common_global_params(p_hwfn, 4320 p_ptt, 4321 dump_buf + offset, dump, 1); 4322 offset += qed_dump_str_param(dump_buf + offset, 4323 dump, "dump-type", "fw-asserts"); 4324 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { 4325 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx, 4326 last_list_idx, element_addr; 4327 4328 if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id]) 4329 continue; 4330 4331 /* Read FW info for the current Storm */ 4332 qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); 4333 4334 /* Dump FW Asserts section header and params */ 4335 storm_letter_str[0] = s_storm_defs[storm_id].letter; 4336 offset += qed_dump_section_hdr(dump_buf + offset, dump, 4337 "fw_asserts", 2); 4338 offset += qed_dump_str_param(dump_buf + offset, dump, "storm", 4339 storm_letter_str); 4340 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 4341 fw_info.fw_asserts_section. 4342 list_element_dword_size); 4343 4344 if (!dump) { 4345 offset += fw_info.fw_asserts_section. 4346 list_element_dword_size; 4347 continue; 4348 } 4349 4350 /* Read and dump FW Asserts data */ 4351 fw_asserts_section_addr = 4352 s_storm_defs[storm_id].sem_fast_mem_addr + 4353 SEM_FAST_REG_INT_RAM + 4354 RAM_LINES_TO_BYTES(fw_info.fw_asserts_section. 4355 section_ram_line_offset); 4356 next_list_idx_addr = 4357 fw_asserts_section_addr + 4358 DWORDS_TO_BYTES(fw_info.fw_asserts_section. 4359 list_next_index_dword_offset); 4360 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); 4361 last_list_idx = (next_list_idx > 0 4362 ? next_list_idx 4363 : fw_info.fw_asserts_section.list_num_elements) 4364 - 1; 4365 element_addr = 4366 fw_asserts_section_addr + 4367 DWORDS_TO_BYTES(fw_info.fw_asserts_section. 4368 list_dword_offset) + 4369 last_list_idx * 4370 DWORDS_TO_BYTES(fw_info.fw_asserts_section. 
4371 list_element_dword_size); 4372 for (i = 0; 4373 i < fw_info.fw_asserts_section.list_element_dword_size; 4374 i++, offset++, element_addr += BYTES_IN_DWORD) 4375 dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr); 4376 } 4377 4378 /* Dump last section */ 4379 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); 4380 return offset; 4381 } 4382 4383 /***************************** Public Functions *******************************/ 4384 4385 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) 4386 { 4387 /* Convert binary data to debug arrays */ 4388 u32 num_of_buffers = *(u32 *)bin_ptr; 4389 struct bin_buffer_hdr *buf_array; 4390 u8 buf_id; 4391 4392 buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1); 4393 4394 for (buf_id = 0; buf_id < num_of_buffers; buf_id++) { 4395 s_dbg_arrays[buf_id].ptr = 4396 (u32 *)(bin_ptr + buf_array[buf_id].offset); 4397 s_dbg_arrays[buf_id].size_in_dwords = 4398 BYTES_TO_DWORDS(buf_array[buf_id].length); 4399 } 4400 4401 return DBG_STATUS_OK; 4402 } 4403 4404 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4405 struct qed_ptt *p_ptt, 4406 u32 *buf_size) 4407 { 4408 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4409 4410 *buf_size = 0; 4411 if (status != DBG_STATUS_OK) 4412 return status; 4413 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || 4414 !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || 4415 !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || 4416 !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || 4417 !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) 4418 return DBG_STATUS_DBG_ARRAY_NOT_SET; 4419 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size); 4420 } 4421 4422 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, 4423 struct qed_ptt *p_ptt, 4424 u32 *dump_buf, 4425 u32 buf_size_in_dwords, 4426 u32 *num_dumped_dwords) 4427 { 4428 u32 needed_buf_size_in_dwords; 4429 enum dbg_status status; 4430 4431 status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, 4432 &needed_buf_size_in_dwords); 4433 4434 *num_dumped_dwords = 0; 4435 if (status != DBG_STATUS_OK) 4436 return status; 4437 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4438 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4439 4440 /* GRC Dump */ 4441 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); 4442 4443 /* Clear all GRC params */ 4444 qed_dbg_grc_clear_params(p_hwfn); 4445 return status; 4446 } 4447 4448 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4449 struct qed_ptt *p_ptt, 4450 u32 *buf_size) 4451 { 4452 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4453 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; 4454 4455 *buf_size = 0; 4456 if (status != DBG_STATUS_OK) 4457 return status; 4458 if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || 4459 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || 4460 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || 4461 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) 4462 return DBG_STATUS_DBG_ARRAY_NOT_SET; 4463 if (!dev_data->idle_chk.buf_size_set) { 4464 dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn, 4465 p_ptt, 4466 NULL, false); 4467 dev_data->idle_chk.buf_size_set = true; 4468 } 4469 4470 *buf_size = dev_data->idle_chk.buf_size; 4471 return DBG_STATUS_OK; 4472 } 4473 4474 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, 4475 struct qed_ptt *p_ptt, 4476 u32 *dump_buf, 4477 u32 buf_size_in_dwords, 4478 u32 *num_dumped_dwords) 4479 { 4480 u32 needed_buf_size_in_dwords; 4481 enum dbg_status status; 4482 4483 status = 
qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, 4484 &needed_buf_size_in_dwords); 4485 4486 *num_dumped_dwords = 0; 4487 if (status != DBG_STATUS_OK) 4488 return status; 4489 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4490 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4491 4492 /* Update reset state */ 4493 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4494 4495 /* Idle Check Dump */ 4496 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true); 4497 return DBG_STATUS_OK; 4498 } 4499 4500 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4501 struct qed_ptt *p_ptt, 4502 u32 *buf_size) 4503 { 4504 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4505 4506 *buf_size = 0; 4507 if (status != DBG_STATUS_OK) 4508 return status; 4509 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size); 4510 } 4511 4512 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, 4513 struct qed_ptt *p_ptt, 4514 u32 *dump_buf, 4515 u32 buf_size_in_dwords, 4516 u32 *num_dumped_dwords) 4517 { 4518 u32 needed_buf_size_in_dwords; 4519 enum dbg_status status; 4520 4521 status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt, 4522 &needed_buf_size_in_dwords); 4523 4524 if (status != DBG_STATUS_OK) 4525 return status; 4526 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4527 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4528 4529 /* Update reset state */ 4530 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4531 4532 /* Perform dump */ 4533 return qed_mcp_trace_dump(p_hwfn, 4534 p_ptt, dump_buf, true, num_dumped_dwords); 4535 } 4536 4537 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4538 struct qed_ptt *p_ptt, 4539 u32 *buf_size) 4540 { 4541 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4542 4543 *buf_size = 0; 4544 if (status != DBG_STATUS_OK) 4545 return status; 4546 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); 4547 } 4548 4549 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, 4550 struct qed_ptt *p_ptt, 4551 u32 *dump_buf, 4552 u32 buf_size_in_dwords, 4553 u32 *num_dumped_dwords) 4554 { 4555 u32 needed_buf_size_in_dwords; 4556 enum dbg_status status; 4557 4558 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, 4559 &needed_buf_size_in_dwords); 4560 4561 *num_dumped_dwords = 0; 4562 if (status != DBG_STATUS_OK) 4563 return status; 4564 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4565 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4566 4567 /* Update reset state */ 4568 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4569 return qed_reg_fifo_dump(p_hwfn, 4570 p_ptt, dump_buf, true, num_dumped_dwords); 4571 } 4572 4573 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4574 struct qed_ptt *p_ptt, 4575 u32 *buf_size) 4576 { 4577 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4578 4579 *buf_size = 0; 4580 if (status != DBG_STATUS_OK) 4581 return status; 4582 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); 4583 } 4584 4585 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, 4586 struct qed_ptt *p_ptt, 4587 u32 *dump_buf, 4588 u32 buf_size_in_dwords, 4589 u32 *num_dumped_dwords) 4590 { 4591 u32 needed_buf_size_in_dwords; 4592 enum dbg_status status; 4593 4594 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, 4595 &needed_buf_size_in_dwords); 4596 4597 *num_dumped_dwords = 0; 4598 if (status != DBG_STATUS_OK) 4599 return status; 4600 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4601 return 
DBG_STATUS_DUMP_BUF_TOO_SMALL; 4602 4603 /* Update reset state */ 4604 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4605 return qed_igu_fifo_dump(p_hwfn, 4606 p_ptt, dump_buf, true, num_dumped_dwords); 4607 } 4608 4609 enum dbg_status 4610 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4611 struct qed_ptt *p_ptt, 4612 u32 *buf_size) 4613 { 4614 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4615 4616 *buf_size = 0; 4617 if (status != DBG_STATUS_OK) 4618 return status; 4619 return qed_protection_override_dump(p_hwfn, 4620 p_ptt, NULL, false, buf_size); 4621 } 4622 4623 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, 4624 struct qed_ptt *p_ptt, 4625 u32 *dump_buf, 4626 u32 buf_size_in_dwords, 4627 u32 *num_dumped_dwords) 4628 { 4629 u32 needed_buf_size_in_dwords; 4630 enum dbg_status status; 4631 4632 status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt, 4633 &needed_buf_size_in_dwords); 4634 4635 *num_dumped_dwords = 0; 4636 if (status != DBG_STATUS_OK) 4637 return status; 4638 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4639 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4640 4641 /* Update reset state */ 4642 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4643 return qed_protection_override_dump(p_hwfn, 4644 p_ptt, 4645 dump_buf, true, num_dumped_dwords); 4646 } 4647 4648 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, 4649 struct qed_ptt *p_ptt, 4650 u32 *buf_size) 4651 { 4652 enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt); 4653 4654 *buf_size = 0; 4655 if (status != DBG_STATUS_OK) 4656 return status; 4657 4658 /* Update reset state */ 4659 qed_update_blocks_reset_state(p_hwfn, p_ptt); 4660 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false); 4661 return DBG_STATUS_OK; 4662 } 4663 4664 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, 4665 struct qed_ptt *p_ptt, 4666 u32 *dump_buf, 4667 u32 buf_size_in_dwords, 4668 u32 *num_dumped_dwords) 4669 { 4670 u32 needed_buf_size_in_dwords; 4671 enum dbg_status status; 4672 4673 status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt, 4674 &needed_buf_size_in_dwords); 4675 4676 *num_dumped_dwords = 0; 4677 if (status != DBG_STATUS_OK) 4678 return status; 4679 if (buf_size_in_dwords < needed_buf_size_in_dwords) 4680 return DBG_STATUS_DUMP_BUF_TOO_SMALL; 4681 4682 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true); 4683 return DBG_STATUS_OK; 4684 } 4685 4686 /******************************* Data Types **********************************/ 4687 4688 struct mcp_trace_format { 4689 u32 data; 4690 #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff 4691 #define MCP_TRACE_FORMAT_MODULE_SHIFT 0 4692 #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 4693 #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 4694 #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 4695 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 4696 #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 4697 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 4698 #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 4699 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 4700 #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 4701 #define MCP_TRACE_FORMAT_LEN_SHIFT 24 4702 char *format_str; 4703 }; 4704 4705 struct mcp_trace_meta { 4706 u32 modules_num; 4707 char **modules; 4708 u32 formats_num; 4709 struct mcp_trace_format *formats; 4710 }; 4711 4712 /* Reg fifo element */ 4713 struct reg_fifo_element { 4714 u64 data; 4715 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0 4716 #define 
REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff 4717 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23 4718 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1 4719 #define REG_FIFO_ELEMENT_PF_SHIFT 24 4720 #define REG_FIFO_ELEMENT_PF_MASK 0xf 4721 #define REG_FIFO_ELEMENT_VF_SHIFT 28 4722 #define REG_FIFO_ELEMENT_VF_MASK 0xff 4723 #define REG_FIFO_ELEMENT_PORT_SHIFT 36 4724 #define REG_FIFO_ELEMENT_PORT_MASK 0x3 4725 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38 4726 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3 4727 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40 4728 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7 4729 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43 4730 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf 4731 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47 4732 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f 4733 }; 4734 4735 /* IGU fifo element */ 4736 struct igu_fifo_element { 4737 u32 dword0; 4738 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0 4739 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff 4740 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8 4741 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1 4742 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9 4743 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf 4744 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13 4745 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf 4746 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17 4747 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff 4748 u32 dword1; 4749 u32 dword2; 4750 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0 4751 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1 4752 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1 4753 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff 4754 u32 reserved; 4755 }; 4756 4757 struct igu_fifo_wr_data { 4758 u32 data; 4759 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0 4760 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff 4761 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24 4762 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1 4763 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25 4764 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3 4765 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27 4766 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1 4767 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28 4768 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1 4769 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31 4770 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1 4771 }; 4772 4773 struct igu_fifo_cleanup_wr_data { 4774 u32 data; 4775 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0 4776 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff 4777 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27 4778 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1 4779 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28 4780 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7 4781 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31 4782 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1 4783 }; 4784 4785 /* Protection override element */ 4786 struct protection_override_element { 4787 u64 data; 4788 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0 4789 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff 4790 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23 4791 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff 4792 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47 4793 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1 4794 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48 4795 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1 4796 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49 4797 #define 
PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7 4798 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52 4799 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7 4800 }; 4801 4802 enum igu_fifo_sources { 4803 IGU_SRC_PXP0, 4804 IGU_SRC_PXP1, 4805 IGU_SRC_PXP2, 4806 IGU_SRC_PXP3, 4807 IGU_SRC_PXP4, 4808 IGU_SRC_PXP5, 4809 IGU_SRC_PXP6, 4810 IGU_SRC_PXP7, 4811 IGU_SRC_CAU, 4812 IGU_SRC_ATTN, 4813 IGU_SRC_GRC 4814 }; 4815 4816 enum igu_fifo_addr_types { 4817 IGU_ADDR_TYPE_MSIX_MEM, 4818 IGU_ADDR_TYPE_WRITE_PBA, 4819 IGU_ADDR_TYPE_WRITE_INT_ACK, 4820 IGU_ADDR_TYPE_WRITE_ATTN_BITS, 4821 IGU_ADDR_TYPE_READ_INT, 4822 IGU_ADDR_TYPE_WRITE_PROD_UPDATE, 4823 IGU_ADDR_TYPE_RESERVED 4824 }; 4825 4826 struct igu_fifo_addr_data { 4827 u16 start_addr; 4828 u16 end_addr; 4829 char *desc; 4830 char *vf_desc; 4831 enum igu_fifo_addr_types type; 4832 }; 4833 4834 /******************************** Constants **********************************/ 4835 4836 #define MAX_MSG_LEN 1024 4837 #define MCP_TRACE_MAX_MODULE_LEN 8 4838 #define MCP_TRACE_FORMAT_MAX_PARAMS 3 4839 #define MCP_TRACE_FORMAT_PARAM_WIDTH \ 4840 (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT) 4841 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4 4842 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127 4843 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 4844 4845 /********************************* Macros ************************************/ 4846 4847 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) 4848 4849 /***************************** Constant Arrays *******************************/ 4850 4851 /* Status string array */ 4852 static const char * const s_status_str[] = { 4853 "Operation completed successfully", 4854 "Debug application version wasn't set", 4855 "Unsupported debug application version", 4856 "The debug block wasn't reset since the last recording", 4857 "Invalid arguments", 4858 "The debug output was already set", 4859 "Invalid PCI buffer size", 4860 "PCI buffer allocation failed", 4861 "A PCI buffer wasn't allocated", 4862 "Too many inputs were enabled. 
Enable fewer inputs, or set 'unifyInputs' to true", 4863 "GRC/Timestamp input overlap in cycle dword 0", 4864 "Cannot record Storm data since the entire recording cycle is used by HW", 4865 "The Storm was already enabled", 4866 "The specified Storm wasn't enabled", 4867 "The block was already enabled", 4868 "The specified block wasn't enabled", 4869 "No input was enabled for recording", 4870 "Filters and triggers are not allowed when recording in 64b units", 4871 "The filter was already enabled", 4872 "The trigger was already enabled", 4873 "The trigger wasn't enabled", 4874 "A constraint can be added only after a filter was enabled or a trigger state was added", 4875 "Cannot add more than 3 trigger states", 4876 "Cannot add more than 4 constraints per filter or trigger state", 4877 "The recording wasn't started", 4878 "A trigger was configured, but it didn't trigger", 4879 "No data was recorded", 4880 "Dump buffer is too small", 4881 "Dumped data is not aligned to chunks", 4882 "Unknown chip", 4883 "Failed allocating virtual memory", 4884 "The input block is in reset", 4885 "Invalid MCP trace signature found in NVRAM", 4886 "Invalid bundle ID found in NVRAM", 4887 "Failed getting NVRAM image", 4888 "NVRAM image is not dword-aligned", 4889 "Failed reading from NVRAM", 4890 "Idle check parsing failed", 4891 "MCP Trace data is corrupt", 4892 "Dump doesn't contain meta data - it must be provided in an image file", 4893 "Failed to halt MCP", 4894 "Failed to resume MCP after halt", 4895 "DMAE transaction failed", 4896 "Failed to empty SEMI sync FIFO", 4897 "IGU FIFO data is corrupt", 4898 "MCP failed to mask parities", 4899 "FW Asserts parsing failed", 4900 "GRC FIFO data is corrupt", 4901 "Protection Override data is corrupt", 4902 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)", 4903 "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)" 4904 }; 4905 4906 /* Idle check severity names array */ 4907 static const char * const s_idle_chk_severity_str[] = { 4908 "Error", 4909 "Error if no traffic", 4910 "Warning" 4911 }; 4912 4913 /* MCP Trace level names array */ 4914 static const char * const s_mcp_trace_level_str[] = { 4915 "ERROR", 4916 "TRACE", 4917 "DEBUG" 4918 }; 4919 4920 /* Parsing strings */ 4921 static const char * const s_access_strs[] = { 4922 "read", 4923 "write" 4924 }; 4925 4926 static const char * const s_privilege_strs[] = { 4927 "VF", 4928 "PDA", 4929 "HV", 4930 "UA" 4931 }; 4932 4933 static const char * const s_protection_strs[] = { 4934 "(default)", 4935 "(default)", 4936 "(default)", 4937 "(default)", 4938 "override VF", 4939 "override PDA", 4940 "override HV", 4941 "override UA" 4942 }; 4943 4944 static const char * const s_master_strs[] = { 4945 "???", 4946 "pxp", 4947 "mcp", 4948 "msdm", 4949 "psdm", 4950 "ysdm", 4951 "usdm", 4952 "tsdm", 4953 "xsdm", 4954 "dbu", 4955 "dmae", 4956 "???", 4957 "???", 4958 "???", 4959 "???", 4960 "???"
4961 }; 4962 4963 static const char * const s_reg_fifo_error_strs[] = { 4964 "grc timeout", 4965 "address doesn't belong to any block", 4966 "reserved address in block or write to read-only address", 4967 "privilege/protection mismatch", 4968 "path isolation error" 4969 }; 4970 4971 static const char * const s_igu_fifo_source_strs[] = { 4972 "TSTORM", 4973 "MSTORM", 4974 "USTORM", 4975 "XSTORM", 4976 "YSTORM", 4977 "PSTORM", 4978 "PCIE", 4979 "NIG_QM_PBF", 4980 "CAU", 4981 "ATTN", 4982 "GRC", 4983 }; 4984 4985 static const char * const s_igu_fifo_error_strs[] = { 4986 "no error", 4987 "length error", 4988 "function disabled", 4989 "VF sent command to attention address", 4990 "host sent prod update command", 4991 "read of during interrupt register while in MIMD mode", 4992 "access to PXP BAR reserved address", 4993 "producer update command to attention index", 4994 "unknown error", 4995 "SB index not valid", 4996 "SB relative index and FID not found", 4997 "FID not match", 4998 "command with error flag asserted (PCI error or CAU discard)", 4999 "VF sent cleanup and RF cleanup is disabled", 5000 "cleanup command on type bigger than 4" 5001 }; 5002 5003 /* IGU FIFO address data */ 5004 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { 5005 {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM}, 5006 {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, 5007 {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA}, 5008 {0x201, 0x201, "Write PBA[64:127]", "reserved", 5009 IGU_ADDR_TYPE_WRITE_PBA}, 5010 {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA}, 5011 {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, 5012 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL, 5013 IGU_ADDR_TYPE_WRITE_INT_ACK}, 5014 {0x5f0, 0x5f0, "Attention bits update", NULL, 5015 IGU_ADDR_TYPE_WRITE_ATTN_BITS}, 5016 {0x5f1, 0x5f1, "Attention bits set", NULL, 5017 IGU_ADDR_TYPE_WRITE_ATTN_BITS}, 5018 {0x5f2, 0x5f2, "Attention bits clear", NULL, 5019 IGU_ADDR_TYPE_WRITE_ATTN_BITS}, 5020 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL, 5021 IGU_ADDR_TYPE_READ_INT}, 5022 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL, 5023 IGU_ADDR_TYPE_READ_INT}, 5024 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL, 5025 IGU_ADDR_TYPE_READ_INT}, 5026 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL, 5027 IGU_ADDR_TYPE_READ_INT}, 5028 {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED}, 5029 {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE} 5030 }; 5031 5032 /******************************** Variables **********************************/ 5033 5034 /* MCP Trace meta data - used in case the dump doesn't contain the meta data 5035 * (e.g. due to no NVRAM access).
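* When the dump contains no meta data section, parsing falls back to this buffer (see qed_parse_mcp_trace_dump).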
5036 */ 5037 static struct dbg_array s_mcp_trace_meta = { NULL, 0 }; 5038 5039 /* Temporary buffer, used for print size calculations */ 5040 static char s_temp_buf[MAX_MSG_LEN]; 5041 5042 /***************************** Public Functions *******************************/ 5043 5044 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) 5045 { 5046 /* Convert binary data to debug arrays */ 5047 u32 num_of_buffers = *(u32 *)bin_ptr; 5048 struct bin_buffer_hdr *buf_array; 5049 u8 buf_id; 5050 5051 buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1); 5052 5053 for (buf_id = 0; buf_id < num_of_buffers; buf_id++) { 5054 s_dbg_arrays[buf_id].ptr = 5055 (u32 *)(bin_ptr + buf_array[buf_id].offset); 5056 s_dbg_arrays[buf_id].size_in_dwords = 5057 BYTES_TO_DWORDS(buf_array[buf_id].length); 5058 } 5059 5060 return DBG_STATUS_OK; 5061 } 5062 5063 static u32 qed_cyclic_add(u32 a, u32 b, u32 size) 5064 { 5065 return (a + b) % size; 5066 } 5067 5068 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size) 5069 { 5070 return (size + a - b) % size; 5071 } 5072 5073 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4 5074 * bytes) and returns them as a dword value. The specified buffer offset is 5075 * updated. 5076 */ 5077 static u32 qed_read_from_cyclic_buf(void *buf, 5078 u32 *offset, 5079 u32 buf_size, u8 num_bytes_to_read) 5080 { 5081 u8 *bytes_buf = (u8 *)buf; 5082 u8 *val_ptr; 5083 u32 val = 0; 5084 u8 i; 5085 5086 val_ptr = (u8 *)&val; 5087 5088 for (i = 0; i < num_bytes_to_read; i++) { 5089 val_ptr[i] = bytes_buf[*offset]; 5090 *offset = qed_cyclic_add(*offset, 1, buf_size); 5091 } 5092 5093 return val; 5094 } 5095 5096 /* Reads and returns the next byte from the specified buffer. 5097 * The specified buffer offset is updated. 5098 */ 5099 static u8 qed_read_byte_from_buf(void *buf, u32 *offset) 5100 { 5101 return ((u8 *)buf)[(*offset)++]; 5102 } 5103 5104 /* Reads and returns the next dword from the specified buffer. 5105 * The specified buffer offset is updated. 5106 */ 5107 static u32 qed_read_dword_from_buf(void *buf, u32 *offset) 5108 { 5109 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset]; 5110 5111 *offset += 4; 5112 return dword_val; 5113 } 5114 5115 /* Reads the next string from the specified buffer, and copies it to the 5116 * specified pointer. The specified buffer offset is updated. 5117 */ 5118 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest) 5119 { 5120 const char *source_str = &((const char *)buf)[*offset]; 5121 5122 strncpy(dest, source_str, size); 5123 dest[size - 1] = '\0'; 5124 *offset += size; 5125 } 5126 5127 /* Returns a pointer to the specified offset (in bytes) of the specified buffer. 5128 * If the specified buffer is NULL, a temporary buffer pointer is returned. 5129 */ 5130 static char *qed_get_buf_ptr(void *buf, u32 offset) 5131 { 5132 return buf ? (char *)buf + offset : s_temp_buf; 5133 } 5134 5135 /* Reads a param from the specified buffer. Returns the number of dwords read. 5136 * If the returned str_param is NULL, the param is numeric and its value is 5137 * returned in num_param. 5138 * Otherwise, the param is a string and its pointer is returned in str_param.
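* Params are stored dword-aligned in the dump; the byte offset is padded to a dword boundary accordingly.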
5139 */ 5140 static u32 qed_read_param(u32 *dump_buf, 5141 const char **param_name, 5142 const char **param_str_val, u32 *param_num_val) 5143 { 5144 char *char_buf = (char *)dump_buf; 5145 u32 offset = 0; /* In bytes */ 5146 5147 /* Extract param name */ 5148 *param_name = char_buf; 5149 offset += strlen(*param_name) + 1; 5150 5151 /* Check param type */ 5152 if (*(char_buf + offset++)) { 5153 /* String param */ 5154 *param_str_val = char_buf + offset; 5155 offset += strlen(*param_str_val) + 1; 5156 if (offset & 0x3) 5157 offset += (4 - (offset & 0x3)); 5158 } else { 5159 /* Numeric param */ 5160 *param_str_val = NULL; 5161 if (offset & 0x3) 5162 offset += (4 - (offset & 0x3)); 5163 *param_num_val = *(u32 *)(char_buf + offset); 5164 offset += 4; 5165 } 5166 5167 return offset / 4; 5168 } 5169 5170 /* Reads a section header from the specified buffer. 5171 * Returns the number of dwords read. 5172 */ 5173 static u32 qed_read_section_hdr(u32 *dump_buf, 5174 const char **section_name, 5175 u32 *num_section_params) 5176 { 5177 const char *param_str_val; 5178 5179 return qed_read_param(dump_buf, 5180 section_name, &param_str_val, num_section_params); 5181 } 5182 5183 /* Reads section params from the specified buffer and prints them to the results 5184 * buffer. Returns the number of dwords read. 5185 */ 5186 static u32 qed_print_section_params(u32 *dump_buf, 5187 u32 num_section_params, 5188 char *results_buf, u32 *num_chars_printed) 5189 { 5190 u32 i, dump_offset = 0, results_offset = 0; 5191 5192 for (i = 0; i < num_section_params; i++) { 5193 const char *param_name; 5194 const char *param_str_val; 5195 u32 param_num_val = 0; 5196 5197 dump_offset += qed_read_param(dump_buf + dump_offset, 5198 &param_name, 5199 &param_str_val, &param_num_val); 5200 if (param_str_val) 5201 /* String param */ 5202 results_offset += 5203 sprintf(qed_get_buf_ptr(results_buf, 5204 results_offset), 5205 "%s: %s\n", param_name, param_str_val); 5206 else if (strcmp(param_name, "fw-timestamp")) 5207 /* Numeric param */ 5208 results_offset += 5209 sprintf(qed_get_buf_ptr(results_buf, 5210 results_offset), 5211 "%s: %d\n", param_name, param_num_val); 5212 } 5213 5214 results_offset += 5215 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); 5216 *num_chars_printed = results_offset; 5217 return dump_offset; 5218 } 5219 5220 const char *qed_dbg_get_status_str(enum dbg_status status) 5221 { 5222 return (status < 5223 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status"; 5224 } 5225 5226 /* Parses the idle check rules and returns the number of characters printed. 5227 * In case of parsing error, returns 0.
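* num_errors and num_warnings are incremented according to the severity of each failing rule that is parsed.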
5228 */ 5229 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, 5230 u32 *dump_buf, 5231 u32 *dump_buf_end, 5232 u32 num_rules, 5233 bool print_fw_idle_chk, 5234 char *results_buf, 5235 u32 *num_errors, u32 *num_warnings) 5236 { 5237 u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */ 5238 u16 i, j; 5239 5240 *num_errors = 0; 5241 *num_warnings = 0; 5242 5243 /* Go over dumped results */ 5244 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end; 5245 rule_idx++) { 5246 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data; 5247 struct dbg_idle_chk_result_hdr *hdr; 5248 const char *parsing_str; 5249 u32 parsing_str_offset; 5250 const char *lsi_msg; 5251 u8 curr_reg_id = 0; 5252 bool has_fw_msg; 5253 5254 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; 5255 rule_parsing_data = 5256 (const struct dbg_idle_chk_rule_parsing_data *) 5257 &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA]. 5258 ptr[hdr->rule_id]; 5259 parsing_str_offset = 5260 GET_FIELD(rule_parsing_data->data, 5261 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET); 5262 has_fw_msg = 5263 GET_FIELD(rule_parsing_data->data, 5264 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; 5265 parsing_str = &((const char *) 5266 s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) 5267 [parsing_str_offset]; 5268 lsi_msg = parsing_str; 5269 5270 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES) 5271 return 0; 5272 5273 /* Skip rule header */ 5274 dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4); 5275 5276 /* Update errors/warnings count */ 5277 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR || 5278 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC) 5279 (*num_errors)++; 5280 else 5281 (*num_warnings)++; 5282 5283 /* Print rule severity */ 5284 results_offset += 5285 sprintf(qed_get_buf_ptr(results_buf, 5286 results_offset), "%s: ", 5287 s_idle_chk_severity_str[hdr->severity]); 5288 5289 /* Print rule message */ 5290 if (has_fw_msg) 5291 parsing_str += strlen(parsing_str) + 1; 5292 results_offset += 5293 sprintf(qed_get_buf_ptr(results_buf, 5294 results_offset), "%s.", 5295 has_fw_msg && 5296 print_fw_idle_chk ? parsing_str : lsi_msg); 5297 parsing_str += strlen(parsing_str) + 1; 5298 5299 /* Print register values */ 5300 results_offset += 5301 sprintf(qed_get_buf_ptr(results_buf, 5302 results_offset), " Registers:"); 5303 for (i = 0; 5304 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs; 5305 i++) { 5306 struct dbg_idle_chk_result_reg_hdr *reg_hdr 5307 = (struct dbg_idle_chk_result_reg_hdr *) 5308 dump_buf; 5309 bool is_mem = 5310 GET_FIELD(reg_hdr->data, 5311 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); 5312 u8 reg_id = 5313 GET_FIELD(reg_hdr->data, 5314 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); 5315 5316 /* Skip reg header */ 5317 dump_buf += 5318 (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4); 5319 5320 /* Skip register names until the required reg_id is 5321 * reached. 
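* (the register names are stored as consecutive null-terminated strings in the parsing string buffer).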
5322 */ 5323 for (; reg_id > curr_reg_id; 5324 curr_reg_id++, 5325 parsing_str += strlen(parsing_str) + 1); 5326 5327 results_offset += 5328 sprintf(qed_get_buf_ptr(results_buf, 5329 results_offset), " %s", 5330 parsing_str); 5331 if (i < hdr->num_dumped_cond_regs && is_mem) 5332 results_offset += 5333 sprintf(qed_get_buf_ptr(results_buf, 5334 results_offset), 5335 "[%d]", hdr->mem_entry_id + 5336 reg_hdr->start_entry); 5337 results_offset += 5338 sprintf(qed_get_buf_ptr(results_buf, 5339 results_offset), "="); 5340 for (j = 0; j < reg_hdr->size; j++, dump_buf++) { 5341 results_offset += 5342 sprintf(qed_get_buf_ptr(results_buf, 5343 results_offset), 5344 "0x%x", *dump_buf); 5345 if (j < reg_hdr->size - 1) 5346 results_offset += 5347 sprintf(qed_get_buf_ptr 5348 (results_buf, 5349 results_offset), ","); 5350 } 5351 } 5352 5353 results_offset += 5354 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); 5355 } 5356 5357 /* Check if end of dump buffer was exceeded */ 5358 if (dump_buf > dump_buf_end) 5359 return 0; 5360 return results_offset; 5361 } 5362 5363 /* Parses an idle check dump buffer. 5364 * If result_buf is not NULL, the idle check results are printed to it. 5365 * In any case, the required results buffer size is assigned to 5366 * parsed_results_bytes. 5367 * The parsing status is returned. 5368 */ 5369 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, 5370 u32 *dump_buf, 5371 u32 num_dumped_dwords, 5372 char *results_buf, 5373 u32 *parsed_results_bytes, 5374 u32 *num_errors, 5375 u32 *num_warnings) 5376 { 5377 const char *section_name, *param_name, *param_str_val; 5378 u32 *dump_buf_end = dump_buf + num_dumped_dwords; 5379 u32 num_section_params = 0, num_rules; 5380 u32 results_offset = 0; /* Offset in results_buf in bytes */ 5381 5382 *parsed_results_bytes = 0; 5383 *num_errors = 0; 5384 *num_warnings = 0; 5385 if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || 5386 !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) 5387 return DBG_STATUS_DBG_ARRAY_NOT_SET; 5388 5389 /* Read global_params section */ 5390 dump_buf += qed_read_section_hdr(dump_buf, 5391 &section_name, &num_section_params); 5392 if (strcmp(section_name, "global_params")) 5393 return DBG_STATUS_IDLE_CHK_PARSE_FAILED; 5394 5395 /* Print global params */ 5396 dump_buf += qed_print_section_params(dump_buf, 5397 num_section_params, 5398 results_buf, &results_offset); 5399 5400 /* Read idle_chk section */ 5401 dump_buf += qed_read_section_hdr(dump_buf, 5402 &section_name, &num_section_params); 5403 if (strcmp(section_name, "idle_chk") || num_section_params != 1) 5404 return DBG_STATUS_IDLE_CHK_PARSE_FAILED; 5405 5406 dump_buf += qed_read_param(dump_buf, 5407 &param_name, &param_str_val, &num_rules); 5408 if (strcmp(param_name, "num_rules") != 0) 5409 return DBG_STATUS_IDLE_CHK_PARSE_FAILED; 5410 5411 if (num_rules) { 5412 u32 rules_print_size; 5413 5414 /* Print FW output */ 5415 results_offset += 5416 sprintf(qed_get_buf_ptr(results_buf, 5417 results_offset), 5418 "FW_IDLE_CHECK:\n"); 5419 rules_print_size = 5420 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, 5421 dump_buf_end, num_rules, 5422 true, 5423 results_buf ?
5424 results_buf + 5425 results_offset : NULL, 5426 num_errors, num_warnings); 5427 results_offset += rules_print_size; 5428 if (rules_print_size == 0) 5429 return DBG_STATUS_IDLE_CHK_PARSE_FAILED; 5430 5431 /* Print LSI output */ 5432 results_offset += 5433 sprintf(qed_get_buf_ptr(results_buf, 5434 results_offset), 5435 "\nLSI_IDLE_CHECK:\n"); 5436 rules_print_size = 5437 qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, 5438 dump_buf_end, num_rules, 5439 false, 5440 results_buf ? 5441 results_buf + 5442 results_offset : NULL, 5443 num_errors, num_warnings); 5444 results_offset += rules_print_size; 5445 if (rules_print_size == 0) 5446 return DBG_STATUS_IDLE_CHK_PARSE_FAILED; 5447 } 5448 5449 /* Print errors/warnings count */ 5450 if (*num_errors) { 5451 results_offset += 5452 sprintf(qed_get_buf_ptr(results_buf, 5453 results_offset), 5454 "\nIdle Check failed!!! (with %d errors and %d warnings)\n", 5455 *num_errors, *num_warnings); 5456 } else if (*num_warnings) { 5457 results_offset += 5458 sprintf(qed_get_buf_ptr(results_buf, 5459 results_offset), 5460 "\nIdle Check completed successfully (with %d warnings)\n", 5461 *num_warnings); 5462 } else { 5463 results_offset += 5464 sprintf(qed_get_buf_ptr(results_buf, 5465 results_offset), 5466 "\nIdle Check completed successfully\n"); 5467 } 5468 5469 /* Add 1 for string NULL termination */ 5470 *parsed_results_bytes = results_offset + 1; 5471 return DBG_STATUS_OK; 5472 } 5473 5474 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, 5475 u32 *dump_buf, 5476 u32 num_dumped_dwords, 5477 u32 *results_buf_size) 5478 { 5479 u32 num_errors, num_warnings; 5480 5481 return qed_parse_idle_chk_dump(p_hwfn, 5482 dump_buf, 5483 num_dumped_dwords, 5484 NULL, 5485 results_buf_size, 5486 &num_errors, &num_warnings); 5487 } 5488 5489 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, 5490 u32 *dump_buf, 5491 u32 num_dumped_dwords, 5492 char *results_buf, 5493 u32 *num_errors, u32 *num_warnings) 5494 { 5495 u32 parsed_buf_size; 5496 5497 return qed_parse_idle_chk_dump(p_hwfn, 5498 dump_buf, 5499 num_dumped_dwords, 5500 results_buf, 5501 &parsed_buf_size, 5502 num_errors, num_warnings); 5503 } 5504 5505 /* Frees the specified MCP Trace meta data */ 5506 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, 5507 struct mcp_trace_meta *meta) 5508 { 5509 u32 i; 5510 5511 /* Release modules */ 5512 if (meta->modules) { 5513 for (i = 0; i < meta->modules_num; i++) 5514 kfree(meta->modules[i]); 5515 kfree(meta->modules); 5516 } 5517 5518 /* Release formats */ 5519 if (meta->formats) { 5520 for (i = 0; i < meta->formats_num; i++) 5521 kfree(meta->formats[i].format_str); 5522 kfree(meta->formats); 5523 } 5524 } 5525 5526 /* Allocates and fills MCP Trace meta data based on the specified meta data 5527 * dump buffer. 5528 * Returns debug status code. 5529 */ 5530 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, 5531 const u32 *meta_buf, 5532 struct mcp_trace_meta *meta) 5533 { 5534 u8 *meta_buf_bytes = (u8 *)meta_buf; 5535 u32 offset = 0, signature, i; 5536 5537 memset(meta, 0, sizeof(*meta)); 5538 5539 /* Read first signature */ 5540 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); 5541 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) 5542 return DBG_STATUS_INVALID_TRACE_SIGNATURE; 5543 5544 /* Read number of modules and allocate memory for all the module 5545 * pointers.
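* Each module is stored as a length byte followed by the module name string.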
5546 */ 5547 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset); 5548 meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL); 5549 if (!meta->modules) 5550 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 5551 5552 /* Allocate and read all module strings */ 5553 for (i = 0; i < meta->modules_num; i++) { 5554 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset); 5555 5556 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL); 5557 if (!(*(meta->modules + i))) { 5558 /* Update number of modules to be released */ 5559 meta->modules_num = i ? i - 1 : 0; 5560 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 5561 } 5562 5563 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len, 5564 *(meta->modules + i)); 5565 if (module_len > MCP_TRACE_MAX_MODULE_LEN) 5566 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0'; 5567 } 5568 5569 /* Read second signature */ 5570 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); 5571 if (signature != MCP_TRACE_META_IMAGE_SIGNATURE) 5572 return DBG_STATUS_INVALID_TRACE_SIGNATURE; 5573 5574 /* Read number of formats and allocate memory for all formats */ 5575 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset); 5576 meta->formats = kzalloc(meta->formats_num * 5577 sizeof(struct mcp_trace_format), 5578 GFP_KERNEL); 5579 if (!meta->formats) 5580 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 5581 5582 /* Allocate and read all strings */ 5583 for (i = 0; i < meta->formats_num; i++) { 5584 struct mcp_trace_format *format_ptr = &meta->formats[i]; 5585 u8 format_len; 5586 5587 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes, 5588 &offset); 5589 format_len = 5590 (format_ptr->data & 5591 MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT; 5592 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL); 5593 if (!format_ptr->format_str) { 5594 /* Update number of modules to be released */ 5595 meta->formats_num = i ? i - 1 : 0; 5596 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 5597 } 5598 5599 qed_read_str_from_buf(meta_buf_bytes, 5600 &offset, 5601 format_len, format_ptr->format_str); 5602 } 5603 5604 return DBG_STATUS_OK; 5605 } 5606 5607 /* Parses an MCP Trace dump buffer. 5608 * If result_buf is not NULL, the MCP Trace results are printed to it. 5609 * In any case, the required results buffer size is assigned to 5610 * parsed_results_bytes. 5611 * The parsing status is returned. 
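 * The dump is expected to contain a global_params section, an mcp_trace_data section (the cyclic trace buffer) and an mcp_trace_meta section; when the meta data section is empty, the externally provided meta image (s_mcp_trace_meta) is used instead.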
5612 */ 5613 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, 5614 u32 *dump_buf, 5615 u32 num_dumped_dwords, 5616 char *results_buf, 5617 u32 *parsed_results_bytes) 5618 { 5619 u32 results_offset = 0, param_mask, param_shift, param_num_val; 5620 u32 num_section_params, offset, end_offset, bytes_left; 5621 const char *section_name, *param_name, *param_str_val; 5622 u32 trace_data_dwords, trace_meta_dwords; 5623 struct mcp_trace_meta meta; 5624 struct mcp_trace *trace; 5625 enum dbg_status status; 5626 const u32 *meta_buf; 5627 u8 *trace_buf; 5628 5629 *parsed_results_bytes = 0; 5630 5631 /* Read global_params section */ 5632 dump_buf += qed_read_section_hdr(dump_buf, 5633 &section_name, &num_section_params); 5634 if (strcmp(section_name, "global_params")) 5635 return DBG_STATUS_MCP_TRACE_BAD_DATA; 5636 5637 /* Print global params */ 5638 dump_buf += qed_print_section_params(dump_buf, 5639 num_section_params, 5640 results_buf, &results_offset); 5641 5642 /* Read trace_data section */ 5643 dump_buf += qed_read_section_hdr(dump_buf, 5644 &section_name, &num_section_params); 5645 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1) 5646 return DBG_STATUS_MCP_TRACE_BAD_DATA; 5647 dump_buf += qed_read_param(dump_buf, 5648 &param_name, &param_str_val, &param_num_val); 5649 if (strcmp(param_name, "size")) 5650 return DBG_STATUS_MCP_TRACE_BAD_DATA; 5651 trace_data_dwords = param_num_val; 5652 5653 /* Prepare trace info */ 5654 trace = (struct mcp_trace *)dump_buf; 5655 trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace); 5656 offset = trace->trace_oldest; 5657 end_offset = trace->trace_prod; 5658 bytes_left = qed_cyclic_sub(end_offset, offset, trace->size); 5659 dump_buf += trace_data_dwords; 5660 5661 /* Read meta_data section */ 5662 dump_buf += qed_read_section_hdr(dump_buf, 5663 &section_name, &num_section_params); 5664 if (strcmp(section_name, "mcp_trace_meta")) 5665 return DBG_STATUS_MCP_TRACE_BAD_DATA; 5666 dump_buf += qed_read_param(dump_buf, 5667 &param_name, &param_str_val, &param_num_val); 5668 if (strcmp(param_name, "size") != 0) 5669 return DBG_STATUS_MCP_TRACE_BAD_DATA; 5670 trace_meta_dwords = param_num_val; 5671 5672 /* Choose meta data buffer */ 5673 if (!trace_meta_dwords) { 5674 /* Dump doesn't include meta data */ 5675 if (!s_mcp_trace_meta.ptr) 5676 return DBG_STATUS_MCP_TRACE_NO_META; 5677 meta_buf = s_mcp_trace_meta.ptr; 5678 } else { 5679 /* Dump includes meta data */ 5680 meta_buf = dump_buf; 5681 } 5682 5683 /* Allocate meta data memory */ 5684 status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta); 5685 if (status != DBG_STATUS_OK) 5686 goto free_mem; 5687 5688 /* Ignore the level and modules masks - just print everything that is 5689 already in the buffer. 
5690 */ 5691 while (bytes_left) { 5692 struct mcp_trace_format *format_ptr; 5693 u8 format_level, format_module; 5694 u32 params[3] = { 0, 0, 0 }; 5695 u32 header, format_idx, i; 5696 5697 if (bytes_left < MFW_TRACE_ENTRY_SIZE) { 5698 status = DBG_STATUS_MCP_TRACE_BAD_DATA; 5699 goto free_mem; 5700 } 5701 5702 header = qed_read_from_cyclic_buf(trace_buf, 5703 &offset, 5704 trace->size, 5705 MFW_TRACE_ENTRY_SIZE); 5706 bytes_left -= MFW_TRACE_ENTRY_SIZE; 5707 format_idx = header & MFW_TRACE_EVENTID_MASK; 5708 5709 /* Skip message if its index doesn't exist in the meta data */ 5710 if (format_idx > meta.formats_num) { 5711 u8 format_size = 5712 (u8)((header & 5713 MFW_TRACE_PRM_SIZE_MASK) >> 5714 MFW_TRACE_PRM_SIZE_SHIFT); 5715 5716 if (bytes_left < format_size) { 5717 status = DBG_STATUS_MCP_TRACE_BAD_DATA; 5718 goto free_mem; 5719 } 5720 5721 offset = qed_cyclic_add(offset, 5722 format_size, trace->size); 5723 bytes_left -= format_size; 5724 continue; 5725 } 5726 5727 format_ptr = &meta.formats[format_idx]; 5728 for (i = 0, 5729 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = 5730 MCP_TRACE_FORMAT_P1_SIZE_SHIFT; 5731 i < MCP_TRACE_FORMAT_MAX_PARAMS; 5732 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, 5733 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) { 5734 /* Extract param size (0..3) */ 5735 u8 param_size = 5736 (u8)((format_ptr->data & 5737 param_mask) >> param_shift); 5738 5739 /* If the param size is zero, there are no other 5740 * parameters. 5741 */ 5742 if (!param_size) 5743 break; 5744 5745 /* Size is encoded using 2 bits, where 3 is used to 5746 * encode 4. 5747 */ 5748 if (param_size == 3) 5749 param_size = 4; 5750 if (bytes_left < param_size) { 5751 status = DBG_STATUS_MCP_TRACE_BAD_DATA; 5752 goto free_mem; 5753 } 5754 5755 params[i] = qed_read_from_cyclic_buf(trace_buf, 5756 &offset, 5757 trace->size, 5758 param_size); 5759 bytes_left -= param_size; 5760 } 5761 5762 format_level = 5763 (u8)((format_ptr->data & 5764 MCP_TRACE_FORMAT_LEVEL_MASK) >> 5765 MCP_TRACE_FORMAT_LEVEL_SHIFT); 5766 format_module = 5767 (u8)((format_ptr->data & 5768 MCP_TRACE_FORMAT_MODULE_MASK) >> 5769 MCP_TRACE_FORMAT_MODULE_SHIFT); 5770 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) { 5771 status = DBG_STATUS_MCP_TRACE_BAD_DATA; 5772 goto free_mem; 5773 } 5774 5775 /* Print current message to results buffer */ 5776 results_offset += 5777 sprintf(qed_get_buf_ptr(results_buf, 5778 results_offset), "%s %-8s: ", 5779 s_mcp_trace_level_str[format_level], 5780 meta.modules[format_module]); 5781 results_offset += 5782 sprintf(qed_get_buf_ptr(results_buf, 5783 results_offset), 5784 format_ptr->format_str, params[0], params[1], 5785 params[2]); 5786 } 5787 5788 free_mem: 5789 *parsed_results_bytes = results_offset + 1; 5790 qed_mcp_trace_free_meta(p_hwfn, &meta); 5791 return status; 5792 } 5793 5794 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, 5795 u32 *dump_buf, 5796 u32 num_dumped_dwords, 5797 u32 *results_buf_size) 5798 { 5799 return qed_parse_mcp_trace_dump(p_hwfn, 5800 dump_buf, 5801 num_dumped_dwords, 5802 NULL, results_buf_size); 5803 } 5804 5805 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, 5806 u32 *dump_buf, 5807 u32 num_dumped_dwords, 5808 char *results_buf) 5809 { 5810 u32 parsed_buf_size; 5811 5812 return qed_parse_mcp_trace_dump(p_hwfn, 5813 dump_buf, 5814 num_dumped_dwords, 5815 results_buf, &parsed_buf_size); 5816 } 5817 5818 /* Parses a Reg FIFO dump buffer. 
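 * Each element is REG_FIFO_ELEMENT_DWORDS dwords long and is decoded into its address, access type, pf/vf, port, privilege, protection, master and error fields.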
5819 * If result_buf is not NULL, the Reg FIFO results are printed to it. 5820 * In any case, the required results buffer size is assigned to 5821 * parsed_results_bytes. 5822 * The parsing status is returned. 5823 */ 5824 static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, 5825 u32 *dump_buf, 5826 u32 num_dumped_dwords, 5827 char *results_buf, 5828 u32 *parsed_results_bytes) 5829 { 5830 u32 results_offset = 0, param_num_val, num_section_params, num_elements; 5831 const char *section_name, *param_name, *param_str_val; 5832 struct reg_fifo_element *elements; 5833 u8 i, j, err_val, vf_val; 5834 char vf_str[4]; 5835 5836 /* Read global_params section */ 5837 dump_buf += qed_read_section_hdr(dump_buf, 5838 &section_name, &num_section_params); 5839 if (strcmp(section_name, "global_params")) 5840 return DBG_STATUS_REG_FIFO_BAD_DATA; 5841 5842 /* Print global params */ 5843 dump_buf += qed_print_section_params(dump_buf, 5844 num_section_params, 5845 results_buf, &results_offset); 5846 5847 /* Read reg_fifo_data section */ 5848 dump_buf += qed_read_section_hdr(dump_buf, 5849 &section_name, &num_section_params); 5850 if (strcmp(section_name, "reg_fifo_data")) 5851 return DBG_STATUS_REG_FIFO_BAD_DATA; 5852 dump_buf += qed_read_param(dump_buf, 5853 &param_name, &param_str_val, &param_num_val); 5854 if (strcmp(param_name, "size")) 5855 return DBG_STATUS_REG_FIFO_BAD_DATA; 5856 if (param_num_val % REG_FIFO_ELEMENT_DWORDS) 5857 return DBG_STATUS_REG_FIFO_BAD_DATA; 5858 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS; 5859 elements = (struct reg_fifo_element *)dump_buf; 5860 5861 /* Decode elements */ 5862 for (i = 0; i < num_elements; i++) { 5863 bool err_printed = false; 5864 5865 /* Discover if element belongs to a VF or a PF */ 5866 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF); 5867 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL) 5868 sprintf(vf_str, "%s", "N/A"); 5869 else 5870 sprintf(vf_str, "%d", vf_val); 5871 5872 /* Add parsed element to parsed buffer */ 5873 results_offset += 5874 sprintf(qed_get_buf_ptr(results_buf, 5875 results_offset), 5876 "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ", 5877 elements[i].data, 5878 GET_FIELD(elements[i].data, 5879 REG_FIFO_ELEMENT_ADDRESS) * 5880 REG_FIFO_ELEMENT_ADDR_FACTOR, 5881 s_access_strs[GET_FIELD(elements[i].data, 5882 REG_FIFO_ELEMENT_ACCESS)], 5883 GET_FIELD(elements[i].data, 5884 REG_FIFO_ELEMENT_PF), vf_str, 5885 GET_FIELD(elements[i].data, 5886 REG_FIFO_ELEMENT_PORT), 5887 s_privilege_strs[GET_FIELD(elements[i]. 
5888 data, 5889 REG_FIFO_ELEMENT_PRIVILEGE)], 5890 s_protection_strs[GET_FIELD(elements[i].data, 5891 REG_FIFO_ELEMENT_PROTECTION)], 5892 s_master_strs[GET_FIELD(elements[i].data, 5893 REG_FIFO_ELEMENT_MASTER)]); 5894 5895 /* Print errors */ 5896 for (j = 0, 5897 err_val = GET_FIELD(elements[i].data, 5898 REG_FIFO_ELEMENT_ERROR); 5899 j < ARRAY_SIZE(s_reg_fifo_error_strs); 5900 j++, err_val >>= 1) { 5901 if (!(err_val & 0x1)) 5902 continue; 5903 if (err_printed) 5904 results_offset += 5905 sprintf(qed_get_buf_ptr(results_buf, 5906 results_offset), 5907 ", "); 5908 results_offset += 5909 sprintf(qed_get_buf_ptr(results_buf, 5910 results_offset), "%s", 5911 s_reg_fifo_error_strs[j]); 5912 err_printed = true; 5913 } 5914 5915 results_offset += 5916 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); 5917 } 5918 5919 results_offset += sprintf(qed_get_buf_ptr(results_buf, 5920 results_offset), 5921 "fifo contained %d elements", num_elements); 5922 5923 /* Add 1 for string NULL termination */ 5924 *parsed_results_bytes = results_offset + 1; 5925 return DBG_STATUS_OK; 5926 } 5927 5928 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, 5929 u32 *dump_buf, 5930 u32 num_dumped_dwords, 5931 u32 *results_buf_size) 5932 { 5933 return qed_parse_reg_fifo_dump(p_hwfn, 5934 dump_buf, 5935 num_dumped_dwords, 5936 NULL, results_buf_size); 5937 } 5938 5939 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, 5940 u32 *dump_buf, 5941 u32 num_dumped_dwords, 5942 char *results_buf) 5943 { 5944 u32 parsed_buf_size; 5945 5946 return qed_parse_reg_fifo_dump(p_hwfn, 5947 dump_buf, 5948 num_dumped_dwords, 5949 results_buf, &parsed_buf_size); 5950 } 5951 5952 /* Parses an IGU FIFO dump buffer. 5953 * If result_buf is not NULL, the IGU FIFO results are printed to it. 5954 * In any case, the required results buffer size is assigned to 5955 * parsed_results_bytes. 5956 * The parsing status is returned. 5957 */ 5958 static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, 5959 u32 *dump_buf, 5960 u32 num_dumped_dwords, 5961 char *results_buf, 5962 u32 *parsed_results_bytes) 5963 { 5964 u32 results_offset = 0, param_num_val, num_section_params, num_elements; 5965 const char *section_name, *param_name, *param_str_val; 5966 struct igu_fifo_element *elements; 5967 char parsed_addr_data[32]; 5968 char parsed_wr_data[256]; 5969 u8 i, j; 5970 5971 /* Read global_params section */ 5972 dump_buf += qed_read_section_hdr(dump_buf, 5973 &section_name, &num_section_params); 5974 if (strcmp(section_name, "global_params")) 5975 return DBG_STATUS_IGU_FIFO_BAD_DATA; 5976 5977 /* Print global params */ 5978 dump_buf += qed_print_section_params(dump_buf, 5979 num_section_params, 5980 results_buf, &results_offset); 5981 5982 /* Read igu_fifo_data section */ 5983 dump_buf += qed_read_section_hdr(dump_buf, 5984 &section_name, &num_section_params); 5985 if (strcmp(section_name, "igu_fifo_data")) 5986 return DBG_STATUS_IGU_FIFO_BAD_DATA; 5987 dump_buf += qed_read_param(dump_buf, 5988 &param_name, &param_str_val, &param_num_val); 5989 if (strcmp(param_name, "size")) 5990 return DBG_STATUS_IGU_FIFO_BAD_DATA; 5991 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS) 5992 return DBG_STATUS_IGU_FIFO_BAD_DATA; 5993 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS; 5994 elements = (struct igu_fifo_element *)dump_buf; 5995 5996 /* Decode elements */ 5997 for (i = 0; i < num_elements; i++) { 5998 /* dword12 (dword index 1 and 2) contains bits 32..95 of the 5999 FIFO element. 
6000 */ 6001 u64 dword12 = 6002 ((u64)elements[i].dword2 << 32) | elements[i].dword1; 6003 bool is_wr_cmd = GET_FIELD(dword12, 6004 IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); 6005 bool is_pf = GET_FIELD(elements[i].dword0, 6006 IGU_FIFO_ELEMENT_DWORD0_IS_PF); 6007 u16 cmd_addr = GET_FIELD(elements[i].dword0, 6008 IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); 6009 u8 source = GET_FIELD(elements[i].dword0, 6010 IGU_FIFO_ELEMENT_DWORD0_SOURCE); 6011 u8 err_type = GET_FIELD(elements[i].dword0, 6012 IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); 6013 const struct igu_fifo_addr_data *addr_data = NULL; 6014 6015 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) 6016 return DBG_STATUS_IGU_FIFO_BAD_DATA; 6017 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) 6018 return DBG_STATUS_IGU_FIFO_BAD_DATA; 6019 6020 /* Find address data */ 6021 for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data; 6022 j++) 6023 if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr && 6024 cmd_addr <= s_igu_fifo_addr_data[j].end_addr) 6025 addr_data = &s_igu_fifo_addr_data[j]; 6026 if (!addr_data) 6027 return DBG_STATUS_IGU_FIFO_BAD_DATA; 6028 6029 /* Prepare parsed address data */ 6030 switch (addr_data->type) { 6031 case IGU_ADDR_TYPE_MSIX_MEM: 6032 sprintf(parsed_addr_data, 6033 " vector_num=0x%x", cmd_addr / 2); 6034 break; 6035 case IGU_ADDR_TYPE_WRITE_INT_ACK: 6036 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: 6037 sprintf(parsed_addr_data, 6038 " SB=0x%x", cmd_addr - addr_data->start_addr); 6039 break; 6040 default: 6041 parsed_addr_data[0] = '\0'; 6042 } 6043 6044 /* Prepare parsed write data */ 6045 if (is_wr_cmd) { 6046 u32 wr_data = GET_FIELD(dword12, 6047 IGU_FIFO_ELEMENT_DWORD12_WR_DATA); 6048 u32 prod_cons = GET_FIELD(wr_data, 6049 IGU_FIFO_WR_DATA_PROD_CONS); 6050 u8 is_cleanup = GET_FIELD(wr_data, 6051 IGU_FIFO_WR_DATA_CMD_TYPE); 6052 6053 if (source == IGU_SRC_ATTN) { 6054 sprintf(parsed_wr_data, 6055 "prod: 0x%x, ", prod_cons); 6056 } else { 6057 if (is_cleanup) { 6058 u8 cleanup_val = GET_FIELD(wr_data, 6059 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); 6060 u8 cleanup_type = GET_FIELD(wr_data, 6061 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); 6062 6063 sprintf(parsed_wr_data, 6064 "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ", 6065 cleanup_val ? "set" : "clear", 6066 cleanup_type); 6067 } else { 6068 u8 update_flag = GET_FIELD(wr_data, 6069 IGU_FIFO_WR_DATA_UPDATE_FLAG); 6070 u8 en_dis_int_for_sb = 6071 GET_FIELD(wr_data, 6072 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); 6073 u8 segment = GET_FIELD(wr_data, 6074 IGU_FIFO_WR_DATA_SEGMENT); 6075 u8 timer_mask = GET_FIELD(wr_data, 6076 IGU_FIFO_WR_DATA_TIMER_MASK); 6077 6078 sprintf(parsed_wr_data, 6079 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ", 6080 prod_cons, 6081 update_flag ? "update" : "nop", 6082 en_dis_int_for_sb 6083 ? (en_dis_int_for_sb == 6084 1 ? "disable" : "nop") : 6085 "enable", 6086 segment ? "attn" : "regular", 6087 timer_mask); 6088 } 6089 } 6090 } else { 6091 parsed_wr_data[0] = '\0'; 6092 } 6093 6094 /* Add parsed element to parsed buffer */ 6095 results_offset += 6096 sprintf(qed_get_buf_ptr(results_buf, 6097 results_offset), 6098 "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n", 6099 elements[i].dword2, elements[i].dword1, 6100 elements[i].dword0, 6101 is_pf ? "pf" : "vf", 6102 GET_FIELD(elements[i].dword0, 6103 IGU_FIFO_ELEMENT_DWORD0_FID), 6104 s_igu_fifo_source_strs[source], 6105 is_wr_cmd ? "wr" : "rd", cmd_addr, 6106 (!is_pf && addr_data->vf_desc) 6107 ? 
addr_data->vf_desc : addr_data->desc, 6108 parsed_addr_data, parsed_wr_data, 6109 s_igu_fifo_error_strs[err_type]); 6110 } 6111 6112 results_offset += sprintf(qed_get_buf_ptr(results_buf, 6113 results_offset), 6114 "fifo contained %d elements", num_elements); 6115 6116 /* Add 1 for string NULL termination */ 6117 *parsed_results_bytes = results_offset + 1; 6118 return DBG_STATUS_OK; 6119 } 6120 6121 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, 6122 u32 *dump_buf, 6123 u32 num_dumped_dwords, 6124 u32 *results_buf_size) 6125 { 6126 return qed_parse_igu_fifo_dump(p_hwfn, 6127 dump_buf, 6128 num_dumped_dwords, 6129 NULL, results_buf_size); 6130 } 6131 6132 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, 6133 u32 *dump_buf, 6134 u32 num_dumped_dwords, 6135 char *results_buf) 6136 { 6137 u32 parsed_buf_size; 6138 6139 return qed_parse_igu_fifo_dump(p_hwfn, 6140 dump_buf, 6141 num_dumped_dwords, 6142 results_buf, &parsed_buf_size); 6143 } 6144 6145 static enum dbg_status 6146 qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, 6147 u32 *dump_buf, 6148 u32 num_dumped_dwords, 6149 char *results_buf, 6150 u32 *parsed_results_bytes) 6151 { 6152 u32 results_offset = 0, param_num_val, num_section_params, num_elements; 6153 const char *section_name, *param_name, *param_str_val; 6154 struct protection_override_element *elements; 6155 u8 i; 6156 6157 /* Read global_params section */ 6158 dump_buf += qed_read_section_hdr(dump_buf, 6159 &section_name, &num_section_params); 6160 if (strcmp(section_name, "global_params")) 6161 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; 6162 6163 /* Print global params */ 6164 dump_buf += qed_print_section_params(dump_buf, 6165 num_section_params, 6166 results_buf, &results_offset); 6167 6168 /* Read protection_override_data section */ 6169 dump_buf += qed_read_section_hdr(dump_buf, 6170 &section_name, &num_section_params); 6171 if (strcmp(section_name, "protection_override_data")) 6172 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; 6173 dump_buf += qed_read_param(dump_buf, 6174 &param_name, &param_str_val, &param_num_val); 6175 if (strcmp(param_name, "size")) 6176 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; 6177 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0) 6178 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; 6179 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS; 6180 elements = (struct protection_override_element *)dump_buf; 6181 6182 /* Decode elements */ 6183 for (i = 0; i < num_elements; i++) { 6184 u32 address = GET_FIELD(elements[i].data, 6185 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) * 6186 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; 6187 6188 results_offset += 6189 sprintf(qed_get_buf_ptr(results_buf, 6190 results_offset), 6191 "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n", 6192 i, address, 6193 GET_FIELD(elements[i].data, 6194 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE), 6195 GET_FIELD(elements[i].data, 6196 PROTECTION_OVERRIDE_ELEMENT_READ), 6197 GET_FIELD(elements[i].data, 6198 PROTECTION_OVERRIDE_ELEMENT_WRITE), 6199 s_protection_strs[GET_FIELD(elements[i].data, 6200 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)], 6201 s_protection_strs[GET_FIELD(elements[i].data, 6202 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]); 6203 } 6204 6205 results_offset += sprintf(qed_get_buf_ptr(results_buf, 6206 results_offset), 6207 "protection override contained %d elements", 6208 num_elements); 6209 6210 /* Add 1 for string NULL 
termination */ 6211 *parsed_results_bytes = results_offset + 1; 6212 return DBG_STATUS_OK; 6213 } 6214 6215 enum dbg_status 6216 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, 6217 u32 *dump_buf, 6218 u32 num_dumped_dwords, 6219 u32 *results_buf_size) 6220 { 6221 return qed_parse_protection_override_dump(p_hwfn, 6222 dump_buf, 6223 num_dumped_dwords, 6224 NULL, results_buf_size); 6225 } 6226 6227 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, 6228 u32 *dump_buf, 6229 u32 num_dumped_dwords, 6230 char *results_buf) 6231 { 6232 u32 parsed_buf_size; 6233 6234 return qed_parse_protection_override_dump(p_hwfn, 6235 dump_buf, 6236 num_dumped_dwords, 6237 results_buf, 6238 &parsed_buf_size); 6239 } 6240 6241 /* Parses a FW Asserts dump buffer. 6242 * If result_buf is not NULL, the FW Asserts results are printed to it. 6243 * In any case, the required results buffer size is assigned to 6244 * parsed_results_bytes. 6245 * The parsing status is returned. 6246 */ 6247 static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, 6248 u32 *dump_buf, 6249 u32 num_dumped_dwords, 6250 char *results_buf, 6251 u32 *parsed_results_bytes) 6252 { 6253 u32 results_offset = 0, num_section_params, param_num_val, i; 6254 const char *param_name, *param_str_val, *section_name; 6255 bool last_section_found = false; 6256 6257 *parsed_results_bytes = 0; 6258 6259 /* Read global_params section */ 6260 dump_buf += qed_read_section_hdr(dump_buf, 6261 &section_name, &num_section_params); 6262 if (strcmp(section_name, "global_params")) 6263 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; 6264 6265 /* Print global params */ 6266 dump_buf += qed_print_section_params(dump_buf, 6267 num_section_params, 6268 results_buf, &results_offset); 6269 while (!last_section_found) { 6270 const char *storm_letter = NULL; 6271 u32 storm_dump_size = 0; 6272 6273 dump_buf += qed_read_section_hdr(dump_buf, 6274 &section_name, 6275 &num_section_params); 6276 if (!strcmp(section_name, "last")) { 6277 last_section_found = true; 6278 continue; 6279 } else if (strcmp(section_name, "fw_asserts")) { 6280 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; 6281 } 6282 6283 /* Extract params */ 6284 for (i = 0; i < num_section_params; i++) { 6285 dump_buf += qed_read_param(dump_buf, 6286 &param_name, 6287 &param_str_val, 6288 &param_num_val); 6289 if (!strcmp(param_name, "storm")) 6290 storm_letter = param_str_val; 6291 else if (!strcmp(param_name, "size")) 6292 storm_dump_size = param_num_val; 6293 else 6294 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; 6295 } 6296 6297 if (!storm_letter || !storm_dump_size) 6298 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; 6299 6300 /* Print data */ 6301 results_offset += sprintf(qed_get_buf_ptr(results_buf, 6302 results_offset), 6303 "\n%sSTORM_ASSERT: size=%d\n", 6304 storm_letter, storm_dump_size); 6305 for (i = 0; i < storm_dump_size; i++, dump_buf++) 6306 results_offset += 6307 sprintf(qed_get_buf_ptr(results_buf, 6308 results_offset), 6309 "%08x\n", *dump_buf); 6310 } 6311 6312 /* Add 1 for string NULL termination */ 6313 *parsed_results_bytes = results_offset + 1; 6314 return DBG_STATUS_OK; 6315 } 6316 6317 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, 6318 u32 *dump_buf, 6319 u32 num_dumped_dwords, 6320 u32 *results_buf_size) 6321 { 6322 return qed_parse_fw_asserts_dump(p_hwfn, 6323 dump_buf, 6324 num_dumped_dwords, 6325 NULL, results_buf_size); 6326 } 6327 6328 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, 6329 u32 *dump_buf, 
6330 u32 num_dumped_dwords, 6331 char *results_buf) 6332 { 6333 u32 parsed_buf_size; 6334 6335 return qed_parse_fw_asserts_dump(p_hwfn, 6336 dump_buf, 6337 num_dumped_dwords, 6338 results_buf, &parsed_buf_size); 6339 } 6340 6341 /* Wrapper for unifying the idle_chk and mcp_trace API */ 6342 enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, 6343 u32 *dump_buf, 6344 u32 num_dumped_dwords, 6345 char *results_buf) 6346 { 6347 u32 num_errors, num_warnings; 6348 6349 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords, 6350 results_buf, &num_errors, 6351 &num_warnings); 6352 } 6353 6354 /* Feature meta data lookup table */ 6355 static struct { 6356 char *name; 6357 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn, 6358 struct qed_ptt *p_ptt, u32 *size); 6359 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn, 6360 struct qed_ptt *p_ptt, u32 *dump_buf, 6361 u32 buf_size, u32 *dumped_dwords); 6362 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn, 6363 u32 *dump_buf, u32 num_dumped_dwords, 6364 char *results_buf); 6365 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn, 6366 u32 *dump_buf, 6367 u32 num_dumped_dwords, 6368 u32 *results_buf_size); 6369 } qed_features_lookup[] = { 6370 { 6371 "grc", qed_dbg_grc_get_dump_buf_size, 6372 qed_dbg_grc_dump, NULL, NULL}, { 6373 "idle_chk", 6374 qed_dbg_idle_chk_get_dump_buf_size, 6375 qed_dbg_idle_chk_dump, 6376 qed_print_idle_chk_results_wrapper, 6377 qed_get_idle_chk_results_buf_size}, { 6378 "mcp_trace", 6379 qed_dbg_mcp_trace_get_dump_buf_size, 6380 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results, 6381 qed_get_mcp_trace_results_buf_size}, { 6382 "reg_fifo", 6383 qed_dbg_reg_fifo_get_dump_buf_size, 6384 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results, 6385 qed_get_reg_fifo_results_buf_size}, { 6386 "igu_fifo", 6387 qed_dbg_igu_fifo_get_dump_buf_size, 6388 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results, 6389 qed_get_igu_fifo_results_buf_size}, { 6390 "protection_override", 6391 qed_dbg_protection_override_get_dump_buf_size, 6392 qed_dbg_protection_override_dump, 6393 qed_print_protection_override_results, 6394 qed_get_protection_override_results_buf_size}, { 6395 "fw_asserts", 6396 qed_dbg_fw_asserts_get_dump_buf_size, 6397 qed_dbg_fw_asserts_dump, 6398 qed_print_fw_asserts_results, 6399 qed_get_fw_asserts_results_buf_size},}; 6400 6401 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) 6402 { 6403 u32 i, precision = 80; 6404 6405 if (!p_text_buf) 6406 return; 6407 6408 pr_notice("\n%.*s", precision, p_text_buf); 6409 for (i = precision; i < text_size; i += precision) 6410 pr_cont("%.*s", precision, p_text_buf + i); 6411 pr_cont("\n"); 6412 } 6413 6414 #define QED_RESULTS_BUF_MIN_SIZE 16 6415 /* Generic function for decoding debug feature info */ 6416 enum dbg_status format_feature(struct qed_hwfn *p_hwfn, 6417 enum qed_dbg_features feature_idx) 6418 { 6419 struct qed_dbg_feature *feature = 6420 &p_hwfn->cdev->dbg_params.features[feature_idx]; 6421 u32 text_size_bytes, null_char_pos, i; 6422 enum dbg_status rc; 6423 char *text_buf; 6424 6425 /* Check if feature supports formatting capability */ 6426 if (!qed_features_lookup[feature_idx].results_buf_size) 6427 return DBG_STATUS_OK; 6428 6429 /* Obtain size of formatted output */ 6430 rc = qed_features_lookup[feature_idx]. 
6431 results_buf_size(p_hwfn, (u32 *)feature->dump_buf, 6432 feature->dumped_dwords, &text_size_bytes); 6433 if (rc != DBG_STATUS_OK) 6434 return rc; 6435 6436 /* Make sure that the allocated size is a multiple of dword (4 bytes) */ 6437 null_char_pos = text_size_bytes - 1; 6438 text_size_bytes = (text_size_bytes + 3) & ~0x3; 6439 6440 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { 6441 DP_NOTICE(p_hwfn->cdev, 6442 "formatted size of feature was too small %d. Aborting\n", 6443 text_size_bytes); 6444 return DBG_STATUS_INVALID_ARGS; 6445 } 6446 6447 /* Allocate temp text buf */ 6448 text_buf = vzalloc(text_size_bytes); 6449 if (!text_buf) 6450 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 6451 6452 /* Decode feature opcodes to string on temp buf */ 6453 rc = qed_features_lookup[feature_idx]. 6454 print_results(p_hwfn, (u32 *)feature->dump_buf, 6455 feature->dumped_dwords, text_buf); 6456 if (rc != DBG_STATUS_OK) { 6457 vfree(text_buf); 6458 return rc; 6459 } 6460 6461 /* Replace the original null character with a '\n' character. 6462 * The bytes that were added as a result of the dword alignment are also 6463 * padded with '\n' characters. 6464 */ 6465 for (i = null_char_pos; i < text_size_bytes; i++) 6466 text_buf[i] = '\n'; 6467 6468 /* Dump printable feature to log */ 6469 if (p_hwfn->cdev->dbg_params.print_data) 6470 qed_dbg_print_feature(text_buf, text_size_bytes); 6471 6472 /* Free the old dump_buf and point the dump_buf to the newly allocated 6473 * and formatted text buffer. 6474 */ 6475 vfree(feature->dump_buf); 6476 feature->dump_buf = text_buf; 6477 feature->buf_size = text_size_bytes; 6478 feature->dumped_dwords = text_size_bytes / 4; 6479 return rc; 6480 } 6481 6482 /* Generic function for performing the dump of a debug feature. */ 6483 enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 6484 enum qed_dbg_features feature_idx) 6485 { 6486 struct qed_dbg_feature *feature = 6487 &p_hwfn->cdev->dbg_params.features[feature_idx]; 6488 u32 buf_size_dwords; 6489 enum dbg_status rc; 6490 6491 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n", 6492 qed_features_lookup[feature_idx].name); 6493 6494 /* If dump_buf was already allocated, free it (this can happen if a dump 6495 * was requested but the file was never read). 6496 * We can't use the buffer as is since size may have changed. 6497 */ 6498 if (feature->dump_buf) { 6499 vfree(feature->dump_buf); 6500 feature->dump_buf = NULL; 6501 } 6502 6503 /* Get buffer size from hsi, allocate accordingly, and perform the 6504 * dump. 6505 */ 6506 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt, 6507 &buf_size_dwords); 6508 if (rc != DBG_STATUS_OK) 6509 return rc; 6510 feature->buf_size = buf_size_dwords * sizeof(u32); 6511 feature->dump_buf = vmalloc(feature->buf_size); 6512 if (!feature->dump_buf) 6513 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; 6514 6515 rc = qed_features_lookup[feature_idx]. 6516 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf, 6517 feature->buf_size / sizeof(u32), 6518 &feature->dumped_dwords); 6519 6520 /* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error. 6521 * In this case the buffer holds valid binary data, but we won't be able 6522 * to parse it (since parsing relies on data in NVRAM which is only 6523 * accessible when MFW is responsive). Skip the formatting but return 6524 success so that binary data is provided. 
6525 */ 6526 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED) 6527 return DBG_STATUS_OK; 6528 6529 if (rc != DBG_STATUS_OK) 6530 return rc; 6531 6532 /* Format output */ 6533 rc = format_feature(p_hwfn, feature_idx); 6534 return rc; 6535 } 6536 6537 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) 6538 { 6539 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes); 6540 } 6541 6542 int qed_dbg_grc_size(struct qed_dev *cdev) 6543 { 6544 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC); 6545 } 6546 6547 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) 6548 { 6549 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK, 6550 num_dumped_bytes); 6551 } 6552 6553 int qed_dbg_idle_chk_size(struct qed_dev *cdev) 6554 { 6555 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK); 6556 } 6557 6558 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) 6559 { 6560 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO, 6561 num_dumped_bytes); 6562 } 6563 6564 int qed_dbg_reg_fifo_size(struct qed_dev *cdev) 6565 { 6566 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO); 6567 } 6568 6569 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) 6570 { 6571 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO, 6572 num_dumped_bytes); 6573 } 6574 6575 int qed_dbg_igu_fifo_size(struct qed_dev *cdev) 6576 { 6577 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); 6578 } 6579 6580 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, 6581 u32 *num_dumped_bytes) 6582 { 6583 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE, 6584 num_dumped_bytes); 6585 } 6586 6587 int qed_dbg_protection_override_size(struct qed_dev *cdev) 6588 { 6589 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE); 6590 } 6591 6592 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer, 6593 u32 *num_dumped_bytes) 6594 { 6595 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS, 6596 num_dumped_bytes); 6597 } 6598 6599 int qed_dbg_fw_asserts_size(struct qed_dev *cdev) 6600 { 6601 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS); 6602 } 6603 6604 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, 6605 u32 *num_dumped_bytes) 6606 { 6607 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE, 6608 num_dumped_bytes); 6609 } 6610 6611 int qed_dbg_mcp_trace_size(struct qed_dev *cdev) 6612 { 6613 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE); 6614 } 6615 6616 /* Defines the amount of bytes allocated for recording the length of debugfs 6617 * feature buffer. 6618 */ 6619 #define REGDUMP_HEADER_SIZE sizeof(u32) 6620 #define REGDUMP_HEADER_FEATURE_SHIFT 24 6621 #define REGDUMP_HEADER_ENGINE_SHIFT 31 6622 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 6623 enum debug_print_features { 6624 OLD_MODE = 0, 6625 IDLE_CHK = 1, 6626 GRC_DUMP = 2, 6627 MCP_TRACE = 3, 6628 REG_FIFO = 4, 6629 PROTECTION_OVERRIDE = 5, 6630 IGU_FIFO = 6, 6631 PHY = 7, 6632 FW_ASSERTS = 8, 6633 }; 6634 6635 static u32 qed_calc_regdump_header(enum debug_print_features feature, 6636 int engine, u32 feature_size, u8 omit_engine) 6637 { 6638 /* Insert the engine, feature and mode inside the header and combine it 6639 * with feature size. 
6640 */ 6641 return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) | 6642 (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) | 6643 (engine << REGDUMP_HEADER_ENGINE_SHIFT); 6644 } 6645 6646 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) 6647 { 6648 u8 cur_engine, omit_engine = 0, org_engine; 6649 u32 offset = 0, feature_size; 6650 int rc; 6651 6652 if (cdev->num_hwfns == 1) 6653 omit_engine = 1; 6654 6655 org_engine = qed_get_debug_engine(cdev); 6656 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { 6657 /* Collect idle_chks and grcDump for each hw function */ 6658 DP_VERBOSE(cdev, QED_MSG_DEBUG, 6659 "obtaining idle_chk and grcdump for current engine\n"); 6660 qed_set_debug_engine(cdev, cur_engine); 6661 6662 /* First idle_chk */ 6663 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset + 6664 REGDUMP_HEADER_SIZE, &feature_size); 6665 if (!rc) { 6666 *(u32 *)((u8 *)buffer + offset) = 6667 qed_calc_regdump_header(IDLE_CHK, cur_engine, 6668 feature_size, omit_engine); 6669 offset += (feature_size + REGDUMP_HEADER_SIZE); 6670 } else { 6671 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); 6672 } 6673 6674 /* Second idle_chk */ 6675 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset + 6676 REGDUMP_HEADER_SIZE, &feature_size); 6677 if (!rc) { 6678 *(u32 *)((u8 *)buffer + offset) = 6679 qed_calc_regdump_header(IDLE_CHK, cur_engine, 6680 feature_size, omit_engine); 6681 offset += (feature_size + REGDUMP_HEADER_SIZE); 6682 } else { 6683 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); 6684 } 6685 6686 /* reg_fifo dump */ 6687 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset + 6688 REGDUMP_HEADER_SIZE, &feature_size); 6689 if (!rc) { 6690 *(u32 *)((u8 *)buffer + offset) = 6691 qed_calc_regdump_header(REG_FIFO, cur_engine, 6692 feature_size, omit_engine); 6693 offset += (feature_size + REGDUMP_HEADER_SIZE); 6694 } else { 6695 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc); 6696 } 6697 6698 /* igu_fifo dump */ 6699 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset + 6700 REGDUMP_HEADER_SIZE, &feature_size); 6701 if (!rc) { 6702 *(u32 *)((u8 *)buffer + offset) = 6703 qed_calc_regdump_header(IGU_FIFO, cur_engine, 6704 feature_size, omit_engine); 6705 offset += (feature_size + REGDUMP_HEADER_SIZE); 6706 } else { 6707 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc); 6708 } 6709 6710 /* protection_override dump */ 6711 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset + 6712 REGDUMP_HEADER_SIZE, 6713 &feature_size); 6714 if (!rc) { 6715 *(u32 *)((u8 *)buffer + offset) = 6716 qed_calc_regdump_header(PROTECTION_OVERRIDE, 6717 cur_engine, 6718 feature_size, omit_engine); 6719 offset += (feature_size + REGDUMP_HEADER_SIZE); 6720 } else { 6721 DP_ERR(cdev, 6722 "qed_dbg_protection_override failed. rc = %d\n", 6723 rc); 6724 } 6725 6726 /* fw_asserts dump */ 6727 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset + 6728 REGDUMP_HEADER_SIZE, &feature_size); 6729 if (!rc) { 6730 *(u32 *)((u8 *)buffer + offset) = 6731 qed_calc_regdump_header(FW_ASSERTS, cur_engine, 6732 feature_size, omit_engine); 6733 offset += (feature_size + REGDUMP_HEADER_SIZE); 6734 } else { 6735 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n", 6736 rc); 6737 } 6738 6739 /* GRC dump - must be last because when mcp stuck it will 6740 * clutter idle_chk, reg_fifo, ... 
6741 */ 6742 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset + 6743 REGDUMP_HEADER_SIZE, &feature_size); 6744 if (!rc) { 6745 *(u32 *)((u8 *)buffer + offset) = 6746 qed_calc_regdump_header(GRC_DUMP, cur_engine, 6747 feature_size, omit_engine); 6748 offset += (feature_size + REGDUMP_HEADER_SIZE); 6749 } else { 6750 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc); 6751 } 6752 } 6753 6754 /* mcp_trace */ 6755 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset + 6756 REGDUMP_HEADER_SIZE, &feature_size); 6757 if (!rc) { 6758 *(u32 *)((u8 *)buffer + offset) = 6759 qed_calc_regdump_header(MCP_TRACE, cur_engine, 6760 feature_size, omit_engine); 6761 offset += (feature_size + REGDUMP_HEADER_SIZE); 6762 } else { 6763 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); 6764 } 6765 6766 qed_set_debug_engine(cdev, org_engine); 6767 6768 return 0; 6769 } 6770 6771 int qed_dbg_all_data_size(struct qed_dev *cdev) 6772 { 6773 u8 cur_engine, org_engine; 6774 u32 regs_len = 0; 6775 6776 org_engine = qed_get_debug_engine(cdev); 6777 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { 6778 /* Engine specific */ 6779 DP_VERBOSE(cdev, QED_MSG_DEBUG, 6780 "calculating idle_chk and grcdump register length for current engine\n"); 6781 qed_set_debug_engine(cdev, cur_engine); 6782 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + 6783 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + 6784 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + 6785 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + 6786 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + 6787 REGDUMP_HEADER_SIZE + 6788 qed_dbg_protection_override_size(cdev) + 6789 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); 6790 } 6791 6792 /* Engine common */ 6793 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); 6794 qed_set_debug_engine(cdev, org_engine); 6795 6796 return regs_len; 6797 } 6798 6799 int qed_dbg_feature(struct qed_dev *cdev, void *buffer, 6800 enum qed_dbg_features feature, u32 *num_dumped_bytes) 6801 { 6802 struct qed_hwfn *p_hwfn = 6803 &cdev->hwfns[cdev->dbg_params.engine_for_debug]; 6804 struct qed_dbg_feature *qed_feature = 6805 &cdev->dbg_params.features[feature]; 6806 enum dbg_status dbg_rc; 6807 struct qed_ptt *p_ptt; 6808 int rc = 0; 6809 6810 /* Acquire ptt */ 6811 p_ptt = qed_ptt_acquire(p_hwfn); 6812 if (!p_ptt) 6813 return -EINVAL; 6814 6815 /* Get dump */ 6816 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature); 6817 if (dbg_rc != DBG_STATUS_OK) { 6818 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n", 6819 qed_dbg_get_status_str(dbg_rc)); 6820 *num_dumped_bytes = 0; 6821 rc = -EINVAL; 6822 goto out; 6823 } 6824 6825 DP_VERBOSE(cdev, QED_MSG_DEBUG, 6826 "copying debugfs feature to external buffer\n"); 6827 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size); 6828 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords * 6829 4; 6830 6831 out: 6832 qed_ptt_release(p_hwfn, p_ptt); 6833 return rc; 6834 } 6835 6836 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) 6837 { 6838 struct qed_hwfn *p_hwfn = 6839 &cdev->hwfns[cdev->dbg_params.engine_for_debug]; 6840 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); 6841 struct qed_dbg_feature *qed_feature = 6842 &cdev->dbg_params.features[feature]; 6843 u32 buf_size_dwords; 6844 enum dbg_status rc; 6845 6846 if (!p_ptt) 6847 return -EINVAL; 6848 6849 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt, 6850 &buf_size_dwords); 6851 if (rc != DBG_STATUS_OK) 6852 buf_size_dwords = 0; 6853 6854 
qed_ptt_release(p_hwfn, p_ptt); 6855 qed_feature->buf_size = buf_size_dwords * sizeof(u32); 6856 return qed_feature->buf_size; 6857 } 6858 6859 u8 qed_get_debug_engine(struct qed_dev *cdev) 6860 { 6861 return cdev->dbg_params.engine_for_debug; 6862 } 6863 6864 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number) 6865 { 6866 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n", 6867 engine_number); 6868 cdev->dbg_params.engine_for_debug = engine_number; 6869 } 6870 6871 void qed_dbg_pf_init(struct qed_dev *cdev) 6872 { 6873 const u8 *dbg_values; 6874 6875 /* Debug values are after init values. 6876 * The offset is the first dword of the file. 6877 */ 6878 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data; 6879 qed_dbg_set_bin_ptr((u8 *)dbg_values); 6880 qed_dbg_user_set_bin_ptr((u8 *)dbg_values); 6881 } 6882 6883 void qed_dbg_pf_exit(struct qed_dev *cdev) 6884 { 6885 struct qed_dbg_feature *feature = NULL; 6886 enum qed_dbg_features feature_idx; 6887 6888 /* Debug features' buffers may be allocated if debug feature was used 6889 * but dump wasn't called. 6890 */ 6891 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) { 6892 feature = &cdev->dbg_params.features[feature_idx]; 6893 if (feature->dump_buf) { 6894 vfree(feature->dump_buf); 6895 feature->dump_buf = NULL; 6896 } 6897 } 6898 } 6899
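/* Illustrative usage sketch (not part of the driver itself): a caller that
 * wants the combined register dump produced by the code above would
 * typically size the buffer with qed_dbg_all_data_size() and then fill it
 * with qed_dbg_all_data(). The local variable names below are hypothetical.
 *
 *	int size = qed_dbg_all_data_size(cdev);
 *	void *buf;
 *
 *	if (size <= 0)
 *		return size;
 *	buf = vzalloc(size);
 *	if (!buf)
 *		return -ENOMEM;
 *	qed_dbg_all_data(cdev, buf);
 *	... hand buf to user space, then vfree(buf) ...
 */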