xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_debug.c (revision 5ef12cb4a3a78ffb331c03a795a15eea4ae35155)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Memory groups enum */
19 enum mem_groups {
20 	MEM_GROUP_PXP_MEM,
21 	MEM_GROUP_DMAE_MEM,
22 	MEM_GROUP_CM_MEM,
23 	MEM_GROUP_QM_MEM,
24 	MEM_GROUP_DORQ_MEM,
25 	MEM_GROUP_BRB_RAM,
26 	MEM_GROUP_BRB_MEM,
27 	MEM_GROUP_PRS_MEM,
28 	MEM_GROUP_IOR,
29 	MEM_GROUP_BTB_RAM,
30 	MEM_GROUP_CONN_CFC_MEM,
31 	MEM_GROUP_TASK_CFC_MEM,
32 	MEM_GROUP_CAU_PI,
33 	MEM_GROUP_CAU_MEM,
34 	MEM_GROUP_PXP_ILT,
35 	MEM_GROUP_TM_MEM,
36 	MEM_GROUP_SDM_MEM,
37 	MEM_GROUP_PBUF,
38 	MEM_GROUP_RAM,
39 	MEM_GROUP_MULD_MEM,
40 	MEM_GROUP_BTB_MEM,
41 	MEM_GROUP_RDIF_CTX,
42 	MEM_GROUP_TDIF_CTX,
43 	MEM_GROUP_CFC_MEM,
44 	MEM_GROUP_IGU_MEM,
45 	MEM_GROUP_IGU_MSIX,
46 	MEM_GROUP_CAU_SB,
47 	MEM_GROUP_BMB_RAM,
48 	MEM_GROUP_BMB_MEM,
49 	MEM_GROUPS_NUM
50 };
51 
52 /* Memory group names */
53 static const char * const s_mem_group_names[] = {
54 	"PXP_MEM",
55 	"DMAE_MEM",
56 	"CM_MEM",
57 	"QM_MEM",
58 	"DORQ_MEM",
59 	"BRB_RAM",
60 	"BRB_MEM",
61 	"PRS_MEM",
62 	"IOR",
63 	"BTB_RAM",
64 	"CONN_CFC_MEM",
65 	"TASK_CFC_MEM",
66 	"CAU_PI",
67 	"CAU_MEM",
68 	"PXP_ILT",
69 	"TM_MEM",
70 	"SDM_MEM",
71 	"PBUF",
72 	"RAM",
73 	"MULD_MEM",
74 	"BTB_MEM",
75 	"RDIF_CTX",
76 	"TDIF_CTX",
77 	"CFC_MEM",
78 	"IGU_MEM",
79 	"IGU_MSIX",
80 	"CAU_SB",
81 	"BMB_RAM",
82 	"BMB_MEM",
83 };
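
/* The strings above are indexed by enum mem_groups and must be kept in the
 * same order as the enum, e.g. s_mem_group_names[MEM_GROUP_BRB_MEM] is
 * "BRB_MEM".
 */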
84 
85 /* Idle check conditions */
86 
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91 
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96 
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 	return (r[0] & imm[0]) != imm[1];
100 }
101 
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 	return ((r[0] & imm[0]) >> imm[1]) !=
105 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107 
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112 
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 	return (r[0] & ~imm[0]) != imm[1];
116 }
117 
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 	return (r[0] & ~r[1]) != imm[0];
121 }
122 
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 	return r[0] != imm[0];
126 }
127 
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 	return r[0] != r[1] && r[2] == imm[0];
131 }
132 
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 	return r[0] != r[1] && r[2] > imm[0];
136 }
137 
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 	return r[0] != r[1];
141 }
142 
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 	return r[0] & imm[0];
146 }
147 
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 	return r[0] < (r[1] - imm[0]);
151 }
152 
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 	return r[0] > imm[0];
156 }
157 
158 /* Array of Idle Check conditions */
159 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
160 	cond0,
161 	cond1,
162 	cond2,
163 	cond3,
164 	cond4,
165 	cond5,
166 	cond6,
167 	cond7,
168 	cond8,
169 	cond9,
170 	cond10,
171 	cond11,
172 	cond12,
173 	cond13,
174 };
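
/* A minimal usage sketch (hypothetical locals): an idle check rule carries a
 * condition index, and the rule is treated as violated when the indexed
 * handler returns non-zero for the register values and immediates read for
 * that rule:
 *
 *	u32 reg_vals[2], imm_vals[4];
 *	u8 cond_id;
 *
 *	if (cond_arr[cond_id](reg_vals, imm_vals))
 *		num_failing_rules++;
 */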
175 
176 /******************************* Data Types **********************************/
177 
178 enum platform_ids {
179 	PLATFORM_ASIC,
180 	PLATFORM_RESERVED,
181 	PLATFORM_RESERVED2,
182 	PLATFORM_RESERVED3,
183 	MAX_PLATFORM_IDS
184 };
185 
186 struct chip_platform_defs {
187 	u8 num_ports;
188 	u8 num_pfs;
189 	u8 num_vfs;
190 };
191 
192 /* Chip constant definitions */
193 struct chip_defs {
194 	const char *name;
195 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
196 };
197 
198 /* Platform constant definitions */
199 struct platform_defs {
200 	const char *name;
201 	u32 delay_factor;
202 	u32 dmae_thresh;
203 	u32 log_thresh;
204 };
205 
206 /* Storm constant definitions.
207  * Addresses are in bytes, sizes are in quad-regs.
208  */
209 struct storm_defs {
210 	char letter;
211 	enum block_id block_id;
212 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
213 	bool has_vfc;
214 	u32 sem_fast_mem_addr;
215 	u32 sem_frame_mode_addr;
216 	u32 sem_slow_enable_addr;
217 	u32 sem_slow_mode_addr;
218 	u32 sem_slow_mode1_conf_addr;
219 	u32 sem_sync_dbg_empty_addr;
220 	u32 sem_slow_dbg_empty_addr;
221 	u32 cm_ctx_wr_addr;
222 	u32 cm_conn_ag_ctx_lid_size;
223 	u32 cm_conn_ag_ctx_rd_addr;
224 	u32 cm_conn_st_ctx_lid_size;
225 	u32 cm_conn_st_ctx_rd_addr;
226 	u32 cm_task_ag_ctx_lid_size;
227 	u32 cm_task_ag_ctx_rd_addr;
228 	u32 cm_task_st_ctx_lid_size;
229 	u32 cm_task_st_ctx_rd_addr;
230 };
231 
232 /* Block constant definitions */
233 struct block_defs {
234 	const char *name;
235 	bool exists[MAX_CHIP_IDS];
236 	bool associated_to_storm;
237 
238 	/* Valid only if associated_to_storm is true */
239 	u32 storm_id;
240 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
241 	u32 dbg_select_addr;
242 	u32 dbg_enable_addr;
243 	u32 dbg_shift_addr;
244 	u32 dbg_force_valid_addr;
245 	u32 dbg_force_frame_addr;
246 	bool has_reset_bit;
247 
248 	/* If true, block is taken out of reset before dump */
249 	bool unreset;
250 	enum dbg_reset_regs reset_reg;
251 
252 	/* Bit offset in reset register */
253 	u8 reset_bit_offset;
254 };
255 
256 /* Reset register definitions */
257 struct reset_reg_defs {
258 	u32 addr;
259 	bool exists[MAX_CHIP_IDS];
260 	u32 unreset_val[MAX_CHIP_IDS];
261 };
262 
263 struct grc_param_defs {
264 	u32 default_val[MAX_CHIP_IDS];
265 	u32 min;
266 	u32 max;
267 	bool is_preset;
268 	bool is_persistent;
269 	u32 exclude_all_preset_val;
270 	u32 crash_preset_val;
271 };
272 
273 /* Address is in 128b units. Width is in bits. */
274 struct rss_mem_defs {
275 	const char *mem_name;
276 	const char *type_name;
277 	u32 addr;
278 	u32 entry_width;
279 	u32 num_entries[MAX_CHIP_IDS];
280 };
281 
282 struct vfc_ram_defs {
283 	const char *mem_name;
284 	const char *type_name;
285 	u32 base_row;
286 	u32 num_rows;
287 };
288 
289 struct big_ram_defs {
290 	const char *instance_name;
291 	enum mem_groups mem_group_id;
292 	enum mem_groups ram_mem_group_id;
293 	enum dbg_grc_params grc_param;
294 	u32 addr_reg_addr;
295 	u32 data_reg_addr;
296 	u32 is_256b_reg_addr;
297 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
298 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
299 };
300 
301 struct phy_defs {
302 	const char *phy_name;
303 
304 	/* PHY base GRC address */
305 	u32 base_addr;
306 
307 	/* Relative address of indirect TBUS address register (bits 0..7) */
308 	u32 tbus_addr_lo_addr;
309 
310 	/* Relative address of indirect TBUS address register (bits 8..10) */
311 	u32 tbus_addr_hi_addr;
312 
313 	/* Relative address of indirect TBUS data register (bits 0..7) */
314 	u32 tbus_data_lo_addr;
315 
316 	/* Relative address of indirect TBUS data register (bits 8..11) */
317 	u32 tbus_data_hi_addr;
318 };
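
/* A minimal sketch of how the TBUS registers described above are typically
 * used to read one indirect PHY location (the phy pointer, tbus_addr and
 * output variables are hypothetical):
 *
 *	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_lo_addr,
 *	       tbus_addr & 0xff);
 *	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_hi_addr,
 *	       tbus_addr >> 8);
 *	data_lo = qed_rd(p_hwfn, p_ptt,
 *			 phy->base_addr + phy->tbus_data_lo_addr);
 *	data_hi = qed_rd(p_hwfn, p_ptt,
 *			 phy->base_addr + phy->tbus_data_hi_addr);
 */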
319 
320 /******************************** Constants **********************************/
321 
322 #define MAX_LCIDS			320
323 #define MAX_LTIDS			320
324 
325 #define NUM_IOR_SETS			2
326 #define IORS_PER_SET			176
327 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
328 
329 #define BYTES_IN_DWORD			sizeof(u32)
330 
331 /* In the macros below, size and offset are specified in bits */
332 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
333 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
334 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
335 #define FIELD_DWORD_OFFSET(type, field) \
336 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
337 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
338 #define FIELD_BIT_MASK(type, field) \
339 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
340 	 FIELD_DWORD_SHIFT(type, field))
341 
342 #define SET_VAR_FIELD(var, type, field, val) \
343 	do { \
344 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
345 		(~FIELD_BIT_MASK(type, field));	\
346 		var[FIELD_DWORD_OFFSET(type, field)] |= \
347 		(val) << FIELD_DWORD_SHIFT(type, field); \
348 	} while (0)
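
/* A minimal usage sketch for the field macros above, using the VFC CAM
 * command layout defined further below (the locals are hypothetical):
 *
 *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *	u32 row = 7;
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * The macro clears the field's bits in the dword that holds them, then ORs
 * in the new value shifted to the field's bit offset within that dword.
 */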
349 
350 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
351 	do { \
352 		for (i = 0; i < (arr_size); i++) \
353 			qed_wr(dev, ptt, addr, (arr)[i]); \
354 	} while (0)
355 
356 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
357 	do { \
358 		for (i = 0; i < (arr_size); i++) \
359 			(arr)[i] = qed_rd(dev, ptt, addr); \
360 	} while (0)
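
/* ARR_REG_WR/ARR_REG_RD stream an array through a single register address
 * one dword at a time; both macros rely on a loop variable "i" declared in
 * the enclosing scope. A minimal sketch (register addresses and buffer are
 * hypothetical):
 *
 *	u32 i, buf[8];
 *
 *	ARR_REG_WR(p_hwfn, p_ptt, data_wr_addr, buf, 8);
 *	ARR_REG_RD(p_hwfn, p_ptt, data_rd_addr, buf, 8);
 */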
361 
362 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
363 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
364 
365 /* Extra lines include a signature line + optional latency events line */
366 #define NUM_EXTRA_DBG_LINES(block_desc) \
367 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
368 #define NUM_DBG_LINES(block_desc) \
369 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
370 
371 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
372 #define RAM_LINES_TO_BYTES(lines) \
373 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
374 
375 #define REG_DUMP_LEN_SHIFT		24
376 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
377 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
378 
379 #define IDLE_CHK_RULE_SIZE_DWORDS \
380 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
381 
382 #define IDLE_CHK_RESULT_HDR_DWORDS \
383 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
384 
385 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
386 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
387 
388 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
389 
390 /* The sizes and offsets below are specified in bits */
391 #define VFC_CAM_CMD_STRUCT_SIZE		64
392 #define VFC_CAM_CMD_ROW_OFFSET		48
393 #define VFC_CAM_CMD_ROW_SIZE		9
394 #define VFC_CAM_ADDR_STRUCT_SIZE	16
395 #define VFC_CAM_ADDR_OP_OFFSET		0
396 #define VFC_CAM_ADDR_OP_SIZE		4
397 #define VFC_CAM_RESP_STRUCT_SIZE	256
398 #define VFC_RAM_ADDR_STRUCT_SIZE	16
399 #define VFC_RAM_ADDR_OP_OFFSET		0
400 #define VFC_RAM_ADDR_OP_SIZE		2
401 #define VFC_RAM_ADDR_ROW_OFFSET		2
402 #define VFC_RAM_ADDR_ROW_SIZE		10
403 #define VFC_RAM_RESP_STRUCT_SIZE	256
404 
405 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
406 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
407 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
408 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
409 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
410 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
411 
412 #define NUM_VFC_RAM_TYPES		4
413 
414 #define VFC_CAM_NUM_ROWS		512
415 
416 #define VFC_OPCODE_CAM_RD		14
417 #define VFC_OPCODE_RAM_RD		0
418 
419 #define NUM_RSS_MEM_TYPES		5
420 
421 #define NUM_BIG_RAM_TYPES		3
422 
423 #define NUM_PHY_TBUS_ADDRESSES		2048
424 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
425 
426 #define RESET_REG_UNRESET_OFFSET	4
427 
428 #define STALL_DELAY_MS			500
429 
430 #define STATIC_DEBUG_LINE_DWORDS	9
431 
432 #define NUM_COMMON_GLOBAL_PARAMS	8
433 
434 #define FW_IMG_MAIN			1
435 
436 #define REG_FIFO_ELEMENT_DWORDS		2
437 #define REG_FIFO_DEPTH_ELEMENTS		32
438 #define REG_FIFO_DEPTH_DWORDS \
439 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
440 
441 #define IGU_FIFO_ELEMENT_DWORDS		4
442 #define IGU_FIFO_DEPTH_ELEMENTS		64
443 #define IGU_FIFO_DEPTH_DWORDS \
444 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
445 
446 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
447 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
448 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
449 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
450 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
451 
452 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
453 	(MCP_REG_SCRATCH + \
454 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
455 
456 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
457 #define EMPTY_FW_IMAGE_STR		"???????????????"
458 
459 /***************************** Constant Arrays *******************************/
460 
461 struct dbg_array {
462 	const u32 *ptr;
463 	u32 size_in_dwords;
464 };
465 
466 /* Debug arrays */
467 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
468 
469 /* Chip constant definitions array */
470 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
471 	{ "bb",
472 	  {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
473 	   {0, 0, 0},
474 	   {0, 0, 0},
475 	   {0, 0, 0} } },
476 	{ "ah",
477 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
478 	   {0, 0, 0},
479 	   {0, 0, 0},
480 	   {0, 0, 0} } },
481 	{ "reserved",
482 	   {{0, 0, 0},
483 	   {0, 0, 0},
484 	   {0, 0, 0},
485 	   {0, 0, 0} } }
486 };
487 
488 /* Storm constant definitions array */
489 static struct storm_defs s_storm_defs[] = {
490 	/* Tstorm */
491 	{'T', BLOCK_TSEM,
492 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
493 	  DBG_BUS_CLIENT_RBCT}, true,
494 	 TSEM_REG_FAST_MEMORY,
495 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
496 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
497 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
498 	 TCM_REG_CTX_RBC_ACCS,
499 	 4, TCM_REG_AGG_CON_CTX,
500 	 16, TCM_REG_SM_CON_CTX,
501 	 2, TCM_REG_AGG_TASK_CTX,
502 	 4, TCM_REG_SM_TASK_CTX},
503 
504 	/* Mstorm */
505 	{'M', BLOCK_MSEM,
506 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
507 	  DBG_BUS_CLIENT_RBCM}, false,
508 	 MSEM_REG_FAST_MEMORY,
509 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
510 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
511 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
512 	 MCM_REG_CTX_RBC_ACCS,
513 	 1, MCM_REG_AGG_CON_CTX,
514 	 10, MCM_REG_SM_CON_CTX,
515 	 2, MCM_REG_AGG_TASK_CTX,
516 	 7, MCM_REG_SM_TASK_CTX},
517 
518 	/* Ustorm */
519 	{'U', BLOCK_USEM,
520 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
521 	  DBG_BUS_CLIENT_RBCU}, false,
522 	 USEM_REG_FAST_MEMORY,
523 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
524 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
525 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
526 	 UCM_REG_CTX_RBC_ACCS,
527 	 2, UCM_REG_AGG_CON_CTX,
528 	 13, UCM_REG_SM_CON_CTX,
529 	 3, UCM_REG_AGG_TASK_CTX,
530 	 3, UCM_REG_SM_TASK_CTX},
531 
532 	/* Xstorm */
533 	{'X', BLOCK_XSEM,
534 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
535 	  DBG_BUS_CLIENT_RBCX}, false,
536 	 XSEM_REG_FAST_MEMORY,
537 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
538 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
539 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
540 	 XCM_REG_CTX_RBC_ACCS,
541 	 9, XCM_REG_AGG_CON_CTX,
542 	 15, XCM_REG_SM_CON_CTX,
543 	 0, 0,
544 	 0, 0},
545 
546 	/* Ystorm */
547 	{'Y', BLOCK_YSEM,
548 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
549 	  DBG_BUS_CLIENT_RBCY}, false,
550 	 YSEM_REG_FAST_MEMORY,
551 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
552 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
553 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
554 	 YCM_REG_CTX_RBC_ACCS,
555 	 2, YCM_REG_AGG_CON_CTX,
556 	 3, YCM_REG_SM_CON_CTX,
557 	 2, YCM_REG_AGG_TASK_CTX,
558 	 12, YCM_REG_SM_TASK_CTX},
559 
560 	/* Pstorm */
561 	{'P', BLOCK_PSEM,
562 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
563 	  DBG_BUS_CLIENT_RBCS}, true,
564 	 PSEM_REG_FAST_MEMORY,
565 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
566 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
567 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
568 	 PCM_REG_CTX_RBC_ACCS,
569 	 0, 0,
570 	 10, PCM_REG_SM_CON_CTX,
571 	 0, 0,
572 	 0, 0}
573 };
574 
575 /* Block definitions array */
576 
577 static struct block_defs block_grc_defs = {
578 	"grc",
579 	{true, true, true}, false, 0,
580 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
581 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
582 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
583 	GRC_REG_DBG_FORCE_FRAME,
584 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
585 };
586 
587 static struct block_defs block_miscs_defs = {
588 	"miscs", {true, true, true}, false, 0,
589 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
590 	0, 0, 0, 0, 0,
591 	false, false, MAX_DBG_RESET_REGS, 0
592 };
593 
594 static struct block_defs block_misc_defs = {
595 	"misc", {true, true, true}, false, 0,
596 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
597 	0, 0, 0, 0, 0,
598 	false, false, MAX_DBG_RESET_REGS, 0
599 };
600 
601 static struct block_defs block_dbu_defs = {
602 	"dbu", {true, true, true}, false, 0,
603 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
604 	0, 0, 0, 0, 0,
605 	false, false, MAX_DBG_RESET_REGS, 0
606 };
607 
608 static struct block_defs block_pglue_b_defs = {
609 	"pglue_b",
610 	{true, true, true}, false, 0,
611 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
612 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
613 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
614 	PGLUE_B_REG_DBG_FORCE_FRAME,
615 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
616 };
617 
618 static struct block_defs block_cnig_defs = {
619 	"cnig",
620 	{true, true, true}, false, 0,
621 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
622 	 DBG_BUS_CLIENT_RBCW},
623 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
624 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
625 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
626 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
627 };
628 
629 static struct block_defs block_cpmu_defs = {
630 	"cpmu", {true, true, true}, false, 0,
631 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
632 	0, 0, 0, 0, 0,
633 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
634 };
635 
636 static struct block_defs block_ncsi_defs = {
637 	"ncsi",
638 	{true, true, true}, false, 0,
639 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
640 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
641 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
642 	NCSI_REG_DBG_FORCE_FRAME,
643 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
644 };
645 
646 static struct block_defs block_opte_defs = {
647 	"opte", {true, true, false}, false, 0,
648 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
649 	0, 0, 0, 0, 0,
650 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
651 };
652 
653 static struct block_defs block_bmb_defs = {
654 	"bmb",
655 	{true, true, true}, false, 0,
656 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
657 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
658 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
659 	BMB_REG_DBG_FORCE_FRAME,
660 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
661 };
662 
663 static struct block_defs block_pcie_defs = {
664 	"pcie",
665 	{true, true, true}, false, 0,
666 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
667 	 DBG_BUS_CLIENT_RBCH},
668 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
669 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
670 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
671 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
672 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
673 	false, false, MAX_DBG_RESET_REGS, 0
674 };
675 
676 static struct block_defs block_mcp_defs = {
677 	"mcp", {true, true, true}, false, 0,
678 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
679 	0, 0, 0, 0, 0,
680 	false, false, MAX_DBG_RESET_REGS, 0
681 };
682 
683 static struct block_defs block_mcp2_defs = {
684 	"mcp2",
685 	{true, true, true}, false, 0,
686 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
687 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
688 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
689 	MCP2_REG_DBG_FORCE_FRAME,
690 	false, false, MAX_DBG_RESET_REGS, 0
691 };
692 
693 static struct block_defs block_pswhst_defs = {
694 	"pswhst",
695 	{true, true, true}, false, 0,
696 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
697 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
698 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
699 	PSWHST_REG_DBG_FORCE_FRAME,
700 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
701 };
702 
703 static struct block_defs block_pswhst2_defs = {
704 	"pswhst2",
705 	{true, true, true}, false, 0,
706 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
707 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
708 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
709 	PSWHST2_REG_DBG_FORCE_FRAME,
710 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
711 };
712 
713 static struct block_defs block_pswrd_defs = {
714 	"pswrd",
715 	{true, true, true}, false, 0,
716 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
717 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
718 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
719 	PSWRD_REG_DBG_FORCE_FRAME,
720 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
721 };
722 
723 static struct block_defs block_pswrd2_defs = {
724 	"pswrd2",
725 	{true, true, true}, false, 0,
726 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
727 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
728 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
729 	PSWRD2_REG_DBG_FORCE_FRAME,
730 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
731 };
732 
733 static struct block_defs block_pswwr_defs = {
734 	"pswwr",
735 	{true, true, true}, false, 0,
736 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
737 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
738 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
739 	PSWWR_REG_DBG_FORCE_FRAME,
740 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
741 };
742 
743 static struct block_defs block_pswwr2_defs = {
744 	"pswwr2", {true, true, true}, false, 0,
745 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
746 	0, 0, 0, 0, 0,
747 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
748 };
749 
750 static struct block_defs block_pswrq_defs = {
751 	"pswrq",
752 	{true, true, true}, false, 0,
753 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
754 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
755 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
756 	PSWRQ_REG_DBG_FORCE_FRAME,
757 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
758 };
759 
760 static struct block_defs block_pswrq2_defs = {
761 	"pswrq2",
762 	{true, true, true}, false, 0,
763 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
764 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
765 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
766 	PSWRQ2_REG_DBG_FORCE_FRAME,
767 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
768 };
769 
770 static struct block_defs block_pglcs_defs = {
771 	"pglcs",
772 	{true, true, true}, false, 0,
773 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
774 	 DBG_BUS_CLIENT_RBCH},
775 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
776 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
777 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
778 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
779 };
780 
781 static struct block_defs block_ptu_defs = {
782 	"ptu",
783 	{true, true, true}, false, 0,
784 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
785 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
786 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
787 	PTU_REG_DBG_FORCE_FRAME,
788 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
789 };
790 
791 static struct block_defs block_dmae_defs = {
792 	"dmae",
793 	{true, true, true}, false, 0,
794 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
795 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
796 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
797 	DMAE_REG_DBG_FORCE_FRAME,
798 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
799 };
800 
801 static struct block_defs block_tcm_defs = {
802 	"tcm",
803 	{true, true, true}, true, DBG_TSTORM_ID,
804 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
805 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
806 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
807 	TCM_REG_DBG_FORCE_FRAME,
808 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
809 };
810 
811 static struct block_defs block_mcm_defs = {
812 	"mcm",
813 	{true, true, true}, true, DBG_MSTORM_ID,
814 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
815 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
816 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
817 	MCM_REG_DBG_FORCE_FRAME,
818 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
819 };
820 
821 static struct block_defs block_ucm_defs = {
822 	"ucm",
823 	{true, true, true}, true, DBG_USTORM_ID,
824 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
825 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
826 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
827 	UCM_REG_DBG_FORCE_FRAME,
828 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
829 };
830 
831 static struct block_defs block_xcm_defs = {
832 	"xcm",
833 	{true, true, true}, true, DBG_XSTORM_ID,
834 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
835 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
836 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
837 	XCM_REG_DBG_FORCE_FRAME,
838 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
839 };
840 
841 static struct block_defs block_ycm_defs = {
842 	"ycm",
843 	{true, true, true}, true, DBG_YSTORM_ID,
844 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
845 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
846 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
847 	YCM_REG_DBG_FORCE_FRAME,
848 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
849 };
850 
851 static struct block_defs block_pcm_defs = {
852 	"pcm",
853 	{true, true, true}, true, DBG_PSTORM_ID,
854 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
855 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
856 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
857 	PCM_REG_DBG_FORCE_FRAME,
858 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
859 };
860 
861 static struct block_defs block_qm_defs = {
862 	"qm",
863 	{true, true, true}, false, 0,
864 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
865 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
866 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
867 	QM_REG_DBG_FORCE_FRAME,
868 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
869 };
870 
871 static struct block_defs block_tm_defs = {
872 	"tm",
873 	{true, true, true}, false, 0,
874 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
875 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
876 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
877 	TM_REG_DBG_FORCE_FRAME,
878 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
879 };
880 
881 static struct block_defs block_dorq_defs = {
882 	"dorq",
883 	{true, true, true}, false, 0,
884 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
885 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
886 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
887 	DORQ_REG_DBG_FORCE_FRAME,
888 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
889 };
890 
891 static struct block_defs block_brb_defs = {
892 	"brb",
893 	{true, true, true}, false, 0,
894 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
895 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
896 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
897 	BRB_REG_DBG_FORCE_FRAME,
898 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
899 };
900 
901 static struct block_defs block_src_defs = {
902 	"src",
903 	{true, true, true}, false, 0,
904 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
905 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
906 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
907 	SRC_REG_DBG_FORCE_FRAME,
908 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
909 };
910 
911 static struct block_defs block_prs_defs = {
912 	"prs",
913 	{true, true, true}, false, 0,
914 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
915 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
916 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
917 	PRS_REG_DBG_FORCE_FRAME,
918 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
919 };
920 
921 static struct block_defs block_tsdm_defs = {
922 	"tsdm",
923 	{true, true, true}, true, DBG_TSTORM_ID,
924 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
925 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
926 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
927 	TSDM_REG_DBG_FORCE_FRAME,
928 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
929 };
930 
931 static struct block_defs block_msdm_defs = {
932 	"msdm",
933 	{true, true, true}, true, DBG_MSTORM_ID,
934 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
935 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
936 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
937 	MSDM_REG_DBG_FORCE_FRAME,
938 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
939 };
940 
941 static struct block_defs block_usdm_defs = {
942 	"usdm",
943 	{true, true, true}, true, DBG_USTORM_ID,
944 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
945 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
946 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
947 	USDM_REG_DBG_FORCE_FRAME,
948 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
949 };
950 
951 static struct block_defs block_xsdm_defs = {
952 	"xsdm",
953 	{true, true, true}, true, DBG_XSTORM_ID,
954 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
955 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
956 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
957 	XSDM_REG_DBG_FORCE_FRAME,
958 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
959 };
960 
961 static struct block_defs block_ysdm_defs = {
962 	"ysdm",
963 	{true, true, true}, true, DBG_YSTORM_ID,
964 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
965 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
966 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
967 	YSDM_REG_DBG_FORCE_FRAME,
968 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
969 };
970 
971 static struct block_defs block_psdm_defs = {
972 	"psdm",
973 	{true, true, true}, true, DBG_PSTORM_ID,
974 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
975 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
976 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
977 	PSDM_REG_DBG_FORCE_FRAME,
978 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
979 };
980 
981 static struct block_defs block_tsem_defs = {
982 	"tsem",
983 	{true, true, true}, true, DBG_TSTORM_ID,
984 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
985 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
986 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
987 	TSEM_REG_DBG_FORCE_FRAME,
988 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
989 };
990 
991 static struct block_defs block_msem_defs = {
992 	"msem",
993 	{true, true, true}, true, DBG_MSTORM_ID,
994 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
995 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
996 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
997 	MSEM_REG_DBG_FORCE_FRAME,
998 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
999 };
1000 
1001 static struct block_defs block_usem_defs = {
1002 	"usem",
1003 	{true, true, true}, true, DBG_USTORM_ID,
1004 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1005 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1006 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1007 	USEM_REG_DBG_FORCE_FRAME,
1008 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1009 };
1010 
1011 static struct block_defs block_xsem_defs = {
1012 	"xsem",
1013 	{true, true, true}, true, DBG_XSTORM_ID,
1014 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1015 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1016 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1017 	XSEM_REG_DBG_FORCE_FRAME,
1018 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1019 };
1020 
1021 static struct block_defs block_ysem_defs = {
1022 	"ysem",
1023 	{true, true, true}, true, DBG_YSTORM_ID,
1024 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1025 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1026 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1027 	YSEM_REG_DBG_FORCE_FRAME,
1028 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1029 };
1030 
1031 static struct block_defs block_psem_defs = {
1032 	"psem",
1033 	{true, true, true}, true, DBG_PSTORM_ID,
1034 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1035 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1036 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1037 	PSEM_REG_DBG_FORCE_FRAME,
1038 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1039 };
1040 
1041 static struct block_defs block_rss_defs = {
1042 	"rss",
1043 	{true, true, true}, false, 0,
1044 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1045 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1046 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1047 	RSS_REG_DBG_FORCE_FRAME,
1048 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1049 };
1050 
1051 static struct block_defs block_tmld_defs = {
1052 	"tmld",
1053 	{true, true, true}, false, 0,
1054 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1055 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1056 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1057 	TMLD_REG_DBG_FORCE_FRAME,
1058 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1059 };
1060 
1061 static struct block_defs block_muld_defs = {
1062 	"muld",
1063 	{true, true, true}, false, 0,
1064 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1065 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1066 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1067 	MULD_REG_DBG_FORCE_FRAME,
1068 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1069 };
1070 
1071 static struct block_defs block_yuld_defs = {
1072 	"yuld",
1073 	{true, true, false}, false, 0,
1074 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1075 	 MAX_DBG_BUS_CLIENTS},
1076 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1077 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1078 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1079 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1080 	15
1081 };
1082 
1083 static struct block_defs block_xyld_defs = {
1084 	"xyld",
1085 	{true, true, true}, false, 0,
1086 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1087 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1088 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1089 	XYLD_REG_DBG_FORCE_FRAME,
1090 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1091 };
1092 
1093 static struct block_defs block_ptld_defs = {
1094 	"ptld",
1095 	{false, false, true}, false, 0,
1096 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1097 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1098 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1099 	PTLD_REG_DBG_FORCE_FRAME_E5,
1100 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1101 	28
1102 };
1103 
1104 static struct block_defs block_ypld_defs = {
1105 	"ypld",
1106 	{false, false, true}, false, 0,
1107 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1108 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1109 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1110 	YPLD_REG_DBG_FORCE_FRAME_E5,
1111 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1112 	27
1113 };
1114 
1115 static struct block_defs block_prm_defs = {
1116 	"prm",
1117 	{true, true, true}, false, 0,
1118 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1119 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1120 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1121 	PRM_REG_DBG_FORCE_FRAME,
1122 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1123 };
1124 
1125 static struct block_defs block_pbf_pb1_defs = {
1126 	"pbf_pb1",
1127 	{true, true, true}, false, 0,
1128 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1129 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1130 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1131 	PBF_PB1_REG_DBG_FORCE_FRAME,
1132 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1133 	11
1134 };
1135 
1136 static struct block_defs block_pbf_pb2_defs = {
1137 	"pbf_pb2",
1138 	{true, true, true}, false, 0,
1139 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1140 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1141 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1142 	PBF_PB2_REG_DBG_FORCE_FRAME,
1143 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1144 	12
1145 };
1146 
1147 static struct block_defs block_rpb_defs = {
1148 	"rpb",
1149 	{true, true, true}, false, 0,
1150 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1151 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1152 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1153 	RPB_REG_DBG_FORCE_FRAME,
1154 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1155 };
1156 
1157 static struct block_defs block_btb_defs = {
1158 	"btb",
1159 	{true, true, true}, false, 0,
1160 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1161 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1162 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1163 	BTB_REG_DBG_FORCE_FRAME,
1164 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1165 };
1166 
1167 static struct block_defs block_pbf_defs = {
1168 	"pbf",
1169 	{true, true, true}, false, 0,
1170 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1171 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1172 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1173 	PBF_REG_DBG_FORCE_FRAME,
1174 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1175 };
1176 
1177 static struct block_defs block_rdif_defs = {
1178 	"rdif",
1179 	{true, true, true}, false, 0,
1180 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1181 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1182 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1183 	RDIF_REG_DBG_FORCE_FRAME,
1184 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1185 };
1186 
1187 static struct block_defs block_tdif_defs = {
1188 	"tdif",
1189 	{true, true, true}, false, 0,
1190 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1191 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1192 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1193 	TDIF_REG_DBG_FORCE_FRAME,
1194 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1195 };
1196 
1197 static struct block_defs block_cdu_defs = {
1198 	"cdu",
1199 	{true, true, true}, false, 0,
1200 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1201 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1202 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1203 	CDU_REG_DBG_FORCE_FRAME,
1204 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1205 };
1206 
1207 static struct block_defs block_ccfc_defs = {
1208 	"ccfc",
1209 	{true, true, true}, false, 0,
1210 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1211 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1212 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1213 	CCFC_REG_DBG_FORCE_FRAME,
1214 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1215 };
1216 
1217 static struct block_defs block_tcfc_defs = {
1218 	"tcfc",
1219 	{true, true, true}, false, 0,
1220 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1221 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1222 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1223 	TCFC_REG_DBG_FORCE_FRAME,
1224 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1225 };
1226 
1227 static struct block_defs block_igu_defs = {
1228 	"igu",
1229 	{true, true, true}, false, 0,
1230 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1231 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1232 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1233 	IGU_REG_DBG_FORCE_FRAME,
1234 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1235 };
1236 
1237 static struct block_defs block_cau_defs = {
1238 	"cau",
1239 	{true, true, true}, false, 0,
1240 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1241 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1242 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1243 	CAU_REG_DBG_FORCE_FRAME,
1244 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1245 };
1246 
1247 static struct block_defs block_rgfs_defs = {
1248 	"rgfs", {false, false, true}, false, 0,
1249 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1250 	0, 0, 0, 0, 0,
1251 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1252 };
1253 
1254 static struct block_defs block_rgsrc_defs = {
1255 	"rgsrc",
1256 	{false, false, true}, false, 0,
1257 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1258 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1259 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1260 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1261 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1262 	30
1263 };
1264 
1265 static struct block_defs block_tgfs_defs = {
1266 	"tgfs", {false, false, true}, false, 0,
1267 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1268 	0, 0, 0, 0, 0,
1269 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1270 };
1271 
1272 static struct block_defs block_tgsrc_defs = {
1273 	"tgsrc",
1274 	{false, false, true}, false, 0,
1275 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1276 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1277 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1278 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1279 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1280 	31
1281 };
1282 
1283 static struct block_defs block_umac_defs = {
1284 	"umac",
1285 	{true, true, true}, false, 0,
1286 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1287 	 DBG_BUS_CLIENT_RBCZ},
1288 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1289 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1290 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1291 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1292 };
1293 
1294 static struct block_defs block_xmac_defs = {
1295 	"xmac", {true, false, false}, false, 0,
1296 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1297 	0, 0, 0, 0, 0,
1298 	false, false, MAX_DBG_RESET_REGS, 0
1299 };
1300 
1301 static struct block_defs block_dbg_defs = {
1302 	"dbg", {true, true, true}, false, 0,
1303 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1304 	0, 0, 0, 0, 0,
1305 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1306 };
1307 
1308 static struct block_defs block_nig_defs = {
1309 	"nig",
1310 	{true, true, true}, false, 0,
1311 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1312 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1313 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1314 	NIG_REG_DBG_FORCE_FRAME,
1315 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1316 };
1317 
1318 static struct block_defs block_wol_defs = {
1319 	"wol",
1320 	{false, true, true}, false, 0,
1321 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1322 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1323 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1324 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1325 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1326 };
1327 
1328 static struct block_defs block_bmbn_defs = {
1329 	"bmbn",
1330 	{false, true, true}, false, 0,
1331 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1332 	 DBG_BUS_CLIENT_RBCB},
1333 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1334 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1335 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1336 	false, false, MAX_DBG_RESET_REGS, 0
1337 };
1338 
1339 static struct block_defs block_ipc_defs = {
1340 	"ipc", {true, true, true}, false, 0,
1341 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1342 	0, 0, 0, 0, 0,
1343 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1344 };
1345 
1346 static struct block_defs block_nwm_defs = {
1347 	"nwm",
1348 	{false, true, true}, false, 0,
1349 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1350 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1351 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1352 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1353 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1354 };
1355 
1356 static struct block_defs block_nws_defs = {
1357 	"nws",
1358 	{false, true, true}, false, 0,
1359 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1360 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1361 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1362 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1363 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1364 };
1365 
1366 static struct block_defs block_ms_defs = {
1367 	"ms",
1368 	{false, true, true}, false, 0,
1369 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1370 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1371 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1372 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1373 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1374 };
1375 
1376 static struct block_defs block_phy_pcie_defs = {
1377 	"phy_pcie",
1378 	{false, true, true}, false, 0,
1379 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1380 	 DBG_BUS_CLIENT_RBCH},
1381 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1382 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1383 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1384 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1385 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1386 	false, false, MAX_DBG_RESET_REGS, 0
1387 };
1388 
1389 static struct block_defs block_led_defs = {
1390 	"led", {false, true, true}, false, 0,
1391 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1392 	0, 0, 0, 0, 0,
1393 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1394 };
1395 
1396 static struct block_defs block_avs_wrap_defs = {
1397 	"avs_wrap", {false, true, false}, false, 0,
1398 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1399 	0, 0, 0, 0, 0,
1400 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1401 };
1402 
1403 static struct block_defs block_pxpreqbus_defs = {
1404 	"pxpreqbus", {false, false, false}, false, 0,
1405 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1406 	0, 0, 0, 0, 0,
1407 	false, false, MAX_DBG_RESET_REGS, 0
1408 };
1409 
1410 static struct block_defs block_misc_aeu_defs = {
1411 	"misc_aeu", {true, true, true}, false, 0,
1412 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1413 	0, 0, 0, 0, 0,
1414 	false, false, MAX_DBG_RESET_REGS, 0
1415 };
1416 
1417 static struct block_defs block_bar0_map_defs = {
1418 	"bar0_map", {true, true, true}, false, 0,
1419 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1420 	0, 0, 0, 0, 0,
1421 	false, false, MAX_DBG_RESET_REGS, 0
1422 };
1423 
1424 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1425 	&block_grc_defs,
1426 	&block_miscs_defs,
1427 	&block_misc_defs,
1428 	&block_dbu_defs,
1429 	&block_pglue_b_defs,
1430 	&block_cnig_defs,
1431 	&block_cpmu_defs,
1432 	&block_ncsi_defs,
1433 	&block_opte_defs,
1434 	&block_bmb_defs,
1435 	&block_pcie_defs,
1436 	&block_mcp_defs,
1437 	&block_mcp2_defs,
1438 	&block_pswhst_defs,
1439 	&block_pswhst2_defs,
1440 	&block_pswrd_defs,
1441 	&block_pswrd2_defs,
1442 	&block_pswwr_defs,
1443 	&block_pswwr2_defs,
1444 	&block_pswrq_defs,
1445 	&block_pswrq2_defs,
1446 	&block_pglcs_defs,
1447 	&block_dmae_defs,
1448 	&block_ptu_defs,
1449 	&block_tcm_defs,
1450 	&block_mcm_defs,
1451 	&block_ucm_defs,
1452 	&block_xcm_defs,
1453 	&block_ycm_defs,
1454 	&block_pcm_defs,
1455 	&block_qm_defs,
1456 	&block_tm_defs,
1457 	&block_dorq_defs,
1458 	&block_brb_defs,
1459 	&block_src_defs,
1460 	&block_prs_defs,
1461 	&block_tsdm_defs,
1462 	&block_msdm_defs,
1463 	&block_usdm_defs,
1464 	&block_xsdm_defs,
1465 	&block_ysdm_defs,
1466 	&block_psdm_defs,
1467 	&block_tsem_defs,
1468 	&block_msem_defs,
1469 	&block_usem_defs,
1470 	&block_xsem_defs,
1471 	&block_ysem_defs,
1472 	&block_psem_defs,
1473 	&block_rss_defs,
1474 	&block_tmld_defs,
1475 	&block_muld_defs,
1476 	&block_yuld_defs,
1477 	&block_xyld_defs,
1478 	&block_ptld_defs,
1479 	&block_ypld_defs,
1480 	&block_prm_defs,
1481 	&block_pbf_pb1_defs,
1482 	&block_pbf_pb2_defs,
1483 	&block_rpb_defs,
1484 	&block_btb_defs,
1485 	&block_pbf_defs,
1486 	&block_rdif_defs,
1487 	&block_tdif_defs,
1488 	&block_cdu_defs,
1489 	&block_ccfc_defs,
1490 	&block_tcfc_defs,
1491 	&block_igu_defs,
1492 	&block_cau_defs,
1493 	&block_rgfs_defs,
1494 	&block_rgsrc_defs,
1495 	&block_tgfs_defs,
1496 	&block_tgsrc_defs,
1497 	&block_umac_defs,
1498 	&block_xmac_defs,
1499 	&block_dbg_defs,
1500 	&block_nig_defs,
1501 	&block_wol_defs,
1502 	&block_bmbn_defs,
1503 	&block_ipc_defs,
1504 	&block_nwm_defs,
1505 	&block_nws_defs,
1506 	&block_ms_defs,
1507 	&block_phy_pcie_defs,
1508 	&block_led_defs,
1509 	&block_avs_wrap_defs,
1510 	&block_pxpreqbus_defs,
1511 	&block_misc_aeu_defs,
1512 	&block_bar0_map_defs,
1513 };
1514 
1515 static struct platform_defs s_platform_defs[] = {
1516 	{"asic", 1, 256, 32768},
1517 	{"reserved", 0, 0, 0},
1518 	{"reserved2", 0, 0, 0},
1519 	{"reserved3", 0, 0, 0}
1520 };
1521 
1522 static struct grc_param_defs s_grc_param_defs[] = {
1523 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1524 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1525 
1526 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1527 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1528 
1529 	/* DBG_GRC_PARAM_DUMP_USTORM */
1530 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1531 
1532 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1533 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1534 
1535 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1536 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1537 
1538 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1539 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1540 
1541 	/* DBG_GRC_PARAM_DUMP_REGS */
1542 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1543 
1544 	/* DBG_GRC_PARAM_DUMP_RAM */
1545 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1546 
1547 	/* DBG_GRC_PARAM_DUMP_PBUF */
1548 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1549 
1550 	/* DBG_GRC_PARAM_DUMP_IOR */
1551 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1552 
1553 	/* DBG_GRC_PARAM_DUMP_VFC */
1554 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1555 
1556 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1557 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1558 
1559 	/* DBG_GRC_PARAM_DUMP_ILT */
1560 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1561 
1562 	/* DBG_GRC_PARAM_DUMP_RSS */
1563 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1564 
1565 	/* DBG_GRC_PARAM_DUMP_CAU */
1566 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1567 
1568 	/* DBG_GRC_PARAM_DUMP_QM */
1569 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1570 
1571 	/* DBG_GRC_PARAM_DUMP_MCP */
1572 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1573 
1574 	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1575 	{{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1576 
1577 	/* DBG_GRC_PARAM_DUMP_CFC */
1578 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1579 
1580 	/* DBG_GRC_PARAM_DUMP_IGU */
1581 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1582 
1583 	/* DBG_GRC_PARAM_DUMP_BRB */
1584 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1585 
1586 	/* DBG_GRC_PARAM_DUMP_BTB */
1587 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1588 
1589 	/* DBG_GRC_PARAM_DUMP_BMB */
1590 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1591 
1592 	/* DBG_GRC_PARAM_DUMP_NIG */
1593 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1594 
1595 	/* DBG_GRC_PARAM_DUMP_MULD */
1596 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1597 
1598 	/* DBG_GRC_PARAM_DUMP_PRS */
1599 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1600 
1601 	/* DBG_GRC_PARAM_DUMP_DMAE */
1602 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1603 
1604 	/* DBG_GRC_PARAM_DUMP_TM */
1605 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1606 
1607 	/* DBG_GRC_PARAM_DUMP_SDM */
1608 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1609 
1610 	/* DBG_GRC_PARAM_DUMP_DIF */
1611 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1612 
1613 	/* DBG_GRC_PARAM_DUMP_STATIC */
1614 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1615 
1616 	/* DBG_GRC_PARAM_UNSTALL */
1617 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1618 
1619 	/* DBG_GRC_PARAM_NUM_LCIDS */
1620 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1621 	 MAX_LCIDS, MAX_LCIDS},
1622 
1623 	/* DBG_GRC_PARAM_NUM_LTIDS */
1624 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1625 	 MAX_LTIDS, MAX_LTIDS},
1626 
1627 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1628 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1629 
1630 	/* DBG_GRC_PARAM_CRASH */
1631 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1632 
1633 	/* DBG_GRC_PARAM_PARITY_SAFE */
1634 	{{0, 0, 0}, 0, 1, false, false, 1, 0},
1635 
1636 	/* DBG_GRC_PARAM_DUMP_CM */
1637 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1638 
1639 	/* DBG_GRC_PARAM_DUMP_PHY */
1640 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1641 
1642 	/* DBG_GRC_PARAM_NO_MCP */
1643 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1644 
1645 	/* DBG_GRC_PARAM_NO_FW_VER */
1646 	{{0, 0, 0}, 0, 1, false, false, 0, 0}
1647 };
1648 
1649 static struct rss_mem_defs s_rss_mem_defs[] = {
1650 	{ "rss_mem_cid", "rss_cid", 0, 32,
1651 	  {256, 320, 512} },
1652 
1653 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1654 	  {128, 208, 257} },
1655 
1656 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1657 	  {128, 208, 257} },
1658 
1659 	{ "rss_mem_info", "rss_info", 3072, 16,
1660 	  {128, 208, 256} },
1661 
1662 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1663 	  {16384, 26624, 32768} }
1664 };
1665 
1666 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1667 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1668 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1669 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1670 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1671 };
1672 
1673 static struct big_ram_defs s_big_ram_defs[] = {
1674 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1675 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1676 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1677 	  {153600, 180224, 282624} },
1678 
1679 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1680 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1681 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1682 	  {92160, 117760, 168960} },
1683 
1684 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1685 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1686 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1687 	  {36864, 36864, 36864} }
1688 };
1689 
1690 static struct reset_reg_defs s_reset_regs_defs[] = {
1691 	/* DBG_RESET_REG_MISCS_PL_UA */
1692 	{ MISCS_REG_RESET_PL_UA,
1693 	  {true, true, true}, {0x0, 0x0, 0x0} },
1694 
1695 	/* DBG_RESET_REG_MISCS_PL_HV */
1696 	{ MISCS_REG_RESET_PL_HV,
1697 	  {true, true, true}, {0x0, 0x400, 0x600} },
1698 
1699 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1700 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1701 	  {false, true, true}, {0x0, 0x0, 0x0} },
1702 
1703 	/* DBG_RESET_REG_MISC_PL_UA */
1704 	{ MISC_REG_RESET_PL_UA,
1705 	  {true, true, true}, {0x0, 0x0, 0x0} },
1706 
1707 	/* DBG_RESET_REG_MISC_PL_HV */
1708 	{ MISC_REG_RESET_PL_HV,
1709 	  {true, true, true}, {0x0, 0x0, 0x0} },
1710 
1711 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1712 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1713 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1714 
1715 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1716 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1717 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1718 
1719 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1720 	{ MISC_REG_RESET_PL_PDA_VAUX,
1721 	  {true, true, true}, {0x2, 0x2, 0x2} },
1722 };
1723 
1724 static struct phy_defs s_phy_defs[] = {
1725 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1726 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1727 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1728 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1729 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1730 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1731 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1732 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1733 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1734 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1735 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1736 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1737 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1738 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1739 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1740 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1741 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1742 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1743 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1744 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1745 };
1746 
1747 /**************************** Private Functions ******************************/
1748 
1749 /* Reads and returns a single dword from the specified unaligned buffer */
1750 static u32 qed_read_unaligned_dword(u8 *buf)
1751 {
1752 	u32 dword;
1753 
1754 	memcpy((u8 *)&dword, buf, sizeof(dword));
1755 	return dword;
1756 }
1757 
1758 /* Returns the value of the specified GRC param */
1759 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1760 			     enum dbg_grc_params grc_param)
1761 {
1762 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1763 
1764 	return dev_data->grc.param_val[grc_param];
1765 }
1766 
1767 /* Initializes the GRC parameters */
1768 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1769 {
1770 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1771 
1772 	if (!dev_data->grc.params_initialized) {
1773 		qed_dbg_grc_set_params_default(p_hwfn);
1774 		dev_data->grc.params_initialized = 1;
1775 	}
1776 }
1777 
1778 /* Initializes debug data for the specified device */
1779 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1780 					struct qed_ptt *p_ptt)
1781 {
1782 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1783 
1784 	if (dev_data->initialized)
1785 		return DBG_STATUS_OK;
1786 
1787 	if (QED_IS_K2(p_hwfn->cdev)) {
1788 		dev_data->chip_id = CHIP_K2;
1789 		dev_data->mode_enable[MODE_K2] = 1;
1790 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1791 		dev_data->chip_id = CHIP_BB;
1792 		dev_data->mode_enable[MODE_BB] = 1;
1793 	} else {
1794 		return DBG_STATUS_UNKNOWN_CHIP;
1795 	}
1796 
1797 	dev_data->platform_id = PLATFORM_ASIC;
1798 	dev_data->mode_enable[MODE_ASIC] = 1;
1799 
1800 	/* Initializes the GRC parameters */
1801 	qed_dbg_grc_init_params(p_hwfn);
1802 
1803 	dev_data->use_dmae = true;
1804 	dev_data->num_regs_read = 0;
1805 	dev_data->initialized = 1;
1806 
1807 	return DBG_STATUS_OK;
1808 }
1809 
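/* Returns the Debug Bus block descriptor of the specified block for the
 * current chip.
 */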
1810 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1811 						    enum block_id block_id)
1812 {
1813 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1814 
1815 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1816 						       MAX_CHIP_IDS +
1817 						       dev_data->chip_id];
1818 }
1819 
1820 /* Reads the FW info structure for the specified Storm from the chip,
1821  * and writes it to the specified fw_info pointer.
1822  */
1823 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1824 			     struct qed_ptt *p_ptt,
1825 			     u8 storm_id, struct fw_info *fw_info)
1826 {
1827 	struct storm_defs *storm = &s_storm_defs[storm_id];
1828 	struct fw_info_location fw_info_location;
1829 	u32 addr, i, *dest;
1830 
1831 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1832 	memset(fw_info, 0, sizeof(*fw_info));
1833 
1834 	/* First, read the address that points to the fw_info location.
1835 	 * The address is located in the last line of the Storm RAM.
1836 	 */
1837 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1838 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1839 	       sizeof(fw_info_location);
1840 	dest = (u32 *)&fw_info_location;
1841 
1842 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1843 	     i++, addr += BYTES_IN_DWORD)
1844 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1845 
1846 	/* Read FW version info from Storm RAM */
1847 	if (fw_info_location.size > 0 && fw_info_location.size <=
1848 	    sizeof(*fw_info)) {
1849 		addr = fw_info_location.grc_addr;
1850 		dest = (u32 *)fw_info;
1851 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1852 		     i++, addr += BYTES_IN_DWORD)
1853 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1854 	}
1855 }
1856 
1857 /* Dumps the specified string to the specified buffer.
1858  * Returns the dumped size in bytes.
1859  */
1860 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1861 {
1862 	if (dump)
1863 		strcpy(dump_buf, str);
1864 
1865 	return (u32)strlen(str) + 1;
1866 }
1867 
1868 /* Dumps zeros to align the specified buffer to dwords.
1869  * Returns the dumped size in bytes.
1870  */
1871 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1872 {
1873 	u8 offset_in_dword, align_size;
1874 
1875 	offset_in_dword = (u8)(byte_offset & 0x3);
1876 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1877 
1878 	if (dump && align_size)
1879 		memset(dump_buf, 0, align_size);
1880 
1881 	return align_size;
1882 }
1883 
1884 /* Writes the specified string param to the specified buffer.
1885  * Returns the dumped size in dwords.
1886  */
1887 static u32 qed_dump_str_param(u32 *dump_buf,
1888 			      bool dump,
1889 			      const char *param_name, const char *param_val)
1890 {
1891 	char *char_buf = (char *)dump_buf;
1892 	u32 offset = 0;
1893 
1894 	/* Dump param name */
1895 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1896 
1897 	/* Indicate a string param value */
1898 	if (dump)
1899 		*(char_buf + offset) = 1;
1900 	offset++;
1901 
1902 	/* Dump param value */
1903 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1904 
1905 	/* Align buffer to next dword */
1906 	offset += qed_dump_align(char_buf + offset, dump, offset);
1907 
1908 	return BYTES_TO_DWORDS(offset);
1909 }
1910 
1911 /* Writes the specified numeric param to the specified buffer.
1912  * Returns the dumped size in dwords.
1913  */
1914 static u32 qed_dump_num_param(u32 *dump_buf,
1915 			      bool dump, const char *param_name, u32 param_val)
1916 {
1917 	char *char_buf = (char *)dump_buf;
1918 	u32 offset = 0;
1919 
1920 	/* Dump param name */
1921 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1922 
1923 	/* Indicate a numeric param value */
1924 	if (dump)
1925 		*(char_buf + offset) = 0;
1926 	offset++;
1927 
1928 	/* Align buffer to next dword */
1929 	offset += qed_dump_align(char_buf + offset, dump, offset);
1930 
1931 	/* Dump param value (and change offset from bytes to dwords) */
1932 	offset = BYTES_TO_DWORDS(offset);
1933 	if (dump)
1934 		*(dump_buf + offset) = param_val;
1935 	offset++;
1936 
1937 	return offset;
1938 }
1939 
1940 /* Reads the FW version and writes it as a param to the specified buffer.
1941  * Returns the dumped size in dwords.
1942  */
1943 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1944 				 struct qed_ptt *p_ptt,
1945 				 u32 *dump_buf, bool dump)
1946 {
1947 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1948 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1949 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1950 	struct fw_info fw_info = { {0}, {0} };
1951 	u32 offset = 0;
1952 
1953 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1954 		/* Read FW image/version from PRAM in a non-reset SEMI */
1955 		bool found = false;
1956 		u8 storm_id;
1957 
1958 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1959 		     storm_id++) {
1960 			struct storm_defs *storm = &s_storm_defs[storm_id];
1961 
1962 			/* Read FW version/image */
1963 			/* Skip Storms whose SEMI block is in reset */
1964 				continue;
1965 
1966 			/* Read FW info for the current Storm */
1967 			qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1968 
1969 			/* Create FW version/image strings */
1970 			if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1971 				     "%d_%d_%d_%d", fw_info.ver.num.major,
1972 				     fw_info.ver.num.minor, fw_info.ver.num.rev,
1973 				     fw_info.ver.num.eng) < 0)
1974 				DP_NOTICE(p_hwfn,
1975 					  "Unexpected debug error: invalid FW version string\n");
1976 			switch (fw_info.ver.image_id) {
1977 			case FW_IMG_MAIN:
1978 				strcpy(fw_img_str, "main");
1979 				break;
1980 			default:
1981 				strcpy(fw_img_str, "unknown");
1982 				break;
1983 			}
1984 
1985 			found = true;
1986 		}
1987 	}
1988 
1989 	/* Dump FW version, image and timestamp */
1990 	offset += qed_dump_str_param(dump_buf + offset,
1991 				     dump, "fw-version", fw_ver_str);
1992 	offset += qed_dump_str_param(dump_buf + offset,
1993 				     dump, "fw-image", fw_img_str);
1994 	offset += qed_dump_num_param(dump_buf + offset,
1995 				     dump,
1996 				     "fw-timestamp", fw_info.ver.timestamp);
1997 
1998 	return offset;
1999 }
2000 
2001 /* Reads the MFW version and writes it as a param to the specified buffer.
2002  * Returns the dumped size in dwords.
2003  */
2004 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2005 				  struct qed_ptt *p_ptt,
2006 				  u32 *dump_buf, bool dump)
2007 {
2008 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2009 
2010 	if (dump &&
2011 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2012 		u32 global_section_offsize, global_section_addr, mfw_ver;
2013 		u32 public_data_addr, global_section_offsize_addr;
2014 
2015 		/* Find MCP public data GRC address. Needs to be ORed with
2016 		 * MCP_REG_SCRATCH due to a HW bug.
2017 		 */
2018 		public_data_addr = qed_rd(p_hwfn,
2019 					  p_ptt,
2020 					  MISC_REG_SHARED_MEM_ADDR) |
2021 				   MCP_REG_SCRATCH;
2022 
2023 		/* Find MCP public global section offset */
2024 		global_section_offsize_addr = public_data_addr +
2025 					      offsetof(struct mcp_public_data,
2026 						       sections) +
2027 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2028 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2029 						global_section_offsize_addr);
2030 		global_section_addr =
2031 			MCP_REG_SCRATCH +
2032 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2033 
2034 		/* Read MFW version from MCP public global section */
2035 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2036 				 global_section_addr +
2037 				 offsetof(struct public_global, mfw_ver));
2038 
2039 		/* Dump MFW version param */
2040 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2041 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2042 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2043 			DP_NOTICE(p_hwfn,
2044 				  "Unexpected debug error: invalid MFW version string\n");
2045 	}
2046 
2047 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2048 }
2049 
2050 /* Writes a section header to the specified buffer.
2051  * Returns the dumped size in dwords.
2052  */
2053 static u32 qed_dump_section_hdr(u32 *dump_buf,
2054 				bool dump, const char *name, u32 num_params)
2055 {
2056 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2057 }
2058 
2059 /* Writes the common global params to the specified buffer.
2060  * Returns the dumped size in dwords.
2061  */
2062 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2063 					 struct qed_ptt *p_ptt,
2064 					 u32 *dump_buf,
2065 					 bool dump,
2066 					 u8 num_specific_global_params)
2067 {
2068 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2069 	u32 offset = 0;
2070 	u8 num_params;
2071 
2072 	/* Dump global params section header */
2073 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2074 	offset += qed_dump_section_hdr(dump_buf + offset,
2075 				       dump, "global_params", num_params);
2076 
2077 	/* Store params */
2078 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2079 	offset += qed_dump_mfw_ver_param(p_hwfn,
2080 					 p_ptt, dump_buf + offset, dump);
2081 	offset += qed_dump_num_param(dump_buf + offset,
2082 				     dump, "tools-version", TOOLS_VERSION);
2083 	offset += qed_dump_str_param(dump_buf + offset,
2084 				     dump,
2085 				     "chip",
2086 				     s_chip_defs[dev_data->chip_id].name);
2087 	offset += qed_dump_str_param(dump_buf + offset,
2088 				     dump,
2089 				     "platform",
2090 				     s_platform_defs[dev_data->platform_id].
2091 				     name);
2092 	offset +=
2093 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2094 			       p_hwfn->abs_pf_id);
2095 
2096 	return offset;
2097 }
2098 
2099 /* Writes the "last" section (including CRC) to the specified buffer at the
2100  * given offset. Returns the dumped size in dwords.
2101  */
2102 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2103 {
2104 	u32 start_offset = offset;
2105 
2106 	/* Dump CRC section header */
2107 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2108 
2109 	/* Calculate CRC32 and add it to the dword after the "last" section */
2110 	if (dump)
2111 		*(dump_buf + offset) = ~crc32(0xffffffff,
2112 					      (u8 *)dump_buf,
2113 					      DWORDS_TO_BYTES(offset));
2114 
2115 	offset++;
2116 
2117 	return offset - start_offset;
2118 }
2119 
2120 /* Update blocks reset state */
2121 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2122 					  struct qed_ptt *p_ptt)
2123 {
2124 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2125 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2126 	u32 i;
2127 
2128 	/* Read reset registers */
2129 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2130 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2131 			reg_val[i] = qed_rd(p_hwfn,
2132 					    p_ptt, s_reset_regs_defs[i].addr);
2133 
2134 	/* Check if blocks are in reset */
2135 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2136 		struct block_defs *block = s_block_defs[i];
2137 
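		/* A block is considered in reset if it has a reset bit and
		 * that bit is currently cleared in its reset register.
		 */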
2138 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2139 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2140 	}
2141 }
2142 
2143 /* Enable / disable the Debug block */
2144 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2145 				     struct qed_ptt *p_ptt, bool enable)
2146 {
2147 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2148 }
2149 
2150 /* Resets the Debug block */
2151 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2152 				    struct qed_ptt *p_ptt)
2153 {
2154 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2155 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2156 
2157 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2158 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2159 	new_reset_reg_val =
2160 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2161 
2162 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2163 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2164 }
2165 
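/* Sets the Debug Bus framing mode */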
2166 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2167 				     struct qed_ptt *p_ptt,
2168 				     enum dbg_bus_frame_modes mode)
2169 {
2170 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2171 }
2172 
2173 /* Enable / disable Debug Bus clients according to the specified mask
2174  * (1 = enable, 0 = disable).
2175  */
2176 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2177 				   struct qed_ptt *p_ptt, u32 client_mask)
2178 {
2179 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2180 }
2181 
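/* Recursively evaluates the modes tree expression at the specified offset in
 * the modes tree buffer, and returns true if the evaluated expression matches
 * the currently enabled device modes.
 */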
2182 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2183 {
2184 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2185 	bool arg1, arg2;
2186 	const u32 *ptr;
2187 	u8 tree_val;
2188 
2189 	/* Get next element from modes tree buffer */
2190 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2191 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2192 
2193 	switch (tree_val) {
2194 	case INIT_MODE_OP_NOT:
2195 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2196 	case INIT_MODE_OP_OR:
2197 	case INIT_MODE_OP_AND:
2198 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2199 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2200 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2201 							arg2) : (arg1 && arg2);
2202 	default:
2203 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2204 	}
2205 }
2206 
2207 /* Returns true if the specified entity (indicated by GRC param) should be
2208  * included in the dump, false otherwise.
2209  */
2210 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2211 				enum dbg_grc_params grc_param)
2212 {
2213 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2214 }
2215 
2216 /* Returns true if the specified Storm should be included in the dump, false
2217  * otherwise.
2218  */
2219 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2220 				      enum dbg_storms storm)
2221 {
2222 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2223 }
2224 
2225 /* Returns true if the specified memory should be included in the dump, false
2226  * otherwise.
2227  */
2228 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2229 				    enum block_id block_id, u8 mem_group_id)
2230 {
2231 	struct block_defs *block = s_block_defs[block_id];
2232 	u8 i;
2233 
2234 	/* Check Storm match */
2235 	if (block->associated_to_storm &&
2236 	    !qed_grc_is_storm_included(p_hwfn,
2237 				       (enum dbg_storms)block->storm_id))
2238 		return false;
2239 
2240 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2241 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2242 
2243 		if (mem_group_id == big_ram->mem_group_id ||
2244 		    mem_group_id == big_ram->ram_mem_group_id)
2245 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2246 	}
2247 
2248 	switch (mem_group_id) {
2249 	case MEM_GROUP_PXP_ILT:
2250 	case MEM_GROUP_PXP_MEM:
2251 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2252 	case MEM_GROUP_RAM:
2253 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2254 	case MEM_GROUP_PBUF:
2255 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2256 	case MEM_GROUP_CAU_MEM:
2257 	case MEM_GROUP_CAU_SB:
2258 	case MEM_GROUP_CAU_PI:
2259 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2260 	case MEM_GROUP_QM_MEM:
2261 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2262 	case MEM_GROUP_CFC_MEM:
2263 	case MEM_GROUP_CONN_CFC_MEM:
2264 	case MEM_GROUP_TASK_CFC_MEM:
2265 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2266 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2267 	case MEM_GROUP_IGU_MEM:
2268 	case MEM_GROUP_IGU_MSIX:
2269 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2270 	case MEM_GROUP_MULD_MEM:
2271 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2272 	case MEM_GROUP_PRS_MEM:
2273 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2274 	case MEM_GROUP_DMAE_MEM:
2275 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2276 	case MEM_GROUP_TM_MEM:
2277 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2278 	case MEM_GROUP_SDM_MEM:
2279 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2280 	case MEM_GROUP_TDIF_CTX:
2281 	case MEM_GROUP_RDIF_CTX:
2282 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2283 	case MEM_GROUP_CM_MEM:
2284 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2285 	case MEM_GROUP_IOR:
2286 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2287 	default:
2288 		return true;
2289 	}
2290 }
2291 
2292 /* Stalls / unstalls all Storms */
2293 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2294 				 struct qed_ptt *p_ptt, bool stall)
2295 {
2296 	u32 reg_addr;
2297 	u8 storm_id;
2298 
2299 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2300 		if (!qed_grc_is_storm_included(p_hwfn,
2301 					       (enum dbg_storms)storm_id))
2302 			continue;
2303 
2304 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2305 		    SEM_FAST_REG_STALL_0_BB_K2;
2306 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2307 	}
2308 
2309 	msleep(STALL_DELAY_MS);
2310 }
2311 
2312 /* Takes all blocks out of reset */
2313 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2314 				   struct qed_ptt *p_ptt)
2315 {
2316 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2317 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2318 	u32 block_id, i;
2319 
2320 	/* Fill reset regs values */
2321 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2322 		struct block_defs *block = s_block_defs[block_id];
2323 
2324 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2325 		    block->unreset)
2326 			reg_val[block->reset_reg] |=
2327 			    BIT(block->reset_bit_offset);
2328 	}
2329 
2330 	/* Write reset registers */
2331 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2332 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2333 			continue;
2334 
2335 		reg_val[i] |=
2336 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2337 
2338 		if (reg_val[i])
2339 			qed_wr(p_hwfn,
2340 			       p_ptt,
2341 			       s_reset_regs_defs[i].addr +
2342 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2343 	}
2344 }
2345 
2346 /* Returns the attention block data of the specified block */
2347 static const struct dbg_attn_block_type_data *
2348 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2349 {
2350 	const struct dbg_attn_block *base_attn_block_arr =
2351 		(const struct dbg_attn_block *)
2352 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2353 
2354 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2355 }
2356 
2357 /* Returns the attention registers of the specified block */
2358 static const struct dbg_attn_reg *
2359 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2360 			u8 *num_attn_regs)
2361 {
2362 	const struct dbg_attn_block_type_data *block_type_data =
2363 		qed_get_block_attn_data(block_id, attn_type);
2364 
2365 	*num_attn_regs = block_type_data->num_regs;
2366 
2367 	return &((const struct dbg_attn_reg *)
2368 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2369 							  regs_offset];
2370 }
2371 
2372 /* For each block, clear the status of all parities */
2373 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2374 				   struct qed_ptt *p_ptt)
2375 {
2376 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2377 	const struct dbg_attn_reg *attn_reg_arr;
2378 	u8 reg_idx, num_attn_regs;
2379 	u32 block_id;
2380 
2381 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2382 		if (dev_data->block_in_reset[block_id])
2383 			continue;
2384 
2385 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2386 						       ATTN_TYPE_PARITY,
2387 						       &num_attn_regs);
2388 
2389 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2390 			const struct dbg_attn_reg *reg_data =
2391 				&attn_reg_arr[reg_idx];
2392 			u16 modes_buf_offset;
2393 			bool eval_mode;
2394 
2395 			/* Check mode */
2396 			eval_mode = GET_FIELD(reg_data->mode.data,
2397 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2398 			modes_buf_offset =
2399 				GET_FIELD(reg_data->mode.data,
2400 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2401 
2402 			/* If Mode match: clear parity status */
2403 			if (!eval_mode ||
2404 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2405 				qed_rd(p_hwfn, p_ptt,
2406 				       DWORDS_TO_BYTES(reg_data->
2407 						       sts_clr_address));
2408 		}
2409 	}
2410 }
2411 
2412 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2413  * The following parameters are dumped:
2414  * - count:	 no. of dumped entries
2415  * - split:	 split type
2416  * - id:	 split ID (dumped only if split_id >= 0)
2417  * - param_name: user parameter name and value (dumped only if param_name != NULL
2418  *		 and param_val != NULL).
2419  */
2420 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2421 				 bool dump,
2422 				 u32 num_reg_entries,
2423 				 const char *split_type,
2424 				 int split_id,
2425 				 const char *param_name, const char *param_val)
2426 {
2427 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2428 	u32 offset = 0;
2429 
2430 	offset += qed_dump_section_hdr(dump_buf + offset,
2431 				       dump, "grc_regs", num_params);
2432 	offset += qed_dump_num_param(dump_buf + offset,
2433 				     dump, "count", num_reg_entries);
2434 	offset += qed_dump_str_param(dump_buf + offset,
2435 				     dump, "split", split_type);
2436 	if (split_id >= 0)
2437 		offset += qed_dump_num_param(dump_buf + offset,
2438 					     dump, "id", split_id);
2439 	if (param_name && param_val)
2440 		offset += qed_dump_str_param(dump_buf + offset,
2441 					     dump, param_name, param_val);
2442 
2443 	return offset;
2444 }
2445 
2446 /* Reads the specified registers into the specified buffer.
2447  * The addr and len arguments are specified in dwords.
2448  */
2449 void qed_read_regs(struct qed_hwfn *p_hwfn,
2450 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2451 {
2452 	u32 i;
2453 
2454 	for (i = 0; i < len; i++)
2455 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2456 }
2457 
2458 /* Dumps the GRC registers in the specified address range.
2459  * Returns the dumped size in dwords.
2460  * The addr and len arguments are specified in dwords.
2461  */
2462 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2463 				   struct qed_ptt *p_ptt,
2464 				   u32 *dump_buf,
2465 				   bool dump, u32 addr, u32 len, bool wide_bus)
2466 {
2467 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2468 
2469 	if (!dump)
2470 		return len;
2471 
2472 	/* Print log if needed */
2473 	dev_data->num_regs_read += len;
2474 	if (dev_data->num_regs_read >=
2475 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2476 		DP_VERBOSE(p_hwfn,
2477 			   QED_MSG_DEBUG,
2478 			   "Dumping %d registers...\n",
2479 			   dev_data->num_regs_read);
2480 		dev_data->num_regs_read = 0;
2481 	}
2482 
2483 	/* Try reading using DMAE */
2484 	if (dev_data->use_dmae &&
2485 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2486 	     wide_bus)) {
2487 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2488 				       (u64)(uintptr_t)(dump_buf), len, 0))
2489 			return len;
2490 		dev_data->use_dmae = 0;
2491 		DP_VERBOSE(p_hwfn,
2492 			   QED_MSG_DEBUG,
2493 			   "Failed reading from chip using DMAE, using GRC instead\n");
2494 	}
2495 
2496 	/* Read registers */
2497 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2498 
2499 	return len;
2500 }
2501 
2502 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2503  * The addr and len arguments are specified in dwords.
2504  */
2505 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2506 				      bool dump, u32 addr, u32 len)
2507 {
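	/* The sequence header dword holds the GRC address (in dwords) in its
	 * low bits and the sequence length in its high bits.
	 */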
2508 	if (dump)
2509 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2510 
2511 	return 1;
2512 }
2513 
2514 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2515  * The addr and len arguments are specified in dwords.
2516  */
2517 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2518 				  struct qed_ptt *p_ptt,
2519 				  u32 *dump_buf,
2520 				  bool dump, u32 addr, u32 len, bool wide_bus)
2521 {
2522 	u32 offset = 0;
2523 
2524 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2525 	offset += qed_grc_dump_addr_range(p_hwfn,
2526 					  p_ptt,
2527 					  dump_buf + offset,
2528 					  dump, addr, len, wide_bus);
2529 
2530 	return offset;
2531 }
2532 
2533 /* Dumps GRC registers sequence with skip cycle.
2534  * Returns the dumped size in dwords.
2535  * - addr:	start GRC address in dwords
2536  * - total_len:	total no. of dwords to dump
2537  * - read_len:	no. of consecutive dwords to read
2538  * - skip_len:	no. of dwords to skip (and fill with zeros)
2539  */
2540 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2541 				       struct qed_ptt *p_ptt,
2542 				       u32 *dump_buf,
2543 				       bool dump,
2544 				       u32 addr,
2545 				       u32 total_len,
2546 				       u32 read_len, u32 skip_len)
2547 {
2548 	u32 offset = 0, reg_offset = 0;
2549 
2550 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2551 
2552 	if (!dump)
2553 		return offset + total_len;
2554 
2555 	while (reg_offset < total_len) {
2556 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2557 
2558 		offset += qed_grc_dump_addr_range(p_hwfn,
2559 						  p_ptt,
2560 						  dump_buf + offset,
2561 						  dump, addr, curr_len, false);
2562 		reg_offset += curr_len;
2563 		addr += curr_len;
2564 
2565 		if (reg_offset < total_len) {
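			/* Fill the skipped dwords with zeros */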
2566 			curr_len = min_t(u32, skip_len, total_len - skip_len);
2567 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2568 			offset += curr_len;
2569 			reg_offset += curr_len;
2570 			addr += curr_len;
2571 		}
2572 	}
2573 
2574 	return offset;
2575 }
2576 
2577 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2578 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2579 				     struct qed_ptt *p_ptt,
2580 				     struct dbg_array input_regs_arr,
2581 				     u32 *dump_buf,
2582 				     bool dump,
2583 				     bool block_enable[MAX_BLOCK_ID],
2584 				     u32 *num_dumped_reg_entries)
2585 {
2586 	u32 i, offset = 0, input_offset = 0;
2587 	bool mode_match = true;
2588 
2589 	*num_dumped_reg_entries = 0;
2590 
2591 	while (input_offset < input_regs_arr.size_in_dwords) {
2592 		const struct dbg_dump_cond_hdr *cond_hdr =
2593 		    (const struct dbg_dump_cond_hdr *)
2594 		    &input_regs_arr.ptr[input_offset++];
2595 		u16 modes_buf_offset;
2596 		bool eval_mode;
2597 
2598 		/* Check mode/block */
2599 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2600 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2601 		if (eval_mode) {
2602 			modes_buf_offset =
2603 				GET_FIELD(cond_hdr->mode.data,
2604 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2605 			mode_match = qed_is_mode_match(p_hwfn,
2606 						       &modes_buf_offset);
2607 		}
2608 
2609 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2610 			input_offset += cond_hdr->data_size;
2611 			continue;
2612 		}
2613 
2614 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2615 			const struct dbg_dump_reg *reg =
2616 			    (const struct dbg_dump_reg *)
2617 			    &input_regs_arr.ptr[input_offset];
2618 			u32 addr, len;
2619 			bool wide_bus;
2620 
2621 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2622 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2623 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2624 			offset += qed_grc_dump_reg_entry(p_hwfn,
2625 							 p_ptt,
2626 							 dump_buf + offset,
2627 							 dump,
2628 							 addr,
2629 							 len,
2630 							 wide_bus);
2631 			(*num_dumped_reg_entries)++;
2632 		}
2633 	}
2634 
2635 	return offset;
2636 }
2637 
2638 /* Dumps a single split of GRC registers. Returns the dumped size in dwords. */
2639 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2640 				   struct qed_ptt *p_ptt,
2641 				   struct dbg_array input_regs_arr,
2642 				   u32 *dump_buf,
2643 				   bool dump,
2644 				   bool block_enable[MAX_BLOCK_ID],
2645 				   const char *split_type_name,
2646 				   u32 split_id,
2647 				   const char *param_name,
2648 				   const char *param_val)
2649 {
2650 	u32 num_dumped_reg_entries, offset;
2651 
2652 	/* Calculate register dump header size (and skip it for now) */
2653 	offset = qed_grc_dump_regs_hdr(dump_buf,
2654 				       false,
2655 				       0,
2656 				       split_type_name,
2657 				       split_id, param_name, param_val);
2658 
2659 	/* Dump registers */
2660 	offset += qed_grc_dump_regs_entries(p_hwfn,
2661 					    p_ptt,
2662 					    input_regs_arr,
2663 					    dump_buf + offset,
2664 					    dump,
2665 					    block_enable,
2666 					    &num_dumped_reg_entries);
2667 
2668 	/* Write register dump header */
2669 	if (dump && num_dumped_reg_entries > 0)
2670 		qed_grc_dump_regs_hdr(dump_buf,
2671 				      dump,
2672 				      num_dumped_reg_entries,
2673 				      split_type_name,
2674 				      split_id, param_name, param_val);
2675 
2676 	return num_dumped_reg_entries > 0 ? offset : 0;
2677 }
2678 
2679 /* Dumps registers according to the input registers array. Returns the dumped
2680  * size in dwords.
2681  */
2682 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2683 				  struct qed_ptt *p_ptt,
2684 				  u32 *dump_buf,
2685 				  bool dump,
2686 				  bool block_enable[MAX_BLOCK_ID],
2687 				  const char *param_name, const char *param_val)
2688 {
2689 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2690 	struct chip_platform_defs *chip_platform;
2691 	u32 offset = 0, input_offset = 0;
2692 	struct chip_defs *chip;
2693 	u8 port_id, pf_id, vf_id;
2694 	u16 fid;
2695 
2696 	chip = &s_chip_defs[dev_data->chip_id];
2697 	chip_platform = &chip->per_platform[dev_data->platform_id];
2698 
2699 	while (input_offset <
2700 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2701 		const struct dbg_dump_split_hdr *split_hdr;
2702 		struct dbg_array curr_input_regs_arr;
2703 		u32 split_data_size;
2704 		u8 split_type_id;
2705 
2706 		split_hdr =
2707 			(const struct dbg_dump_split_hdr *)
2708 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2709 		split_type_id =
2710 			GET_FIELD(split_hdr->hdr,
2711 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2712 		split_data_size =
2713 			GET_FIELD(split_hdr->hdr,
2714 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2715 		curr_input_regs_arr.ptr =
2716 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2717 		curr_input_regs_arr.size_in_dwords = split_data_size;
2718 
2719 		switch (split_type_id) {
2720 		case SPLIT_TYPE_NONE:
2721 			offset += qed_grc_dump_split_data(p_hwfn,
2722 							  p_ptt,
2723 							  curr_input_regs_arr,
2724 							  dump_buf + offset,
2725 							  dump,
2726 							  block_enable,
2727 							  "eng",
2728 							  (u32)(-1),
2729 							  param_name,
2730 							  param_val);
2731 			break;
2732 
2733 		case SPLIT_TYPE_PORT:
2734 			for (port_id = 0; port_id < chip_platform->num_ports;
2735 			     port_id++) {
2736 				if (dump)
2737 					qed_port_pretend(p_hwfn, p_ptt,
2738 							 port_id);
2739 				offset +=
2740 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2741 							    curr_input_regs_arr,
2742 							    dump_buf + offset,
2743 							    dump, block_enable,
2744 							    "port", port_id,
2745 							    param_name,
2746 							    param_val);
2747 			}
2748 			break;
2749 
2750 		case SPLIT_TYPE_PF:
2751 		case SPLIT_TYPE_PORT_PF:
2752 			for (pf_id = 0; pf_id < chip_platform->num_pfs;
2753 			     pf_id++) {
2754 				u8 pfid_shift =
2755 					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2756 
2757 				if (dump) {
2758 					fid = pf_id << pfid_shift;
2759 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2760 				}
2761 
2762 				offset +=
2763 				    qed_grc_dump_split_data(p_hwfn,
2764 							    p_ptt,
2765 							    curr_input_regs_arr,
2766 							    dump_buf + offset,
2767 							    dump,
2768 							    block_enable,
2769 							    "pf",
2770 							    pf_id,
2771 							    param_name,
2772 							    param_val);
2773 			}
2774 			break;
2775 
2776 		case SPLIT_TYPE_VF:
2777 			for (vf_id = 0; vf_id < chip_platform->num_vfs;
2778 			     vf_id++) {
2779 				u8 vfvalid_shift =
2780 					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2781 				u8 vfid_shift =
2782 					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2783 
2784 				if (dump) {
2785 					fid = BIT(vfvalid_shift) |
2786 					      (vf_id << vfid_shift);
2787 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2788 				}
2789 
2790 				offset +=
2791 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2792 							    curr_input_regs_arr,
2793 							    dump_buf + offset,
2794 							    dump, block_enable,
2795 							    "vf", vf_id,
2796 							    param_name,
2797 							    param_val);
2798 			}
2799 			break;
2800 
2801 		default:
2802 			break;
2803 		}
2804 
2805 		input_offset += split_data_size;
2806 	}
2807 
2808 	/* Pretend to original PF */
2809 	if (dump) {
2810 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2811 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2812 	}
2813 
2814 	return offset;
2815 }
2816 
2817 /* Dump reset registers. Returns the dumped size in dwords. */
2818 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2819 				   struct qed_ptt *p_ptt,
2820 				   u32 *dump_buf, bool dump)
2821 {
2822 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2823 	u32 i, offset = 0, num_regs = 0;
2824 
2825 	/* Calculate header size */
2826 	offset += qed_grc_dump_regs_hdr(dump_buf,
2827 					false, 0, "eng", -1, NULL, NULL);
2828 
2829 	/* Write reset registers */
2830 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2831 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2832 			continue;
2833 
2834 		offset += qed_grc_dump_reg_entry(p_hwfn,
2835 						 p_ptt,
2836 						 dump_buf + offset,
2837 						 dump,
2838 						 BYTES_TO_DWORDS
2839 						 (s_reset_regs_defs[i].addr), 1,
2840 						 false);
2841 		num_regs++;
2842 	}
2843 
2844 	/* Write header */
2845 	if (dump)
2846 		qed_grc_dump_regs_hdr(dump_buf,
2847 				      true, num_regs, "eng", -1, NULL, NULL);
2848 
2849 	return offset;
2850 }
2851 
2852 /* Dump registers that are modified during GRC Dump and therefore must be
2853  * dumped first. Returns the dumped size in dwords.
2854  */
2855 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2856 				      struct qed_ptt *p_ptt,
2857 				      u32 *dump_buf, bool dump)
2858 {
2859 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2860 	u32 block_id, offset = 0, num_reg_entries = 0;
2861 	const struct dbg_attn_reg *attn_reg_arr;
2862 	u8 storm_id, reg_idx, num_attn_regs;
2863 
2864 	/* Calculate header size */
2865 	offset += qed_grc_dump_regs_hdr(dump_buf,
2866 					false, 0, "eng", -1, NULL, NULL);
2867 
2868 	/* Write parity registers */
2869 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2870 		if (dev_data->block_in_reset[block_id] && dump)
2871 			continue;
2872 
2873 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2874 						       ATTN_TYPE_PARITY,
2875 						       &num_attn_regs);
2876 
2877 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2878 			const struct dbg_attn_reg *reg_data =
2879 				&attn_reg_arr[reg_idx];
2880 			u16 modes_buf_offset;
2881 			bool eval_mode;
2882 			u32 addr;
2883 
2884 			/* Check mode */
2885 			eval_mode = GET_FIELD(reg_data->mode.data,
2886 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2887 			modes_buf_offset =
2888 				GET_FIELD(reg_data->mode.data,
2889 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2890 			if (eval_mode &&
2891 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2892 				continue;
2893 
2894 			/* Mode match: read & dump registers */
2895 			addr = reg_data->mask_address;
2896 			offset += qed_grc_dump_reg_entry(p_hwfn,
2897 							 p_ptt,
2898 							 dump_buf + offset,
2899 							 dump,
2900 							 addr,
2901 							 1, false);
2902 			addr = GET_FIELD(reg_data->data,
2903 					 DBG_ATTN_REG_STS_ADDRESS);
2904 			offset += qed_grc_dump_reg_entry(p_hwfn,
2905 							 p_ptt,
2906 							 dump_buf + offset,
2907 							 dump,
2908 							 addr,
2909 							 1, false);
2910 			num_reg_entries += 2;
2911 		}
2912 	}
2913 
2914 	/* Write Storm stall status registers */
2915 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2916 		struct storm_defs *storm = &s_storm_defs[storm_id];
2917 		u32 addr;
2918 
2919 		if (dev_data->block_in_reset[storm->block_id] && dump)
2920 			continue;
2921 
2922 		addr =
2923 		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2924 				    SEM_FAST_REG_STALLED);
2925 		offset += qed_grc_dump_reg_entry(p_hwfn,
2926 						 p_ptt,
2927 						 dump_buf + offset,
2928 						 dump,
2929 						 addr,
2930 						 1,
2931 						 false);
2932 		num_reg_entries++;
2933 	}
2934 
2935 	/* Write header */
2936 	if (dump)
2937 		qed_grc_dump_regs_hdr(dump_buf,
2938 				      true,
2939 				      num_reg_entries, "eng", -1, NULL, NULL);
2940 
2941 	return offset;
2942 }
2943 
2944 /* Dumps registers that can't be represented in the debug arrays */
2945 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2946 				     struct qed_ptt *p_ptt,
2947 				     u32 *dump_buf, bool dump)
2948 {
2949 	u32 offset = 0, addr;
2950 
2951 	offset += qed_grc_dump_regs_hdr(dump_buf,
2952 					dump, 2, "eng", -1, NULL, NULL);
2953 
2954 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
2955 	 * skipped).
2956 	 */
2957 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2958 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2959 					      p_ptt,
2960 					      dump_buf + offset,
2961 					      dump,
2962 					      addr,
2963 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2964 					      7,
2965 					      1);
2966 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2967 	offset +=
2968 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2969 					p_ptt,
2970 					dump_buf + offset,
2971 					dump,
2972 					addr,
2973 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2974 					7,
2975 					1);
2976 
2977 	return offset;
2978 }
2979 
2980 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2981  * dwords. The following parameters are dumped:
2982  * - name:	   dumped only if it's not NULL.
2983  * - addr:	   in dwords, dumped only if name is NULL.
2984  * - len:	   in dwords, always dumped.
2985  * - width:	   dumped if it's not zero.
2986  * - packed:	   dumped only if it's not false.
2987  * - mem_group:	   always dumped.
2988  * - is_storm:	   true only if the memory is related to a Storm.
2989  * - storm_letter: valid only if is_storm is true.
2990  *
2991  */
2992 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2993 				u32 *dump_buf,
2994 				bool dump,
2995 				const char *name,
2996 				u32 addr,
2997 				u32 len,
2998 				u32 bit_width,
2999 				bool packed,
3000 				const char *mem_group,
3001 				bool is_storm, char storm_letter)
3002 {
3003 	u8 num_params = 3;
3004 	u32 offset = 0;
3005 	char buf[64];
3006 
3007 	if (!len)
3008 		DP_NOTICE(p_hwfn,
3009 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3010 
3011 	if (bit_width)
3012 		num_params++;
3013 	if (packed)
3014 		num_params++;
3015 
3016 	/* Dump section header */
3017 	offset += qed_dump_section_hdr(dump_buf + offset,
3018 				       dump, "grc_mem", num_params);
3019 
3020 	if (name) {
3021 		/* Dump name */
3022 		if (is_storm) {
3023 			strcpy(buf, "?STORM_");
3024 			buf[0] = storm_letter;
3025 			strcpy(buf + strlen(buf), name);
3026 		} else {
3027 			strcpy(buf, name);
3028 		}
3029 
3030 		offset += qed_dump_str_param(dump_buf + offset,
3031 					     dump, "name", buf);
3032 	} else {
3033 		/* Dump address */
3034 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3035 
3036 		offset += qed_dump_num_param(dump_buf + offset,
3037 					     dump, "addr", addr_in_bytes);
3038 	}
3039 
3040 	/* Dump len */
3041 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3042 
3043 	/* Dump bit width */
3044 	if (bit_width)
3045 		offset += qed_dump_num_param(dump_buf + offset,
3046 					     dump, "width", bit_width);
3047 
3048 	/* Dump packed */
3049 	if (packed)
3050 		offset += qed_dump_num_param(dump_buf + offset,
3051 					     dump, "packed", 1);
3052 
3053 	/* Dump reg type */
3054 	if (is_storm) {
3055 		strcpy(buf, "?STORM_");
3056 		buf[0] = storm_letter;
3057 		strcpy(buf + strlen(buf), mem_group);
3058 	} else {
3059 		strcpy(buf, mem_group);
3060 	}
3061 
3062 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3063 
3064 	return offset;
3065 }
3066 
3067 /* Dumps a single GRC memory. If name is NULL, the memory is dumped by address.
3068  * Returns the dumped size in dwords.
3069  * The addr and len arguments are specified in dwords.
3070  */
3071 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3072 			    struct qed_ptt *p_ptt,
3073 			    u32 *dump_buf,
3074 			    bool dump,
3075 			    const char *name,
3076 			    u32 addr,
3077 			    u32 len,
3078 			    bool wide_bus,
3079 			    u32 bit_width,
3080 			    bool packed,
3081 			    const char *mem_group,
3082 			    bool is_storm, char storm_letter)
3083 {
3084 	u32 offset = 0;
3085 
3086 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3087 				       dump_buf + offset,
3088 				       dump,
3089 				       name,
3090 				       addr,
3091 				       len,
3092 				       bit_width,
3093 				       packed,
3094 				       mem_group, is_storm, storm_letter);
3095 	offset += qed_grc_dump_addr_range(p_hwfn,
3096 					  p_ptt,
3097 					  dump_buf + offset,
3098 					  dump, addr, len, wide_bus);
3099 
3100 	return offset;
3101 }
3102 
3103 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3104 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3105 				    struct qed_ptt *p_ptt,
3106 				    struct dbg_array input_mems_arr,
3107 				    u32 *dump_buf, bool dump)
3108 {
3109 	u32 i, offset = 0, input_offset = 0;
3110 	bool mode_match = true;
3111 
3112 	while (input_offset < input_mems_arr.size_in_dwords) {
3113 		const struct dbg_dump_cond_hdr *cond_hdr;
3114 		u16 modes_buf_offset;
3115 		u32 num_entries;
3116 		bool eval_mode;
3117 
3118 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3119 			   &input_mems_arr.ptr[input_offset++];
3120 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3121 
3122 		/* Check required mode */
3123 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3124 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3125 		if (eval_mode) {
3126 			modes_buf_offset =
3127 				GET_FIELD(cond_hdr->mode.data,
3128 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3129 			mode_match = qed_is_mode_match(p_hwfn,
3130 						       &modes_buf_offset);
3131 		}
3132 
3133 		if (!mode_match) {
3134 			input_offset += cond_hdr->data_size;
3135 			continue;
3136 		}
3137 
3138 		for (i = 0; i < num_entries;
3139 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3140 			const struct dbg_dump_mem *mem =
3141 				(const struct dbg_dump_mem *)
3142 				&input_mems_arr.ptr[input_offset];
3143 			u8 mem_group_id = GET_FIELD(mem->dword0,
3144 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3145 			bool is_storm = false, mem_wide_bus;
3146 			enum dbg_grc_params grc_param;
3147 			char storm_letter = 'a';
3148 			enum block_id block_id;
3149 			u32 mem_addr, mem_len;
3150 
3151 			if (mem_group_id >= MEM_GROUPS_NUM) {
3152 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3153 				return 0;
3154 			}
3155 
3156 			block_id = (enum block_id)cond_hdr->block_id;
3157 			if (!qed_grc_is_mem_included(p_hwfn,
3158 						     block_id,
3159 						     mem_group_id))
3160 				continue;
3161 
3162 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3163 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3164 			mem_wide_bus = GET_FIELD(mem->dword1,
3165 						 DBG_DUMP_MEM_WIDE_BUS);
3166 
3167 			/* Update memory length for CCFC/TCFC memories
3168 			 * according to number of LCIDs/LTIDs.
3169 			 */
3170 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3171 				if (mem_len % MAX_LCIDS) {
3172 					DP_NOTICE(p_hwfn,
3173 						  "Invalid CCFC connection memory size\n");
3174 					return 0;
3175 				}
3176 
3177 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3178 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3179 					  (mem_len / MAX_LCIDS);
3180 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3181 				if (mem_len % MAX_LTIDS) {
3182 					DP_NOTICE(p_hwfn,
3183 						  "Invalid TCFC task memory size\n");
3184 					return 0;
3185 				}
3186 
3187 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3188 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3189 					  (mem_len / MAX_LTIDS);
3190 			}
3191 
3192 			/* If memory is associated with Storm, update Storm
3193 			 * details.
3194 			 */
3195 			if (s_block_defs
3196 			    [cond_hdr->block_id]->associated_to_storm) {
3197 				is_storm = true;
3198 				storm_letter =
3199 				    s_storm_defs[s_block_defs
3200 						 [cond_hdr->block_id]->
3201 						 storm_id].letter;
3202 			}
3203 
3204 			/* Dump memory */
3205 			offset += qed_grc_dump_mem(p_hwfn,
3206 						p_ptt,
3207 						dump_buf + offset,
3208 						dump,
3209 						NULL,
3210 						mem_addr,
3211 						mem_len,
3212 						mem_wide_bus,
3213 						0,
3214 						false,
3215 						s_mem_group_names[mem_group_id],
3216 						is_storm,
3217 						storm_letter);
3218 		}
3219 	}
3220 
3221 	return offset;
3222 }
3223 
3224 /* Dumps GRC memories according to the input array dump_mem.
3225  * Returns the dumped size in dwords.
3226  */
3227 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3228 				 struct qed_ptt *p_ptt,
3229 				 u32 *dump_buf, bool dump)
3230 {
3231 	u32 offset = 0, input_offset = 0;
3232 
3233 	while (input_offset <
3234 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3235 		const struct dbg_dump_split_hdr *split_hdr;
3236 		struct dbg_array curr_input_mems_arr;
3237 		u32 split_data_size;
3238 		u8 split_type_id;
3239 
3240 		split_hdr = (const struct dbg_dump_split_hdr *)
3241 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3242 		split_type_id =
3243 			GET_FIELD(split_hdr->hdr,
3244 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3245 		split_data_size =
3246 			GET_FIELD(split_hdr->hdr,
3247 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3248 		curr_input_mems_arr.ptr =
3249 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3250 		curr_input_mems_arr.size_in_dwords = split_data_size;
3251 
3252 		switch (split_type_id) {
3253 		case SPLIT_TYPE_NONE:
3254 			offset += qed_grc_dump_mem_entries(p_hwfn,
3255 							   p_ptt,
3256 							   curr_input_mems_arr,
3257 							   dump_buf + offset,
3258 							   dump);
3259 			break;
3260 
3261 		default:
3262 			DP_NOTICE(p_hwfn,
3263 				  "Dumping split memories is currently not supported\n");
3264 			break;
3265 		}
3266 
3267 		input_offset += split_data_size;
3268 	}
3269 
3270 	return offset;
3271 }
3272 
3273 /* Dumps GRC context data for the specified Storm.
3274  * Returns the dumped size in dwords.
3275  * The lid_size argument is specified in quad-regs.
3276  */
3277 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3278 				 struct qed_ptt *p_ptt,
3279 				 u32 *dump_buf,
3280 				 bool dump,
3281 				 const char *name,
3282 				 u32 num_lids,
3283 				 u32 lid_size,
3284 				 u32 rd_reg_addr,
3285 				 u8 storm_id)
3286 {
3287 	struct storm_defs *storm = &s_storm_defs[storm_id];
3288 	u32 i, lid, total_size, offset = 0;
3289 
3290 	if (!lid_size)
3291 		return 0;
3292 
3293 	lid_size *= BYTES_IN_DWORD;
3294 	total_size = num_lids * lid_size;
3295 
3296 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3297 				       dump_buf + offset,
3298 				       dump,
3299 				       name,
3300 				       0,
3301 				       total_size,
3302 				       lid_size * 32,
3303 				       false, name, true, storm->letter);
3304 
3305 	if (!dump)
3306 		return offset + total_size;
3307 
3308 	/* Dump context data */
3309 	for (lid = 0; lid < num_lids; lid++) {
3310 		for (i = 0; i < lid_size; i++, offset++) {
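			/* Select the current lid and dword index, then read
			 * back a single context dword.
			 */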
3311 			qed_wr(p_hwfn,
3312 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3313 			*(dump_buf + offset) = qed_rd(p_hwfn,
3314 						      p_ptt, rd_reg_addr);
3315 		}
3316 	}
3317 
3318 	return offset;
3319 }
3320 
3321 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3322 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3323 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3324 {
3325 	enum dbg_grc_params grc_param;
3326 	u32 offset = 0;
3327 	u8 storm_id;
3328 
3329 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3330 		struct storm_defs *storm = &s_storm_defs[storm_id];
3331 
3332 		if (!qed_grc_is_storm_included(p_hwfn,
3333 					       (enum dbg_storms)storm_id))
3334 			continue;
3335 
3336 		/* Dump Conn AG context */
3337 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3338 		offset +=
3339 			qed_grc_dump_ctx_data(p_hwfn,
3340 					      p_ptt,
3341 					      dump_buf + offset,
3342 					      dump,
3343 					      "CONN_AG_CTX",
3344 					      qed_grc_get_param(p_hwfn,
3345 								grc_param),
3346 					      storm->cm_conn_ag_ctx_lid_size,
3347 					      storm->cm_conn_ag_ctx_rd_addr,
3348 					      storm_id);
3349 
3350 		/* Dump Conn ST context */
3351 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3352 		offset +=
3353 			qed_grc_dump_ctx_data(p_hwfn,
3354 					      p_ptt,
3355 					      dump_buf + offset,
3356 					      dump,
3357 					      "CONN_ST_CTX",
3358 					      qed_grc_get_param(p_hwfn,
3359 								grc_param),
3360 					      storm->cm_conn_st_ctx_lid_size,
3361 					      storm->cm_conn_st_ctx_rd_addr,
3362 					      storm_id);
3363 
3364 		/* Dump Task AG context */
3365 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3366 		offset +=
3367 			qed_grc_dump_ctx_data(p_hwfn,
3368 					      p_ptt,
3369 					      dump_buf + offset,
3370 					      dump,
3371 					      "TASK_AG_CTX",
3372 					      qed_grc_get_param(p_hwfn,
3373 								grc_param),
3374 					      storm->cm_task_ag_ctx_lid_size,
3375 					      storm->cm_task_ag_ctx_rd_addr,
3376 					      storm_id);
3377 
3378 		/* Dump Task ST context */
3379 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3380 		offset +=
3381 			qed_grc_dump_ctx_data(p_hwfn,
3382 					      p_ptt,
3383 					      dump_buf + offset,
3384 					      dump,
3385 					      "TASK_ST_CTX",
3386 					      qed_grc_get_param(p_hwfn,
3387 								grc_param),
3388 					      storm->cm_task_st_ctx_lid_size,
3389 					      storm->cm_task_st_ctx_rd_addr,
3390 					      storm_id);
3391 	}
3392 
3393 	return offset;
3394 }
3395 
3396 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3397 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3398 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3399 {
3400 	char buf[10] = "IOR_SET_?";
3401 	u32 addr, offset = 0;
3402 	u8 storm_id, set_id;
3403 
3404 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3405 		struct storm_defs *storm = &s_storm_defs[storm_id];
3406 
3407 		if (!qed_grc_is_storm_included(p_hwfn,
3408 					       (enum dbg_storms)storm_id))
3409 			continue;
3410 
3411 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3412 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3413 					       SEM_FAST_REG_STORM_REG_FILE) +
3414 			       IOR_SET_OFFSET(set_id);
3415 			buf[strlen(buf) - 1] = '0' + set_id;
3416 			offset += qed_grc_dump_mem(p_hwfn,
3417 						   p_ptt,
3418 						   dump_buf + offset,
3419 						   dump,
3420 						   buf,
3421 						   addr,
3422 						   IORS_PER_SET,
3423 						   false,
3424 						   32,
3425 						   false,
3426 						   "ior",
3427 						   true,
3428 						   storm->letter);
3429 		}
3430 	}
3431 
3432 	return offset;
3433 }
3434 
3435 /* Dump VFC CAM. Returns the dumped size in dwords. */
3436 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3437 				struct qed_ptt *p_ptt,
3438 				u32 *dump_buf, bool dump, u8 storm_id)
3439 {
3440 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3441 	struct storm_defs *storm = &s_storm_defs[storm_id];
3442 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3443 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3444 	u32 row, i, offset = 0;
3445 
3446 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3447 				       dump_buf + offset,
3448 				       dump,
3449 				       "vfc_cam",
3450 				       0,
3451 				       total_size,
3452 				       256,
3453 				       false, "vfc_cam", true, storm->letter);
3454 
3455 	if (!dump)
3456 		return offset + total_size;
3457 
3458 	/* Prepare CAM address */
3459 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3460 
3461 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3462 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3463 		/* Write VFC CAM command */
3464 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3465 		ARR_REG_WR(p_hwfn,
3466 			   p_ptt,
3467 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3468 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3469 
3470 		/* Write VFC CAM address */
3471 		ARR_REG_WR(p_hwfn,
3472 			   p_ptt,
3473 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3474 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3475 
3476 		/* Read VFC CAM read response */
3477 		ARR_REG_RD(p_hwfn,
3478 			   p_ptt,
3479 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3480 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3481 	}
3482 
3483 	return offset;
3484 }
3485 
3486 /* Dump VFC RAM. Returns the dumped size in dwords. */
3487 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3488 				struct qed_ptt *p_ptt,
3489 				u32 *dump_buf,
3490 				bool dump,
3491 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3492 {
3493 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3494 	struct storm_defs *storm = &s_storm_defs[storm_id];
3495 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3496 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3497 	u32 row, i, offset = 0;
3498 
3499 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3500 				       dump_buf + offset,
3501 				       dump,
3502 				       ram_defs->mem_name,
3503 				       0,
3504 				       total_size,
3505 				       256,
3506 				       false,
3507 				       ram_defs->type_name,
3508 				       true, storm->letter);
3509 
3510 	/* Prepare RAM address */
3511 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3512 
3513 	if (!dump)
3514 		return offset + total_size;
3515 
3516 	for (row = ram_defs->base_row;
3517 	     row < ram_defs->base_row + ram_defs->num_rows;
3518 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3519 		/* Write VFC RAM command */
3520 		ARR_REG_WR(p_hwfn,
3521 			   p_ptt,
3522 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3523 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3524 
3525 		/* Write VFC RAM address */
3526 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3527 		ARR_REG_WR(p_hwfn,
3528 			   p_ptt,
3529 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3530 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3531 
3532 		/* Read VFC RAM read response */
3533 		ARR_REG_RD(p_hwfn,
3534 			   p_ptt,
3535 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3536 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3537 	}
3538 
3539 	return offset;
3540 }
3541 
3542 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3543 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3544 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3545 {
3546 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3547 	u8 storm_id, i;
3548 	u32 offset = 0;
3549 
3550 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3551 		if (!qed_grc_is_storm_included(p_hwfn,
3552 					       (enum dbg_storms)storm_id) ||
3553 		    !s_storm_defs[storm_id].has_vfc ||
3554 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3555 		     PLATFORM_ASIC))
3556 			continue;
3557 
3558 		/* Read CAM */
3559 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3560 					       p_ptt,
3561 					       dump_buf + offset,
3562 					       dump, storm_id);
3563 
3564 		/* Read RAM */
3565 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3566 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3567 						       p_ptt,
3568 						       dump_buf + offset,
3569 						       dump,
3570 						       storm_id,
3571 						       &s_vfc_ram_defs[i]);
3572 	}
3573 
3574 	return offset;
3575 }
3576 
3577 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3578 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3579 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3580 {
3581 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3582 	u32 offset = 0;
3583 	u8 rss_mem_id;
3584 
3585 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3586 		u32 rss_addr, num_entries, total_dwords;
3587 		struct rss_mem_defs *rss_defs;
3588 		u32 addr, num_dwords_to_read;
3589 		bool packed;
3590 
3591 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3592 		rss_addr = rss_defs->addr;
3593 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3594 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3595 		packed = (rss_defs->entry_width == 16);
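		/* entry_width is in bits, so the dump size in dwords is
		 * (num_entries * entry_width) / 32. 16-bit entries are marked
		 * as packed (two entries per dword).
		 */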
3596 
3597 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3598 					       dump_buf + offset,
3599 					       dump,
3600 					       rss_defs->mem_name,
3601 					       0,
3602 					       total_dwords,
3603 					       rss_defs->entry_width,
3604 					       packed,
3605 					       rss_defs->type_name, false, 0);
3606 
3607 		/* Dump RSS data */
3608 		if (!dump) {
3609 			offset += total_dwords;
3610 			continue;
3611 		}
3612 
3613 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
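		/* The RSS memory is read indirectly: write the RAM line index
		 * to RSS_REG_RSS_RAM_ADDR, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords from the data window,
		 * advancing the line index until all dwords are read.
		 */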
3614 		while (total_dwords) {
3615 			num_dwords_to_read = min_t(u32,
3616 						   RSS_REG_RSS_RAM_DATA_SIZE,
3617 						   total_dwords);
3618 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3619 			offset += qed_grc_dump_addr_range(p_hwfn,
3620 							  p_ptt,
3621 							  dump_buf + offset,
3622 							  dump,
3623 							  addr,
3624 							  num_dwords_to_read,
3625 							  false);
3626 			total_dwords -= num_dwords_to_read;
3627 			rss_addr++;
3628 		}
3629 	}
3630 
3631 	return offset;
3632 }
3633 
3634 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3635 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3636 				struct qed_ptt *p_ptt,
3637 				u32 *dump_buf, bool dump, u8 big_ram_id)
3638 {
3639 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3640 	u32 block_size, ram_size, offset = 0, reg_val, i;
3641 	char mem_name[12] = "???_BIG_RAM";
3642 	char type_name[8] = "???_RAM";
3643 	struct big_ram_defs *big_ram;
3644 
3645 	big_ram = &s_big_ram_defs[big_ram_id];
3646 	ram_size = big_ram->ram_size[dev_data->chip_id];
3647 
3648 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3649 	block_size = reg_val &
3650 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3651 									 : 128;
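	/* The is_256b bit selects between 256-byte and 128-byte Big RAM
	 * blocks; block_size is converted to bits (block_size * 8) when
	 * reported in the memory header below.
	 */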
3652 
3653 	strscpy(type_name, big_ram->instance_name, sizeof(type_name));
3654 	strscpy(mem_name, big_ram->instance_name, sizeof(mem_name));
3655 
3656 	/* Dump memory header */
3657 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3658 				       dump_buf + offset,
3659 				       dump,
3660 				       mem_name,
3661 				       0,
3662 				       ram_size,
3663 				       block_size * 8,
3664 				       false, type_name, false, 0);
3665 
3666 	/* Read and dump Big RAM data */
3667 	if (!dump)
3668 		return offset + ram_size;
3669 
3670 	/* Dump Big RAM */
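	/* Big RAM is read indirectly: write the block index to addr_reg_addr,
	 * then read BRB_REG_BIG_RAM_DATA_SIZE dwords from data_reg_addr.
	 */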
3671 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3672 	     i++) {
3673 		u32 addr, len;
3674 
3675 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3676 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3677 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3678 		offset += qed_grc_dump_addr_range(p_hwfn,
3679 						  p_ptt,
3680 						  dump_buf + offset,
3681 						  dump,
3682 						  addr,
3683 						  len,
3684 						  false);
3685 	}
3686 
3687 	return offset;
3688 }
3689 
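/* Dumps GRC MCP data. Returns the dumped size in dwords. */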
3690 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3691 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3692 {
3693 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3694 	u32 offset = 0, addr;
3695 	bool halted = false;
3696 
3697 	/* Halt MCP */
3698 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3699 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3700 		if (!halted)
3701 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3702 	}
3703 
3704 	/* Dump MCP scratchpad */
3705 	offset += qed_grc_dump_mem(p_hwfn,
3706 				   p_ptt,
3707 				   dump_buf + offset,
3708 				   dump,
3709 				   NULL,
3710 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3711 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3712 				   false, 0, false, "MCP", false, 0);
3713 
3714 	/* Dump MCP cpu_reg_file */
3715 	offset += qed_grc_dump_mem(p_hwfn,
3716 				   p_ptt,
3717 				   dump_buf + offset,
3718 				   dump,
3719 				   NULL,
3720 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3721 				   MCP_REG_CPU_REG_FILE_SIZE,
3722 				   false, 0, false, "MCP", false, 0);
3723 
3724 	/* Dump MCP registers */
3725 	block_enable[BLOCK_MCP] = true;
3726 	offset += qed_grc_dump_registers(p_hwfn,
3727 					 p_ptt,
3728 					 dump_buf + offset,
3729 					 dump, block_enable, "block", "MCP");
3730 
3731 	/* Dump required non-MCP registers */
3732 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3733 					dump, 1, "eng", -1, "block", "MCP");
3734 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3735 	offset += qed_grc_dump_reg_entry(p_hwfn,
3736 					 p_ptt,
3737 					 dump_buf + offset,
3738 					 dump,
3739 					 addr,
3740 					 1,
3741 					 false);
3742 
3743 	/* Release MCP */
3744 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3745 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3746 
3747 	return offset;
3748 }
3749 
3750 /* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
3751 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3752 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3753 {
3754 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3755 	char mem_name[32];
3756 	u8 phy_id;
3757 
3758 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3759 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3760 		struct phy_defs *phy_defs;
3761 		u8 *bytes_buf;
3762 
3763 		phy_defs = &s_phy_defs[phy_id];
3764 		addr_lo_addr = phy_defs->base_addr +
3765 			       phy_defs->tbus_addr_lo_addr;
3766 		addr_hi_addr = phy_defs->base_addr +
3767 			       phy_defs->tbus_addr_hi_addr;
3768 		data_lo_addr = phy_defs->base_addr +
3769 			       phy_defs->tbus_data_lo_addr;
3770 		data_hi_addr = phy_defs->base_addr +
3771 			       phy_defs->tbus_data_hi_addr;
3772 
3773 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3774 			     phy_defs->phy_name) < 0)
3775 			DP_NOTICE(p_hwfn,
3776 				  "Unexpected debug error: invalid PHY memory name\n");
3777 
3778 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3779 					       dump_buf + offset,
3780 					       dump,
3781 					       mem_name,
3782 					       0,
3783 					       PHY_DUMP_SIZE_DWORDS,
3784 					       16, true, mem_name, false, 0);
3785 
3786 		if (!dump) {
3787 			offset += PHY_DUMP_SIZE_DWORDS;
3788 			continue;
3789 		}
3790 
3791 		bytes_buf = (u8 *)(dump_buf + offset);
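		/* Each tbus address (hi:lo) yields two data bytes (lo, then
		 * hi), so the dump covers 2 * NUM_PHY_TBUS_ADDRESSES bytes
		 * (assuming NUM_PHY_TBUS_ADDRESSES is a multiple of 256),
		 * written byte by byte into the dword buffer.
		 */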
3792 		for (tbus_hi_offset = 0;
3793 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3794 		     tbus_hi_offset++) {
3795 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3796 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3797 			     tbus_lo_offset++) {
3798 				qed_wr(p_hwfn,
3799 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3800 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3801 							    p_ptt,
3802 							    data_lo_addr);
3803 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3804 							    p_ptt,
3805 							    data_hi_addr);
3806 			}
3807 		}
3808 
3809 		offset += PHY_DUMP_SIZE_DWORDS;
3810 	}
3811 
3812 	return offset;
3813 }
3814 
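/* Configures a debug line for the specified block: selects the line and
 * programs the enable, right-shift, force-valid and force-frame masks.
 */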
3815 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3816 				struct qed_ptt *p_ptt,
3817 				enum block_id block_id,
3818 				u8 line_id,
3819 				u8 enable_mask,
3820 				u8 right_shift,
3821 				u8 force_valid_mask, u8 force_frame_mask)
3822 {
3823 	struct block_defs *block = s_block_defs[block_id];
3824 
3825 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3826 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3827 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3828 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3829 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3830 }
3831 
3832 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3833 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3834 				     struct qed_ptt *p_ptt,
3835 				     u32 *dump_buf, bool dump)
3836 {
3837 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3838 	u32 block_id, line_id, offset = 0;
3839 
3840 	/* Don't dump static debug if a debug bus recording is in progress */
3841 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3842 		return 0;
3843 
3844 	if (dump) {
3845 		/* Disable all blocks debug output */
3846 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3847 			struct block_defs *block = s_block_defs[block_id];
3848 
3849 			if (block->dbg_client_id[dev_data->chip_id] !=
3850 			    MAX_DBG_BUS_CLIENTS)
3851 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3852 				       0);
3853 		}
3854 
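		/* Set the debug block to record into its internal buffer:
		 * reset it, select 8HW/0ST framing, target the internal
		 * buffer, enable full mode and enable the block.
		 */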
3855 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3856 		qed_bus_set_framing_mode(p_hwfn,
3857 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3858 		qed_wr(p_hwfn,
3859 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3860 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3861 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3862 	}
3863 
3864 	/* Dump all static debug lines for each relevant block */
3865 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3866 		struct block_defs *block = s_block_defs[block_id];
3867 		struct dbg_bus_block *block_desc;
3868 		u32 block_dwords, addr, len;
3869 		u8 dbg_client_id;
3870 
3871 		if (block->dbg_client_id[dev_data->chip_id] ==
3872 		    MAX_DBG_BUS_CLIENTS)
3873 			continue;
3874 
3875 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3876 						    (enum block_id)block_id);
3877 		block_dwords = NUM_DBG_LINES(block_desc) *
3878 			       STATIC_DEBUG_LINE_DWORDS;
3879 
3880 		/* Dump static section params */
3881 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3882 					       dump_buf + offset,
3883 					       dump,
3884 					       block->name,
3885 					       0,
3886 					       block_dwords,
3887 					       32, false, "STATIC", false, 0);
3888 
3889 		if (!dump) {
3890 			offset += block_dwords;
3891 			continue;
3892 		}
3893 
3894 		/* If all lines are invalid, dump zeros */
3895 		if (dev_data->block_in_reset[block_id]) {
3896 			memset(dump_buf + offset, 0,
3897 			       DWORDS_TO_BYTES(block_dwords));
3898 			offset += block_dwords;
3899 			continue;
3900 		}
3901 
3902 		/* Enable block's client */
3903 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3904 		qed_bus_enable_clients(p_hwfn,
3905 				       p_ptt,
3906 				       BIT(dbg_client_id));
3907 
3908 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3909 		len = STATIC_DEBUG_LINE_DWORDS;
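		/* For each debug line: select it on the block (enable mask
		 * 0xf, no shift/force), then read STATIC_DEBUG_LINE_DWORDS
		 * from the calendar output window.
		 */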
3910 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3911 		     line_id++) {
3912 			/* Configure debug line ID */
3913 			qed_config_dbg_line(p_hwfn,
3914 					    p_ptt,
3915 					    (enum block_id)block_id,
3916 					    (u8)line_id, 0xf, 0, 0, 0);
3917 
3918 			/* Read debug line info */
3919 			offset += qed_grc_dump_addr_range(p_hwfn,
3920 							  p_ptt,
3921 							  dump_buf + offset,
3922 							  dump,
3923 							  addr,
3924 							  len,
3925 							  true);
3926 		}
3927 
3928 		/* Disable block's client and debug output */
3929 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3930 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3931 	}
3932 
3933 	if (dump) {
3934 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3935 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3936 	}
3937 
3938 	return offset;
3939 }
3940 
3941 /* Performs GRC Dump to the specified buffer.
3942  * Returns the dumped size in dwords.
3943  */
3944 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3945 				    struct qed_ptt *p_ptt,
3946 				    u32 *dump_buf,
3947 				    bool dump, u32 *num_dumped_dwords)
3948 {
3949 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3950 	bool parities_masked = false;
3951 	u8 i, port_mode = 0;
3952 	u32 offset = 0;
3953 
3954 	*num_dumped_dwords = 0;
3955 
3956 	if (dump) {
3957 		/* Find port mode */
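		/* MISC_REG_PORT_MODE maps to the number of ports:
		 * 0 -> 1 port, 1 -> 2 ports, 2 -> 4 ports.
		 */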
3958 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3959 		case 0:
3960 			port_mode = 1;
3961 			break;
3962 		case 1:
3963 			port_mode = 2;
3964 			break;
3965 		case 2:
3966 			port_mode = 4;
3967 			break;
3968 		}
3969 
3970 		/* Update reset state */
3971 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3972 	}
3973 
3974 	/* Dump global params */
3975 	offset += qed_dump_common_global_params(p_hwfn,
3976 						p_ptt,
3977 						dump_buf + offset, dump, 4);
3978 	offset += qed_dump_str_param(dump_buf + offset,
3979 				     dump, "dump-type", "grc-dump");
3980 	offset += qed_dump_num_param(dump_buf + offset,
3981 				     dump,
3982 				     "num-lcids",
3983 				     qed_grc_get_param(p_hwfn,
3984 						DBG_GRC_PARAM_NUM_LCIDS));
3985 	offset += qed_dump_num_param(dump_buf + offset,
3986 				     dump,
3987 				     "num-ltids",
3988 				     qed_grc_get_param(p_hwfn,
3989 						DBG_GRC_PARAM_NUM_LTIDS));
3990 	offset += qed_dump_num_param(dump_buf + offset,
3991 				     dump, "num-ports", port_mode);
3992 
3993 	/* Dump reset registers (dumped before taking blocks out of reset) */
3994 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3995 		offset += qed_grc_dump_reset_regs(p_hwfn,
3996 						  p_ptt,
3997 						  dump_buf + offset, dump);
3998 
3999 	/* Take all blocks out of reset (using reset registers) */
4000 	if (dump) {
4001 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4002 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4003 	}
4004 
4005 	/* Disable all parities using MFW command */
4006 	if (dump &&
4007 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4008 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4009 		if (!parities_masked) {
4010 			DP_NOTICE(p_hwfn,
4011 				  "Failed to mask parities using MFW\n");
4012 			if (qed_grc_get_param
4013 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4014 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4015 		}
4016 	}
4017 
4018 	/* Dump modified registers (dumped before modifying them) */
4019 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4020 		offset += qed_grc_dump_modified_regs(p_hwfn,
4021 						     p_ptt,
4022 						     dump_buf + offset, dump);
4023 
4024 	/* Stall storms */
4025 	if (dump &&
4026 	    (qed_grc_is_included(p_hwfn,
4027 				 DBG_GRC_PARAM_DUMP_IOR) ||
4028 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4029 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4030 
4031 	/* Dump all regs */
4032 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4033 		bool block_enable[MAX_BLOCK_ID];
4034 
4035 		/* Dump all blocks except MCP */
4036 		for (i = 0; i < MAX_BLOCK_ID; i++)
4037 			block_enable[i] = true;
4038 		block_enable[BLOCK_MCP] = false;
4039 		offset += qed_grc_dump_registers(p_hwfn,
4040 						 p_ptt,
4041 						 dump_buf +
4042 						 offset,
4043 						 dump,
4044 						 block_enable, NULL, NULL);
4045 
4046 		/* Dump special registers */
4047 		offset += qed_grc_dump_special_regs(p_hwfn,
4048 						    p_ptt,
4049 						    dump_buf + offset, dump);
4050 	}
4051 
4052 	/* Dump memories */
4053 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4054 
4055 	/* Dump MCP */
4056 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4057 		offset += qed_grc_dump_mcp(p_hwfn,
4058 					   p_ptt, dump_buf + offset, dump);
4059 
4060 	/* Dump context */
4061 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4062 		offset += qed_grc_dump_ctx(p_hwfn,
4063 					   p_ptt, dump_buf + offset, dump);
4064 
4065 	/* Dump RSS memories */
4066 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4067 		offset += qed_grc_dump_rss(p_hwfn,
4068 					   p_ptt, dump_buf + offset, dump);
4069 
4070 	/* Dump Big RAM */
4071 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4072 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4073 			offset += qed_grc_dump_big_ram(p_hwfn,
4074 						       p_ptt,
4075 						       dump_buf + offset,
4076 						       dump, i);
4077 
4078 	/* Dump IORs */
4079 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4080 		offset += qed_grc_dump_iors(p_hwfn,
4081 					    p_ptt, dump_buf + offset, dump);
4082 
4083 	/* Dump VFC */
4084 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4085 		offset += qed_grc_dump_vfc(p_hwfn,
4086 					   p_ptt, dump_buf + offset, dump);
4087 
4088 	/* Dump PHY tbus */
4089 	if (qed_grc_is_included(p_hwfn,
4090 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4091 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4092 		offset += qed_grc_dump_phy(p_hwfn,
4093 					   p_ptt, dump_buf + offset, dump);
4094 
4095 	/* Dump static debug data */
4096 	if (qed_grc_is_included(p_hwfn,
4097 				DBG_GRC_PARAM_DUMP_STATIC) &&
4098 	    dev_data->bus.state == DBG_BUS_STATE_IDLE)
4099 		offset += qed_grc_dump_static_debug(p_hwfn,
4100 						    p_ptt,
4101 						    dump_buf + offset, dump);
4102 
4103 	/* Dump last section */
4104 	offset += qed_dump_last_section(dump_buf, offset, dump);
4105 
4106 	if (dump) {
4107 		/* Unstall storms */
4108 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4109 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4110 
4111 		/* Clear parity status */
4112 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4113 
4114 		/* Enable all parities using MFW command */
4115 		if (parities_masked)
4116 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4117 	}
4118 
4119 	*num_dumped_dwords = offset;
4120 
4121 	return DBG_STATUS_OK;
4122 }
4123 
4124 /* Writes the specified failing Idle Check rule to the specified buffer.
4125  * Returns the dumped size in dwords.
4126  */
4127 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4128 				     struct qed_ptt *p_ptt,
4129 				     u32 *dump_buf,
4131 				     bool dump,
4132 				     u16 rule_id,
4133 				     const struct dbg_idle_chk_rule *rule,
4134 				     u16 fail_entry_id, u32 *cond_reg_values)
4135 {
4136 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4137 	const struct dbg_idle_chk_cond_reg *cond_regs;
4138 	const struct dbg_idle_chk_info_reg *info_regs;
4139 	u32 i, next_reg_offset = 0, offset = 0;
4140 	struct dbg_idle_chk_result_hdr *hdr;
4141 	const union dbg_idle_chk_reg *regs;
4142 	u8 reg_id;
4143 
4144 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4145 	regs = &((const union dbg_idle_chk_reg *)
4146 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4147 	cond_regs = &regs[0].cond_reg;
4148 	info_regs = &regs[rule->num_cond_regs].info_reg;
4149 
4150 	/* Dump rule data */
4151 	if (dump) {
4152 		memset(hdr, 0, sizeof(*hdr));
4153 		hdr->rule_id = rule_id;
4154 		hdr->mem_entry_id = fail_entry_id;
4155 		hdr->severity = rule->severity;
4156 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4157 	}
4158 
4159 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4160 
4161 	/* Dump condition register values */
4162 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4163 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4164 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4165 
4166 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4167 			  (dump_buf + offset);
4168 
4169 		/* Write register header */
4170 		if (!dump) {
4171 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4172 			    reg->entry_size;
4173 			continue;
4174 		}
4175 
4176 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4177 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4178 		reg_hdr->start_entry = reg->start_entry;
4179 		reg_hdr->size = reg->entry_size;
4180 		SET_FIELD(reg_hdr->data,
4181 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4182 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4183 		SET_FIELD(reg_hdr->data,
4184 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4185 
4186 		/* Write register values */
4187 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4188 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4189 	}
4190 
4191 	/* Dump info register values */
4192 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4193 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4194 		u32 block_id;
4195 
4196 		/* Check if register's block is in reset */
4197 		if (!dump) {
4198 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4199 			continue;
4200 		}
4201 
4202 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4203 		if (block_id >= MAX_BLOCK_ID) {
4204 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4205 			return 0;
4206 		}
4207 
4208 		if (!dev_data->block_in_reset[block_id]) {
4209 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4210 			bool wide_bus, eval_mode, mode_match = true;
4211 			u16 modes_buf_offset;
4212 			u32 addr;
4213 
4214 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4215 				  (dump_buf + offset);
4216 
4217 			/* Check mode */
4218 			eval_mode = GET_FIELD(reg->mode.data,
4219 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4220 			if (eval_mode) {
4221 				modes_buf_offset =
4222 				    GET_FIELD(reg->mode.data,
4223 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4224 				mode_match =
4225 					qed_is_mode_match(p_hwfn,
4226 							  &modes_buf_offset);
4227 			}
4228 
4229 			if (!mode_match)
4230 				continue;
4231 
4232 			addr = GET_FIELD(reg->data,
4233 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4234 			wide_bus = GET_FIELD(reg->data,
4235 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4236 
4237 			/* Write register header */
4238 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4239 			hdr->num_dumped_info_regs++;
4240 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4241 			reg_hdr->size = reg->size;
4242 			SET_FIELD(reg_hdr->data,
4243 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4244 				  rule->num_cond_regs + reg_id);
4245 
4246 			/* Write register values */
4247 			offset += qed_grc_dump_addr_range(p_hwfn,
4248 							  p_ptt,
4249 							  dump_buf + offset,
4250 							  dump,
4251 							  addr,
4252 							  reg->size, wide_bus);
4253 		}
4254 	}
4255 
4256 	return offset;
4257 }
4258 
4259 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4260 static u32
4261 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4262 			       u32 *dump_buf, bool dump,
4263 			       const struct dbg_idle_chk_rule *input_rules,
4264 			       u32 num_input_rules, u32 *num_failing_rules)
4265 {
4266 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4267 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4268 	u32 i, offset = 0;
4269 	u16 entry_id;
4270 	u8 reg_id;
4271 
4272 	*num_failing_rules = 0;
4273 
4274 	for (i = 0; i < num_input_rules; i++) {
4275 		const struct dbg_idle_chk_cond_reg *cond_regs;
4276 		const struct dbg_idle_chk_rule *rule;
4277 		const union dbg_idle_chk_reg *regs;
4278 		u16 num_reg_entries = 1;
4279 		bool check_rule = true;
4280 		const u32 *imm_values;
4281 
4282 		rule = &input_rules[i];
4283 		regs = &((const union dbg_idle_chk_reg *)
4284 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4285 			[rule->reg_offset];
4286 		cond_regs = &regs[0].cond_reg;
4287 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4288 			     [rule->imm_offset];
4289 
4290 		/* Check if all condition register blocks are out of reset, and
4291 		 * find maximal number of entries (all condition registers that
4292 		 * are memories must have the same size, which is > 1).
4293 		 */
4294 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4295 		     reg_id++) {
4296 			u32 block_id =
4297 				GET_FIELD(cond_regs[reg_id].data,
4298 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4299 
4300 			if (block_id >= MAX_BLOCK_ID) {
4301 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4302 				return 0;
4303 			}
4304 
4305 			check_rule = !dev_data->block_in_reset[block_id];
4306 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4307 				num_reg_entries = cond_regs[reg_id].num_entries;
4308 		}
4309 
4310 		if (!check_rule && dump)
4311 			continue;
4312 
4313 		if (!dump) {
4314 			u32 entry_dump_size =
4315 				qed_idle_chk_dump_failure(p_hwfn,
4316 							  p_ptt,
4317 							  dump_buf + offset,
4318 							  false,
4319 							  rule->rule_id,
4320 							  rule,
4321 							  0,
4322 							  NULL);
4323 
4324 			offset += num_reg_entries * entry_dump_size;
4325 			(*num_failing_rules) += num_reg_entries;
4326 			continue;
4327 		}
4328 
4329 		/* Go over all register entries (number of entries is the same
4330 		 * for all condition registers).
4331 		 */
4332 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4333 			u32 next_reg_offset = 0;
4334 
4335 			/* Read current entry of all condition registers */
4336 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4337 			     reg_id++) {
4338 				const struct dbg_idle_chk_cond_reg *reg =
4339 					&cond_regs[reg_id];
4340 				u32 padded_entry_size, addr;
4341 				bool wide_bus;
4342 
4343 				/* Find GRC address (if it's a memory, the
4344 				 * address of the specific entry is calculated).
4345 				 */
4346 				addr = GET_FIELD(reg->data,
4347 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4348 				wide_bus =
4349 				    GET_FIELD(reg->data,
4350 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
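				/* Memory registers are laid out with a
				 * power-of-two stride, so the entry address is
				 * base + (start_entry + entry_id) *
				 * roundup_pow_of_two(entry_size).
				 */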
4351 				if (reg->num_entries > 1 ||
4352 				    reg->start_entry > 0) {
4353 					padded_entry_size =
4354 					   reg->entry_size > 1 ?
4355 					   roundup_pow_of_two(reg->entry_size) :
4356 					   1;
4357 					addr += (reg->start_entry + entry_id) *
4358 						padded_entry_size;
4359 				}
4360 
4361 				/* Read registers */
4362 				if (next_reg_offset + reg->entry_size >=
4363 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4364 					DP_NOTICE(p_hwfn,
4365 						  "idle check registers entry is too large\n");
4366 					return 0;
4367 				}
4368 
4369 				next_reg_offset +=
4370 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4371 							    cond_reg_values +
4372 							    next_reg_offset,
4373 							    dump, addr,
4374 							    reg->entry_size,
4375 							    wide_bus);
4376 			}
4377 
4378 			/* Call rule condition function.
4379 			 * If returns true, it's a failure.
4380 			 */
4381 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4382 							imm_values)) {
4383 				offset += qed_idle_chk_dump_failure(p_hwfn,
4384 							p_ptt,
4385 							dump_buf + offset,
4386 							dump,
4387 							rule->rule_id,
4388 							rule,
4389 							entry_id,
4390 							cond_reg_values);
4391 				(*num_failing_rules)++;
4392 			}
4393 		}
4394 	}
4395 
4396 	return offset;
4397 }
4398 
4399 /* Performs Idle Check Dump to the specified buffer.
4400  * Returns the dumped size in dwords.
4401  */
4402 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4403 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4404 {
4405 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4406 	u32 num_failing_rules = 0;
4407 
4408 	/* Dump global params */
4409 	offset += qed_dump_common_global_params(p_hwfn,
4410 						p_ptt,
4411 						dump_buf + offset, dump, 1);
4412 	offset += qed_dump_str_param(dump_buf + offset,
4413 				     dump, "dump-type", "idle-chk");
4414 
4415 	/* Dump idle check section header with a single parameter */
4416 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4417 	num_failing_rules_offset = offset;
4418 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4419 
4420 	while (input_offset <
4421 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4422 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4423 			(const struct dbg_idle_chk_cond_hdr *)
4424 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4425 			[input_offset++];
4426 		bool eval_mode, mode_match = true;
4427 		u32 curr_failing_rules;
4428 		u16 modes_buf_offset;
4429 
4430 		/* Check mode */
4431 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4432 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4433 		if (eval_mode) {
4434 			modes_buf_offset =
4435 				GET_FIELD(cond_hdr->mode.data,
4436 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4437 			mode_match = qed_is_mode_match(p_hwfn,
4438 						       &modes_buf_offset);
4439 		}
4440 
4441 		if (mode_match) {
4442 			offset +=
4443 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4444 				p_ptt,
4445 				dump_buf + offset,
4446 				dump,
4447 				(const struct dbg_idle_chk_rule *)
4448 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4449 				ptr[input_offset],
4450 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4451 				&curr_failing_rules);
4452 			num_failing_rules += curr_failing_rules;
4453 		}
4454 
4455 		input_offset += cond_hdr->data_size;
4456 	}
4457 
4458 	/* Overwrite num_rules parameter */
4459 	if (dump)
4460 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4461 				   dump, "num_rules", num_failing_rules);
4462 
4463 	/* Dump last section */
4464 	offset += qed_dump_last_section(dump_buf, offset, dump);
4465 
4466 	return offset;
4467 }
4468 
4469 /* Finds the meta data image in NVRAM */
4470 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4471 					    struct qed_ptt *p_ptt,
4472 					    u32 image_type,
4473 					    u32 *nvram_offset_bytes,
4474 					    u32 *nvram_size_bytes)
4475 {
4476 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4477 	struct mcp_file_att file_att;
4478 	int nvm_result;
4479 
4480 	/* Call NVRAM get file command */
4481 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4482 					p_ptt,
4483 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4484 					image_type,
4485 					&ret_mcp_resp,
4486 					&ret_mcp_param,
4487 					&ret_txn_size, (u32 *)&file_att);
4488 
4489 	/* Check response */
4490 	if (nvm_result ||
4491 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4492 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4493 
4494 	/* Update return values */
4495 	*nvram_offset_bytes = file_att.nvm_start_addr;
4496 	*nvram_size_bytes = file_att.len;
4497 
4498 	DP_VERBOSE(p_hwfn,
4499 		   QED_MSG_DEBUG,
4500 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4501 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4502 
4503 	/* Check alignment */
4504 	if (*nvram_size_bytes & 0x3)
4505 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4506 
4507 	return DBG_STATUS_OK;
4508 }
4509 
4510 /* Reads data from NVRAM */
4511 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4512 				      struct qed_ptt *p_ptt,
4513 				      u32 nvram_offset_bytes,
4514 				      u32 nvram_size_bytes, u32 *ret_buf)
4515 {
4516 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4517 	s32 bytes_left = nvram_size_bytes;
4518 	u32 read_offset = 0;
4519 
4520 	DP_VERBOSE(p_hwfn,
4521 		   QED_MSG_DEBUG,
4522 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4523 		   nvram_size_bytes);
4524 
4525 	do {
4526 		bytes_to_copy =
4527 		    (bytes_left >
4528 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4529 
4530 		/* Call NVRAM read command */
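		/* The MCP param packs the NVRAM byte offset in the low bits
		 * and the transfer length shifted by
		 * DRV_MB_PARAM_NVM_LEN_OFFSET.
		 */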
4531 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4532 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4533 				       (nvram_offset_bytes +
4534 					read_offset) |
4535 				       (bytes_to_copy <<
4536 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4537 				       &ret_mcp_resp, &ret_mcp_param,
4538 				       &ret_read_size,
4539 				       (u32 *)((u8 *)ret_buf + read_offset)))
4540 			return DBG_STATUS_NVRAM_READ_FAILED;
4541 
4542 		/* Check response */
4543 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4544 			return DBG_STATUS_NVRAM_READ_FAILED;
4545 
4546 		/* Update read offset */
4547 		read_offset += ret_read_size;
4548 		bytes_left -= ret_read_size;
4549 	} while (bytes_left > 0);
4550 
4551 	return DBG_STATUS_OK;
4552 }
4553 
4554 /* Get info on the MCP Trace data in the scratchpad:
4555  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4556  * - trace_data_size (OUT): trace data size in bytes (without the header)
4557  */
4558 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4559 						   struct qed_ptt *p_ptt,
4560 						   u32 *trace_data_grc_addr,
4561 						   u32 *trace_data_size)
4562 {
4563 	u32 spad_trace_offsize, signature;
4564 
4565 	/* Read trace section offsize structure from MCP scratchpad */
4566 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4567 
4568 	/* Extract trace section address from offsize (in scratchpad) */
4569 	*trace_data_grc_addr =
4570 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4571 
4572 	/* Read signature from MCP trace section */
4573 	signature = qed_rd(p_hwfn, p_ptt,
4574 			   *trace_data_grc_addr +
4575 			   offsetof(struct mcp_trace, signature));
4576 
4577 	if (signature != MFW_TRACE_SIGNATURE)
4578 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4579 
4580 	/* Read trace size from MCP trace section */
4581 	*trace_data_size = qed_rd(p_hwfn,
4582 				  p_ptt,
4583 				  *trace_data_grc_addr +
4584 				  offsetof(struct mcp_trace, size));
4585 
4586 	return DBG_STATUS_OK;
4587 }
4588 
4589 /* Get info on the MCP Trace meta data in NVRAM:
4590  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4591  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4592  *			      loaded from file).
4593  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4594  */
4595 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4596 						   struct qed_ptt *p_ptt,
4597 						   u32 trace_data_size_bytes,
4598 						   u32 *running_bundle_id,
4599 						   u32 *trace_meta_offset,
4600 						   u32 *trace_meta_size)
4601 {
4602 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4603 
4604 	/* Read MCP trace section offsize structure from MCP scratchpad */
4605 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4606 
4607 	/* Find running bundle ID */
4608 	running_mfw_addr =
4609 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4610 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4611 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4612 	if (*running_bundle_id > 1)
4613 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4614 
4615 	/* Find image in NVRAM */
4616 	nvram_image_type =
4617 	    (*running_bundle_id ==
4618 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4619 	return qed_find_nvram_image(p_hwfn,
4620 				    p_ptt,
4621 				    nvram_image_type,
4622 				    trace_meta_offset, trace_meta_size);
4623 }
4624 
4625 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4626 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4627 					       struct qed_ptt *p_ptt,
4628 					       u32 nvram_offset_in_bytes,
4629 					       u32 size_in_bytes, u32 *buf)
4630 {
4631 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4632 	enum dbg_status status;
4633 	u32 signature;
4634 
4635 	/* Read meta data from NVRAM */
4636 	status = qed_nvram_read(p_hwfn,
4637 				p_ptt,
4638 				nvram_offset_in_bytes, size_in_bytes, buf);
4639 	if (status != DBG_STATUS_OK)
4640 		return status;
4641 
4642 	/* Extract and check first signature */
4643 	signature = qed_read_unaligned_dword(byte_buf);
4644 	byte_buf += sizeof(signature);
4645 	if (signature != NVM_MAGIC_VALUE)
4646 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4647 
4648 	/* Extract number of modules */
4649 	modules_num = *(byte_buf++);
4650 
4651 	/* Skip all modules */
4652 	for (i = 0; i < modules_num; i++) {
4653 		module_len = *(byte_buf++);
4654 		byte_buf += module_len;
4655 	}
4656 
4657 	/* Extract and check second signature */
4658 	signature = qed_read_unaligned_dword(byte_buf);
4659 	byte_buf += sizeof(signature);
4660 	if (signature != NVM_MAGIC_VALUE)
4661 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4662 
4663 	return DBG_STATUS_OK;
4664 }
4665 
4666 /* Dump MCP Trace */
4667 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4668 					  struct qed_ptt *p_ptt,
4669 					  u32 *dump_buf,
4670 					  bool dump, u32 *num_dumped_dwords)
4671 {
4672 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4673 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4674 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4675 	enum dbg_status status;
4676 	bool mcp_access;
4677 	int halted = 0;
4678 
4679 	*num_dumped_dwords = 0;
4680 
4681 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4682 
4683 	/* Get trace data info */
4684 	status = qed_mcp_trace_get_data_info(p_hwfn,
4685 					     p_ptt,
4686 					     &trace_data_grc_addr,
4687 					     &trace_data_size_bytes);
4688 	if (status != DBG_STATUS_OK)
4689 		return status;
4690 
4691 	/* Dump global params */
4692 	offset += qed_dump_common_global_params(p_hwfn,
4693 						p_ptt,
4694 						dump_buf + offset, dump, 1);
4695 	offset += qed_dump_str_param(dump_buf + offset,
4696 				     dump, "dump-type", "mcp-trace");
4697 
4698 	/* Halt MCP while reading from scratchpad so the read data will be
4699 	 * consistent. If halt fails, MCP trace is taken anyway, with a small
4700 	 * risk that it may be corrupt.
4701 	 */
4702 	if (dump && mcp_access) {
4703 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4704 		if (!halted)
4705 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4706 	}
4707 
4708 	/* Find trace data size */
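	/* The dumped trace data includes the mcp_trace header struct, hence
	 * sizeof(struct mcp_trace) is added before rounding up to dwords.
	 */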
4709 	trace_data_size_dwords =
4710 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4711 			 BYTES_IN_DWORD);
4712 
4713 	/* Dump trace data section header and param */
4714 	offset += qed_dump_section_hdr(dump_buf + offset,
4715 				       dump, "mcp_trace_data", 1);
4716 	offset += qed_dump_num_param(dump_buf + offset,
4717 				     dump, "size", trace_data_size_dwords);
4718 
4719 	/* Read trace data from scratchpad into dump buffer */
4720 	offset += qed_grc_dump_addr_range(p_hwfn,
4721 					  p_ptt,
4722 					  dump_buf + offset,
4723 					  dump,
4724 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4725 					  trace_data_size_dwords, false);
4726 
4727 	/* Resume MCP (only if halt succeeded) */
4728 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4729 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4730 
4731 	/* Dump trace meta section header */
4732 	offset += qed_dump_section_hdr(dump_buf + offset,
4733 				       dump, "mcp_trace_meta", 1);
4734 
4735 	/* If MCP Trace meta size parameter was set, use it.
4736 	 * Otherwise, read trace meta.
4737 	 * trace_meta_size_bytes is dword-aligned.
4738 	 */
4739 	trace_meta_size_bytes =
4740 		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4741 	if ((!trace_meta_size_bytes || dump) && mcp_access) {
4742 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4743 						     p_ptt,
4744 						     trace_data_size_bytes,
4745 						     &running_bundle_id,
4746 						     &trace_meta_offset_bytes,
4747 						     &trace_meta_size_bytes);
4748 		if (status == DBG_STATUS_OK)
4749 			trace_meta_size_dwords =
4750 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4751 	}
4752 
4753 	/* Dump trace meta size param */
4754 	offset += qed_dump_num_param(dump_buf + offset,
4755 				     dump, "size", trace_meta_size_dwords);
4756 
4757 	/* Read trace meta image into dump buffer */
4758 	if (dump && trace_meta_size_dwords)
4759 		status = qed_mcp_trace_read_meta(p_hwfn,
4760 						 p_ptt,
4761 						 trace_meta_offset_bytes,
4762 						 trace_meta_size_bytes,
4763 						 dump_buf + offset);
4764 	if (status == DBG_STATUS_OK)
4765 		offset += trace_meta_size_dwords;
4766 
4767 	/* Dump last section */
4768 	offset += qed_dump_last_section(dump_buf, offset, dump);
4769 
4770 	*num_dumped_dwords = offset;
4771 
4772 	/* If there is no MCP access, indicate that the dump doesn't contain
4773 	 * the meta data from NVRAM.
4774 	 */
4775 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4776 }
4777 
4778 /* Dump GRC FIFO */
4779 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4780 					 struct qed_ptt *p_ptt,
4781 					 u32 *dump_buf,
4782 					 bool dump, u32 *num_dumped_dwords)
4783 {
4784 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4785 	bool fifo_has_data;
4786 
4787 	*num_dumped_dwords = 0;
4788 
4789 	/* Dump global params */
4790 	offset += qed_dump_common_global_params(p_hwfn,
4791 						p_ptt,
4792 						dump_buf + offset, dump, 1);
4793 	offset += qed_dump_str_param(dump_buf + offset,
4794 				     dump, "dump-type", "reg-fifo");
4795 
4796 	/* Dump fifo data section header and param. The size param is 0 for
4797 	 * now, and is overwritten after reading the FIFO.
4798 	 */
4799 	offset += qed_dump_section_hdr(dump_buf + offset,
4800 				       dump, "reg_fifo_data", 1);
4801 	size_param_offset = offset;
4802 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4803 
4804 	if (!dump) {
4805 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4806 		 * test how much data is available, except for reading it.
4807 		 */
4808 		offset += REG_FIFO_DEPTH_DWORDS;
4809 		goto out;
4810 	}
4811 
4812 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4813 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4814 
4815 	/* Pull available data from the FIFO. Use DMAE since this is wide-bus
4816 	 * memory and must be accessed atomically. Bound dwords_read by the FIFO
4817 	 * depth, since more entries could be added to the FIFO while we are
4818 	 * emptying it.
4819 	 */
4820 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4821 	len = REG_FIFO_ELEMENT_DWORDS;
4822 	for (dwords_read = 0;
4823 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4824 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4825 		offset += qed_grc_dump_addr_range(p_hwfn,
4826 						  p_ptt,
4827 						  dump_buf + offset,
4828 						  true,
4829 						  addr,
4830 						  len,
4831 						  true);
4832 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4833 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4834 	}
4835 
4836 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4837 			   dwords_read);
4838 out:
4839 	/* Dump last section */
4840 	offset += qed_dump_last_section(dump_buf, offset, dump);
4841 
4842 	*num_dumped_dwords = offset;
4843 
4844 	return DBG_STATUS_OK;
4845 }
4846 
4847 /* Dump IGU FIFO */
4848 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4849 					 struct qed_ptt *p_ptt,
4850 					 u32 *dump_buf,
4851 					 bool dump, u32 *num_dumped_dwords)
4852 {
4853 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4854 	bool fifo_has_data;
4855 
4856 	*num_dumped_dwords = 0;
4857 
4858 	/* Dump global params */
4859 	offset += qed_dump_common_global_params(p_hwfn,
4860 						p_ptt,
4861 						dump_buf + offset, dump, 1);
4862 	offset += qed_dump_str_param(dump_buf + offset,
4863 				     dump, "dump-type", "igu-fifo");
4864 
4865 	/* Dump fifo data section header and param. The size param is 0 for
4866 	 * now, and is overwritten after reading the FIFO.
4867 	 */
4868 	offset += qed_dump_section_hdr(dump_buf + offset,
4869 				       dump, "igu_fifo_data", 1);
4870 	size_param_offset = offset;
4871 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4872 
4873 	if (!dump) {
4874 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4875 		 * test how much data is available, except for reading it.
4876 		 */
4877 		offset += IGU_FIFO_DEPTH_DWORDS;
4878 		goto out;
4879 	}
4880 
4881 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4882 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4883 
4884 	/* Pull available data from the FIFO. Use DMAE since this is wide-bus
4885 	 * memory and must be accessed atomically. Bound dwords_read by the FIFO
4886 	 * depth, since more entries could be added to the FIFO while we are
4887 	 * emptying it.
4888 	 */
4889 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4890 	len = IGU_FIFO_ELEMENT_DWORDS;
4891 	for (dwords_read = 0;
4892 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4893 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4894 		offset += qed_grc_dump_addr_range(p_hwfn,
4895 						  p_ptt,
4896 						  dump_buf + offset,
4897 						  true,
4898 						  addr,
4899 						  len,
4900 						  true);
4901 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4902 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4903 	}
4904 
4905 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4906 			   dwords_read);
4907 out:
4908 	/* Dump last section */
4909 	offset += qed_dump_last_section(dump_buf, offset, dump);
4910 
4911 	*num_dumped_dwords = offset;
4912 
4913 	return DBG_STATUS_OK;
4914 }
4915 
4916 /* Protection Override dump */
4917 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4918 						    struct qed_ptt *p_ptt,
4919 						    u32 *dump_buf,
4920 						    bool dump,
4921 						    u32 *num_dumped_dwords)
4922 {
4923 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4924 
4925 	*num_dumped_dwords = 0;
4926 
4927 	/* Dump global params */
4928 	offset += qed_dump_common_global_params(p_hwfn,
4929 						p_ptt,
4930 						dump_buf + offset, dump, 1);
4931 	offset += qed_dump_str_param(dump_buf + offset,
4932 				     dump, "dump-type", "protection-override");
4933 
4934 	/* Dump data section header and param. The size param is 0 for now,
4935 	 * and is overwritten after reading the data.
4936 	 */
4937 	offset += qed_dump_section_hdr(dump_buf + offset,
4938 				       dump, "protection_override_data", 1);
4939 	size_param_offset = offset;
4940 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4941 
4942 	if (!dump) {
4943 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4944 		goto out;
4945 	}
4946 
4947 	/* Add override window info to buffer */
4948 	override_window_dwords =
4949 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4950 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4951 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4952 	offset += qed_grc_dump_addr_range(p_hwfn,
4953 					  p_ptt,
4954 					  dump_buf + offset,
4955 					  true,
4956 					  addr,
4957 					  override_window_dwords,
4958 					  true);
4959 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4960 			   override_window_dwords);
4961 out:
4962 	/* Dump last section */
4963 	offset += qed_dump_last_section(dump_buf, offset, dump);
4964 
4965 	*num_dumped_dwords = offset;
4966 
4967 	return DBG_STATUS_OK;
4968 }
4969 
4970 /* Performs FW Asserts Dump to the specified buffer.
4971  * Returns the dumped size in dwords.
4972  */
4973 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4974 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4975 {
4976 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4977 	struct fw_asserts_ram_section *asserts;
4978 	char storm_letter_str[2] = "?";
4979 	struct fw_info fw_info;
4980 	u32 offset = 0;
4981 	u8 storm_id;
4982 
4983 	/* Dump global params */
4984 	offset += qed_dump_common_global_params(p_hwfn,
4985 						p_ptt,
4986 						dump_buf + offset, dump, 1);
4987 	offset += qed_dump_str_param(dump_buf + offset,
4988 				     dump, "dump-type", "fw-asserts");
4989 
4990 	/* Find Storm dump size */
4991 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4992 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4993 		struct storm_defs *storm = &s_storm_defs[storm_id];
4994 		u32 last_list_idx, addr;
4995 
4996 		if (dev_data->block_in_reset[storm->block_id])
4997 			continue;
4998 
4999 		/* Read FW info for the current Storm */
5000 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5001 
5002 		asserts = &fw_info.fw_asserts_section;
5003 
5004 		/* Dump FW Asserts section header and params */
5005 		storm_letter_str[0] = storm->letter;
5006 		offset += qed_dump_section_hdr(dump_buf + offset,
5007 					       dump, "fw_asserts", 2);
5008 		offset += qed_dump_str_param(dump_buf + offset,
5009 					     dump, "storm", storm_letter_str);
5010 		offset += qed_dump_num_param(dump_buf + offset,
5011 					     dump,
5012 					     "size",
5013 					     asserts->list_element_dword_size);
5014 
5015 		/* Read and dump FW Asserts data */
5016 		if (!dump) {
5017 			offset += asserts->list_element_dword_size;
5018 			continue;
5019 		}
5020 
5021 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5022 			SEM_FAST_REG_INT_RAM +
5023 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5024 		next_list_idx_addr = fw_asserts_section_addr +
5025 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5026 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
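		/* next_list_idx points at the next entry to be written, so the
		 * most recently written entry is the one before it, wrapping
		 * to the last element when next_list_idx is 0.
		 */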
5027 		last_list_idx = (next_list_idx > 0 ?
5028 				 next_list_idx :
5029 				 asserts->list_num_elements) - 1;
5030 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5031 		       asserts->list_dword_offset +
5032 		       last_list_idx * asserts->list_element_dword_size;
5033 		offset +=
5034 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5035 					    dump_buf + offset,
5036 					    dump, addr,
5037 					    asserts->list_element_dword_size,
5038 					    false);
5039 	}
5040 
5041 	/* Dump last section */
5042 	offset += qed_dump_last_section(dump_buf, offset, dump);
5043 
5044 	return offset;
5045 }
5046 
5047 /***************************** Public Functions *******************************/
5048 
5049 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5050 {
5051 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5052 	u8 buf_id;
5053 
5054 	/* Convert binary data to debug arrays */
5055 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5056 		s_dbg_arrays[buf_id].ptr =
5057 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5058 		s_dbg_arrays[buf_id].size_in_dwords =
5059 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5060 	}
5061 
5062 	return DBG_STATUS_OK;
5063 }
5064 
5065 /* Assign default GRC param values */
5066 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5067 {
5068 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5069 	u32 i;
5070 
5071 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5072 		if (!s_grc_param_defs[i].is_persistent)
5073 			dev_data->grc.param_val[i] =
5074 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5075 }
5076 
5077 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5078 					      struct qed_ptt *p_ptt,
5079 					      u32 *buf_size)
5080 {
5081 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5082 
5083 	*buf_size = 0;
5084 
5085 	if (status != DBG_STATUS_OK)
5086 		return status;
5087 
5088 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5089 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5090 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5091 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5092 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5093 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5094 
5095 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5096 }
5097 
5098 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5099 				 struct qed_ptt *p_ptt,
5100 				 u32 *dump_buf,
5101 				 u32 buf_size_in_dwords,
5102 				 u32 *num_dumped_dwords)
5103 {
5104 	u32 needed_buf_size_in_dwords;
5105 	enum dbg_status status;
5106 
5107 	*num_dumped_dwords = 0;
5108 
5109 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5110 					       p_ptt,
5111 					       &needed_buf_size_in_dwords);
5112 	if (status != DBG_STATUS_OK)
5113 		return status;
5114 
5115 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5116 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5117 
5118 	/* GRC Dump */
5119 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5120 
5121 	/* Revert GRC params to their default */
5122 	qed_dbg_grc_set_params_default(p_hwfn);
5123 
5124 	return status;
5125 }
5126 
5127 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5128 						   struct qed_ptt *p_ptt,
5129 						   u32 *buf_size)
5130 {
5131 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5132 	struct idle_chk_data *idle_chk;
5133 	enum dbg_status status;
5134 
5135 	idle_chk = &dev_data->idle_chk;
5136 	*buf_size = 0;
5137 
5138 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5139 	if (status != DBG_STATUS_OK)
5140 		return status;
5141 
5142 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5143 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5144 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5145 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5146 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5147 
5148 	if (!idle_chk->buf_size_set) {
5149 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5150 						       p_ptt, NULL, false);
5151 		idle_chk->buf_size_set = true;
5152 	}
5153 
5154 	*buf_size = idle_chk->buf_size;
5155 
5156 	return DBG_STATUS_OK;
5157 }
5158 
5159 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5160 				      struct qed_ptt *p_ptt,
5161 				      u32 *dump_buf,
5162 				      u32 buf_size_in_dwords,
5163 				      u32 *num_dumped_dwords)
5164 {
5165 	u32 needed_buf_size_in_dwords;
5166 	enum dbg_status status;
5167 
5168 	*num_dumped_dwords = 0;
5169 
5170 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5171 						    p_ptt,
5172 						    &needed_buf_size_in_dwords);
5173 	if (status != DBG_STATUS_OK)
5174 		return status;
5175 
5176 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5177 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5178 
5179 	/* Update reset state */
5180 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5181 
5182 	/* Idle Check Dump */
5183 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5184 
5185 	/* Revert GRC params to their default */
5186 	qed_dbg_grc_set_params_default(p_hwfn);
5187 
5188 	return DBG_STATUS_OK;
5189 }
5190 
5191 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5192 						    struct qed_ptt *p_ptt,
5193 						    u32 *buf_size)
5194 {
5195 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5196 
5197 	*buf_size = 0;
5198 
5199 	if (status != DBG_STATUS_OK)
5200 		return status;
5201 
5202 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5203 }
5204 
5205 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5206 				       struct qed_ptt *p_ptt,
5207 				       u32 *dump_buf,
5208 				       u32 buf_size_in_dwords,
5209 				       u32 *num_dumped_dwords)
5210 {
5211 	u32 needed_buf_size_in_dwords;
5212 	enum dbg_status status;
5213 
5214 	status =
5215 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5216 						    p_ptt,
5217 						    &needed_buf_size_in_dwords);
5218 	if (status != DBG_STATUS_OK && status !=
5219 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5220 		return status;
5221 
5222 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5223 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5224 
5225 	/* Update reset state */
5226 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5227 
5228 	/* Perform dump */
5229 	status = qed_mcp_trace_dump(p_hwfn,
5230 				    p_ptt, dump_buf, true, num_dumped_dwords);
5231 
5232 	/* Revert GRC params to their default */
5233 	qed_dbg_grc_set_params_default(p_hwfn);
5234 
5235 	return status;
5236 }
5237 
5238 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5239 						   struct qed_ptt *p_ptt,
5240 						   u32 *buf_size)
5241 {
5242 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5243 
5244 	*buf_size = 0;
5245 
5246 	if (status != DBG_STATUS_OK)
5247 		return status;
5248 
5249 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5250 }
5251 
5252 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5253 				      struct qed_ptt *p_ptt,
5254 				      u32 *dump_buf,
5255 				      u32 buf_size_in_dwords,
5256 				      u32 *num_dumped_dwords)
5257 {
5258 	u32 needed_buf_size_in_dwords;
5259 	enum dbg_status status;
5260 
5261 	*num_dumped_dwords = 0;
5262 
5263 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5264 						    p_ptt,
5265 						    &needed_buf_size_in_dwords);
5266 	if (status != DBG_STATUS_OK)
5267 		return status;
5268 
5269 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5270 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5271 
5272 	/* Update reset state */
5273 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5274 
5275 	status = qed_reg_fifo_dump(p_hwfn,
5276 				   p_ptt, dump_buf, true, num_dumped_dwords);
5277 
5278 	/* Revert GRC params to their default */
5279 	qed_dbg_grc_set_params_default(p_hwfn);
5280 
5281 	return status;
5282 }
5283 
5284 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5285 						   struct qed_ptt *p_ptt,
5286 						   u32 *buf_size)
5287 {
5288 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5289 
5290 	*buf_size = 0;
5291 
5292 	if (status != DBG_STATUS_OK)
5293 		return status;
5294 
5295 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5296 }
5297 
5298 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5299 				      struct qed_ptt *p_ptt,
5300 				      u32 *dump_buf,
5301 				      u32 buf_size_in_dwords,
5302 				      u32 *num_dumped_dwords)
5303 {
5304 	u32 needed_buf_size_in_dwords;
5305 	enum dbg_status status;
5306 
5307 	*num_dumped_dwords = 0;
5308 
5309 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5310 						    p_ptt,
5311 						    &needed_buf_size_in_dwords);
5312 	if (status != DBG_STATUS_OK)
5313 		return status;
5314 
5315 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5316 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5317 
5318 	/* Update reset state */
5319 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5320 
5321 	status = qed_igu_fifo_dump(p_hwfn,
5322 				   p_ptt, dump_buf, true, num_dumped_dwords);
5323 	/* Revert GRC params to their default */
5324 	qed_dbg_grc_set_params_default(p_hwfn);
5325 
5326 	return status;
5327 }
5328 
5329 enum dbg_status
5330 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5331 					      struct qed_ptt *p_ptt,
5332 					      u32 *buf_size)
5333 {
5334 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5335 
5336 	*buf_size = 0;
5337 
5338 	if (status != DBG_STATUS_OK)
5339 		return status;
5340 
5341 	return qed_protection_override_dump(p_hwfn,
5342 					    p_ptt, NULL, false, buf_size);
5343 }
5344 
5345 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5346 						 struct qed_ptt *p_ptt,
5347 						 u32 *dump_buf,
5348 						 u32 buf_size_in_dwords,
5349 						 u32 *num_dumped_dwords)
5350 {
5351 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5352 	enum dbg_status status;
5353 
5354 	*num_dumped_dwords = 0;
5355 
5356 	status =
5357 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5358 							      p_ptt,
5359 							      p_size);
5360 	if (status != DBG_STATUS_OK)
5361 		return status;
5362 
5363 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5364 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5365 
5366 	/* Update reset state */
5367 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5368 
5369 	status = qed_protection_override_dump(p_hwfn,
5370 					      p_ptt,
5371 					      dump_buf,
5372 					      true, num_dumped_dwords);
5373 
5374 	/* Revert GRC params to their default */
5375 	qed_dbg_grc_set_params_default(p_hwfn);
5376 
5377 	return status;
5378 }
5379 
5380 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5381 						     struct qed_ptt *p_ptt,
5382 						     u32 *buf_size)
5383 {
5384 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5385 
5386 	*buf_size = 0;
5387 
5388 	if (status != DBG_STATUS_OK)
5389 		return status;
5390 
5391 	/* Update reset state */
5392 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5393 
5394 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5395 
5396 	return DBG_STATUS_OK;
5397 }
5398 
5399 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5400 					struct qed_ptt *p_ptt,
5401 					u32 *dump_buf,
5402 					u32 buf_size_in_dwords,
5403 					u32 *num_dumped_dwords)
5404 {
5405 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5406 	enum dbg_status status;
5407 
5408 	*num_dumped_dwords = 0;
5409 
5410 	status =
5411 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5412 						     p_ptt,
5413 						     p_size);
5414 	if (status != DBG_STATUS_OK)
5415 		return status;
5416 
5417 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5418 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5419 
5420 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5421 
5422 	/* Revert GRC params to their default */
5423 	qed_dbg_grc_set_params_default(p_hwfn);
5424 
5425 	return DBG_STATUS_OK;
5426 }
5427 
5428 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5429 				  struct qed_ptt *p_ptt,
5430 				  enum block_id block_id,
5431 				  enum dbg_attn_type attn_type,
5432 				  bool clear_status,
5433 				  struct dbg_attn_block_result *results)
5434 {
5435 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5436 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5437 	const struct dbg_attn_reg *attn_reg_arr;
5438 
5439 	if (status != DBG_STATUS_OK)
5440 		return status;
5441 
5442 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5443 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5444 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5445 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5446 
5447 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5448 					       attn_type, &num_attn_regs);
5449 
5450 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5451 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5452 		struct dbg_attn_reg_result *reg_result;
5453 		u32 sts_addr, sts_val;
5454 		u16 modes_buf_offset;
5455 		bool eval_mode;
5456 
5457 		/* Check mode */
5458 		eval_mode = GET_FIELD(reg_data->mode.data,
5459 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5460 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5461 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5462 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5463 			continue;
5464 
5465 		/* Mode match - read attention status register */
5466 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5467 					   reg_data->sts_clr_address :
5468 					   GET_FIELD(reg_data->data,
5469 						     DBG_ATTN_REG_STS_ADDRESS));
5470 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5471 		if (!sts_val)
5472 			continue;
5473 
5474 		/* Non-zero attention status - add to results */
5475 		reg_result = &results->reg_results[num_result_regs];
5476 		SET_FIELD(reg_result->data,
5477 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5478 		SET_FIELD(reg_result->data,
5479 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5480 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5481 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5482 		reg_result->sts_val = sts_val;
5483 		reg_result->mask_val = qed_rd(p_hwfn,
5484 					      p_ptt,
5485 					      DWORDS_TO_BYTES
5486 					      (reg_data->mask_address));
5487 		num_result_regs++;
5488 	}
5489 
5490 	results->block_id = (u8)block_id;
5491 	results->names_offset =
5492 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5493 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5494 	SET_FIELD(results->data,
5495 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5496 
5497 	return DBG_STATUS_OK;
5498 }
5499 
5500 /******************************* Data Types **********************************/
5501 
5502 struct block_info {
5503 	const char *name;
5504 	enum block_id id;
5505 };
5506 
5507 struct mcp_trace_format {
5508 	u32 data;
5509 #define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
5510 #define MCP_TRACE_FORMAT_MODULE_SHIFT	0
5511 #define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
5512 #define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
5513 #define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
5514 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
5515 #define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
5516 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
5517 #define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
5518 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
5519 #define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
5520 #define MCP_TRACE_FORMAT_LEN_SHIFT	24
5521 
5522 	char *format_str;
5523 };
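
/* The format entry's 'data' dword packs the module index, trace level, the
 * sizes of up to three parameters and the format string length, using the
 * mask/shift pairs above, e.g. the format string length is
 * (data & MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT.
 */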
5524 
5525 /* Meta data structure, generated by a perl script during MFW build. Therefore,
5526  * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
5527  * script.
5528  */
5529 struct mcp_trace_meta {
5530 	u32 modules_num;
5531 	char **modules;
5532 	u32 formats_num;
5533 	struct mcp_trace_format *formats;
5534 };
5535 
5536 /* REG fifo element */
5537 struct reg_fifo_element {
5538 	u64 data;
5539 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5540 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5541 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5542 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5543 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5544 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5545 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5546 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5547 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5548 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5549 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5550 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5551 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5552 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5553 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5554 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5555 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5556 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5557 };
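
/* The ADDRESS field above is stored in units of REG_FIFO_ELEMENT_ADDR_FACTOR
 * bytes, and a VF field value of REG_FIFO_ELEMENT_IS_PF_VF_VAL means the
 * element does not belong to a VF (see qed_parse_reg_fifo_dump() below).
 */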
5558 
5559 /* IGU fifo element */
5560 struct igu_fifo_element {
5561 	u32 dword0;
5562 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5563 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5564 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5565 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5566 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5567 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5568 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5569 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5570 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5571 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5572 	u32 dword1;
5573 	u32 dword2;
5574 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5575 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5576 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5577 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5578 	u32 reserved;
5579 };
5580 
5581 struct igu_fifo_wr_data {
5582 	u32 data;
5583 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5584 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5585 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5586 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5587 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5588 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5589 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5590 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5591 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5592 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5593 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5594 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5595 };
5596 
5597 struct igu_fifo_cleanup_wr_data {
5598 	u32 data;
5599 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5600 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5601 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5602 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5603 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5604 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5605 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5606 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5607 };
5608 
5609 /* Protection override element */
5610 struct protection_override_element {
5611 	u64 data;
5612 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5613 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5614 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5615 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5616 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5617 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5618 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5619 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5620 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5621 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5622 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5623 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5624 };
5625 
5626 enum igu_fifo_sources {
5627 	IGU_SRC_PXP0,
5628 	IGU_SRC_PXP1,
5629 	IGU_SRC_PXP2,
5630 	IGU_SRC_PXP3,
5631 	IGU_SRC_PXP4,
5632 	IGU_SRC_PXP5,
5633 	IGU_SRC_PXP6,
5634 	IGU_SRC_PXP7,
5635 	IGU_SRC_CAU,
5636 	IGU_SRC_ATTN,
5637 	IGU_SRC_GRC
5638 };
5639 
5640 enum igu_fifo_addr_types {
5641 	IGU_ADDR_TYPE_MSIX_MEM,
5642 	IGU_ADDR_TYPE_WRITE_PBA,
5643 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5644 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5645 	IGU_ADDR_TYPE_READ_INT,
5646 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5647 	IGU_ADDR_TYPE_RESERVED
5648 };
5649 
5650 struct igu_fifo_addr_data {
5651 	u16 start_addr;
5652 	u16 end_addr;
5653 	char *desc;
5654 	char *vf_desc;
5655 	enum igu_fifo_addr_types type;
5656 };
5657 
5658 /******************************** Constants **********************************/
5659 
5660 #define MAX_MSG_LEN				1024
5661 
5662 #define MCP_TRACE_MAX_MODULE_LEN		8
5663 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5664 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5665 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
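
/* MCP_TRACE_FORMAT_PARAM_WIDTH evaluates to 20 - 18 = 2, i.e. each parameter
 * size field in a format entry's 'data' dword is 2 bits wide.
 */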
5666 
5667 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5668 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5669 
5670 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5671 
5672 /***************************** Constant Arrays *******************************/
5673 
5674 struct user_dbg_array {
5675 	const u32 *ptr;
5676 	u32 size_in_dwords;
5677 };
5678 
5679 /* Debug arrays */
5680 static struct user_dbg_array
5681 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5682 
5683 /* Block names array */
5684 static struct block_info s_block_info_arr[] = {
5685 	{"grc", BLOCK_GRC},
5686 	{"miscs", BLOCK_MISCS},
5687 	{"misc", BLOCK_MISC},
5688 	{"dbu", BLOCK_DBU},
5689 	{"pglue_b", BLOCK_PGLUE_B},
5690 	{"cnig", BLOCK_CNIG},
5691 	{"cpmu", BLOCK_CPMU},
5692 	{"ncsi", BLOCK_NCSI},
5693 	{"opte", BLOCK_OPTE},
5694 	{"bmb", BLOCK_BMB},
5695 	{"pcie", BLOCK_PCIE},
5696 	{"mcp", BLOCK_MCP},
5697 	{"mcp2", BLOCK_MCP2},
5698 	{"pswhst", BLOCK_PSWHST},
5699 	{"pswhst2", BLOCK_PSWHST2},
5700 	{"pswrd", BLOCK_PSWRD},
5701 	{"pswrd2", BLOCK_PSWRD2},
5702 	{"pswwr", BLOCK_PSWWR},
5703 	{"pswwr2", BLOCK_PSWWR2},
5704 	{"pswrq", BLOCK_PSWRQ},
5705 	{"pswrq2", BLOCK_PSWRQ2},
5706 	{"pglcs", BLOCK_PGLCS},
5707 	{"ptu", BLOCK_PTU},
5708 	{"dmae", BLOCK_DMAE},
5709 	{"tcm", BLOCK_TCM},
5710 	{"mcm", BLOCK_MCM},
5711 	{"ucm", BLOCK_UCM},
5712 	{"xcm", BLOCK_XCM},
5713 	{"ycm", BLOCK_YCM},
5714 	{"pcm", BLOCK_PCM},
5715 	{"qm", BLOCK_QM},
5716 	{"tm", BLOCK_TM},
5717 	{"dorq", BLOCK_DORQ},
5718 	{"brb", BLOCK_BRB},
5719 	{"src", BLOCK_SRC},
5720 	{"prs", BLOCK_PRS},
5721 	{"tsdm", BLOCK_TSDM},
5722 	{"msdm", BLOCK_MSDM},
5723 	{"usdm", BLOCK_USDM},
5724 	{"xsdm", BLOCK_XSDM},
5725 	{"ysdm", BLOCK_YSDM},
5726 	{"psdm", BLOCK_PSDM},
5727 	{"tsem", BLOCK_TSEM},
5728 	{"msem", BLOCK_MSEM},
5729 	{"usem", BLOCK_USEM},
5730 	{"xsem", BLOCK_XSEM},
5731 	{"ysem", BLOCK_YSEM},
5732 	{"psem", BLOCK_PSEM},
5733 	{"rss", BLOCK_RSS},
5734 	{"tmld", BLOCK_TMLD},
5735 	{"muld", BLOCK_MULD},
5736 	{"yuld", BLOCK_YULD},
5737 	{"xyld", BLOCK_XYLD},
5738 	{"ptld", BLOCK_PTLD},
5739 	{"ypld", BLOCK_YPLD},
5740 	{"prm", BLOCK_PRM},
5741 	{"pbf_pb1", BLOCK_PBF_PB1},
5742 	{"pbf_pb2", BLOCK_PBF_PB2},
5743 	{"rpb", BLOCK_RPB},
5744 	{"btb", BLOCK_BTB},
5745 	{"pbf", BLOCK_PBF},
5746 	{"rdif", BLOCK_RDIF},
5747 	{"tdif", BLOCK_TDIF},
5748 	{"cdu", BLOCK_CDU},
5749 	{"ccfc", BLOCK_CCFC},
5750 	{"tcfc", BLOCK_TCFC},
5751 	{"igu", BLOCK_IGU},
5752 	{"cau", BLOCK_CAU},
5753 	{"rgfs", BLOCK_RGFS},
5754 	{"rgsrc", BLOCK_RGSRC},
5755 	{"tgfs", BLOCK_TGFS},
5756 	{"tgsrc", BLOCK_TGSRC},
5757 	{"umac", BLOCK_UMAC},
5758 	{"xmac", BLOCK_XMAC},
5759 	{"dbg", BLOCK_DBG},
5760 	{"nig", BLOCK_NIG},
5761 	{"wol", BLOCK_WOL},
5762 	{"bmbn", BLOCK_BMBN},
5763 	{"ipc", BLOCK_IPC},
5764 	{"nwm", BLOCK_NWM},
5765 	{"nws", BLOCK_NWS},
5766 	{"ms", BLOCK_MS},
5767 	{"phy_pcie", BLOCK_PHY_PCIE},
5768 	{"led", BLOCK_LED},
5769 	{"avs_wrap", BLOCK_AVS_WRAP},
5770 	{"pxpreqbus", BLOCK_PXPREQBUS},
5771 	{"misc_aeu", BLOCK_MISC_AEU},
5772 	{"bar0_map", BLOCK_BAR0_MAP}
5773 };
5774 
5775 /* Status string array */
5776 static const char * const s_status_str[] = {
5777 	/* DBG_STATUS_OK */
5778 	"Operation completed successfully",
5779 
5780 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5781 	"Debug application version wasn't set",
5782 
5783 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5784 	"Unsupported debug application version",
5785 
5786 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5787 	"The debug block wasn't reset since the last recording",
5788 
5789 	/* DBG_STATUS_INVALID_ARGS */
5790 	"Invalid arguments",
5791 
5792 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5793 	"The debug output was already set",
5794 
5795 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5796 	"Invalid PCI buffer size",
5797 
5798 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5799 	"PCI buffer allocation failed",
5800 
5801 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5802 	"A PCI buffer wasn't allocated",
5803 
5804 	/* DBG_STATUS_TOO_MANY_INPUTS */
5805 	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5806 
5807 	/* DBG_STATUS_INPUT_OVERLAP */
5808 	"Overlapping debug bus inputs",
5809 
5810 	/* DBG_STATUS_HW_ONLY_RECORDING */
5811 	"Cannot record Storm data since the entire recording cycle is used by HW",
5812 
5813 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5814 	"The Storm was already enabled",
5815 
5816 	/* DBG_STATUS_STORM_NOT_ENABLED */
5817 	"The specified Storm wasn't enabled",
5818 
5819 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5820 	"The block was already enabled",
5821 
5822 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5823 	"The specified block wasn't enabled",
5824 
5825 	/* DBG_STATUS_NO_INPUT_ENABLED */
5826 	"No input was enabled for recording",
5827 
5828 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5829 	"Filters and triggers are not allowed when recording in 64b units",
5830 
5831 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5832 	"The filter was already enabled",
5833 
5834 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5835 	"The trigger was already enabled",
5836 
5837 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5838 	"The trigger wasn't enabled",
5839 
5840 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5841 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5842 
5843 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5844 	"Cannot add more than 3 trigger states",
5845 
5846 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5847 	"Cannot add more than 4 constraints per filter or trigger state",
5848 
5849 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5850 	"The recording wasn't started",
5851 
5852 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5853 	"A trigger was configured, but it didn't trigger",
5854 
5855 	/* DBG_STATUS_NO_DATA_RECORDED */
5856 	"No data was recorded",
5857 
5858 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5859 	"Dump buffer is too small",
5860 
5861 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5862 	"Dumped data is not aligned to chunks",
5863 
5864 	/* DBG_STATUS_UNKNOWN_CHIP */
5865 	"Unknown chip",
5866 
5867 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5868 	"Failed allocating virtual memory",
5869 
5870 	/* DBG_STATUS_BLOCK_IN_RESET */
5871 	"The input block is in reset",
5872 
5873 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5874 	"Invalid MCP trace signature found in NVRAM",
5875 
5876 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5877 	"Invalid bundle ID found in NVRAM",
5878 
5879 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5880 	"Failed getting NVRAM image",
5881 
5882 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5883 	"NVRAM image is not dword-aligned",
5884 
5885 	/* DBG_STATUS_NVRAM_READ_FAILED */
5886 	"Failed reading from NVRAM",
5887 
5888 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5889 	"Idle check parsing failed",
5890 
5891 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5892 	"MCP Trace data is corrupt",
5893 
5894 	/* DBG_STATUS_MCP_TRACE_NO_META */
5895 	"Dump doesn't contain meta data - it must be provided in image file",
5896 
5897 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5898 	"Failed to halt MCP",
5899 
5900 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5901 	"Failed to resume MCP after halt",
5902 
5903 	/* DBG_STATUS_RESERVED2 */
5904 	"Reserved debug status - shouldn't be returned",
5905 
5906 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5907 	"Failed to empty SEMI sync FIFO",
5908 
5909 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5910 	"IGU FIFO data is corrupt",
5911 
5912 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5913 	"MCP failed to mask parities",
5914 
5915 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5916 	"FW Asserts parsing failed",
5917 
5918 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5919 	"GRC FIFO data is corrupt",
5920 
5921 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5922 	"Protection Override data is corrupt",
5923 
5924 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5925 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5926 
5927 	/* DBG_STATUS_FILTER_BUG */
5928 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5929 
5930 	/* DBG_STATUS_NON_MATCHING_LINES */
5931 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5932 
5933 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5934 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5935 
5936 	/* DBG_STATUS_DBG_BUS_IN_USE */
5937 	"The debug bus is in use"
5938 };
5939 
5940 /* Idle check severity names array */
5941 static const char * const s_idle_chk_severity_str[] = {
5942 	"Error",
5943 	"Error if no traffic",
5944 	"Warning"
5945 };
5946 
5947 /* MCP Trace level names array */
5948 static const char * const s_mcp_trace_level_str[] = {
5949 	"ERROR",
5950 	"TRACE",
5951 	"DEBUG"
5952 };
5953 
5954 /* Access type names array */
5955 static const char * const s_access_strs[] = {
5956 	"read",
5957 	"write"
5958 };
5959 
5960 /* Privilege type names array */
5961 static const char * const s_privilege_strs[] = {
5962 	"VF",
5963 	"PDA",
5964 	"HV",
5965 	"UA"
5966 };
5967 
5968 /* Protection type names array */
5969 static const char * const s_protection_strs[] = {
5970 	"(default)",
5971 	"(default)",
5972 	"(default)",
5973 	"(default)",
5974 	"override VF",
5975 	"override PDA",
5976 	"override HV",
5977 	"override UA"
5978 };
5979 
5980 /* Master type names array */
5981 static const char * const s_master_strs[] = {
5982 	"???",
5983 	"pxp",
5984 	"mcp",
5985 	"msdm",
5986 	"psdm",
5987 	"ysdm",
5988 	"usdm",
5989 	"tsdm",
5990 	"xsdm",
5991 	"dbu",
5992 	"dmae",
5993 	"???",
5994 	"???",
5995 	"???",
5996 	"???",
5997 	"???"
5998 };
5999 
6000 /* REG FIFO error messages array */
6001 static const char * const s_reg_fifo_error_strs[] = {
6002 	"grc timeout",
6003 	"address doesn't belong to any block",
6004 	"reserved address in block or write to read-only address",
6005 	"privilege/protection mismatch",
6006 	"path isolation error"
6007 };
6008 
6009 /* IGU FIFO sources array */
6010 static const char * const s_igu_fifo_source_strs[] = {
6011 	"TSTORM",
6012 	"MSTORM",
6013 	"USTORM",
6014 	"XSTORM",
6015 	"YSTORM",
6016 	"PSTORM",
6017 	"PCIE",
6018 	"NIG_QM_PBF",
6019 	"CAU",
6020 	"ATTN",
6021 	"GRC",
6022 };
6023 
6024 /* IGU FIFO error messages */
6025 static const char * const s_igu_fifo_error_strs[] = {
6026 	"no error",
6027 	"length error",
6028 	"function disabled",
6029 	"VF sent command to attention address",
6030 	"host sent prod update command",
6031 	"read of interrupt register while in MIMD mode",
6032 	"access to PXP BAR reserved address",
6033 	"producer update command to attention index",
6034 	"unknown error",
6035 	"SB index not valid",
6036 	"SB relative index and FID not found",
6037 	"FID mismatch",
6038 	"command with error flag asserted (PCI error or CAU discard)",
6039 	"VF sent cleanup and RF cleanup is disabled",
6040 	"cleanup command on type bigger than 4"
6041 };
6042 
6043 /* IGU FIFO address data */
6044 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6045 	{0x0, 0x101, "MSI-X Memory", NULL,
6046 	 IGU_ADDR_TYPE_MSIX_MEM},
6047 	{0x102, 0x1ff, "reserved", NULL,
6048 	 IGU_ADDR_TYPE_RESERVED},
6049 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6050 	 IGU_ADDR_TYPE_WRITE_PBA},
6051 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6052 	 IGU_ADDR_TYPE_WRITE_PBA},
6053 	{0x202, 0x202, "Write PBA[128]", "reserved",
6054 	 IGU_ADDR_TYPE_WRITE_PBA},
6055 	{0x203, 0x3ff, "reserved", NULL,
6056 	 IGU_ADDR_TYPE_RESERVED},
6057 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6058 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6059 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6060 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6061 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6062 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6063 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6064 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6065 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6066 	 IGU_ADDR_TYPE_READ_INT},
6067 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6068 	 IGU_ADDR_TYPE_READ_INT},
6069 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6070 	 IGU_ADDR_TYPE_READ_INT},
6071 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6072 	 IGU_ADDR_TYPE_READ_INT},
6073 	{0x5f7, 0x5ff, "reserved", NULL,
6074 	 IGU_ADDR_TYPE_RESERVED},
6075 	{0x600, 0x7ff, "Producer update", NULL,
6076 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6077 };
6078 
6079 /******************************** Variables **********************************/
6080 
6081 /* MCP Trace meta data array - used in case the dump doesn't contain the
6082  * meta data (e.g. due to no NVRAM access).
6083  */
6084 static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
6085 
6086 /* Parsed MCP Trace meta data info, based on MCP trace meta array */
6087 static struct mcp_trace_meta s_mcp_trace_meta;
6088 static bool s_mcp_trace_meta_valid;
6089 
6090 /* Temporary buffer, used for print size calculations */
6091 static char s_temp_buf[MAX_MSG_LEN];
6092 
6093 /**************************** Private Functions ******************************/
6094 
6095 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6096 {
6097 	return (a + b) % size;
6098 }
6099 
6100 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6101 {
6102 	return (size + a - b) % size;
6103 }
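
/* For example, with a 16-byte cyclic buffer, qed_cyclic_add(15, 3, 16) == 2
 * and qed_cyclic_sub(2, 5, 16) == 13.
 */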
6104 
6105 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6106  * bytes) and returns them as a dword value. The specified buffer offset is
6107  * updated.
6108  */
6109 static u32 qed_read_from_cyclic_buf(void *buf,
6110 				    u32 *offset,
6111 				    u32 buf_size, u8 num_bytes_to_read)
6112 {
6113 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6114 	u32 val = 0;
6115 
6116 	val_ptr = (u8 *)&val;
6117 
6118 	/* Assume running on a LITTLE ENDIAN host and a buffer in network order
6119 	 * (BIG ENDIAN), i.e. high-order bytes are placed at lower memory addresses.
6120 	 */
6121 	for (i = 0; i < num_bytes_to_read; i++) {
6122 		val_ptr[i] = bytes_buf[*offset];
6123 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6124 	}
6125 
6126 	return val;
6127 }
6128 
6129 /* Reads and returns the next byte from the specified buffer.
6130  * The specified buffer offset is updated.
6131  */
6132 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6133 {
6134 	return ((u8 *)buf)[(*offset)++];
6135 }
6136 
6137 /* Reads and returns the next dword from the specified buffer.
6138  * The specified buffer offset is updated.
6139  */
6140 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6141 {
6142 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6143 
6144 	*offset += 4;
6145 
6146 	return dword_val;
6147 }
6148 
6149 /* Reads the next string from the specified buffer, and copies it to the
6150  * specified pointer. The specified buffer offset is updated.
6151  */
6152 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6153 {
6154 	const char *source_str = &((const char *)buf)[*offset];
6155 
6156 	strncpy(dest, source_str, size);
6157 	dest[size - 1] = '\0';
6158 	*offset += size;
6159 }
6160 
6161 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6162  * If the specified buffer is NULL, a temporary buffer pointer is returned.
6163  */
6164 static char *qed_get_buf_ptr(void *buf, u32 offset)
6165 {
6166 	return buf ? (char *)buf + offset : s_temp_buf;
6167 }
6168 
6169 /* Reads a param from the specified buffer. Returns the number of dwords read.
6170  * If the returned param_str_val is NULL, the param is numeric and its value
6171  * is returned in param_num_val. Otherwise, the param is a string and its
6172  * pointer is returned in param_str_val.
6173  */
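/* Param encoding (as parsed below): a NUL-terminated name string, a type
 * byte, then either a NUL-terminated string value (type byte != 0) or a
 * dword-aligned u32 value (type byte == 0). For example, a numeric param
 * named "size" occupies 3 dwords: 's' 'i' 'z' 'e' '\0', a zero type byte,
 * 2 padding bytes, then the u32 value.
 */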
6174 static u32 qed_read_param(u32 *dump_buf,
6175 			  const char **param_name,
6176 			  const char **param_str_val, u32 *param_num_val)
6177 {
6178 	char *char_buf = (char *)dump_buf;
6179 	size_t offset = 0;
6180 
6181 	/* Extract param name */
6182 	*param_name = char_buf;
6183 	offset += strlen(*param_name) + 1;
6184 
6185 	/* Check param type */
6186 	if (*(char_buf + offset++)) {
6187 		/* String param */
6188 		*param_str_val = char_buf + offset;
6189 		*param_num_val = 0;
6190 		offset += strlen(*param_str_val) + 1;
6191 		if (offset & 0x3)
6192 			offset += (4 - (offset & 0x3));
6193 	} else {
6194 		/* Numeric param */
6195 		*param_str_val = NULL;
6196 		if (offset & 0x3)
6197 			offset += (4 - (offset & 0x3));
6198 		*param_num_val = *(u32 *)(char_buf + offset);
6199 		offset += 4;
6200 	}
6201 
6202 	return (u32)offset / 4;
6203 }
6204 
6205 /* Reads a section header from the specified buffer.
6206  * Returns the number of dwords read.
6207  */
6208 static u32 qed_read_section_hdr(u32 *dump_buf,
6209 				const char **section_name,
6210 				u32 *num_section_params)
6211 {
6212 	const char *param_str_val;
6213 
6214 	return qed_read_param(dump_buf,
6215 			      section_name, &param_str_val, num_section_params);
6216 }
6217 
6218 /* Reads section params from the specified buffer and prints them to the results
6219  * buffer. Returns the number of dwords read.
6220  */
6221 static u32 qed_print_section_params(u32 *dump_buf,
6222 				    u32 num_section_params,
6223 				    char *results_buf, u32 *num_chars_printed)
6224 {
6225 	u32 i, dump_offset = 0, results_offset = 0;
6226 
6227 	for (i = 0; i < num_section_params; i++) {
6228 		const char *param_name, *param_str_val;
6229 		u32 param_num_val = 0;
6230 
6231 		dump_offset += qed_read_param(dump_buf + dump_offset,
6232 					      &param_name,
6233 					      &param_str_val, &param_num_val);
6234 
6235 		if (param_str_val)
6236 			results_offset +=
6237 				sprintf(qed_get_buf_ptr(results_buf,
6238 							results_offset),
6239 					"%s: %s\n", param_name, param_str_val);
6240 		else if (strcmp(param_name, "fw-timestamp"))
6241 			results_offset +=
6242 				sprintf(qed_get_buf_ptr(results_buf,
6243 							results_offset),
6244 					"%s: %d\n", param_name, param_num_val);
6245 	}
6246 
6247 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6248 				  "\n");
6249 
6250 	*num_chars_printed = results_offset;
6251 
6252 	return dump_offset;
6253 }
6254 
6255 /* Parses the idle check rules and returns the number of characters printed.
6256  * In case of parsing error, returns 0.
6257  */
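/* Each dumped rule consists of a dbg_idle_chk_result_hdr, followed by a
 * dbg_idle_chk_result_reg_hdr plus the register values for every dumped
 * condition/info register. The rule's strings (LSI message, optional FW
 * message and register names) are read from BIN_BUF_DBG_PARSING_STRINGS,
 * at the offset given by the rule's BIN_BUF_DBG_IDLE_CHK_PARSING_DATA entry.
 */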
6258 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6259 					 u32 *dump_buf_end,
6260 					 u32 num_rules,
6261 					 bool print_fw_idle_chk,
6262 					 char *results_buf,
6263 					 u32 *num_errors, u32 *num_warnings)
6264 {
6265 	/* Offset in results_buf in bytes */
6266 	u32 results_offset = 0;
6267 
6268 	u32 rule_idx;
6269 	u16 i, j;
6270 
6271 	*num_errors = 0;
6272 	*num_warnings = 0;
6273 
6274 	/* Go over dumped results */
6275 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6276 	     rule_idx++) {
6277 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6278 		struct dbg_idle_chk_result_hdr *hdr;
6279 		const char *parsing_str, *lsi_msg;
6280 		u32 parsing_str_offset;
6281 		bool has_fw_msg;
6282 		u8 curr_reg_id;
6283 
6284 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6285 		rule_parsing_data =
6286 			(const struct dbg_idle_chk_rule_parsing_data *)
6287 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6288 			ptr[hdr->rule_id];
6289 		parsing_str_offset =
6290 			GET_FIELD(rule_parsing_data->data,
6291 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6292 		has_fw_msg =
6293 			GET_FIELD(rule_parsing_data->data,
6294 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6295 		parsing_str =
6296 			&((const char *)
6297 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6298 			[parsing_str_offset];
6299 		lsi_msg = parsing_str;
6300 		curr_reg_id = 0;
6301 
6302 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6303 			return 0;
6304 
6305 		/* Skip rule header */
6306 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6307 
6308 		/* Update errors/warnings count */
6309 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6310 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6311 			(*num_errors)++;
6312 		else
6313 			(*num_warnings)++;
6314 
6315 		/* Print rule severity */
6316 		results_offset +=
6317 		    sprintf(qed_get_buf_ptr(results_buf,
6318 					    results_offset), "%s: ",
6319 			    s_idle_chk_severity_str[hdr->severity]);
6320 
6321 		/* Print rule message */
6322 		if (has_fw_msg)
6323 			parsing_str += strlen(parsing_str) + 1;
6324 		results_offset +=
6325 		    sprintf(qed_get_buf_ptr(results_buf,
6326 					    results_offset), "%s.",
6327 			    has_fw_msg &&
6328 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6329 		parsing_str += strlen(parsing_str) + 1;
6330 
6331 		/* Print register values */
6332 		results_offset +=
6333 		    sprintf(qed_get_buf_ptr(results_buf,
6334 					    results_offset), " Registers:");
6335 		for (i = 0;
6336 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6337 		     i++) {
6338 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6339 			bool is_mem;
6340 			u8 reg_id;
6341 
6342 			reg_hdr =
6343 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6344 			is_mem = GET_FIELD(reg_hdr->data,
6345 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6346 			reg_id = GET_FIELD(reg_hdr->data,
6347 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6348 
6349 			/* Skip reg header */
6350 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6351 
6352 			/* Skip register names until the required reg_id is
6353 			 * reached.
6354 			 */
6355 			for (; reg_id > curr_reg_id;
6356 			     curr_reg_id++,
6357 			     parsing_str += strlen(parsing_str) + 1);
6358 
6359 			results_offset +=
6360 			    sprintf(qed_get_buf_ptr(results_buf,
6361 						    results_offset), " %s",
6362 				    parsing_str);
6363 			if (i < hdr->num_dumped_cond_regs && is_mem)
6364 				results_offset +=
6365 				    sprintf(qed_get_buf_ptr(results_buf,
6366 							    results_offset),
6367 					    "[%d]", hdr->mem_entry_id +
6368 					    reg_hdr->start_entry);
6369 			results_offset +=
6370 			    sprintf(qed_get_buf_ptr(results_buf,
6371 						    results_offset), "=");
6372 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6373 				results_offset +=
6374 				    sprintf(qed_get_buf_ptr(results_buf,
6375 							    results_offset),
6376 					    "0x%x", *dump_buf);
6377 				if (j < reg_hdr->size - 1)
6378 					results_offset +=
6379 					    sprintf(qed_get_buf_ptr
6380 						    (results_buf,
6381 						     results_offset), ",");
6382 			}
6383 		}
6384 
6385 		results_offset +=
6386 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6387 	}
6388 
6389 	/* Check if end of dump buffer was exceeded */
6390 	if (dump_buf > dump_buf_end)
6391 		return 0;
6392 
6393 	return results_offset;
6394 }
6395 
6396 /* Parses an idle check dump buffer.
6397  * If results_buf is not NULL, the idle check results are printed to it.
6398  * In any case, the required results buffer size is assigned to
6399  * parsed_results_bytes.
6400  * The parsing status is returned.
6401  */
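/* The dump is expected to contain a "global_params" section followed by an
 * "idle_chk" section with a single "num_rules" param. The dumped rules are
 * parsed twice: once printing the FW messages (FW_IDLE_CHECK) and once
 * printing the LSI messages (LSI_IDLE_CHECK).
 */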
6402 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6403 					       u32 num_dumped_dwords,
6404 					       char *results_buf,
6405 					       u32 *parsed_results_bytes,
6406 					       u32 *num_errors,
6407 					       u32 *num_warnings)
6408 {
6409 	const char *section_name, *param_name, *param_str_val;
6410 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6411 	u32 num_section_params = 0, num_rules;
6412 
6413 	/* Offset in results_buf in bytes */
6414 	u32 results_offset = 0;
6415 
6416 	*parsed_results_bytes = 0;
6417 	*num_errors = 0;
6418 	*num_warnings = 0;
6419 
6420 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6421 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6422 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6423 
6424 	/* Read global_params section */
6425 	dump_buf += qed_read_section_hdr(dump_buf,
6426 					 &section_name, &num_section_params);
6427 	if (strcmp(section_name, "global_params"))
6428 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6429 
6430 	/* Print global params */
6431 	dump_buf += qed_print_section_params(dump_buf,
6432 					     num_section_params,
6433 					     results_buf, &results_offset);
6434 
6435 	/* Read idle_chk section */
6436 	dump_buf += qed_read_section_hdr(dump_buf,
6437 					 &section_name, &num_section_params);
6438 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6439 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6440 	dump_buf += qed_read_param(dump_buf,
6441 				   &param_name, &param_str_val, &num_rules);
6442 	if (strcmp(param_name, "num_rules"))
6443 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6444 
6445 	if (num_rules) {
6446 		u32 rules_print_size;
6447 
6448 		/* Print FW output */
6449 		results_offset +=
6450 		    sprintf(qed_get_buf_ptr(results_buf,
6451 					    results_offset),
6452 			    "FW_IDLE_CHECK:\n");
6453 		rules_print_size =
6454 			qed_parse_idle_chk_dump_rules(dump_buf,
6455 						      dump_buf_end,
6456 						      num_rules,
6457 						      true,
6458 						      results_buf ?
6459 						      results_buf +
6460 						      results_offset :
6461 						      NULL,
6462 						      num_errors,
6463 						      num_warnings);
6464 		results_offset += rules_print_size;
6465 		if (!rules_print_size)
6466 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6467 
6468 		/* Print LSI output */
6469 		results_offset +=
6470 		    sprintf(qed_get_buf_ptr(results_buf,
6471 					    results_offset),
6472 			    "\nLSI_IDLE_CHECK:\n");
6473 		rules_print_size =
6474 			qed_parse_idle_chk_dump_rules(dump_buf,
6475 						      dump_buf_end,
6476 						      num_rules,
6477 						      false,
6478 						      results_buf ?
6479 						      results_buf +
6480 						      results_offset :
6481 						      NULL,
6482 						      num_errors,
6483 						      num_warnings);
6484 		results_offset += rules_print_size;
6485 		if (!rules_print_size)
6486 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6487 	}
6488 
6489 	/* Print errors/warnings count */
6490 	if (*num_errors)
6491 		results_offset +=
6492 		    sprintf(qed_get_buf_ptr(results_buf,
6493 					    results_offset),
6494 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6495 			    *num_errors, *num_warnings);
6496 	else if (*num_warnings)
6497 		results_offset +=
6498 		    sprintf(qed_get_buf_ptr(results_buf,
6499 					    results_offset),
6500 			    "\nIdle Check completed successfully (with %d warnings)\n",
6501 			    *num_warnings);
6502 	else
6503 		results_offset +=
6504 		    sprintf(qed_get_buf_ptr(results_buf,
6505 					    results_offset),
6506 			    "\nIdle Check completed successfully\n");
6507 
6508 	/* Add 1 for string NULL termination */
6509 	*parsed_results_bytes = results_offset + 1;
6510 
6511 	return DBG_STATUS_OK;
6512 }
6513 
6514 /* Frees the specified MCP Trace meta data */
6515 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6516 				    struct mcp_trace_meta *meta)
6517 {
6518 	u32 i;
6519 
6520 	s_mcp_trace_meta_valid = false;
6521 
6522 	/* Release modules */
6523 	if (meta->modules) {
6524 		for (i = 0; i < meta->modules_num; i++)
6525 			kfree(meta->modules[i]);
6526 		kfree(meta->modules);
6527 	}
6528 
6529 	/* Release formats */
6530 	if (meta->formats) {
6531 		for (i = 0; i < meta->formats_num; i++)
6532 			kfree(meta->formats[i].format_str);
6533 		kfree(meta->formats);
6534 	}
6535 }
6536 
6537 /* Allocates and fills MCP Trace meta data based on the specified meta data
6538  * dump buffer.
6539  * Returns debug status code.
6540  */
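/* Meta buffer layout, as parsed below: a signature dword (NVM_MAGIC_VALUE),
 * a module-count byte followed by length-prefixed module name strings, a
 * second signature dword, a format-count dword, and then one entry per
 * format: a 'data' dword followed by its format string.
 */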
6541 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6542 						const u32 *meta_buf,
6543 						struct mcp_trace_meta *meta)
6544 {
6545 	u8 *meta_buf_bytes = (u8 *)meta_buf;
6546 	u32 offset = 0, signature, i;
6547 
6548 	/* Free the previous meta before loading a new one. */
6549 	if (s_mcp_trace_meta_valid)
6550 		qed_mcp_trace_free_meta(p_hwfn, meta);
6551 
6552 	memset(meta, 0, sizeof(*meta));
6553 
6554 	/* Read first signature */
6555 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6556 	if (signature != NVM_MAGIC_VALUE)
6557 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6558 
6559 	/* Read no. of modules and allocate memory for their pointers */
6560 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6561 	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6562 	if (!meta->modules)
6563 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6564 
6565 	/* Allocate and read all module strings */
6566 	for (i = 0; i < meta->modules_num; i++) {
6567 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6568 
6569 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6570 		if (!(*(meta->modules + i))) {
6571 			/* Update number of modules to be released */
6572 			meta->modules_num = i;
6573 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6574 		}
6575 
6576 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6577 				      *(meta->modules + i));
6578 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6579 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6580 	}
6581 
6582 	/* Read second signature */
6583 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6584 	if (signature != NVM_MAGIC_VALUE)
6585 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6586 
6587 	/* Read number of formats and allocate memory for all formats */
6588 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6589 	meta->formats = kzalloc(meta->formats_num *
6590 				sizeof(struct mcp_trace_format),
6591 				GFP_KERNEL);
6592 	if (!meta->formats)
6593 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6594 
6595 	/* Allocate and read all strings */
6596 	for (i = 0; i < meta->formats_num; i++) {
6597 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6598 		u8 format_len;
6599 
6600 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6601 							   &offset);
6602 		format_len =
6603 		    (format_ptr->data &
6604 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6605 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6606 		if (!format_ptr->format_str) {
6607 			/* Update number of modules to be released */
6608 			meta->formats_num = i;
6609 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6610 		}
6611 
6612 		qed_read_str_from_buf(meta_buf_bytes,
6613 				      &offset,
6614 				      format_len, format_ptr->format_str);
6615 	}
6616 
6617 	s_mcp_trace_meta_valid = true;
6618 	return DBG_STATUS_OK;
6619 }
6620 
6621 /* Parses an MCP trace buffer. If parsed_buf is not NULL, the MCP Trace results
6622  * are printed to it. The parsing status is returned.
6623  * Arguments:
6624  * trace_buf - MCP trace cyclic buffer
6625  * trace_buf_size - MCP trace cyclic buffer size in bytes
6626  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6627  *               buffer.
6628  * data_size - size in bytes of data to parse.
6629  * parsed_buf - destination buffer for parsed data.
6630  * parsed_bytes - size of parsed data in bytes.
6631  */
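/* Each trace entry in the cyclic buffer starts with an MFW_TRACE_ENTRY_SIZE
 * header whose event ID field selects a format from the meta data, followed
 * by up to MCP_TRACE_FORMAT_MAX_PARAMS parameters whose sizes are encoded in
 * the format entry's 'data' dword.
 */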
6632 static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
6633 					       u32 trace_buf_size,
6634 					       u32 data_offset,
6635 					       u32 data_size,
6636 					       char *parsed_buf,
6637 					       u32 *parsed_bytes)
6638 {
6639 	u32 param_mask, param_shift;
6640 	enum dbg_status status;
6641 
6642 	*parsed_bytes = 0;
6643 
6644 	if (!s_mcp_trace_meta_valid)
6645 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6646 
6647 	status = DBG_STATUS_OK;
6648 
6649 	while (data_size) {
6650 		struct mcp_trace_format *format_ptr;
6651 		u8 format_level, format_module;
6652 		u32 params[3] = { 0, 0, 0 };
6653 		u32 header, format_idx, i;
6654 
6655 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6656 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6657 
6658 		header = qed_read_from_cyclic_buf(trace_buf,
6659 						  &data_offset,
6660 						  trace_buf_size,
6661 						  MFW_TRACE_ENTRY_SIZE);
6662 		data_size -= MFW_TRACE_ENTRY_SIZE;
6663 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6664 
6665 		/* Skip message if its index doesn't exist in the meta data */
6666 		if (format_idx >= s_mcp_trace_meta.formats_num) {
6667 			u8 format_size =
6668 				(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6669 				     MFW_TRACE_PRM_SIZE_SHIFT);
6670 
6671 			if (data_size < format_size)
6672 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6673 
6674 			data_offset = qed_cyclic_add(data_offset,
6675 						     format_size,
6676 						     trace_buf_size);
6677 			data_size -= format_size;
6678 			continue;
6679 		}
6680 
6681 		format_ptr = &s_mcp_trace_meta.formats[format_idx];
6682 
6683 		for (i = 0,
6684 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6685 		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6686 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6687 		     i++,
6688 		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6689 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6690 			/* Extract param size (0..3) */
6691 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6692 					     param_shift);
6693 
6694 			/* If the param size is zero, there are no other
6695 			 * parameters.
6696 			 */
6697 			if (!param_size)
6698 				break;
6699 
6700 			/* Size is encoded using 2 bits, where 3 is used to
6701 			 * encode 4.
6702 			 */
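			/* I.e. encoded sizes 0/1/2/3 map to 0/1/2/4 bytes. */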
6703 			if (param_size == 3)
6704 				param_size = 4;
6705 
6706 			if (data_size < param_size)
6707 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6708 
6709 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6710 							     &data_offset,
6711 							     trace_buf_size,
6712 							     param_size);
6713 			data_size -= param_size;
6714 		}
6715 
6716 		format_level = (u8)((format_ptr->data &
6717 				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
6718 				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
6719 		format_module = (u8)((format_ptr->data &
6720 				      MCP_TRACE_FORMAT_MODULE_MASK) >>
6721 				     MCP_TRACE_FORMAT_MODULE_SHIFT);
6722 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6723 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6724 
6725 		/* Print current message to results buffer */
6726 		*parsed_bytes +=
6727 			sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6728 				"%s %-8s: ",
6729 				s_mcp_trace_level_str[format_level],
6730 				s_mcp_trace_meta.modules[format_module]);
6731 		*parsed_bytes +=
6732 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6733 			    format_ptr->format_str,
6734 			    params[0], params[1], params[2]);
6735 	}
6736 
6737 	/* Add string NULL terminator */
6738 	(*parsed_bytes)++;
6739 
6740 	return status;
6741 }
6742 
6743 /* Parses an MCP Trace dump buffer.
6744  * If parsed_buf is not NULL, the MCP Trace results are printed to it.
6745  * In any case, the required results buffer size is assigned to
6746  * parsed_bytes.
6747  * The parsing status is returned.
6748  */
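/* The dump is expected to contain a "global_params" section, an
 * "mcp_trace_data" section (a "size" param, a struct mcp_trace header and the
 * cyclic trace data) and an "mcp_trace_meta" section whose "size" param may
 * be zero, in which case the meta data must be provided separately via
 * s_mcp_trace_meta_arr.
 */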
6749 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6750 						u32 *dump_buf,
6751 						char *parsed_buf,
6752 						u32 *parsed_bytes)
6753 {
6754 	const char *section_name, *param_name, *param_str_val;
6755 	u32 data_size, trace_data_dwords, trace_meta_dwords;
6756 	u32 offset, results_offset, parsed_buf_bytes;
6757 	u32 param_num_val, num_section_params;
6758 	struct mcp_trace *trace;
6759 	enum dbg_status status;
6760 	const u32 *meta_buf;
6761 	u8 *trace_buf;
6762 
6763 	*parsed_bytes = 0;
6764 
6765 	/* Read global_params section */
6766 	dump_buf += qed_read_section_hdr(dump_buf,
6767 					 &section_name, &num_section_params);
6768 	if (strcmp(section_name, "global_params"))
6769 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6770 
6771 	/* Print global params */
6772 	dump_buf += qed_print_section_params(dump_buf,
6773 					     num_section_params,
6774 					     parsed_buf, &results_offset);
6775 
6776 	/* Read trace_data section */
6777 	dump_buf += qed_read_section_hdr(dump_buf,
6778 					 &section_name, &num_section_params);
6779 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6780 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6781 	dump_buf += qed_read_param(dump_buf,
6782 				   &param_name, &param_str_val, &param_num_val);
6783 	if (strcmp(param_name, "size"))
6784 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6785 	trace_data_dwords = param_num_val;
6786 
6787 	/* Prepare trace info */
6788 	trace = (struct mcp_trace *)dump_buf;
6789 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6790 	offset = trace->trace_oldest;
6791 	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6792 	dump_buf += trace_data_dwords;
6793 
6794 	/* Read meta_data section */
6795 	dump_buf += qed_read_section_hdr(dump_buf,
6796 					 &section_name, &num_section_params);
6797 	if (strcmp(section_name, "mcp_trace_meta"))
6798 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6799 	dump_buf += qed_read_param(dump_buf,
6800 				   &param_name, &param_str_val, &param_num_val);
6801 	if (strcmp(param_name, "size"))
6802 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6803 	trace_meta_dwords = param_num_val;
6804 
6805 	/* Choose meta data buffer */
6806 	if (!trace_meta_dwords) {
6807 		/* Dump doesn't include meta data */
6808 		if (!s_mcp_trace_meta_arr.ptr)
6809 			return DBG_STATUS_MCP_TRACE_NO_META;
6810 		meta_buf = s_mcp_trace_meta_arr.ptr;
6811 	} else {
6812 		/* Dump includes meta data */
6813 		meta_buf = dump_buf;
6814 	}
6815 
6816 	/* Allocate meta data memory */
6817 	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta);
6818 	if (status != DBG_STATUS_OK)
6819 		return status;
6820 
6821 	status = qed_parse_mcp_trace_buf(trace_buf,
6822 					 trace->size,
6823 					 offset,
6824 					 data_size,
6825 					 parsed_buf ?
6826 					 parsed_buf + results_offset :
6827 					 NULL,
6828 					 &parsed_buf_bytes);
6829 	if (status != DBG_STATUS_OK)
6830 		return status;
6831 
6832 	*parsed_bytes = results_offset + parsed_buf_bytes;
6833 
6834 	return DBG_STATUS_OK;
6835 }
6836 
6837 /* Parses a Reg FIFO dump buffer.
6838  * If results_buf is not NULL, the Reg FIFO results are printed to it.
6839  * In any case, the required results buffer size is assigned to
6840  * parsed_results_bytes.
6841  * The parsing status is returned.
6842  */
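/* The dump is expected to contain a "global_params" section and a
 * "reg_fifo_data" section whose "size" param (in dwords) must be a multiple
 * of REG_FIFO_ELEMENT_DWORDS.
 */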
6843 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6844 					       char *results_buf,
6845 					       u32 *parsed_results_bytes)
6846 {
6847 	const char *section_name, *param_name, *param_str_val;
6848 	u32 param_num_val, num_section_params, num_elements;
6849 	struct reg_fifo_element *elements;
6850 	u8 i, j, err_val, vf_val;
6851 	u32 results_offset = 0;
6852 	char vf_str[4];
6853 
6854 	/* Read global_params section */
6855 	dump_buf += qed_read_section_hdr(dump_buf,
6856 					 &section_name, &num_section_params);
6857 	if (strcmp(section_name, "global_params"))
6858 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6859 
6860 	/* Print global params */
6861 	dump_buf += qed_print_section_params(dump_buf,
6862 					     num_section_params,
6863 					     results_buf, &results_offset);
6864 
6865 	/* Read reg_fifo_data section */
6866 	dump_buf += qed_read_section_hdr(dump_buf,
6867 					 &section_name, &num_section_params);
6868 	if (strcmp(section_name, "reg_fifo_data"))
6869 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6870 	dump_buf += qed_read_param(dump_buf,
6871 				   &param_name, &param_str_val, &param_num_val);
6872 	if (strcmp(param_name, "size"))
6873 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6874 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6875 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6876 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6877 	elements = (struct reg_fifo_element *)dump_buf;
6878 
6879 	/* Decode elements */
6880 	for (i = 0; i < num_elements; i++) {
6881 		bool err_printed = false;
6882 
6883 		/* Discover if element belongs to a VF or a PF */
6884 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6885 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6886 			sprintf(vf_str, "%s", "N/A");
6887 		else
6888 			sprintf(vf_str, "%d", vf_val);
6889 
6890 		/* Add parsed element to parsed buffer */
6891 		results_offset +=
6892 		    sprintf(qed_get_buf_ptr(results_buf,
6893 					    results_offset),
6894 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6895 			    elements[i].data,
6896 			    (u32)GET_FIELD(elements[i].data,
6897 					   REG_FIFO_ELEMENT_ADDRESS) *
6898 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6899 			    s_access_strs[GET_FIELD(elements[i].data,
6900 						    REG_FIFO_ELEMENT_ACCESS)],
6901 			    (u32)GET_FIELD(elements[i].data,
6902 					   REG_FIFO_ELEMENT_PF),
6903 			    vf_str,
6904 			    (u32)GET_FIELD(elements[i].data,
6905 					   REG_FIFO_ELEMENT_PORT),
6906 			    s_privilege_strs[GET_FIELD(elements[i].data,
6907 						REG_FIFO_ELEMENT_PRIVILEGE)],
6908 			    s_protection_strs[GET_FIELD(elements[i].data,
6909 						REG_FIFO_ELEMENT_PROTECTION)],
6910 			    s_master_strs[GET_FIELD(elements[i].data,
6911 						REG_FIFO_ELEMENT_MASTER)]);
6912 
6913 		/* Print errors */
6914 		for (j = 0,
6915 		     err_val = GET_FIELD(elements[i].data,
6916 					 REG_FIFO_ELEMENT_ERROR);
6917 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6918 		     j++, err_val >>= 1) {
6919 			if (err_val & 0x1) {
6920 				if (err_printed)
6921 					results_offset +=
6922 					    sprintf(qed_get_buf_ptr
6923 						    (results_buf,
6924 						     results_offset), ", ");
6925 				results_offset +=
6926 				    sprintf(qed_get_buf_ptr
6927 					    (results_buf, results_offset), "%s",
6928 					    s_reg_fifo_error_strs[j]);
6929 				err_printed = true;
6930 			}
6931 		}
6932 
6933 		results_offset +=
6934 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6935 	}
6936 
6937 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6938 						  results_offset),
6939 				  "fifo contained %d elements", num_elements);
6940 
6941 	/* Add 1 for string NULL termination */
6942 	*parsed_results_bytes = results_offset + 1;
6943 
6944 	return DBG_STATUS_OK;
6945 }
6946 
6947 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6948 						  *element,
6949 						  char *results_buf,
6950 						  u32 *results_offset)
6951 {
6952 	const struct igu_fifo_addr_data *found_addr = NULL;
6953 	u8 source, err_type, i, is_cleanup;
6954 	char parsed_addr_data[32];
6955 	char parsed_wr_data[256];
6956 	u32 wr_data, prod_cons;
6957 	bool is_wr_cmd, is_pf;
6958 	u16 cmd_addr;
6959 	u64 dword12;
6960 
6961 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6962 	 * FIFO element.
6963 	 */
6964 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
6965 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6966 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6967 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6968 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6969 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6970 
6971 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6972 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6973 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6974 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6975 
6976 	/* Find address data */
6977 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6978 		const struct igu_fifo_addr_data *curr_addr =
6979 			&s_igu_fifo_addr_data[i];
6980 
6981 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6982 		    curr_addr->end_addr)
6983 			found_addr = curr_addr;
6984 	}
6985 
6986 	if (!found_addr)
6987 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6988 
6989 	/* Prepare parsed address data */
6990 	switch (found_addr->type) {
6991 	case IGU_ADDR_TYPE_MSIX_MEM:
6992 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6993 		break;
6994 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
6995 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6996 		sprintf(parsed_addr_data,
6997 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
6998 		break;
6999 	default:
7000 		parsed_addr_data[0] = '\0';
7001 	}
7002 
7003 	if (!is_wr_cmd) {
7004 		parsed_wr_data[0] = '\0';
7005 		goto out;
7006 	}
7007 
7008 	/* Prepare parsed write data */
7009 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7010 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7011 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7012 
7013 	if (source == IGU_SRC_ATTN) {
7014 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7015 	} else {
7016 		if (is_cleanup) {
7017 			u8 cleanup_val, cleanup_type;
7018 
7019 			cleanup_val =
7020 				GET_FIELD(wr_data,
7021 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7022 			cleanup_type =
7023 			    GET_FIELD(wr_data,
7024 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7025 
7026 			sprintf(parsed_wr_data,
7027 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7028 				cleanup_val ? "set" : "clear",
7029 				cleanup_type);
7030 		} else {
7031 			u8 update_flag, en_dis_int_for_sb, segment;
7032 			u8 timer_mask;
7033 
7034 			update_flag = GET_FIELD(wr_data,
7035 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7036 			en_dis_int_for_sb =
7037 				GET_FIELD(wr_data,
7038 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7039 			segment = GET_FIELD(wr_data,
7040 					    IGU_FIFO_WR_DATA_SEGMENT);
7041 			timer_mask = GET_FIELD(wr_data,
7042 					       IGU_FIFO_WR_DATA_TIMER_MASK);
7043 
7044 			sprintf(parsed_wr_data,
7045 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7046 				prod_cons,
7047 				update_flag ? "update" : "nop",
7048 				en_dis_int_for_sb ?
7049 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7050 				"enable",
7051 				segment ? "attn" : "regular",
7052 				timer_mask);
7053 		}
7054 	}
7055 out:
7056 	/* Add parsed element to parsed buffer */
7057 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7058 						   *results_offset),
7059 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7060 				   element->dword2, element->dword1,
7061 				   element->dword0,
7062 				   is_pf ? "pf" : "vf",
7063 				   GET_FIELD(element->dword0,
7064 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7065 				   s_igu_fifo_source_strs[source],
7066 				   is_wr_cmd ? "wr" : "rd",
7067 				   cmd_addr,
7068 				   (!is_pf && found_addr->vf_desc)
7069 				   ? found_addr->vf_desc
7070 				   : found_addr->desc,
7071 				   parsed_addr_data,
7072 				   parsed_wr_data,
7073 				   s_igu_fifo_error_strs[err_type]);
7074 
7075 	return DBG_STATUS_OK;
7076 }
7077 
7078 /* Parses an IGU FIFO dump buffer.
7079  * If results_buf is not NULL, the IGU FIFO results are printed to it.
7080  * In any case, the required results buffer size is assigned to
7081  * parsed_results_bytes.
7082  * The parsing status is returned.
7083  */
7084 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7085 					       char *results_buf,
7086 					       u32 *parsed_results_bytes)
7087 {
7088 	const char *section_name, *param_name, *param_str_val;
7089 	u32 param_num_val, num_section_params, num_elements;
7090 	struct igu_fifo_element *elements;
7091 	enum dbg_status status;
7092 	u32 results_offset = 0;
7093 	u8 i;
7094 
7095 	/* Read global_params section */
7096 	dump_buf += qed_read_section_hdr(dump_buf,
7097 					 &section_name, &num_section_params);
7098 	if (strcmp(section_name, "global_params"))
7099 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7100 
7101 	/* Print global params */
7102 	dump_buf += qed_print_section_params(dump_buf,
7103 					     num_section_params,
7104 					     results_buf, &results_offset);
7105 
7106 	/* Read igu_fifo_data section */
7107 	dump_buf += qed_read_section_hdr(dump_buf,
7108 					 &section_name, &num_section_params);
7109 	if (strcmp(section_name, "igu_fifo_data"))
7110 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7111 	dump_buf += qed_read_param(dump_buf,
7112 				   &param_name, &param_str_val, &param_num_val);
7113 	if (strcmp(param_name, "size"))
7114 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7115 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7116 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7117 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7118 	elements = (struct igu_fifo_element *)dump_buf;
7119 
7120 	/* Decode elements */
7121 	for (i = 0; i < num_elements; i++) {
7122 		status = qed_parse_igu_fifo_element(&elements[i],
7123 						    results_buf,
7124 						    &results_offset);
7125 		if (status != DBG_STATUS_OK)
7126 			return status;
7127 	}
7128 
7129 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7130 						  results_offset),
7131 				  "fifo contained %d elements", num_elements);
7132 
7133 	/* Add 1 for string NULL termination */
7134 	*parsed_results_bytes = results_offset + 1;
7135 
7136 	return DBG_STATUS_OK;
7137 }
7138 
7139 static enum dbg_status
7140 qed_parse_protection_override_dump(u32 *dump_buf,
7141 				   char *results_buf,
7142 				   u32 *parsed_results_bytes)
7143 {
7144 	const char *section_name, *param_name, *param_str_val;
7145 	u32 param_num_val, num_section_params, num_elements;
7146 	struct protection_override_element *elements;
7147 	u32 results_offset = 0;
7148 	u8 i;
7149 
7150 	/* Read global_params section */
7151 	dump_buf += qed_read_section_hdr(dump_buf,
7152 					 &section_name, &num_section_params);
7153 	if (strcmp(section_name, "global_params"))
7154 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7155 
7156 	/* Print global params */
7157 	dump_buf += qed_print_section_params(dump_buf,
7158 					     num_section_params,
7159 					     results_buf, &results_offset);
7160 
7161 	/* Read protection_override_data section */
7162 	dump_buf += qed_read_section_hdr(dump_buf,
7163 					 &section_name, &num_section_params);
7164 	if (strcmp(section_name, "protection_override_data"))
7165 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7166 	dump_buf += qed_read_param(dump_buf,
7167 				   &param_name, &param_str_val, &param_num_val);
7168 	if (strcmp(param_name, "size"))
7169 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7170 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7171 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7172 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7173 	elements = (struct protection_override_element *)dump_buf;
7174 
7175 	/* Decode elements */
7176 	for (i = 0; i < num_elements; i++) {
7177 		u32 address = GET_FIELD(elements[i].data,
7178 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7179 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7180 
7181 		results_offset +=
7182 		    sprintf(qed_get_buf_ptr(results_buf,
7183 					    results_offset),
7184 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7185 			    i, address,
7186 			    (u32)GET_FIELD(elements[i].data,
7187 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7188 			    (u32)GET_FIELD(elements[i].data,
7189 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7190 			    (u32)GET_FIELD(elements[i].data,
7191 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7192 			    s_protection_strs[GET_FIELD(elements[i].data,
7193 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7194 			    s_protection_strs[GET_FIELD(elements[i].data,
7195 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7196 	}
7197 
7198 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7199 						  results_offset),
7200 				  "protection override contained %d elements",
7201 				  num_elements);
7202 
7203 	/* Add 1 for string NULL termination */
7204 	*parsed_results_bytes = results_offset + 1;
7205 
7206 	return DBG_STATUS_OK;
7207 }
7208 
7209 /* Parses a FW Asserts dump buffer.
7210  * If results_buf is not NULL, the FW Asserts results are printed to it.
7211  * In any case, the required results buffer size is assigned to
7212  * parsed_results_bytes.
7213  * The parsing status is returned.
7214  */
7215 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7216 						 char *results_buf,
7217 						 u32 *parsed_results_bytes)
7218 {
7219 	u32 num_section_params, param_num_val, i, results_offset = 0;
7220 	const char *param_name, *param_str_val, *section_name;
7221 	bool last_section_found = false;
7222 
7223 	*parsed_results_bytes = 0;
7224 
7225 	/* Read global_params section */
7226 	dump_buf += qed_read_section_hdr(dump_buf,
7227 					 &section_name, &num_section_params);
7228 	if (strcmp(section_name, "global_params"))
7229 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7230 
7231 	/* Print global params */
7232 	dump_buf += qed_print_section_params(dump_buf,
7233 					     num_section_params,
7234 					     results_buf, &results_offset);
7235 
7236 	while (!last_section_found) {
7237 		dump_buf += qed_read_section_hdr(dump_buf,
7238 						 &section_name,
7239 						 &num_section_params);
7240 		if (!strcmp(section_name, "fw_asserts")) {
7241 			/* Extract params */
7242 			const char *storm_letter = NULL;
7243 			u32 storm_dump_size = 0;
7244 
7245 			for (i = 0; i < num_section_params; i++) {
7246 				dump_buf += qed_read_param(dump_buf,
7247 							   &param_name,
7248 							   &param_str_val,
7249 							   &param_num_val);
7250 				if (!strcmp(param_name, "storm"))
7251 					storm_letter = param_str_val;
7252 				else if (!strcmp(param_name, "size"))
7253 					storm_dump_size = param_num_val;
7254 				else
7255 					return
7256 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7257 			}
7258 
7259 			if (!storm_letter || !storm_dump_size)
7260 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7261 
7262 			/* Print data */
7263 			results_offset +=
7264 			    sprintf(qed_get_buf_ptr(results_buf,
7265 						    results_offset),
7266 				    "\n%sSTORM_ASSERT: size=%d\n",
7267 				    storm_letter, storm_dump_size);
7268 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7269 				results_offset +=
7270 				    sprintf(qed_get_buf_ptr(results_buf,
7271 							    results_offset),
7272 					    "%08x\n", *dump_buf);
7273 		} else if (!strcmp(section_name, "last")) {
7274 			last_section_found = true;
7275 		} else {
7276 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7277 		}
7278 	}
7279 
7280 	/* Add 1 for string NULL termination */
7281 	*parsed_results_bytes = results_offset + 1;
7282 
7283 	return DBG_STATUS_OK;
7284 }
7285 
7286 /***************************** Public Functions *******************************/
7287 
7288 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7289 {
7290 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7291 	u8 buf_id;
7292 
7293 	/* Convert binary data to debug arrays */
7294 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7295 		s_user_dbg_arrays[buf_id].ptr =
7296 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7297 		s_user_dbg_arrays[buf_id].size_in_dwords =
7298 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7299 	}
7300 
7301 	return DBG_STATUS_OK;
7302 }
7303 
7304 const char *qed_dbg_get_status_str(enum dbg_status status)
7305 {
7306 	return (status <
7307 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7308 }
7309 
7310 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7311 						  u32 *dump_buf,
7312 						  u32 num_dumped_dwords,
7313 						  u32 *results_buf_size)
7314 {
7315 	u32 num_errors, num_warnings;
7316 
7317 	return qed_parse_idle_chk_dump(dump_buf,
7318 				       num_dumped_dwords,
7319 				       NULL,
7320 				       results_buf_size,
7321 				       &num_errors, &num_warnings);
7322 }
7323 
7324 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7325 					   u32 *dump_buf,
7326 					   u32 num_dumped_dwords,
7327 					   char *results_buf,
7328 					   u32 *num_errors,
7329 					   u32 *num_warnings)
7330 {
7331 	u32 parsed_buf_size;
7332 
7333 	return qed_parse_idle_chk_dump(dump_buf,
7334 				       num_dumped_dwords,
7335 				       results_buf,
7336 				       &parsed_buf_size,
7337 				       num_errors, num_warnings);
7338 }
7339 
7340 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7341 {
7342 	s_mcp_trace_meta_arr.ptr = data;
7343 	s_mcp_trace_meta_arr.size_in_dwords = size;
7344 }
7345 
7346 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7347 						   u32 *dump_buf,
7348 						   u32 num_dumped_dwords,
7349 						   u32 *results_buf_size)
7350 {
7351 	return qed_parse_mcp_trace_dump(p_hwfn,
7352 					dump_buf, NULL, results_buf_size);
7353 }
7354 
7355 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7356 					    u32 *dump_buf,
7357 					    u32 num_dumped_dwords,
7358 					    char *results_buf)
7359 {
7360 	u32 parsed_buf_size;
7361 
7362 	return qed_parse_mcp_trace_dump(p_hwfn,
7363 					dump_buf,
7364 					results_buf, &parsed_buf_size);
7365 }
7366 
7367 enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
7368 					 u32 num_dumped_bytes,
7369 					 char *results_buf)
7370 {
7371 	u32 parsed_bytes;
7372 
7373 	return qed_parse_mcp_trace_buf(dump_buf,
7374 				       num_dumped_bytes,
7375 				       0,
7376 				       num_dumped_bytes,
7377 				       results_buf, &parsed_bytes);
7378 }
7379 
7380 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7381 						  u32 *dump_buf,
7382 						  u32 num_dumped_dwords,
7383 						  u32 *results_buf_size)
7384 {
7385 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7386 }
7387 
7388 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7389 					   u32 *dump_buf,
7390 					   u32 num_dumped_dwords,
7391 					   char *results_buf)
7392 {
7393 	u32 parsed_buf_size;
7394 
7395 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7396 }
7397 
7398 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7399 						  u32 *dump_buf,
7400 						  u32 num_dumped_dwords,
7401 						  u32 *results_buf_size)
7402 {
7403 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7404 }
7405 
7406 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7407 					   u32 *dump_buf,
7408 					   u32 num_dumped_dwords,
7409 					   char *results_buf)
7410 {
7411 	u32 parsed_buf_size;
7412 
7413 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7414 }
7415 
7416 enum dbg_status
7417 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7418 					     u32 *dump_buf,
7419 					     u32 num_dumped_dwords,
7420 					     u32 *results_buf_size)
7421 {
7422 	return qed_parse_protection_override_dump(dump_buf,
7423 						  NULL, results_buf_size);
7424 }
7425 
7426 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7427 						      u32 *dump_buf,
7428 						      u32 num_dumped_dwords,
7429 						      char *results_buf)
7430 {
7431 	u32 parsed_buf_size;
7432 
7433 	return qed_parse_protection_override_dump(dump_buf,
7434 						  results_buf,
7435 						  &parsed_buf_size);
7436 }
7437 
7438 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7439 						    u32 *dump_buf,
7440 						    u32 num_dumped_dwords,
7441 						    u32 *results_buf_size)
7442 {
7443 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7444 }
7445 
7446 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7447 					     u32 *dump_buf,
7448 					     u32 num_dumped_dwords,
7449 					     char *results_buf)
7450 {
7451 	u32 parsed_buf_size;
7452 
7453 	return qed_parse_fw_asserts_dump(dump_buf,
7454 					 results_buf, &parsed_buf_size);
7455 }
7456 
7457 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7458 				   struct dbg_attn_block_result *results)
7459 {
7460 	struct user_dbg_array *block_attn, *pstrings;
7461 	const u32 *block_attn_name_offsets;
7462 	enum dbg_attn_type attn_type;
7463 	const char *block_name;
7464 	u8 num_regs, i, j;
7465 
7466 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7467 	attn_type = (enum dbg_attn_type)
7468 		    GET_FIELD(results->data,
7469 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7470 	block_name = s_block_info_arr[results->block_id].name;
7471 
7472 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7473 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7474 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7475 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7476 
7477 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7478 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7479 
7480 	/* Go over registers with a non-zero attention status */
7481 	for (i = 0; i < num_regs; i++) {
7482 		struct dbg_attn_bit_mapping *bit_mapping;
7483 		struct dbg_attn_reg_result *reg_result;
7484 		u8 num_reg_attn, bit_idx = 0;
7485 
7486 		reg_result = &results->reg_results[i];
7487 		num_reg_attn = GET_FIELD(reg_result->data,
7488 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7489 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7490 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7491 				block_attn->ptr)[reg_result->block_attn_offset];
7492 
7493 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7494 
7495 		/* Go over attention status bits */
7496 		for (j = 0; j < num_reg_attn; j++) {
7497 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7498 						     DBG_ATTN_BIT_MAPPING_VAL);
7499 			const char *attn_name, *attn_type_str, *masked_str;
7500 			u32 attn_name_offset, sts_addr;
7501 
7502 			/* Check if bit mask should be advanced (due to unused
7503 			 * bits).
7504 			 */
7505 			if (GET_FIELD(bit_mapping[j].data,
7506 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7507 				bit_idx += (u8)attn_idx_val;
7508 				continue;
7509 			}
7510 
7511 			/* Check current bit index */
7512 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7513 				bit_idx++;
7514 				continue;
7515 			}
7516 
7517 			/* Find attention name */
7518 			attn_name_offset =
7519 				block_attn_name_offsets[attn_idx_val];
7520 			attn_name = &((const char *)
7521 				      pstrings->ptr)[attn_name_offset];
7522 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7523 					"Interrupt" : "Parity";
7524 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7525 				     " [masked]" : "";
7526 			sts_addr = GET_FIELD(reg_result->data,
7527 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7528 			DP_NOTICE(p_hwfn,
7529 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7530 				  block_name, attn_type_str, attn_name,
7531 				  sts_addr, bit_idx, masked_str);
7532 
7533 			bit_idx++;
7534 		}
7535 	}
7536 
7537 	return DBG_STATUS_OK;
7538 }
7539 
7540 /* Wrapper for unifying the idle_chk and mcp_trace api */
7541 static enum dbg_status
7542 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7543 				   u32 *dump_buf,
7544 				   u32 num_dumped_dwords,
7545 				   char *results_buf)
7546 {
7547 	u32 num_errors, num_warnings;
7548 
7549 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7550 					  results_buf, &num_errors,
7551 					  &num_warnings);
7552 }
7553 
7554 /* Feature meta data lookup table */
7555 static struct {
7556 	char *name;
7557 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7558 				    struct qed_ptt *p_ptt, u32 *size);
7559 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7560 					struct qed_ptt *p_ptt, u32 *dump_buf,
7561 					u32 buf_size, u32 *dumped_dwords);
7562 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7563 					 u32 *dump_buf, u32 num_dumped_dwords,
7564 					 char *results_buf);
7565 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7566 					    u32 *dump_buf,
7567 					    u32 num_dumped_dwords,
7568 					    u32 *results_buf_size);
7569 } qed_features_lookup[] = {
7570 	{
7571 	"grc", qed_dbg_grc_get_dump_buf_size,
7572 		    qed_dbg_grc_dump, NULL, NULL}, {
7573 	"idle_chk",
7574 		    qed_dbg_idle_chk_get_dump_buf_size,
7575 		    qed_dbg_idle_chk_dump,
7576 		    qed_print_idle_chk_results_wrapper,
7577 		    qed_get_idle_chk_results_buf_size}, {
7578 	"mcp_trace",
7579 		    qed_dbg_mcp_trace_get_dump_buf_size,
7580 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7581 		    qed_get_mcp_trace_results_buf_size}, {
7582 	"reg_fifo",
7583 		    qed_dbg_reg_fifo_get_dump_buf_size,
7584 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7585 		    qed_get_reg_fifo_results_buf_size}, {
7586 	"igu_fifo",
7587 		    qed_dbg_igu_fifo_get_dump_buf_size,
7588 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7589 		    qed_get_igu_fifo_results_buf_size}, {
7590 	"protection_override",
7591 		    qed_dbg_protection_override_get_dump_buf_size,
7592 		    qed_dbg_protection_override_dump,
7593 		    qed_print_protection_override_results,
7594 		    qed_get_protection_override_results_buf_size}, {
7595 	"fw_asserts",
7596 		    qed_dbg_fw_asserts_get_dump_buf_size,
7597 		    qed_dbg_fw_asserts_dump,
7598 		    qed_print_fw_asserts_results,
7599 		    qed_get_fw_asserts_results_buf_size},};
7600 
7601 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7602 {
7603 	u32 i, precision = 80;
7604 
7605 	if (!p_text_buf)
7606 		return;
7607 
7608 	pr_notice("\n%.*s", precision, p_text_buf);
7609 	for (i = precision; i < text_size; i += precision)
7610 		pr_cont("%.*s", precision, p_text_buf + i);
7611 	pr_cont("\n");
7612 }
7613 
7614 #define QED_RESULTS_BUF_MIN_SIZE 16
7615 /* Generic function for decoding debug feature info */
7616 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7617 				      enum qed_dbg_features feature_idx)
7618 {
7619 	struct qed_dbg_feature *feature =
7620 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7621 	u32 text_size_bytes, null_char_pos, i;
7622 	enum dbg_status rc;
7623 	char *text_buf;
7624 
7625 	/* Check if feature supports formatting capability */
7626 	if (!qed_features_lookup[feature_idx].results_buf_size)
7627 		return DBG_STATUS_OK;
7628 
7629 	/* Obtain size of formatted output */
7630 	rc = qed_features_lookup[feature_idx].
7631 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7632 				 feature->dumped_dwords, &text_size_bytes);
7633 	if (rc != DBG_STATUS_OK)
7634 		return rc;
7635 
7636 	/* Make sure that the allocated size is a multiple of a dword (4 bytes) */
7637 	null_char_pos = text_size_bytes - 1;
7638 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7639 
7640 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7641 		DP_NOTICE(p_hwfn->cdev,
7642 			  "formatted size of feature was too small %d. Aborting\n",
7643 			  text_size_bytes);
7644 		return DBG_STATUS_INVALID_ARGS;
7645 	}
7646 
7647 	/* Allocate temp text buf */
7648 	text_buf = vzalloc(text_size_bytes);
7649 	if (!text_buf)
7650 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7651 
7652 	/* Decode feature opcodes to string on temp buf */
7653 	rc = qed_features_lookup[feature_idx].
7654 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7655 			      feature->dumped_dwords, text_buf);
7656 	if (rc != DBG_STATUS_OK) {
7657 		vfree(text_buf);
7658 		return rc;
7659 	}
7660 
7661 	/* Replace the original null character with a '\n' character.
7662 	 * The bytes that were added as a result of the dword alignment are also
7663 	 * padded with '\n' characters.
7664 	 */
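	/* Worked example (illustrative, not part of the original code): if the
	 * formatted text is 10 bytes long, null_char_pos is 9 and
	 * text_size_bytes is rounded up to 12, so bytes 9, 10 and 11 are all
	 * set to '\n' by the loop below.
	 */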
7665 	for (i = null_char_pos; i < text_size_bytes; i++)
7666 		text_buf[i] = '\n';
7667 
7668 	/* Dump printable feature to log */
7669 	if (p_hwfn->cdev->dbg_params.print_data)
7670 		qed_dbg_print_feature(text_buf, text_size_bytes);
7671 
7672 	/* Free the old dump_buf and point dump_buf at the newly allocated
7673 	 * and formatted text buffer.
7674 	 */
7675 	vfree(feature->dump_buf);
7676 	feature->dump_buf = text_buf;
7677 	feature->buf_size = text_size_bytes;
7678 	feature->dumped_dwords = text_size_bytes / 4;
7679 	return rc;
7680 }
7681 
7682 /* Generic function for performing the dump of a debug feature. */
7683 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7684 				    struct qed_ptt *p_ptt,
7685 				    enum qed_dbg_features feature_idx)
7686 {
7687 	struct qed_dbg_feature *feature =
7688 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7689 	u32 buf_size_dwords;
7690 	enum dbg_status rc;
7691 
7692 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7693 		  qed_features_lookup[feature_idx].name);
7694 
7695 	/* If dump_buf was already allocated, free it (this can happen if a dump
7696 	 * was triggered but the file was never read).
7697 	 * We can't reuse the buffer as-is since its size may have changed.
7698 	 */
7699 	if (feature->dump_buf) {
7700 		vfree(feature->dump_buf);
7701 		feature->dump_buf = NULL;
7702 	}
7703 
7704 	/* Get buffer size from hsi, allocate accordingly, and perform the
7705 	 * dump.
7706 	 */
7707 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7708 						       &buf_size_dwords);
7709 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7710 		return rc;
7711 	feature->buf_size = buf_size_dwords * sizeof(u32);
7712 	feature->dump_buf = vmalloc(feature->buf_size);
7713 	if (!feature->dump_buf)
7714 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7715 
7716 	rc = qed_features_lookup[feature_idx].
7717 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7718 			     feature->buf_size / sizeof(u32),
7719 			     &feature->dumped_dwords);
7720 
7721 	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7722 	 * In this case the buffer holds valid binary data, but we won't be able
7723 	 * to parse it (since parsing relies on data in NVRAM which is only
7724 	 * accessible when the MFW is responsive). Skip the formatting but return
7725 	 * success so that the binary data is still provided.
7726 	 */
7727 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7728 		return DBG_STATUS_OK;
7729 
7730 	if (rc != DBG_STATUS_OK)
7731 		return rc;
7732 
7733 	/* Format output */
7734 	rc = format_feature(p_hwfn, feature_idx);
7735 	return rc;
7736 }
7737 
7738 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7739 {
7740 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7741 }
7742 
7743 int qed_dbg_grc_size(struct qed_dev *cdev)
7744 {
7745 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7746 }
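
/* Usage sketch (illustrative only, not part of the driver): every debug
 * feature follows the same size-then-dump pattern. For a GRC dump, with
 * 'cdev' being the caller's struct qed_dev and 'consume' a hypothetical
 * consumer of the data:
 *
 *	u32 dumped_bytes;
 *	int size = qed_dbg_grc_size(cdev);
 *	void *buf = size > 0 ? vmalloc(size) : NULL;
 *
 *	if (buf && !qed_dbg_grc(cdev, buf, &dumped_bytes))
 *		consume(buf, dumped_bytes);
 *	vfree(buf);
 *
 * The same pairing applies to qed_dbg_idle_chk()/qed_dbg_idle_chk_size(),
 * qed_dbg_reg_fifo()/qed_dbg_reg_fifo_size(), and the other features below.
 */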
7747 
7748 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7749 {
7750 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7751 			       num_dumped_bytes);
7752 }
7753 
7754 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7755 {
7756 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7757 }
7758 
7759 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7760 {
7761 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7762 			       num_dumped_bytes);
7763 }
7764 
7765 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7766 {
7767 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7768 }
7769 
7770 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7771 {
7772 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7773 			       num_dumped_bytes);
7774 }
7775 
7776 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7777 {
7778 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7779 }
7780 
7781 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7782 				u32 *num_dumped_bytes)
7783 {
7784 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7785 			       num_dumped_bytes);
7786 }
7787 
7788 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7789 {
7790 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7791 }
7792 
7793 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7794 		       u32 *num_dumped_bytes)
7795 {
7796 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7797 			       num_dumped_bytes);
7798 }
7799 
7800 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7801 {
7802 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7803 }
7804 
7805 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7806 		      u32 *num_dumped_bytes)
7807 {
7808 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7809 			       num_dumped_bytes);
7810 }
7811 
7812 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7813 {
7814 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7815 }
7816 
7817 /* Defines the number of bytes allocated for recording the length of a debugfs
7818  * feature buffer.
7819  */
7820 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7821 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7822 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7823 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7824 enum debug_print_features {
7825 	OLD_MODE = 0,
7826 	IDLE_CHK = 1,
7827 	GRC_DUMP = 2,
7828 	MCP_TRACE = 3,
7829 	REG_FIFO = 4,
7830 	PROTECTION_OVERRIDE = 5,
7831 	IGU_FIFO = 6,
7832 	PHY = 7,
7833 	FW_ASSERTS = 8,
7834 };
7835 
7836 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7837 				   int engine, u32 feature_size, u8 omit_engine)
7838 {
7839 	/* Insert the engine, feature and omit_engine flag into the header and
7840 	 * combine it with the feature size.
7841 	 */
7842 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7843 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7844 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7845 }
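
/* Worked example (illustrative, not from the original source): for a GRC dump
 * (feature 2) of 0x1234 bytes on engine 0 with omit_engine set, the header is
 *
 *	0x1234 | (2 << 24) | (1 << 30) | (0 << 31) = 0x42001234
 *
 * i.e. bits 0-23 carry the feature size in bytes, bits 24-29 the feature id,
 * bit 30 the omit_engine flag and bit 31 the engine index.
 */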
7846 
7847 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7848 {
7849 	u8 cur_engine, omit_engine = 0, org_engine;
7850 	u32 offset = 0, feature_size;
7851 	int rc;
7852 
7853 	if (cdev->num_hwfns == 1)
7854 		omit_engine = 1;
7855 
7856 	org_engine = qed_get_debug_engine(cdev);
7857 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7858 		/* Collect idle_chk and GRC dumps for each HW function */
7859 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7860 			   "obtaining idle_chk and grcdump for current engine\n");
7861 		qed_set_debug_engine(cdev, cur_engine);
7862 
7863 		/* First idle_chk */
7864 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7865 				      REGDUMP_HEADER_SIZE, &feature_size);
7866 		if (!rc) {
7867 			*(u32 *)((u8 *)buffer + offset) =
7868 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7869 						    feature_size, omit_engine);
7870 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7871 		} else {
7872 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7873 		}
7874 
7875 		/* Second idle_chk */
7876 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7877 				      REGDUMP_HEADER_SIZE, &feature_size);
7878 		if (!rc) {
7879 			*(u32 *)((u8 *)buffer + offset) =
7880 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7881 						    feature_size, omit_engine);
7882 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7883 		} else {
7884 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7885 		}
7886 
7887 		/* reg_fifo dump */
7888 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7889 				      REGDUMP_HEADER_SIZE, &feature_size);
7890 		if (!rc) {
7891 			*(u32 *)((u8 *)buffer + offset) =
7892 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
7893 						    feature_size, omit_engine);
7894 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7895 		} else {
7896 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7897 		}
7898 
7899 		/* igu_fifo dump */
7900 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7901 				      REGDUMP_HEADER_SIZE, &feature_size);
7902 		if (!rc) {
7903 			*(u32 *)((u8 *)buffer + offset) =
7904 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
7905 						    feature_size, omit_engine);
7906 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7907 		} else {
7908 			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
7909 		}
7910 
7911 		/* protection_override dump */
7912 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7913 						 REGDUMP_HEADER_SIZE,
7914 						 &feature_size);
7915 		if (!rc) {
7916 			*(u32 *)((u8 *)buffer + offset) =
7917 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
7918 						    cur_engine,
7919 						    feature_size, omit_engine);
7920 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7921 		} else {
7922 			DP_ERR(cdev,
7923 			       "qed_dbg_protection_override failed. rc = %d\n",
7924 			       rc);
7925 		}
7926 
7927 		/* fw_asserts dump */
7928 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7929 					REGDUMP_HEADER_SIZE, &feature_size);
7930 		if (!rc) {
7931 			*(u32 *)((u8 *)buffer + offset) =
7932 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7933 						    feature_size, omit_engine);
7934 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7935 		} else {
7936 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7937 			       rc);
7938 		}
7939 
7940 		/* GRC dump - must be last because when the MCP is stuck it will
7941 		 * clutter idle_chk, reg_fifo, ...
7942 		 */
7943 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7944 				 REGDUMP_HEADER_SIZE, &feature_size);
7945 		if (!rc) {
7946 			*(u32 *)((u8 *)buffer + offset) =
7947 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
7948 						    feature_size, omit_engine);
7949 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7950 		} else {
7951 			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
7952 		}
7953 	}
7954 
7955 	qed_set_debug_engine(cdev, org_engine);
7956 	/* mcp_trace */
7957 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7958 			       REGDUMP_HEADER_SIZE, &feature_size);
7959 	if (!rc) {
7960 		*(u32 *)((u8 *)buffer + offset) =
7961 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
7962 					    feature_size, omit_engine);
7963 		offset += (feature_size + REGDUMP_HEADER_SIZE);
7964 	} else {
7965 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7966 	}
7967 
7968 	return 0;
7969 }
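
/* Sketch of how a caller could walk the buffer filled by qed_dbg_all_data()
 * (illustrative only; 'buffer' and 'total' (the number of bytes actually
 * written) belong to the hypothetical caller, and the masks are derived from
 * the REGDUMP_HEADER_*_SHIFT values above):
 *
 *	u8 *p = buffer;
 *
 *	while (p < buffer + total) {
 *		u32 hdr = *(u32 *)p;
 *		u32 size = hdr & 0xffffff;
 *		u8 feature = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
 *
 *		// 'size' bytes of 'feature' data start at p + REGDUMP_HEADER_SIZE
 *		p += REGDUMP_HEADER_SIZE + size;
 *	}
 */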
7970 
7971 int qed_dbg_all_data_size(struct qed_dev *cdev)
7972 {
7973 	u8 cur_engine, org_engine;
7974 	u32 regs_len = 0;
7975 
7976 	org_engine = qed_get_debug_engine(cdev);
7977 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7978 		/* Engine specific */
7979 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7980 			   "calculating idle_chk and grcdump register length for current engine\n");
7981 		qed_set_debug_engine(cdev, cur_engine);
7982 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7983 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7984 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7985 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7986 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7987 			    REGDUMP_HEADER_SIZE +
7988 			    qed_dbg_protection_override_size(cdev) +
7989 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7990 	}
7991 
7992 	qed_set_debug_engine(cdev, org_engine);
7993 
7994 	/* Engine common */
7995 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7996 
7997 	return regs_len;
7998 }
7999 
8000 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8001 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8002 {
8003 	struct qed_hwfn *p_hwfn =
8004 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8005 	struct qed_dbg_feature *qed_feature =
8006 		&cdev->dbg_params.features[feature];
8007 	enum dbg_status dbg_rc;
8008 	struct qed_ptt *p_ptt;
8009 	int rc = 0;
8010 
8011 	/* Acquire ptt */
8012 	p_ptt = qed_ptt_acquire(p_hwfn);
8013 	if (!p_ptt)
8014 		return -EINVAL;
8015 
8016 	/* Get dump */
8017 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8018 	if (dbg_rc != DBG_STATUS_OK) {
8019 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8020 			   qed_dbg_get_status_str(dbg_rc));
8021 		*num_dumped_bytes = 0;
8022 		rc = -EINVAL;
8023 		goto out;
8024 	}
8025 
8026 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8027 		   "copying debugfs feature to external buffer\n");
8028 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8029 	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
8030 			    4;
8031 
8032 out:
8033 	qed_ptt_release(p_hwfn, p_ptt);
8034 	return rc;
8035 }
8036 
8037 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8038 {
8039 	struct qed_hwfn *p_hwfn =
8040 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8041 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8042 	struct qed_dbg_feature *qed_feature =
8043 		&cdev->dbg_params.features[feature];
8044 	u32 buf_size_dwords;
8045 	enum dbg_status rc;
8046 
8047 	if (!p_ptt)
8048 		return -EINVAL;
8049 
8050 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8051 						   &buf_size_dwords);
8052 	if (rc != DBG_STATUS_OK)
8053 		buf_size_dwords = 0;
8054 
8055 	qed_ptt_release(p_hwfn, p_ptt);
8056 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8057 	return qed_feature->buf_size;
8058 }
8059 
8060 u8 qed_get_debug_engine(struct qed_dev *cdev)
8061 {
8062 	return cdev->dbg_params.engine_for_debug;
8063 }
8064 
8065 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8066 {
8067 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8068 		   engine_number);
8069 	cdev->dbg_params.engine_for_debug = engine_number;
8070 }
8071 
8072 void qed_dbg_pf_init(struct qed_dev *cdev)
8073 {
8074 	const u8 *dbg_values;
8075 
8076 	/* Debug values are after init values.
8077 	 * The offset is the first dword of the file.
8078 	 */
8079 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8080 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8081 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8082 }
8083 
8084 void qed_dbg_pf_exit(struct qed_dev *cdev)
8085 {
8086 	struct qed_dbg_feature *feature = NULL;
8087 	enum qed_dbg_features feature_idx;
8088 
8089 	/* Debug features' buffers may be allocated if a debug feature was used
8090 	 * but dump wasn't called.
8091 	 */
8092 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8093 		feature = &cdev->dbg_params.features[feature_idx];
8094 		if (feature->dump_buf) {
8095 			vfree(feature->dump_buf);
8096 			feature->dump_buf = NULL;
8097 		}
8098 	}
8099 }
8100