1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Memory groups enum */
19 enum mem_groups {
20 	MEM_GROUP_PXP_MEM,
21 	MEM_GROUP_DMAE_MEM,
22 	MEM_GROUP_CM_MEM,
23 	MEM_GROUP_QM_MEM,
24 	MEM_GROUP_DORQ_MEM,
25 	MEM_GROUP_BRB_RAM,
26 	MEM_GROUP_BRB_MEM,
27 	MEM_GROUP_PRS_MEM,
28 	MEM_GROUP_IOR,
29 	MEM_GROUP_BTB_RAM,
30 	MEM_GROUP_CONN_CFC_MEM,
31 	MEM_GROUP_TASK_CFC_MEM,
32 	MEM_GROUP_CAU_PI,
33 	MEM_GROUP_CAU_MEM,
34 	MEM_GROUP_PXP_ILT,
35 	MEM_GROUP_TM_MEM,
36 	MEM_GROUP_SDM_MEM,
37 	MEM_GROUP_PBUF,
38 	MEM_GROUP_RAM,
39 	MEM_GROUP_MULD_MEM,
40 	MEM_GROUP_BTB_MEM,
41 	MEM_GROUP_RDIF_CTX,
42 	MEM_GROUP_TDIF_CTX,
43 	MEM_GROUP_CFC_MEM,
44 	MEM_GROUP_IGU_MEM,
45 	MEM_GROUP_IGU_MSIX,
46 	MEM_GROUP_CAU_SB,
47 	MEM_GROUP_BMB_RAM,
48 	MEM_GROUP_BMB_MEM,
49 	MEM_GROUPS_NUM
50 };
51 
52 /* Memory group names */
53 static const char * const s_mem_group_names[] = {
54 	"PXP_MEM",
55 	"DMAE_MEM",
56 	"CM_MEM",
57 	"QM_MEM",
58 	"DORQ_MEM",
59 	"BRB_RAM",
60 	"BRB_MEM",
61 	"PRS_MEM",
62 	"IOR",
63 	"BTB_RAM",
64 	"CONN_CFC_MEM",
65 	"TASK_CFC_MEM",
66 	"CAU_PI",
67 	"CAU_MEM",
68 	"PXP_ILT",
69 	"TM_MEM",
70 	"SDM_MEM",
71 	"PBUF",
72 	"RAM",
73 	"MULD_MEM",
74 	"BTB_MEM",
75 	"RDIF_CTX",
76 	"TDIF_CTX",
77 	"CFC_MEM",
78 	"IGU_MEM",
79 	"IGU_MSIX",
80 	"CAU_SB",
81 	"BMB_RAM",
82 	"BMB_MEM",
83 };
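/* s_mem_group_names is indexed by enum mem_groups above and must stay in
 * sync with it entry for entry. A compile-time guard such as
 *
 *	BUILD_BUG_ON(ARRAY_SIZE(s_mem_group_names) != MEM_GROUPS_NUM);
 *
 * placed in an init path (shown here only as a suggestion; the driver does
 * not currently include it) would catch the two drifting apart.
 */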
84 
85 /* Idle check conditions */
86 
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91 
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96 
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 	return (r[0] & imm[0]) != imm[1];
100 }
101 
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 	return ((r[0] & imm[0]) >> imm[1]) !=
105 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107 
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112 
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 	return (r[0] & ~imm[0]) != imm[1];
116 }
117 
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 	return (r[0] & ~r[1]) != imm[0];
121 }
122 
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 	return r[0] != imm[0];
126 }
127 
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 	return r[0] != r[1] && r[2] == imm[0];
131 }
132 
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 	return r[0] != r[1] && r[2] > imm[0];
136 }
137 
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 	return r[0] != r[1];
141 }
142 
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 	return r[0] & imm[0];
146 }
147 
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 	return r[0] < (r[1] - imm[0]);
151 }
152 
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 	return r[0] > imm[0];
156 }
157 
158 /* Array of idle check conditions */
159 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
160 	cond0,
161 	cond1,
162 	cond2,
163 	cond3,
164 	cond4,
165 	cond5,
166 	cond6,
167 	cond7,
168 	cond8,
169 	cond9,
170 	cond10,
171 	cond11,
172 	cond12,
173 	cond13,
174 };
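/* Usage sketch (illustrative; "rule", "regs" and "imms" are placeholder
 * names, not symbols defined at this point in the file): each idle check
 * rule carries a condition index that selects one of the callbacks above,
 * which is then applied to the register values read for the rule and to the
 * rule's immediate values:
 *
 *	if (cond_arr[rule->cond_id](regs, imms))
 *		... the condition holds, i.e. the rule failed and is dumped
 */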
175 
176 /******************************* Data Types **********************************/
177 
178 enum platform_ids {
179 	PLATFORM_ASIC,
180 	PLATFORM_RESERVED,
181 	PLATFORM_RESERVED2,
182 	PLATFORM_RESERVED3,
183 	MAX_PLATFORM_IDS
184 };
185 
186 struct chip_platform_defs {
187 	u8 num_ports;
188 	u8 num_pfs;
189 	u8 num_vfs;
190 };
191 
192 /* Chip constant definitions */
193 struct chip_defs {
194 	const char *name;
195 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
196 };
197 
198 /* Platform constant definitions */
199 struct platform_defs {
200 	const char *name;
201 	u32 delay_factor;
202 	u32 dmae_thresh;
203 	u32 log_thresh;
204 };
205 
206 /* Storm constant definitions.
207  * Addresses are in bytes, sizes are in quad-regs.
208  */
209 struct storm_defs {
210 	char letter;
211 	enum block_id block_id;
212 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
213 	bool has_vfc;
214 	u32 sem_fast_mem_addr;
215 	u32 sem_frame_mode_addr;
216 	u32 sem_slow_enable_addr;
217 	u32 sem_slow_mode_addr;
218 	u32 sem_slow_mode1_conf_addr;
219 	u32 sem_sync_dbg_empty_addr;
220 	u32 sem_slow_dbg_empty_addr;
221 	u32 cm_ctx_wr_addr;
222 	u32 cm_conn_ag_ctx_lid_size;
223 	u32 cm_conn_ag_ctx_rd_addr;
224 	u32 cm_conn_st_ctx_lid_size;
225 	u32 cm_conn_st_ctx_rd_addr;
226 	u32 cm_task_ag_ctx_lid_size;
227 	u32 cm_task_ag_ctx_rd_addr;
228 	u32 cm_task_st_ctx_lid_size;
229 	u32 cm_task_st_ctx_rd_addr;
230 };
231 
232 /* Block constant definitions */
233 struct block_defs {
234 	const char *name;
235 	bool exists[MAX_CHIP_IDS];
236 	bool associated_to_storm;
237 
238 	/* Valid only if associated_to_storm is true */
239 	u32 storm_id;
240 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
241 	u32 dbg_select_addr;
242 	u32 dbg_enable_addr;
243 	u32 dbg_shift_addr;
244 	u32 dbg_force_valid_addr;
245 	u32 dbg_force_frame_addr;
246 	bool has_reset_bit;
247 
248 	/* If true, block is taken out of reset before dump */
249 	bool unreset;
250 	enum dbg_reset_regs reset_reg;
251 
252 	/* Bit offset in reset register */
253 	u8 reset_bit_offset;
254 };
255 
256 /* Reset register definitions */
257 struct reset_reg_defs {
258 	u32 addr;
259 	bool exists[MAX_CHIP_IDS];
260 	u32 unreset_val[MAX_CHIP_IDS];
261 };
262 
263 struct grc_param_defs {
264 	u32 default_val[MAX_CHIP_IDS];
265 	u32 min;
266 	u32 max;
267 	bool is_preset;
268 	u32 exclude_all_preset_val;
269 	u32 crash_preset_val;
270 };
271 
272 /* Address is in 128b units. Width is in bits. */
273 struct rss_mem_defs {
274 	const char *mem_name;
275 	const char *type_name;
276 	u32 addr;
277 	u32 entry_width;
278 	u32 num_entries[MAX_CHIP_IDS];
279 };
280 
281 struct vfc_ram_defs {
282 	const char *mem_name;
283 	const char *type_name;
284 	u32 base_row;
285 	u32 num_rows;
286 };
287 
288 struct big_ram_defs {
289 	const char *instance_name;
290 	enum mem_groups mem_group_id;
291 	enum mem_groups ram_mem_group_id;
292 	enum dbg_grc_params grc_param;
293 	u32 addr_reg_addr;
294 	u32 data_reg_addr;
295 	u32 is_256b_reg_addr;
296 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
297 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
298 };
299 
300 struct phy_defs {
301 	const char *phy_name;
302 
303 	/* PHY base GRC address */
304 	u32 base_addr;
305 
306 	/* Relative address of indirect TBUS address register (bits 0..7) */
307 	u32 tbus_addr_lo_addr;
308 
309 	/* Relative address of indirect TBUS address register (bits 8..10) */
310 	u32 tbus_addr_hi_addr;
311 
312 	/* Relative address of indirect TBUS data register (bits 0..7) */
313 	u32 tbus_data_lo_addr;
314 
315 	/* Relative address of indirect TBUS data register (bits 8..11) */
316 	u32 tbus_data_hi_addr;
317 };
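/* The tbus_* fields above describe an indirect access scheme: the TBUS
 * address is written in two parts (bits 0..7, then bits 8..10) and the
 * 12-bit data is read back in two parts (bits 0..7, then bits 8..11), all
 * relative to base_addr. A minimal sketch of one such read, assuming "phy"
 * points at an entry of the s_phy_defs array defined further down (the
 * driver's real PHY dump code appears later in this file):
 *
 *	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_lo_addr,
 *	       tbus_addr & 0xff);
 *	qed_wr(p_hwfn, p_ptt, phy->base_addr + phy->tbus_addr_hi_addr,
 *	       tbus_addr >> 8);
 *	data_lo = qed_rd(p_hwfn, p_ptt,
 *			 phy->base_addr + phy->tbus_data_lo_addr);
 *	data_hi = qed_rd(p_hwfn, p_ptt,
 *			 phy->base_addr + phy->tbus_data_hi_addr);
 */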
318 
319 /******************************** Constants **********************************/
320 
321 #define MAX_LCIDS			320
322 #define MAX_LTIDS			320
323 
324 #define NUM_IOR_SETS			2
325 #define IORS_PER_SET			176
326 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
327 
328 #define BYTES_IN_DWORD			sizeof(u32)
329 
330 /* In the macros below, size and offset are specified in bits */
331 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
332 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
333 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
334 #define FIELD_DWORD_OFFSET(type, field) \
335 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
336 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
337 #define FIELD_BIT_MASK(type, field) \
338 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
339 	 FIELD_DWORD_SHIFT(type, field))
340 
341 #define SET_VAR_FIELD(var, type, field, val) \
342 	do { \
343 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
344 		(~FIELD_BIT_MASK(type, field));	\
345 		var[FIELD_DWORD_OFFSET(type, field)] |= \
346 		(val) << FIELD_DWORD_SHIFT(type, field); \
347 	} while (0)
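/* Worked example (FOO/BAR is a hypothetical type/field pair, not one taken
 * from the firmware HSI): if FOO_BAR_OFFSET is 36 and FOO_BAR_SIZE is 9,
 * then FIELD_DWORD_OFFSET(FOO, BAR) is 1, FIELD_DWORD_SHIFT(FOO, BAR) is 4
 * and FIELD_BIT_MASK(FOO, BAR) is 0x1ff0, so SET_VAR_FIELD(var, FOO, BAR, 5)
 * clears bits 4..12 of var[1] and ORs in 5 << 4 = 0x50.
 */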
348 
349 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
350 	do { \
351 		for (i = 0; i < (arr_size); i++) \
352 			qed_wr(dev, ptt, addr,	(arr)[i]); \
353 	} while (0)
354 
355 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
356 	do { \
357 		for (i = 0; i < (arr_size); i++) \
358 			(arr)[i] = qed_rd(dev, ptt, addr); \
359 	} while (0)
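/* Both ARR_REG_WR and ARR_REG_RD rely on a loop counter named "i" that must
 * already be declared at the call site; the macros do not declare it
 * themselves. A typical call therefore looks like (sketch only, "addr" and
 * "buf" are placeholders):
 *
 *	u32 i, buf[4];
 *
 *	ARR_REG_RD(p_hwfn, p_ptt, addr, buf, ARRAY_SIZE(buf));
 */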
360 
361 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
362 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
363 
364 /* Extra lines include a signature line + optional latency events line */
365 #define NUM_EXTRA_DBG_LINES(block_desc) \
366 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
367 #define NUM_DBG_LINES(block_desc) \
368 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
369 
370 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
371 #define RAM_LINES_TO_BYTES(lines) \
372 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
373 
374 #define REG_DUMP_LEN_SHIFT		24
375 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
376 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
377 
378 #define IDLE_CHK_RULE_SIZE_DWORDS \
379 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
380 
381 #define IDLE_CHK_RESULT_HDR_DWORDS \
382 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
383 
384 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
385 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
386 
387 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
388 
389 /* The sizes and offsets below are specified in bits */
390 #define VFC_CAM_CMD_STRUCT_SIZE		64
391 #define VFC_CAM_CMD_ROW_OFFSET		48
392 #define VFC_CAM_CMD_ROW_SIZE		9
393 #define VFC_CAM_ADDR_STRUCT_SIZE	16
394 #define VFC_CAM_ADDR_OP_OFFSET		0
395 #define VFC_CAM_ADDR_OP_SIZE		4
396 #define VFC_CAM_RESP_STRUCT_SIZE	256
397 #define VFC_RAM_ADDR_STRUCT_SIZE	16
398 #define VFC_RAM_ADDR_OP_OFFSET		0
399 #define VFC_RAM_ADDR_OP_SIZE		2
400 #define VFC_RAM_ADDR_ROW_OFFSET		2
401 #define VFC_RAM_ADDR_ROW_SIZE		10
402 #define VFC_RAM_RESP_STRUCT_SIZE	256
403 
404 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
405 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
406 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
407 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
408 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
409 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
410 
411 #define NUM_VFC_RAM_TYPES		4
412 
413 #define VFC_CAM_NUM_ROWS		512
414 
415 #define VFC_OPCODE_CAM_RD		14
416 #define VFC_OPCODE_RAM_RD		0
417 
418 #define NUM_RSS_MEM_TYPES		5
419 
420 #define NUM_BIG_RAM_TYPES		3
421 
422 #define NUM_PHY_TBUS_ADDRESSES		2048
423 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
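/* Each TBUS address yields two data bytes (the lo and hi data reads), so two
 * addresses fit in every dumped dword, which is presumably why the dump size
 * is NUM_PHY_TBUS_ADDRESSES / 2 dwords.
 */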
424 
425 #define RESET_REG_UNRESET_OFFSET	4
426 
427 #define STALL_DELAY_MS			500
428 
429 #define STATIC_DEBUG_LINE_DWORDS	9
430 
431 #define NUM_COMMON_GLOBAL_PARAMS	8
432 
433 #define FW_IMG_MAIN			1
434 
435 #define REG_FIFO_ELEMENT_DWORDS		2
436 #define REG_FIFO_DEPTH_ELEMENTS		32
437 #define REG_FIFO_DEPTH_DWORDS \
438 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
439 
440 #define IGU_FIFO_ELEMENT_DWORDS		4
441 #define IGU_FIFO_DEPTH_ELEMENTS		64
442 #define IGU_FIFO_DEPTH_DWORDS \
443 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
444 
445 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
446 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
447 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
448 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
449 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
450 
451 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
452 	(MCP_REG_SCRATCH + \
453 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
454 
455 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
456 #define EMPTY_FW_IMAGE_STR		"???????????????"
457 
458 /***************************** Constant Arrays *******************************/
459 
460 struct dbg_array {
461 	const u32 *ptr;
462 	u32 size_in_dwords;
463 };
464 
465 /* Debug arrays */
466 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
467 
468 /* Chip constant definitions array */
469 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
470 	{ "bb",
471 	  {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
472 	   {0, 0, 0},
473 	   {0, 0, 0},
474 	   {0, 0, 0} } },
475 	{ "ah",
476 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
477 	   {0, 0, 0},
478 	   {0, 0, 0},
479 	   {0, 0, 0} } },
480 	{ "reserved",
481 	   {{0, 0, 0},
482 	   {0, 0, 0},
483 	   {0, 0, 0},
484 	   {0, 0, 0} } }
485 };
486 
487 /* Storm constant definitions array */
488 static struct storm_defs s_storm_defs[] = {
489 	/* Tstorm */
490 	{'T', BLOCK_TSEM,
491 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
492 	  DBG_BUS_CLIENT_RBCT}, true,
493 	 TSEM_REG_FAST_MEMORY,
494 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
495 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
496 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
497 	 TCM_REG_CTX_RBC_ACCS,
498 	 4, TCM_REG_AGG_CON_CTX,
499 	 16, TCM_REG_SM_CON_CTX,
500 	 2, TCM_REG_AGG_TASK_CTX,
501 	 4, TCM_REG_SM_TASK_CTX},
502 
503 	/* Mstorm */
504 	{'M', BLOCK_MSEM,
505 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
506 	  DBG_BUS_CLIENT_RBCM}, false,
507 	 MSEM_REG_FAST_MEMORY,
508 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
509 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
510 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
511 	 MCM_REG_CTX_RBC_ACCS,
512 	 1, MCM_REG_AGG_CON_CTX,
513 	 10, MCM_REG_SM_CON_CTX,
514 	 2, MCM_REG_AGG_TASK_CTX,
515 	 7, MCM_REG_SM_TASK_CTX},
516 
517 	/* Ustorm */
518 	{'U', BLOCK_USEM,
519 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
520 	  DBG_BUS_CLIENT_RBCU}, false,
521 	 USEM_REG_FAST_MEMORY,
522 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
523 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
524 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
525 	 UCM_REG_CTX_RBC_ACCS,
526 	 2, UCM_REG_AGG_CON_CTX,
527 	 13, UCM_REG_SM_CON_CTX,
528 	 3, UCM_REG_AGG_TASK_CTX,
529 	 3, UCM_REG_SM_TASK_CTX},
530 
531 	/* Xstorm */
532 	{'X', BLOCK_XSEM,
533 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
534 	  DBG_BUS_CLIENT_RBCX}, false,
535 	 XSEM_REG_FAST_MEMORY,
536 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
537 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
538 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
539 	 XCM_REG_CTX_RBC_ACCS,
540 	 9, XCM_REG_AGG_CON_CTX,
541 	 15, XCM_REG_SM_CON_CTX,
542 	 0, 0,
543 	 0, 0},
544 
545 	/* Ystorm */
546 	{'Y', BLOCK_YSEM,
547 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
548 	  DBG_BUS_CLIENT_RBCY}, false,
549 	 YSEM_REG_FAST_MEMORY,
550 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
551 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
552 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
553 	 YCM_REG_CTX_RBC_ACCS,
554 	 2, YCM_REG_AGG_CON_CTX,
555 	 3, YCM_REG_SM_CON_CTX,
556 	 2, YCM_REG_AGG_TASK_CTX,
557 	 12, YCM_REG_SM_TASK_CTX},
558 
559 	/* Pstorm */
560 	{'P', BLOCK_PSEM,
561 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
562 	  DBG_BUS_CLIENT_RBCS}, true,
563 	 PSEM_REG_FAST_MEMORY,
564 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
565 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
566 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
567 	 PCM_REG_CTX_RBC_ACCS,
568 	 0, 0,
569 	 10, PCM_REG_SM_CON_CTX,
570 	 0, 0,
571 	 0, 0}
572 };
573 
574 /* Block definitions array */
575 
576 static struct block_defs block_grc_defs = {
577 	"grc",
578 	{true, true, true}, false, 0,
579 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
580 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
581 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
582 	GRC_REG_DBG_FORCE_FRAME,
583 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
584 };
585 
586 static struct block_defs block_miscs_defs = {
587 	"miscs", {true, true, true}, false, 0,
588 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
589 	0, 0, 0, 0, 0,
590 	false, false, MAX_DBG_RESET_REGS, 0
591 };
592 
593 static struct block_defs block_misc_defs = {
594 	"misc", {true, true, true}, false, 0,
595 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
596 	0, 0, 0, 0, 0,
597 	false, false, MAX_DBG_RESET_REGS, 0
598 };
599 
600 static struct block_defs block_dbu_defs = {
601 	"dbu", {true, true, true}, false, 0,
602 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
603 	0, 0, 0, 0, 0,
604 	false, false, MAX_DBG_RESET_REGS, 0
605 };
606 
607 static struct block_defs block_pglue_b_defs = {
608 	"pglue_b",
609 	{true, true, true}, false, 0,
610 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
611 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
612 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
613 	PGLUE_B_REG_DBG_FORCE_FRAME,
614 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
615 };
616 
617 static struct block_defs block_cnig_defs = {
618 	"cnig",
619 	{true, true, true}, false, 0,
620 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
621 	 DBG_BUS_CLIENT_RBCW},
622 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
623 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
624 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
625 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
626 };
627 
628 static struct block_defs block_cpmu_defs = {
629 	"cpmu", {true, true, true}, false, 0,
630 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
631 	0, 0, 0, 0, 0,
632 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
633 };
634 
635 static struct block_defs block_ncsi_defs = {
636 	"ncsi",
637 	{true, true, true}, false, 0,
638 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
639 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
640 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
641 	NCSI_REG_DBG_FORCE_FRAME,
642 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
643 };
644 
645 static struct block_defs block_opte_defs = {
646 	"opte", {true, true, false}, false, 0,
647 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
648 	0, 0, 0, 0, 0,
649 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
650 };
651 
652 static struct block_defs block_bmb_defs = {
653 	"bmb",
654 	{true, true, true}, false, 0,
655 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
656 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
657 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
658 	BMB_REG_DBG_FORCE_FRAME,
659 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
660 };
661 
662 static struct block_defs block_pcie_defs = {
663 	"pcie",
664 	{true, true, true}, false, 0,
665 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
666 	 DBG_BUS_CLIENT_RBCH},
667 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
668 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
669 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
670 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
671 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
672 	false, false, MAX_DBG_RESET_REGS, 0
673 };
674 
675 static struct block_defs block_mcp_defs = {
676 	"mcp", {true, true, true}, false, 0,
677 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
678 	0, 0, 0, 0, 0,
679 	false, false, MAX_DBG_RESET_REGS, 0
680 };
681 
682 static struct block_defs block_mcp2_defs = {
683 	"mcp2",
684 	{true, true, true}, false, 0,
685 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
686 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
687 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
688 	MCP2_REG_DBG_FORCE_FRAME,
689 	false, false, MAX_DBG_RESET_REGS, 0
690 };
691 
692 static struct block_defs block_pswhst_defs = {
693 	"pswhst",
694 	{true, true, true}, false, 0,
695 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
696 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
697 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
698 	PSWHST_REG_DBG_FORCE_FRAME,
699 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
700 };
701 
702 static struct block_defs block_pswhst2_defs = {
703 	"pswhst2",
704 	{true, true, true}, false, 0,
705 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
706 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
707 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
708 	PSWHST2_REG_DBG_FORCE_FRAME,
709 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
710 };
711 
712 static struct block_defs block_pswrd_defs = {
713 	"pswrd",
714 	{true, true, true}, false, 0,
715 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
716 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
717 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
718 	PSWRD_REG_DBG_FORCE_FRAME,
719 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
720 };
721 
722 static struct block_defs block_pswrd2_defs = {
723 	"pswrd2",
724 	{true, true, true}, false, 0,
725 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
726 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
727 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
728 	PSWRD2_REG_DBG_FORCE_FRAME,
729 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
730 };
731 
732 static struct block_defs block_pswwr_defs = {
733 	"pswwr",
734 	{true, true, true}, false, 0,
735 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
736 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
737 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
738 	PSWWR_REG_DBG_FORCE_FRAME,
739 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
740 };
741 
742 static struct block_defs block_pswwr2_defs = {
743 	"pswwr2", {true, true, true}, false, 0,
744 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
745 	0, 0, 0, 0, 0,
746 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
747 };
748 
749 static struct block_defs block_pswrq_defs = {
750 	"pswrq",
751 	{true, true, true}, false, 0,
752 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
753 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
754 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
755 	PSWRQ_REG_DBG_FORCE_FRAME,
756 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
757 };
758 
759 static struct block_defs block_pswrq2_defs = {
760 	"pswrq2",
761 	{true, true, true}, false, 0,
762 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
763 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
764 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
765 	PSWRQ2_REG_DBG_FORCE_FRAME,
766 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
767 };
768 
769 static struct block_defs block_pglcs_defs = {
770 	"pglcs",
771 	{true, true, true}, false, 0,
772 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
773 	 DBG_BUS_CLIENT_RBCH},
774 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
775 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
776 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
777 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
778 };
779 
780 static struct block_defs block_ptu_defs = {
781 	"ptu",
782 	{true, true, true}, false, 0,
783 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
784 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
785 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
786 	PTU_REG_DBG_FORCE_FRAME,
787 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
788 };
789 
790 static struct block_defs block_dmae_defs = {
791 	"dmae",
792 	{true, true, true}, false, 0,
793 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
794 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
795 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
796 	DMAE_REG_DBG_FORCE_FRAME,
797 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
798 };
799 
800 static struct block_defs block_tcm_defs = {
801 	"tcm",
802 	{true, true, true}, true, DBG_TSTORM_ID,
803 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
804 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
805 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
806 	TCM_REG_DBG_FORCE_FRAME,
807 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
808 };
809 
810 static struct block_defs block_mcm_defs = {
811 	"mcm",
812 	{true, true, true}, true, DBG_MSTORM_ID,
813 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
814 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
815 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
816 	MCM_REG_DBG_FORCE_FRAME,
817 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
818 };
819 
820 static struct block_defs block_ucm_defs = {
821 	"ucm",
822 	{true, true, true}, true, DBG_USTORM_ID,
823 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
824 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
825 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
826 	UCM_REG_DBG_FORCE_FRAME,
827 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
828 };
829 
830 static struct block_defs block_xcm_defs = {
831 	"xcm",
832 	{true, true, true}, true, DBG_XSTORM_ID,
833 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
834 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
835 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
836 	XCM_REG_DBG_FORCE_FRAME,
837 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
838 };
839 
840 static struct block_defs block_ycm_defs = {
841 	"ycm",
842 	{true, true, true}, true, DBG_YSTORM_ID,
843 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
844 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
845 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
846 	YCM_REG_DBG_FORCE_FRAME,
847 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
848 };
849 
850 static struct block_defs block_pcm_defs = {
851 	"pcm",
852 	{true, true, true}, true, DBG_PSTORM_ID,
853 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
854 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
855 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
856 	PCM_REG_DBG_FORCE_FRAME,
857 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
858 };
859 
860 static struct block_defs block_qm_defs = {
861 	"qm",
862 	{true, true, true}, false, 0,
863 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
864 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
865 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
866 	QM_REG_DBG_FORCE_FRAME,
867 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
868 };
869 
870 static struct block_defs block_tm_defs = {
871 	"tm",
872 	{true, true, true}, false, 0,
873 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
874 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
875 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
876 	TM_REG_DBG_FORCE_FRAME,
877 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
878 };
879 
880 static struct block_defs block_dorq_defs = {
881 	"dorq",
882 	{true, true, true}, false, 0,
883 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
884 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
885 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
886 	DORQ_REG_DBG_FORCE_FRAME,
887 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
888 };
889 
890 static struct block_defs block_brb_defs = {
891 	"brb",
892 	{true, true, true}, false, 0,
893 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
894 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
895 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
896 	BRB_REG_DBG_FORCE_FRAME,
897 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
898 };
899 
900 static struct block_defs block_src_defs = {
901 	"src",
902 	{true, true, true}, false, 0,
903 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
904 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
905 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
906 	SRC_REG_DBG_FORCE_FRAME,
907 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
908 };
909 
910 static struct block_defs block_prs_defs = {
911 	"prs",
912 	{true, true, true}, false, 0,
913 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
914 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
915 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
916 	PRS_REG_DBG_FORCE_FRAME,
917 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
918 };
919 
920 static struct block_defs block_tsdm_defs = {
921 	"tsdm",
922 	{true, true, true}, true, DBG_TSTORM_ID,
923 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
924 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
925 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
926 	TSDM_REG_DBG_FORCE_FRAME,
927 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
928 };
929 
930 static struct block_defs block_msdm_defs = {
931 	"msdm",
932 	{true, true, true}, true, DBG_MSTORM_ID,
933 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
934 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
935 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
936 	MSDM_REG_DBG_FORCE_FRAME,
937 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
938 };
939 
940 static struct block_defs block_usdm_defs = {
941 	"usdm",
942 	{true, true, true}, true, DBG_USTORM_ID,
943 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
944 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
945 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
946 	USDM_REG_DBG_FORCE_FRAME,
947 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
948 };
949 
950 static struct block_defs block_xsdm_defs = {
951 	"xsdm",
952 	{true, true, true}, true, DBG_XSTORM_ID,
953 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
954 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
955 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
956 	XSDM_REG_DBG_FORCE_FRAME,
957 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
958 };
959 
960 static struct block_defs block_ysdm_defs = {
961 	"ysdm",
962 	{true, true, true}, true, DBG_YSTORM_ID,
963 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
964 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
965 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
966 	YSDM_REG_DBG_FORCE_FRAME,
967 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
968 };
969 
970 static struct block_defs block_psdm_defs = {
971 	"psdm",
972 	{true, true, true}, true, DBG_PSTORM_ID,
973 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
974 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
975 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
976 	PSDM_REG_DBG_FORCE_FRAME,
977 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
978 };
979 
980 static struct block_defs block_tsem_defs = {
981 	"tsem",
982 	{true, true, true}, true, DBG_TSTORM_ID,
983 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
984 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
985 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
986 	TSEM_REG_DBG_FORCE_FRAME,
987 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
988 };
989 
990 static struct block_defs block_msem_defs = {
991 	"msem",
992 	{true, true, true}, true, DBG_MSTORM_ID,
993 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
994 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
995 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
996 	MSEM_REG_DBG_FORCE_FRAME,
997 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
998 };
999 
1000 static struct block_defs block_usem_defs = {
1001 	"usem",
1002 	{true, true, true}, true, DBG_USTORM_ID,
1003 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1004 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1005 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1006 	USEM_REG_DBG_FORCE_FRAME,
1007 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1008 };
1009 
1010 static struct block_defs block_xsem_defs = {
1011 	"xsem",
1012 	{true, true, true}, true, DBG_XSTORM_ID,
1013 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1014 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1015 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1016 	XSEM_REG_DBG_FORCE_FRAME,
1017 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1018 };
1019 
1020 static struct block_defs block_ysem_defs = {
1021 	"ysem",
1022 	{true, true, true}, true, DBG_YSTORM_ID,
1023 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1024 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1025 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1026 	YSEM_REG_DBG_FORCE_FRAME,
1027 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1028 };
1029 
1030 static struct block_defs block_psem_defs = {
1031 	"psem",
1032 	{true, true, true}, true, DBG_PSTORM_ID,
1033 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1034 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1035 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1036 	PSEM_REG_DBG_FORCE_FRAME,
1037 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1038 };
1039 
1040 static struct block_defs block_rss_defs = {
1041 	"rss",
1042 	{true, true, true}, false, 0,
1043 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1044 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1045 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1046 	RSS_REG_DBG_FORCE_FRAME,
1047 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1048 };
1049 
1050 static struct block_defs block_tmld_defs = {
1051 	"tmld",
1052 	{true, true, true}, false, 0,
1053 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1054 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1055 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1056 	TMLD_REG_DBG_FORCE_FRAME,
1057 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1058 };
1059 
1060 static struct block_defs block_muld_defs = {
1061 	"muld",
1062 	{true, true, true}, false, 0,
1063 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1064 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1065 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1066 	MULD_REG_DBG_FORCE_FRAME,
1067 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1068 };
1069 
1070 static struct block_defs block_yuld_defs = {
1071 	"yuld",
1072 	{true, true, false}, false, 0,
1073 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1074 	 MAX_DBG_BUS_CLIENTS},
1075 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1076 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1077 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1078 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1079 	15
1080 };
1081 
1082 static struct block_defs block_xyld_defs = {
1083 	"xyld",
1084 	{true, true, true}, false, 0,
1085 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1086 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1087 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1088 	XYLD_REG_DBG_FORCE_FRAME,
1089 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1090 };
1091 
1092 static struct block_defs block_ptld_defs = {
1093 	"ptld",
1094 	{false, false, true}, false, 0,
1095 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1096 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1097 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1098 	PTLD_REG_DBG_FORCE_FRAME_E5,
1099 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1100 	28
1101 };
1102 
1103 static struct block_defs block_ypld_defs = {
1104 	"ypld",
1105 	{false, false, true}, false, 0,
1106 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1107 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1108 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1109 	YPLD_REG_DBG_FORCE_FRAME_E5,
1110 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1111 	27
1112 };
1113 
1114 static struct block_defs block_prm_defs = {
1115 	"prm",
1116 	{true, true, true}, false, 0,
1117 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1118 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1119 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1120 	PRM_REG_DBG_FORCE_FRAME,
1121 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1122 };
1123 
1124 static struct block_defs block_pbf_pb1_defs = {
1125 	"pbf_pb1",
1126 	{true, true, true}, false, 0,
1127 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1128 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1129 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1130 	PBF_PB1_REG_DBG_FORCE_FRAME,
1131 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1132 	11
1133 };
1134 
1135 static struct block_defs block_pbf_pb2_defs = {
1136 	"pbf_pb2",
1137 	{true, true, true}, false, 0,
1138 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1139 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1140 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1141 	PBF_PB2_REG_DBG_FORCE_FRAME,
1142 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1143 	12
1144 };
1145 
1146 static struct block_defs block_rpb_defs = {
1147 	"rpb",
1148 	{true, true, true}, false, 0,
1149 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1150 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1151 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1152 	RPB_REG_DBG_FORCE_FRAME,
1153 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1154 };
1155 
1156 static struct block_defs block_btb_defs = {
1157 	"btb",
1158 	{true, true, true}, false, 0,
1159 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1160 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1161 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1162 	BTB_REG_DBG_FORCE_FRAME,
1163 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1164 };
1165 
1166 static struct block_defs block_pbf_defs = {
1167 	"pbf",
1168 	{true, true, true}, false, 0,
1169 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1170 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1171 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1172 	PBF_REG_DBG_FORCE_FRAME,
1173 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1174 };
1175 
1176 static struct block_defs block_rdif_defs = {
1177 	"rdif",
1178 	{true, true, true}, false, 0,
1179 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1180 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1181 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1182 	RDIF_REG_DBG_FORCE_FRAME,
1183 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1184 };
1185 
1186 static struct block_defs block_tdif_defs = {
1187 	"tdif",
1188 	{true, true, true}, false, 0,
1189 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1190 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1191 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1192 	TDIF_REG_DBG_FORCE_FRAME,
1193 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1194 };
1195 
1196 static struct block_defs block_cdu_defs = {
1197 	"cdu",
1198 	{true, true, true}, false, 0,
1199 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1200 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1201 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1202 	CDU_REG_DBG_FORCE_FRAME,
1203 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1204 };
1205 
1206 static struct block_defs block_ccfc_defs = {
1207 	"ccfc",
1208 	{true, true, true}, false, 0,
1209 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1210 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1211 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1212 	CCFC_REG_DBG_FORCE_FRAME,
1213 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1214 };
1215 
1216 static struct block_defs block_tcfc_defs = {
1217 	"tcfc",
1218 	{true, true, true}, false, 0,
1219 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1220 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1221 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1222 	TCFC_REG_DBG_FORCE_FRAME,
1223 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1224 };
1225 
1226 static struct block_defs block_igu_defs = {
1227 	"igu",
1228 	{true, true, true}, false, 0,
1229 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1230 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1231 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1232 	IGU_REG_DBG_FORCE_FRAME,
1233 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1234 };
1235 
1236 static struct block_defs block_cau_defs = {
1237 	"cau",
1238 	{true, true, true}, false, 0,
1239 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1240 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1241 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1242 	CAU_REG_DBG_FORCE_FRAME,
1243 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1244 };
1245 
1246 static struct block_defs block_rgfs_defs = {
1247 	"rgfs", {false, false, true}, false, 0,
1248 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1249 	0, 0, 0, 0, 0,
1250 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1251 };
1252 
1253 static struct block_defs block_rgsrc_defs = {
1254 	"rgsrc",
1255 	{false, false, true}, false, 0,
1256 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1257 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1258 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1259 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1260 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1261 	30
1262 };
1263 
1264 static struct block_defs block_tgfs_defs = {
1265 	"tgfs", {false, false, true}, false, 0,
1266 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1267 	0, 0, 0, 0, 0,
1268 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1269 };
1270 
1271 static struct block_defs block_tgsrc_defs = {
1272 	"tgsrc",
1273 	{false, false, true}, false, 0,
1274 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1275 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1276 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1277 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1278 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1279 	31
1280 };
1281 
1282 static struct block_defs block_umac_defs = {
1283 	"umac",
1284 	{true, true, true}, false, 0,
1285 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1286 	 DBG_BUS_CLIENT_RBCZ},
1287 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1288 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1289 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1290 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1291 };
1292 
1293 static struct block_defs block_xmac_defs = {
1294 	"xmac", {true, false, false}, false, 0,
1295 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1296 	0, 0, 0, 0, 0,
1297 	false, false, MAX_DBG_RESET_REGS, 0
1298 };
1299 
1300 static struct block_defs block_dbg_defs = {
1301 	"dbg", {true, true, true}, false, 0,
1302 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1303 	0, 0, 0, 0, 0,
1304 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1305 };
1306 
1307 static struct block_defs block_nig_defs = {
1308 	"nig",
1309 	{true, true, true}, false, 0,
1310 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1311 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1312 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1313 	NIG_REG_DBG_FORCE_FRAME,
1314 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1315 };
1316 
1317 static struct block_defs block_wol_defs = {
1318 	"wol",
1319 	{false, true, true}, false, 0,
1320 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1321 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1322 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1323 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1324 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1325 };
1326 
1327 static struct block_defs block_bmbn_defs = {
1328 	"bmbn",
1329 	{false, true, true}, false, 0,
1330 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1331 	 DBG_BUS_CLIENT_RBCB},
1332 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1333 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1334 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1335 	false, false, MAX_DBG_RESET_REGS, 0
1336 };
1337 
1338 static struct block_defs block_ipc_defs = {
1339 	"ipc", {true, true, true}, false, 0,
1340 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1341 	0, 0, 0, 0, 0,
1342 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1343 };
1344 
1345 static struct block_defs block_nwm_defs = {
1346 	"nwm",
1347 	{false, true, true}, false, 0,
1348 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1349 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1350 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1351 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1352 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1353 };
1354 
1355 static struct block_defs block_nws_defs = {
1356 	"nws",
1357 	{false, true, true}, false, 0,
1358 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1359 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1360 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1361 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1362 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1363 };
1364 
1365 static struct block_defs block_ms_defs = {
1366 	"ms",
1367 	{false, true, true}, false, 0,
1368 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1369 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1370 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1371 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1372 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1373 };
1374 
1375 static struct block_defs block_phy_pcie_defs = {
1376 	"phy_pcie",
1377 	{false, true, true}, false, 0,
1378 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1379 	 DBG_BUS_CLIENT_RBCH},
1380 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1381 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1382 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1383 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1384 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1385 	false, false, MAX_DBG_RESET_REGS, 0
1386 };
1387 
1388 static struct block_defs block_led_defs = {
1389 	"led", {false, true, true}, false, 0,
1390 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1391 	0, 0, 0, 0, 0,
1392 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1393 };
1394 
1395 static struct block_defs block_avs_wrap_defs = {
1396 	"avs_wrap", {false, true, false}, false, 0,
1397 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1398 	0, 0, 0, 0, 0,
1399 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1400 };
1401 
1402 static struct block_defs block_pxpreqbus_defs = {
1403 	"pxpreqbus", {false, false, false}, false, 0,
1404 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1405 	0, 0, 0, 0, 0,
1406 	false, false, MAX_DBG_RESET_REGS, 0
1407 };
1408 
1409 static struct block_defs block_misc_aeu_defs = {
1410 	"misc_aeu", {true, true, true}, false, 0,
1411 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1412 	0, 0, 0, 0, 0,
1413 	false, false, MAX_DBG_RESET_REGS, 0
1414 };
1415 
1416 static struct block_defs block_bar0_map_defs = {
1417 	"bar0_map", {true, true, true}, false, 0,
1418 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1419 	0, 0, 0, 0, 0,
1420 	false, false, MAX_DBG_RESET_REGS, 0
1421 };
1422 
1423 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1424 	&block_grc_defs,
1425 	&block_miscs_defs,
1426 	&block_misc_defs,
1427 	&block_dbu_defs,
1428 	&block_pglue_b_defs,
1429 	&block_cnig_defs,
1430 	&block_cpmu_defs,
1431 	&block_ncsi_defs,
1432 	&block_opte_defs,
1433 	&block_bmb_defs,
1434 	&block_pcie_defs,
1435 	&block_mcp_defs,
1436 	&block_mcp2_defs,
1437 	&block_pswhst_defs,
1438 	&block_pswhst2_defs,
1439 	&block_pswrd_defs,
1440 	&block_pswrd2_defs,
1441 	&block_pswwr_defs,
1442 	&block_pswwr2_defs,
1443 	&block_pswrq_defs,
1444 	&block_pswrq2_defs,
1445 	&block_pglcs_defs,
1446 	&block_dmae_defs,
1447 	&block_ptu_defs,
1448 	&block_tcm_defs,
1449 	&block_mcm_defs,
1450 	&block_ucm_defs,
1451 	&block_xcm_defs,
1452 	&block_ycm_defs,
1453 	&block_pcm_defs,
1454 	&block_qm_defs,
1455 	&block_tm_defs,
1456 	&block_dorq_defs,
1457 	&block_brb_defs,
1458 	&block_src_defs,
1459 	&block_prs_defs,
1460 	&block_tsdm_defs,
1461 	&block_msdm_defs,
1462 	&block_usdm_defs,
1463 	&block_xsdm_defs,
1464 	&block_ysdm_defs,
1465 	&block_psdm_defs,
1466 	&block_tsem_defs,
1467 	&block_msem_defs,
1468 	&block_usem_defs,
1469 	&block_xsem_defs,
1470 	&block_ysem_defs,
1471 	&block_psem_defs,
1472 	&block_rss_defs,
1473 	&block_tmld_defs,
1474 	&block_muld_defs,
1475 	&block_yuld_defs,
1476 	&block_xyld_defs,
1477 	&block_ptld_defs,
1478 	&block_ypld_defs,
1479 	&block_prm_defs,
1480 	&block_pbf_pb1_defs,
1481 	&block_pbf_pb2_defs,
1482 	&block_rpb_defs,
1483 	&block_btb_defs,
1484 	&block_pbf_defs,
1485 	&block_rdif_defs,
1486 	&block_tdif_defs,
1487 	&block_cdu_defs,
1488 	&block_ccfc_defs,
1489 	&block_tcfc_defs,
1490 	&block_igu_defs,
1491 	&block_cau_defs,
1492 	&block_rgfs_defs,
1493 	&block_rgsrc_defs,
1494 	&block_tgfs_defs,
1495 	&block_tgsrc_defs,
1496 	&block_umac_defs,
1497 	&block_xmac_defs,
1498 	&block_dbg_defs,
1499 	&block_nig_defs,
1500 	&block_wol_defs,
1501 	&block_bmbn_defs,
1502 	&block_ipc_defs,
1503 	&block_nwm_defs,
1504 	&block_nws_defs,
1505 	&block_ms_defs,
1506 	&block_phy_pcie_defs,
1507 	&block_led_defs,
1508 	&block_avs_wrap_defs,
1509 	&block_pxpreqbus_defs,
1510 	&block_misc_aeu_defs,
1511 	&block_bar0_map_defs,
1512 };
1513 
1514 static struct platform_defs s_platform_defs[] = {
1515 	{"asic", 1, 256, 32768},
1516 	{"reserved", 0, 0, 0},
1517 	{"reserved2", 0, 0, 0},
1518 	{"reserved3", 0, 0, 0}
1519 };
1520 
1521 static struct grc_param_defs s_grc_param_defs[] = {
1522 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1523 	{{1, 1, 1}, 0, 1, false, 1, 1},
1524 
1525 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1526 	{{1, 1, 1}, 0, 1, false, 1, 1},
1527 
1528 	/* DBG_GRC_PARAM_DUMP_USTORM */
1529 	{{1, 1, 1}, 0, 1, false, 1, 1},
1530 
1531 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1532 	{{1, 1, 1}, 0, 1, false, 1, 1},
1533 
1534 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1535 	{{1, 1, 1}, 0, 1, false, 1, 1},
1536 
1537 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1538 	{{1, 1, 1}, 0, 1, false, 1, 1},
1539 
1540 	/* DBG_GRC_PARAM_DUMP_REGS */
1541 	{{1, 1, 1}, 0, 1, false, 0, 1},
1542 
1543 	/* DBG_GRC_PARAM_DUMP_RAM */
1544 	{{1, 1, 1}, 0, 1, false, 0, 1},
1545 
1546 	/* DBG_GRC_PARAM_DUMP_PBUF */
1547 	{{1, 1, 1}, 0, 1, false, 0, 1},
1548 
1549 	/* DBG_GRC_PARAM_DUMP_IOR */
1550 	{{0, 0, 0}, 0, 1, false, 0, 1},
1551 
1552 	/* DBG_GRC_PARAM_DUMP_VFC */
1553 	{{0, 0, 0}, 0, 1, false, 0, 1},
1554 
1555 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1556 	{{1, 1, 1}, 0, 1, false, 0, 1},
1557 
1558 	/* DBG_GRC_PARAM_DUMP_ILT */
1559 	{{1, 1, 1}, 0, 1, false, 0, 1},
1560 
1561 	/* DBG_GRC_PARAM_DUMP_RSS */
1562 	{{1, 1, 1}, 0, 1, false, 0, 1},
1563 
1564 	/* DBG_GRC_PARAM_DUMP_CAU */
1565 	{{1, 1, 1}, 0, 1, false, 0, 1},
1566 
1567 	/* DBG_GRC_PARAM_DUMP_QM */
1568 	{{1, 1, 1}, 0, 1, false, 0, 1},
1569 
1570 	/* DBG_GRC_PARAM_DUMP_MCP */
1571 	{{1, 1, 1}, 0, 1, false, 0, 1},
1572 
1573 	/* DBG_GRC_PARAM_RESERVED */
1574 	{{1, 1, 1}, 0, 1, false, 0, 1},
1575 
1576 	/* DBG_GRC_PARAM_DUMP_CFC */
1577 	{{1, 1, 1}, 0, 1, false, 0, 1},
1578 
1579 	/* DBG_GRC_PARAM_DUMP_IGU */
1580 	{{1, 1, 1}, 0, 1, false, 0, 1},
1581 
1582 	/* DBG_GRC_PARAM_DUMP_BRB */
1583 	{{0, 0, 0}, 0, 1, false, 0, 1},
1584 
1585 	/* DBG_GRC_PARAM_DUMP_BTB */
1586 	{{0, 0, 0}, 0, 1, false, 0, 1},
1587 
1588 	/* DBG_GRC_PARAM_DUMP_BMB */
1589 	{{0, 0, 0}, 0, 1, false, 0, 1},
1590 
1591 	/* DBG_GRC_PARAM_DUMP_NIG */
1592 	{{1, 1, 1}, 0, 1, false, 0, 1},
1593 
1594 	/* DBG_GRC_PARAM_DUMP_MULD */
1595 	{{1, 1, 1}, 0, 1, false, 0, 1},
1596 
1597 	/* DBG_GRC_PARAM_DUMP_PRS */
1598 	{{1, 1, 1}, 0, 1, false, 0, 1},
1599 
1600 	/* DBG_GRC_PARAM_DUMP_DMAE */
1601 	{{1, 1, 1}, 0, 1, false, 0, 1},
1602 
1603 	/* DBG_GRC_PARAM_DUMP_TM */
1604 	{{1, 1, 1}, 0, 1, false, 0, 1},
1605 
1606 	/* DBG_GRC_PARAM_DUMP_SDM */
1607 	{{1, 1, 1}, 0, 1, false, 0, 1},
1608 
1609 	/* DBG_GRC_PARAM_DUMP_DIF */
1610 	{{1, 1, 1}, 0, 1, false, 0, 1},
1611 
1612 	/* DBG_GRC_PARAM_DUMP_STATIC */
1613 	{{1, 1, 1}, 0, 1, false, 0, 1},
1614 
1615 	/* DBG_GRC_PARAM_UNSTALL */
1616 	{{0, 0, 0}, 0, 1, false, 0, 0},
1617 
1618 	/* DBG_GRC_PARAM_NUM_LCIDS */
1619 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1620 	 MAX_LCIDS},
1621 
1622 	/* DBG_GRC_PARAM_NUM_LTIDS */
1623 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1624 	 MAX_LTIDS},
1625 
1626 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1627 	{{0, 0, 0}, 0, 1, true, 0, 0},
1628 
1629 	/* DBG_GRC_PARAM_CRASH */
1630 	{{0, 0, 0}, 0, 1, true, 0, 0},
1631 
1632 	/* DBG_GRC_PARAM_PARITY_SAFE */
1633 	{{0, 0, 0}, 0, 1, false, 1, 0},
1634 
1635 	/* DBG_GRC_PARAM_DUMP_CM */
1636 	{{1, 1, 1}, 0, 1, false, 0, 1},
1637 
1638 	/* DBG_GRC_PARAM_DUMP_PHY */
1639 	{{1, 1, 1}, 0, 1, false, 0, 1},
1640 
1641 	/* DBG_GRC_PARAM_NO_MCP */
1642 	{{0, 0, 0}, 0, 1, false, 0, 0},
1643 
1644 	/* DBG_GRC_PARAM_NO_FW_VER */
1645 	{{0, 0, 0}, 0, 1, false, 0, 0}
1646 };
1647 
1648 static struct rss_mem_defs s_rss_mem_defs[] = {
1649 	{ "rss_mem_cid", "rss_cid", 0, 32,
1650 	  {256, 320, 512} },
1651 
1652 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1653 	  {128, 208, 257} },
1654 
1655 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1656 	  {128, 208, 257} },
1657 
1658 	{ "rss_mem_info", "rss_info", 3072, 16,
1659 	  {128, 208, 256} },
1660 
1661 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1662 	  {16384, 26624, 32768} }
1663 };
1664 
1665 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1666 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1667 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1668 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1669 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1670 };
1671 
1672 static struct big_ram_defs s_big_ram_defs[] = {
1673 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1674 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1675 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1676 	  {153600, 180224, 282624} },
1677 
1678 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1679 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1680 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1681 	  {92160, 117760, 168960} },
1682 
1683 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1684 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1685 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1686 	  {36864, 36864, 36864} }
1687 };
1688 
1689 static struct reset_reg_defs s_reset_regs_defs[] = {
1690 	/* DBG_RESET_REG_MISCS_PL_UA */
1691 	{ MISCS_REG_RESET_PL_UA,
1692 	  {true, true, true}, {0x0, 0x0, 0x0} },
1693 
1694 	/* DBG_RESET_REG_MISCS_PL_HV */
1695 	{ MISCS_REG_RESET_PL_HV,
1696 	  {true, true, true}, {0x0, 0x400, 0x600} },
1697 
1698 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1699 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1700 	  {false, true, true}, {0x0, 0x0, 0x0} },
1701 
1702 	/* DBG_RESET_REG_MISC_PL_UA */
1703 	{ MISC_REG_RESET_PL_UA,
1704 	  {true, true, true}, {0x0, 0x0, 0x0} },
1705 
1706 	/* DBG_RESET_REG_MISC_PL_HV */
1707 	{ MISC_REG_RESET_PL_HV,
1708 	  {true, true, true}, {0x0, 0x0, 0x0} },
1709 
1710 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1711 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1712 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1713 
1714 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1715 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1716 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1717 
1718 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1719 	{ MISC_REG_RESET_PL_PDA_VAUX,
1720 	  {true, true, true}, {0x2, 0x2, 0x2} },
1721 };
1722 
1723 static struct phy_defs s_phy_defs[] = {
1724 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1725 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1726 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1727 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1728 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1729 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1730 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1731 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1732 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1733 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1734 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1735 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1736 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1737 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1738 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1739 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1740 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1741 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1742 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1743 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1744 };
1745 
1746 /**************************** Private Functions ******************************/
1747 
1748 /* Reads and returns a single dword from the specified unaligned buffer */
1749 static u32 qed_read_unaligned_dword(u8 *buf)
1750 {
1751 	u32 dword;
1752 
1753 	memcpy((u8 *)&dword, buf, sizeof(dword));
1754 	return dword;
1755 }
1756 
1757 /* Returns the value of the specified GRC param */
1758 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1759 			     enum dbg_grc_params grc_param)
1760 {
1761 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1762 
1763 	return dev_data->grc.param_val[grc_param];
1764 }
1765 
1766 /* Initializes the GRC parameters */
1767 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1768 {
1769 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1770 
1771 	if (!dev_data->grc.params_initialized) {
1772 		qed_dbg_grc_set_params_default(p_hwfn);
1773 		dev_data->grc.params_initialized = 1;
1774 	}
1775 }
1776 
1777 /* Initializes debug data for the specified device */
1778 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1779 					struct qed_ptt *p_ptt)
1780 {
1781 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1782 
1783 	if (dev_data->initialized)
1784 		return DBG_STATUS_OK;
1785 
1786 	if (QED_IS_K2(p_hwfn->cdev)) {
1787 		dev_data->chip_id = CHIP_K2;
1788 		dev_data->mode_enable[MODE_K2] = 1;
1789 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1790 		dev_data->chip_id = CHIP_BB;
1791 		dev_data->mode_enable[MODE_BB] = 1;
1792 	} else {
1793 		return DBG_STATUS_UNKNOWN_CHIP;
1794 	}
1795 
1796 	dev_data->platform_id = PLATFORM_ASIC;
1797 	dev_data->mode_enable[MODE_ASIC] = 1;
1798 
1799 	/* Initializes the GRC parameters */
1800 	qed_dbg_grc_init_params(p_hwfn);
1801 
1802 	dev_data->use_dmae = true;
1803 	dev_data->num_regs_read = 0;
1804 	dev_data->initialized = 1;
1805 
1806 	return DBG_STATUS_OK;
1807 }
1808 
1809 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1810 						    enum block_id block_id)
1811 {
1812 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1813 
1814 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1815 						       MAX_CHIP_IDS +
1816 						       dev_data->chip_id];
1817 }
1818 
1819 /* Reads the FW info structure for the specified Storm from the chip,
1820  * and writes it to the specified fw_info pointer.
1821  */
1822 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1823 			     struct qed_ptt *p_ptt,
1824 			     u8 storm_id, struct fw_info *fw_info)
1825 {
1826 	struct storm_defs *storm = &s_storm_defs[storm_id];
1827 	struct fw_info_location fw_info_location;
1828 	u32 addr, i, *dest;
1829 
1830 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1831 	memset(fw_info, 0, sizeof(*fw_info));
1832 
1833 	/* First, read the address that points to the fw_info location.
1834 	 * The address is located in the last line of the Storm RAM.
1835 	 */
1836 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1837 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1838 	       sizeof(fw_info_location);
1839 	dest = (u32 *)&fw_info_location;
1840 
1841 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1842 	     i++, addr += BYTES_IN_DWORD)
1843 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1844 
1845 	/* Read FW version info from Storm RAM */
1846 	if (fw_info_location.size > 0 && fw_info_location.size <=
1847 	    sizeof(*fw_info)) {
1848 		addr = fw_info_location.grc_addr;
1849 		dest = (u32 *)fw_info;
1850 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1851 		     i++, addr += BYTES_IN_DWORD)
1852 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1853 	}
1854 }
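
/* Illustrative note (not part of the driver): the fw_info read above is a
 * two-step indirection. A struct fw_info_location is stored in the last
 * sizeof(struct fw_info_location) bytes of the Storm's internal RAM; its
 * grc_addr field points at the actual struct fw_info and its size field gives
 * that structure's size in bytes. The location is read first, and fw_info
 * itself is read from grc_addr only if the recorded size is sane (non-zero
 * and no larger than sizeof(*fw_info)).
 */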
1855 
1856 /* Dumps the specified string to the specified buffer.
1857  * Returns the dumped size in bytes.
1858  */
1859 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1860 {
1861 	if (dump)
1862 		strcpy(dump_buf, str);
1863 
1864 	return (u32)strlen(str) + 1;
1865 }
1866 
1867 /* Dumps zeros to align the specified buffer to dwords.
1868  * Returns the dumped size in bytes.
1869  */
1870 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1871 {
1872 	u8 offset_in_dword, align_size;
1873 
1874 	offset_in_dword = (u8)(byte_offset & 0x3);
1875 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1876 
1877 	if (dump && align_size)
1878 		memset(dump_buf, 0, align_size);
1879 
1880 	return align_size;
1881 }
1882 
1883 /* Writes the specified string param to the specified buffer.
1884  * Returns the dumped size in dwords.
1885  */
1886 static u32 qed_dump_str_param(u32 *dump_buf,
1887 			      bool dump,
1888 			      const char *param_name, const char *param_val)
1889 {
1890 	char *char_buf = (char *)dump_buf;
1891 	u32 offset = 0;
1892 
1893 	/* Dump param name */
1894 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1895 
1896 	/* Indicate a string param value */
1897 	if (dump)
1898 		*(char_buf + offset) = 1;
1899 	offset++;
1900 
1901 	/* Dump param value */
1902 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1903 
1904 	/* Align buffer to next dword */
1905 	offset += qed_dump_align(char_buf + offset, dump, offset);
1906 
1907 	return BYTES_TO_DWORDS(offset);
1908 }
1909 
1910 /* Writes the specified numeric param to the specified buffer.
1911  * Returns the dumped size in dwords.
1912  */
1913 static u32 qed_dump_num_param(u32 *dump_buf,
1914 			      bool dump, const char *param_name, u32 param_val)
1915 {
1916 	char *char_buf = (char *)dump_buf;
1917 	u32 offset = 0;
1918 
1919 	/* Dump param name */
1920 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1921 
1922 	/* Indicate a numeric param value */
1923 	if (dump)
1924 		*(char_buf + offset) = 0;
1925 	offset++;
1926 
1927 	/* Align buffer to next dword */
1928 	offset += qed_dump_align(char_buf + offset, dump, offset);
1929 
1930 	/* Dump param value (and change offset from bytes to dwords) */
1931 	offset = BYTES_TO_DWORDS(offset);
1932 	if (dump)
1933 		*(dump_buf + offset) = param_val;
1934 	offset++;
1935 
1936 	return offset;
1937 }
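
/* Illustrative example (not part of the driver): the two param encoders above
 * produce the following byte layouts, zero-padded to a dword boundary. The
 * param names/values used here ("chip" = "bb", "pci-func" = 2) are just
 * hypothetical placeholders:
 *
 *   string param:  'c' 'h' 'i' 'p' '\0' 0x01 'b' 'b' '\0' 0x00 0x00 0x00
 *                  (the 0x01 byte marks a string value)
 *                  -> qed_dump_str_param() returns 3 dwords
 *
 *   numeric param: 'p' 'c' 'i' '-' 'f' 'u' 'n' 'c' '\0' 0x00 0x00 0x00
 *                  followed by one dword holding the value 2
 *                  (the first 0x00 after the name marks a numeric value)
 *                  -> qed_dump_num_param() returns 4 dwords
 */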
1938 
1939 /* Reads the FW version and writes it as a param to the specified buffer.
1940  * Returns the dumped size in dwords.
1941  */
1942 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1943 				 struct qed_ptt *p_ptt,
1944 				 u32 *dump_buf, bool dump)
1945 {
1946 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1947 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1948 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1949 	struct fw_info fw_info = { {0}, {0} };
1950 	u32 offset = 0;
1951 
1952 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1953 		/* Read FW image/version from PRAM in a non-reset SEMI */
1954 		bool found = false;
1955 		u8 storm_id;
1956 
1957 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1958 		     storm_id++) {
1959 			struct storm_defs *storm = &s_storm_defs[storm_id];
1960 
1961 			/* Skip Storms that are in reset */
1962 			if (dev_data->block_in_reset[storm->block_id])
1963 				continue;
1964 
1965 			/* Read FW info for the current Storm */
1966 			qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1967 
1968 			/* Create FW version/image strings */
1969 			if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1970 				     "%d_%d_%d_%d", fw_info.ver.num.major,
1971 				     fw_info.ver.num.minor, fw_info.ver.num.rev,
1972 				     fw_info.ver.num.eng) < 0)
1973 				DP_NOTICE(p_hwfn,
1974 					  "Unexpected debug error: invalid FW version string\n");
1975 			switch (fw_info.ver.image_id) {
1976 			case FW_IMG_MAIN:
1977 				strcpy(fw_img_str, "main");
1978 				break;
1979 			default:
1980 				strcpy(fw_img_str, "unknown");
1981 				break;
1982 			}
1983 
1984 			found = true;
1985 		}
1986 	}
1987 
1988 	/* Dump FW version, image and timestamp */
1989 	offset += qed_dump_str_param(dump_buf + offset,
1990 				     dump, "fw-version", fw_ver_str);
1991 	offset += qed_dump_str_param(dump_buf + offset,
1992 				     dump, "fw-image", fw_img_str);
1993 	offset += qed_dump_num_param(dump_buf + offset,
1994 				     dump,
1995 				     "fw-timestamp", fw_info.ver.timestamp);
1996 
1997 	return offset;
1998 }
1999 
2000 /* Reads the MFW version and writes it as a param to the specified buffer.
2001  * Returns the dumped size in dwords.
2002  */
2003 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2004 				  struct qed_ptt *p_ptt,
2005 				  u32 *dump_buf, bool dump)
2006 {
2007 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2008 
2009 	if (dump &&
2010 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2011 		u32 global_section_offsize, global_section_addr, mfw_ver;
2012 		u32 public_data_addr, global_section_offsize_addr;
2013 
2014 		/* Find MCP public data GRC address. Needs to be ORed with
2015 		 * MCP_REG_SCRATCH due to a HW bug.
2016 		 */
2017 		public_data_addr = qed_rd(p_hwfn,
2018 					  p_ptt,
2019 					  MISC_REG_SHARED_MEM_ADDR) |
2020 				   MCP_REG_SCRATCH;
2021 
2022 		/* Find MCP public global section offset */
2023 		global_section_offsize_addr = public_data_addr +
2024 					      offsetof(struct mcp_public_data,
2025 						       sections) +
2026 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2027 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2028 						global_section_offsize_addr);
2029 		global_section_addr =
2030 			MCP_REG_SCRATCH +
2031 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2032 
2033 		/* Read MFW version from MCP public global section */
2034 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2035 				 global_section_addr +
2036 				 offsetof(struct public_global, mfw_ver));
2037 
2038 		/* Dump MFW version param */
2039 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2040 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2041 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2042 			DP_NOTICE(p_hwfn,
2043 				  "Unexpected debug error: invalid MFW version string\n");
2044 	}
2045 
2046 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2047 }
2048 
2049 /* Writes a section header to the specified buffer.
2050  * Returns the dumped size in dwords.
2051  */
2052 static u32 qed_dump_section_hdr(u32 *dump_buf,
2053 				bool dump, const char *name, u32 num_params)
2054 {
2055 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2056 }
2057 
2058 /* Writes the common global params to the specified buffer.
2059  * Returns the dumped size in dwords.
2060  */
2061 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2062 					 struct qed_ptt *p_ptt,
2063 					 u32 *dump_buf,
2064 					 bool dump,
2065 					 u8 num_specific_global_params)
2066 {
2067 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2068 	u32 offset = 0;
2069 	u8 num_params;
2070 
2071 	/* Dump global params section header */
2072 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2073 	offset += qed_dump_section_hdr(dump_buf + offset,
2074 				       dump, "global_params", num_params);
2075 
2076 	/* Store params */
2077 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2078 	offset += qed_dump_mfw_ver_param(p_hwfn,
2079 					 p_ptt, dump_buf + offset, dump);
2080 	offset += qed_dump_num_param(dump_buf + offset,
2081 				     dump, "tools-version", TOOLS_VERSION);
2082 	offset += qed_dump_str_param(dump_buf + offset,
2083 				     dump,
2084 				     "chip",
2085 				     s_chip_defs[dev_data->chip_id].name);
2086 	offset += qed_dump_str_param(dump_buf + offset,
2087 				     dump,
2088 				     "platform",
2089 				     s_platform_defs[dev_data->platform_id].
2090 				     name);
2091 	offset +=
2092 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2093 			       p_hwfn->abs_pf_id);
2094 
2095 	return offset;
2096 }
2097 
2098 /* Writes the "last" section (including CRC) to the specified buffer at the
2099  * given offset. Returns the dumped size in dwords.
2100  */
2101 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2102 {
2103 	u32 start_offset = offset;
2104 
2105 	/* Dump CRC section header */
2106 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2107 
2108 	/* Calculate CRC32 and add it to the dword after the "last" section */
2109 	if (dump)
2110 		*(dump_buf + offset) = ~crc32(0xffffffff,
2111 					      (u8 *)dump_buf,
2112 					      DWORDS_TO_BYTES(offset));
2113 
2114 	offset++;
2115 
2116 	return offset - start_offset;
2117 }
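
/* Minimal sketch (not part of the driver): a consumer holding a completed
 * dump of size_in_dwords dwords, including the trailing CRC dword written
 * above, could verify its integrity by mirroring the same calculation:
 *
 *	bool dump_crc_ok(const u32 *buf, u32 size_in_dwords)
 *	{
 *		u32 data_dwords = size_in_dwords - 1;
 *
 *		return buf[data_dwords] ==
 *		    ~crc32(0xffffffff, (const u8 *)buf,
 *			   DWORDS_TO_BYTES(data_dwords));
 *	}
 *
 * The helper name and signature are hypothetical; only the CRC formula is
 * taken from qed_dump_last_section().
 */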
2118 
2119 /* Update blocks reset state */
2120 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2121 					  struct qed_ptt *p_ptt)
2122 {
2123 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2124 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2125 	u32 i;
2126 
2127 	/* Read reset registers */
2128 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2129 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2130 			reg_val[i] = qed_rd(p_hwfn,
2131 					    p_ptt, s_reset_regs_defs[i].addr);
2132 
2133 	/* Check if blocks are in reset */
2134 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2135 		struct block_defs *block = s_block_defs[i];
2136 
2137 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2138 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2139 	}
2140 }
2141 
2142 /* Enable / disable the Debug block */
2143 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2144 				     struct qed_ptt *p_ptt, bool enable)
2145 {
2146 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2147 }
2148 
2149 /* Resets the Debug block */
2150 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2151 				    struct qed_ptt *p_ptt)
2152 {
2153 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2154 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2155 
2156 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2157 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2158 	new_reset_reg_val =
2159 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2160 
2161 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2162 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2163 }
2164 
2165 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2166 				     struct qed_ptt *p_ptt,
2167 				     enum dbg_bus_frame_modes mode)
2168 {
2169 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2170 }
2171 
2172 /* Enable / disable Debug Bus clients according to the specified mask
2173  * (1 = enable, 0 = disable).
2174  */
2175 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2176 				   struct qed_ptt *p_ptt, u32 client_mask)
2177 {
2178 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2179 }
2180 
2181 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2182 {
2183 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2184 	bool arg1, arg2;
2185 	const u32 *ptr;
2186 	u8 tree_val;
2187 
2188 	/* Get next element from modes tree buffer */
2189 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2190 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2191 
2192 	switch (tree_val) {
2193 	case INIT_MODE_OP_NOT:
2194 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2195 	case INIT_MODE_OP_OR:
2196 	case INIT_MODE_OP_AND:
2197 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2198 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2199 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2200 							arg2) : (arg1 && arg2);
2201 	default:
2202 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2203 	}
2204 }
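
/* Illustrative note (not part of the driver): the modes tree consumed above is
 * a byte stream in prefix (Polish) notation. For example, a hypothetical
 * buffer
 *
 *	{ INIT_MODE_OP_AND,
 *	  MAX_INIT_MODE_OPS + MODE_K2,
 *	  INIT_MODE_OP_NOT,
 *	  MAX_INIT_MODE_OPS + MODE_ASIC }
 *
 * evaluates to "K2 && !ASIC": operators recurse into their operands, and any
 * leaf value >= MAX_INIT_MODE_OPS indexes dev_data->mode_enable[] after
 * subtracting MAX_INIT_MODE_OPS.
 */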
2205 
2206 /* Returns true if the specified entity (indicated by GRC param) should be
2207  * included in the dump, false otherwise.
2208  */
2209 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2210 				enum dbg_grc_params grc_param)
2211 {
2212 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2213 }
2214 
2215 /* Returns true if the specified Storm should be included in the dump, false
2216  * otherwise.
2217  */
2218 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2219 				      enum dbg_storms storm)
2220 {
2221 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2222 }
2223 
2224 /* Returns true if the specified memory should be included in the dump, false
2225  * otherwise.
2226  */
2227 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2228 				    enum block_id block_id, u8 mem_group_id)
2229 {
2230 	struct block_defs *block = s_block_defs[block_id];
2231 	u8 i;
2232 
2233 	/* Check Storm match */
2234 	if (block->associated_to_storm &&
2235 	    !qed_grc_is_storm_included(p_hwfn,
2236 				       (enum dbg_storms)block->storm_id))
2237 		return false;
2238 
2239 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2240 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2241 
2242 		if (mem_group_id == big_ram->mem_group_id ||
2243 		    mem_group_id == big_ram->ram_mem_group_id)
2244 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2245 	}
2246 
2247 	switch (mem_group_id) {
2248 	case MEM_GROUP_PXP_ILT:
2249 	case MEM_GROUP_PXP_MEM:
2250 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2251 	case MEM_GROUP_RAM:
2252 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2253 	case MEM_GROUP_PBUF:
2254 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2255 	case MEM_GROUP_CAU_MEM:
2256 	case MEM_GROUP_CAU_SB:
2257 	case MEM_GROUP_CAU_PI:
2258 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2259 	case MEM_GROUP_QM_MEM:
2260 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2261 	case MEM_GROUP_CFC_MEM:
2262 	case MEM_GROUP_CONN_CFC_MEM:
2263 	case MEM_GROUP_TASK_CFC_MEM:
2264 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2265 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2266 	case MEM_GROUP_IGU_MEM:
2267 	case MEM_GROUP_IGU_MSIX:
2268 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2269 	case MEM_GROUP_MULD_MEM:
2270 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2271 	case MEM_GROUP_PRS_MEM:
2272 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2273 	case MEM_GROUP_DMAE_MEM:
2274 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2275 	case MEM_GROUP_TM_MEM:
2276 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2277 	case MEM_GROUP_SDM_MEM:
2278 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2279 	case MEM_GROUP_TDIF_CTX:
2280 	case MEM_GROUP_RDIF_CTX:
2281 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2282 	case MEM_GROUP_CM_MEM:
2283 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2284 	case MEM_GROUP_IOR:
2285 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2286 	default:
2287 		return true;
2288 	}
2289 }
2290 
2291 /* Stalls / unstalls all Storms included in the dump */
2292 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2293 				 struct qed_ptt *p_ptt, bool stall)
2294 {
2295 	u32 reg_addr;
2296 	u8 storm_id;
2297 
2298 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2299 		if (!qed_grc_is_storm_included(p_hwfn,
2300 					       (enum dbg_storms)storm_id))
2301 			continue;
2302 
2303 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2304 		    SEM_FAST_REG_STALL_0_BB_K2;
2305 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2306 	}
2307 
2308 	msleep(STALL_DELAY_MS);
2309 }
2310 
2311 /* Takes all blocks out of reset */
2312 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2313 				   struct qed_ptt *p_ptt)
2314 {
2315 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2316 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2317 	u32 block_id, i;
2318 
2319 	/* Fill reset regs values */
2320 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2321 		struct block_defs *block = s_block_defs[block_id];
2322 
2323 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2324 		    block->unreset)
2325 			reg_val[block->reset_reg] |=
2326 			    BIT(block->reset_bit_offset);
2327 	}
2328 
2329 	/* Write reset registers */
2330 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2331 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2332 			continue;
2333 
2334 		reg_val[i] |=
2335 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2336 
2337 		if (reg_val[i])
2338 			qed_wr(p_hwfn,
2339 			       p_ptt,
2340 			       s_reset_regs_defs[i].addr +
2341 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2342 	}
2343 }
2344 
2345 /* Returns the attention block data of the specified block */
2346 static const struct dbg_attn_block_type_data *
2347 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2348 {
2349 	const struct dbg_attn_block *base_attn_block_arr =
2350 		(const struct dbg_attn_block *)
2351 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2352 
2353 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2354 }
2355 
2356 /* Returns the attention registers of the specified block */
2357 static const struct dbg_attn_reg *
2358 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2359 			u8 *num_attn_regs)
2360 {
2361 	const struct dbg_attn_block_type_data *block_type_data =
2362 		qed_get_block_attn_data(block_id, attn_type);
2363 
2364 	*num_attn_regs = block_type_data->num_regs;
2365 
2366 	return &((const struct dbg_attn_reg *)
2367 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2368 							  regs_offset];
2369 }
2370 
2371 /* For each block, clear the status of all parities */
2372 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2373 				   struct qed_ptt *p_ptt)
2374 {
2375 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2376 	const struct dbg_attn_reg *attn_reg_arr;
2377 	u8 reg_idx, num_attn_regs;
2378 	u32 block_id;
2379 
2380 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2381 		if (dev_data->block_in_reset[block_id])
2382 			continue;
2383 
2384 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2385 						       ATTN_TYPE_PARITY,
2386 						       &num_attn_regs);
2387 
2388 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2389 			const struct dbg_attn_reg *reg_data =
2390 				&attn_reg_arr[reg_idx];
2391 			u16 modes_buf_offset;
2392 			bool eval_mode;
2393 
2394 			/* Check mode */
2395 			eval_mode = GET_FIELD(reg_data->mode.data,
2396 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2397 			modes_buf_offset =
2398 				GET_FIELD(reg_data->mode.data,
2399 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2400 
2401 			/* If Mode match: clear parity status */
2402 			if (!eval_mode ||
2403 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2404 				qed_rd(p_hwfn, p_ptt,
2405 				       DWORDS_TO_BYTES(reg_data->
2406 						       sts_clr_address));
2407 		}
2408 	}
2409 }
2410 
2411 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2412  * The following parameters are dumped:
2413  * - count:	 no. of dumped entries
2414  * - split:	 split type
2415  * - id:	 split ID (dumped only if split_id >= 0)
2416  * - param_name: user parameter value (dumped only if param_name != NULL
2417  *		 and param_val != NULL).
2418  */
2419 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2420 				 bool dump,
2421 				 u32 num_reg_entries,
2422 				 const char *split_type,
2423 				 int split_id,
2424 				 const char *param_name, const char *param_val)
2425 {
2426 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2427 	u32 offset = 0;
2428 
2429 	offset += qed_dump_section_hdr(dump_buf + offset,
2430 				       dump, "grc_regs", num_params);
2431 	offset += qed_dump_num_param(dump_buf + offset,
2432 				     dump, "count", num_reg_entries);
2433 	offset += qed_dump_str_param(dump_buf + offset,
2434 				     dump, "split", split_type);
2435 	if (split_id >= 0)
2436 		offset += qed_dump_num_param(dump_buf + offset,
2437 					     dump, "id", split_id);
2438 	if (param_name && param_val)
2439 		offset += qed_dump_str_param(dump_buf + offset,
2440 					     dump, param_name, param_val);
2441 
2442 	return offset;
2443 }
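
/* Illustrative example (not part of the driver): for a hypothetical per-port
 * split with 12 register entries, the header above expands to a "grc_regs"
 * section with three params:
 *
 *	count = 12, split = "port", id = 0
 *
 * The "id" param is omitted for engine-wide dumps (split_id < 0), and the
 * optional (param_name, param_val) pair is appended only when both are set.
 */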
2444 
2445 /* Reads the specified registers into the specified buffer.
2446  * The addr and len arguments are specified in dwords.
2447  */
2448 void qed_read_regs(struct qed_hwfn *p_hwfn,
2449 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2450 {
2451 	u32 i;
2452 
2453 	for (i = 0; i < len; i++)
2454 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2455 }
2456 
2457 /* Dumps the GRC registers in the specified address range.
2458  * Returns the dumped size in dwords.
2459  * The addr and len arguments are specified in dwords.
2460  */
2461 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2462 				   struct qed_ptt *p_ptt,
2463 				   u32 *dump_buf,
2464 				   bool dump, u32 addr, u32 len, bool wide_bus)
2465 {
2466 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2467 
2468 	if (!dump)
2469 		return len;
2470 
2471 	/* Print log if needed */
2472 	dev_data->num_regs_read += len;
2473 	if (dev_data->num_regs_read >=
2474 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2475 		DP_VERBOSE(p_hwfn,
2476 			   QED_MSG_DEBUG,
2477 			   "Dumping %d registers...\n",
2478 			   dev_data->num_regs_read);
2479 		dev_data->num_regs_read = 0;
2480 	}
2481 
2482 	/* Try reading using DMAE */
2483 	if (dev_data->use_dmae &&
2484 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2485 	     wide_bus)) {
2486 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2487 				       (u64)(uintptr_t)(dump_buf), len, 0))
2488 			return len;
2489 		dev_data->use_dmae = 0;
2490 		DP_VERBOSE(p_hwfn,
2491 			   QED_MSG_DEBUG,
2492 			   "Failed reading from chip using DMAE, using GRC instead\n");
2493 	}
2494 
2495 	/* Read registers */
2496 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2497 
2498 	return len;
2499 }
2500 
2501 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2502  * The addr and len arguments are specified in dwords.
2503  */
2504 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2505 				      bool dump, u32 addr, u32 len)
2506 {
2507 	if (dump)
2508 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2509 
2510 	return 1;
2511 }
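
/* Illustrative note (not part of the driver): each register sequence is
 * preceded by a single header dword packing the GRC address (in dwords) in
 * the low bits and the sequence length (in dwords) starting at bit
 * REG_DUMP_LEN_SHIFT. Assuming the address always fits below that bit, a
 * parser can recover both fields with:
 *
 *	addr = hdr & (BIT(REG_DUMP_LEN_SHIFT) - 1);
 *	len  = hdr >> REG_DUMP_LEN_SHIFT;
 */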
2512 
2513 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2514  * The addr and len arguments are specified in dwords.
2515  */
2516 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2517 				  struct qed_ptt *p_ptt,
2518 				  u32 *dump_buf,
2519 				  bool dump, u32 addr, u32 len, bool wide_bus)
2520 {
2521 	u32 offset = 0;
2522 
2523 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2524 	offset += qed_grc_dump_addr_range(p_hwfn,
2525 					  p_ptt,
2526 					  dump_buf + offset,
2527 					  dump, addr, len, wide_bus);
2528 
2529 	return offset;
2530 }
2531 
2532 /* Dumps GRC registers sequence with skip cycle.
2533  * Returns the dumped size in dwords.
2534  * - addr:	start GRC address in dwords
2535  * - total_len:	total no. of dwords to dump
2536  * - read_len:	no. consecutive dwords to read
2537  * - skip_len:	no. of dwords to skip (and fill with zeros)
2538  */
2539 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2540 				       struct qed_ptt *p_ptt,
2541 				       u32 *dump_buf,
2542 				       bool dump,
2543 				       u32 addr,
2544 				       u32 total_len,
2545 				       u32 read_len, u32 skip_len)
2546 {
2547 	u32 offset = 0, reg_offset = 0;
2548 
2549 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2550 
2551 	if (!dump)
2552 		return offset + total_len;
2553 
2554 	while (reg_offset < total_len) {
2555 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2556 
2557 		offset += qed_grc_dump_addr_range(p_hwfn,
2558 						  p_ptt,
2559 						  dump_buf + offset,
2560 						  dump, addr, curr_len, false);
2561 		reg_offset += curr_len;
2562 		addr += curr_len;
2563 
2564 		if (reg_offset < total_len) {
2565 			curr_len = min_t(u32, skip_len, total_len - skip_len);
2566 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2567 			offset += curr_len;
2568 			reg_offset += curr_len;
2569 			addr += curr_len;
2570 		}
2571 	}
2572 
2573 	return offset;
2574 }
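
/* Illustrative example (not part of the driver): with read_len = 7 and
 * skip_len = 1 (the values used below for the RDIF/TDIF debug error info),
 * the skip cycle reads dwords 0..6, emits a zero for dword 7, reads dwords
 * 8..14, zeroes dword 15, and so on until total_len dwords have been written.
 */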
2575 
2576 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2577 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2578 				     struct qed_ptt *p_ptt,
2579 				     struct dbg_array input_regs_arr,
2580 				     u32 *dump_buf,
2581 				     bool dump,
2582 				     bool block_enable[MAX_BLOCK_ID],
2583 				     u32 *num_dumped_reg_entries)
2584 {
2585 	u32 i, offset = 0, input_offset = 0;
2586 	bool mode_match = true;
2587 
2588 	*num_dumped_reg_entries = 0;
2589 
2590 	while (input_offset < input_regs_arr.size_in_dwords) {
2591 		const struct dbg_dump_cond_hdr *cond_hdr =
2592 		    (const struct dbg_dump_cond_hdr *)
2593 		    &input_regs_arr.ptr[input_offset++];
2594 		u16 modes_buf_offset;
2595 		bool eval_mode;
2596 
2597 		/* Check mode/block */
2598 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2599 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2600 		if (eval_mode) {
2601 			modes_buf_offset =
2602 				GET_FIELD(cond_hdr->mode.data,
2603 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2604 			mode_match = qed_is_mode_match(p_hwfn,
2605 						       &modes_buf_offset);
2606 		}
2607 
2608 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2609 			input_offset += cond_hdr->data_size;
2610 			continue;
2611 		}
2612 
2613 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2614 			const struct dbg_dump_reg *reg =
2615 			    (const struct dbg_dump_reg *)
2616 			    &input_regs_arr.ptr[input_offset];
2617 			u32 addr, len;
2618 			bool wide_bus;
2619 
2620 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2621 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2622 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2623 			offset += qed_grc_dump_reg_entry(p_hwfn,
2624 							 p_ptt,
2625 							 dump_buf + offset,
2626 							 dump,
2627 							 addr,
2628 							 len,
2629 							 wide_bus);
2630 			(*num_dumped_reg_entries)++;
2631 		}
2632 	}
2633 
2634 	return offset;
2635 }
2636 
2637 /* Dumps one split of GRC registers. Returns the dumped size in dwords. */
2638 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2639 				   struct qed_ptt *p_ptt,
2640 				   struct dbg_array input_regs_arr,
2641 				   u32 *dump_buf,
2642 				   bool dump,
2643 				   bool block_enable[MAX_BLOCK_ID],
2644 				   const char *split_type_name,
2645 				   u32 split_id,
2646 				   const char *param_name,
2647 				   const char *param_val)
2648 {
2649 	u32 num_dumped_reg_entries, offset;
2650 
2651 	/* Calculate register dump header size (and skip it for now) */
2652 	offset = qed_grc_dump_regs_hdr(dump_buf,
2653 				       false,
2654 				       0,
2655 				       split_type_name,
2656 				       split_id, param_name, param_val);
2657 
2658 	/* Dump registers */
2659 	offset += qed_grc_dump_regs_entries(p_hwfn,
2660 					    p_ptt,
2661 					    input_regs_arr,
2662 					    dump_buf + offset,
2663 					    dump,
2664 					    block_enable,
2665 					    &num_dumped_reg_entries);
2666 
2667 	/* Write register dump header */
2668 	if (dump && num_dumped_reg_entries > 0)
2669 		qed_grc_dump_regs_hdr(dump_buf,
2670 				      dump,
2671 				      num_dumped_reg_entries,
2672 				      split_type_name,
2673 				      split_id, param_name, param_val);
2674 
2675 	return num_dumped_reg_entries > 0 ? offset : 0;
2676 }
2677 
2678 /* Dumps registers according to the input registers array. Returns the dumped
2679  * size in dwords.
2680  */
2681 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2682 				  struct qed_ptt *p_ptt,
2683 				  u32 *dump_buf,
2684 				  bool dump,
2685 				  bool block_enable[MAX_BLOCK_ID],
2686 				  const char *param_name, const char *param_val)
2687 {
2688 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2689 	struct chip_platform_defs *chip_platform;
2690 	u32 offset = 0, input_offset = 0;
2691 	struct chip_defs *chip;
2692 	u8 port_id, pf_id, vf_id;
2693 	u16 fid;
2694 
2695 	chip = &s_chip_defs[dev_data->chip_id];
2696 	chip_platform = &chip->per_platform[dev_data->platform_id];
2697 
2698 	while (input_offset <
2699 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2700 		const struct dbg_dump_split_hdr *split_hdr;
2701 		struct dbg_array curr_input_regs_arr;
2702 		u32 split_data_size;
2703 		u8 split_type_id;
2704 
2705 		split_hdr =
2706 			(const struct dbg_dump_split_hdr *)
2707 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2708 		split_type_id =
2709 			GET_FIELD(split_hdr->hdr,
2710 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2711 		split_data_size =
2712 			GET_FIELD(split_hdr->hdr,
2713 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2714 		curr_input_regs_arr.ptr =
2715 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2716 		curr_input_regs_arr.size_in_dwords = split_data_size;
2717 
2718 		switch (split_type_id) {
2719 		case SPLIT_TYPE_NONE:
2720 			offset += qed_grc_dump_split_data(p_hwfn,
2721 							  p_ptt,
2722 							  curr_input_regs_arr,
2723 							  dump_buf + offset,
2724 							  dump,
2725 							  block_enable,
2726 							  "eng",
2727 							  (u32)(-1),
2728 							  param_name,
2729 							  param_val);
2730 			break;
2731 
2732 		case SPLIT_TYPE_PORT:
2733 			for (port_id = 0; port_id < chip_platform->num_ports;
2734 			     port_id++) {
2735 				if (dump)
2736 					qed_port_pretend(p_hwfn, p_ptt,
2737 							 port_id);
2738 				offset +=
2739 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2740 							    curr_input_regs_arr,
2741 							    dump_buf + offset,
2742 							    dump, block_enable,
2743 							    "port", port_id,
2744 							    param_name,
2745 							    param_val);
2746 			}
2747 			break;
2748 
2749 		case SPLIT_TYPE_PF:
2750 		case SPLIT_TYPE_PORT_PF:
2751 			for (pf_id = 0; pf_id < chip_platform->num_pfs;
2752 			     pf_id++) {
2753 				u8 pfid_shift =
2754 					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2755 
2756 				if (dump) {
2757 					fid = pf_id << pfid_shift;
2758 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2759 				}
2760 
2761 				offset +=
2762 				    qed_grc_dump_split_data(p_hwfn,
2763 							    p_ptt,
2764 							    curr_input_regs_arr,
2765 							    dump_buf + offset,
2766 							    dump,
2767 							    block_enable,
2768 							    "pf",
2769 							    pf_id,
2770 							    param_name,
2771 							    param_val);
2772 			}
2773 			break;
2774 
2775 		case SPLIT_TYPE_VF:
2776 			for (vf_id = 0; vf_id < chip_platform->num_vfs;
2777 			     vf_id++) {
2778 				u8 vfvalid_shift =
2779 					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2780 				u8 vfid_shift =
2781 					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2782 
2783 				if (dump) {
2784 					fid = BIT(vfvalid_shift) |
2785 					      (vf_id << vfid_shift);
2786 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2787 				}
2788 
2789 				offset +=
2790 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2791 							    curr_input_regs_arr,
2792 							    dump_buf + offset,
2793 							    dump, block_enable,
2794 							    "vf", vf_id,
2795 							    param_name,
2796 							    param_val);
2797 			}
2798 			break;
2799 
2800 		default:
2801 			break;
2802 		}
2803 
2804 		input_offset += split_data_size;
2805 	}
2806 
2807 	/* Pretend to original PF */
2808 	if (dump) {
2809 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2810 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2811 	}
2812 
2813 	return offset;
2814 }
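
/* Illustrative note (not part of the driver): the per-PF and per-VF splits
 * above rely on GRC "pretend", i.e. temporarily presenting a different
 * concrete FID to the chip before reading its registers. The FID is built
 * exactly as in the loops above, e.g. for a hypothetical VF 3:
 *
 *	fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
 *	      (3 << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
 *	qed_fid_pretend(p_hwfn, p_ptt, fid);
 *
 * The original PF is pretended back (when actually dumping) before returning.
 */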
2815 
2816 /* Dump reset registers. Returns the dumped size in dwords. */
2817 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2818 				   struct qed_ptt *p_ptt,
2819 				   u32 *dump_buf, bool dump)
2820 {
2821 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2822 	u32 i, offset = 0, num_regs = 0;
2823 
2824 	/* Calculate header size */
2825 	offset += qed_grc_dump_regs_hdr(dump_buf,
2826 					false, 0, "eng", -1, NULL, NULL);
2827 
2828 	/* Write reset registers */
2829 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2830 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2831 			continue;
2832 
2833 		offset += qed_grc_dump_reg_entry(p_hwfn,
2834 						 p_ptt,
2835 						 dump_buf + offset,
2836 						 dump,
2837 						 BYTES_TO_DWORDS
2838 						 (s_reset_regs_defs[i].addr), 1,
2839 						 false);
2840 		num_regs++;
2841 	}
2842 
2843 	/* Write header */
2844 	if (dump)
2845 		qed_grc_dump_regs_hdr(dump_buf,
2846 				      true, num_regs, "eng", -1, NULL, NULL);
2847 
2848 	return offset;
2849 }
2850 
2851 /* Dump registers that are modified during GRC Dump and therefore must be
2852  * dumped first. Returns the dumped size in dwords.
2853  */
2854 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2855 				      struct qed_ptt *p_ptt,
2856 				      u32 *dump_buf, bool dump)
2857 {
2858 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2859 	u32 block_id, offset = 0, num_reg_entries = 0;
2860 	const struct dbg_attn_reg *attn_reg_arr;
2861 	u8 storm_id, reg_idx, num_attn_regs;
2862 
2863 	/* Calculate header size */
2864 	offset += qed_grc_dump_regs_hdr(dump_buf,
2865 					false, 0, "eng", -1, NULL, NULL);
2866 
2867 	/* Write parity registers */
2868 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2869 		if (dev_data->block_in_reset[block_id] && dump)
2870 			continue;
2871 
2872 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2873 						       ATTN_TYPE_PARITY,
2874 						       &num_attn_regs);
2875 
2876 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2877 			const struct dbg_attn_reg *reg_data =
2878 				&attn_reg_arr[reg_idx];
2879 			u16 modes_buf_offset;
2880 			bool eval_mode;
2881 			u32 addr;
2882 
2883 			/* Check mode */
2884 			eval_mode = GET_FIELD(reg_data->mode.data,
2885 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2886 			modes_buf_offset =
2887 				GET_FIELD(reg_data->mode.data,
2888 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2889 			if (eval_mode &&
2890 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2891 				continue;
2892 
2893 			/* Mode match: read & dump registers */
2894 			addr = reg_data->mask_address;
2895 			offset += qed_grc_dump_reg_entry(p_hwfn,
2896 							 p_ptt,
2897 							 dump_buf + offset,
2898 							 dump,
2899 							 addr,
2900 							 1, false);
2901 			addr = GET_FIELD(reg_data->data,
2902 					 DBG_ATTN_REG_STS_ADDRESS);
2903 			offset += qed_grc_dump_reg_entry(p_hwfn,
2904 							 p_ptt,
2905 							 dump_buf + offset,
2906 							 dump,
2907 							 addr,
2908 							 1, false);
2909 			num_reg_entries += 2;
2910 		}
2911 	}
2912 
2913 	/* Write Storm stall status registers */
2914 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2915 		struct storm_defs *storm = &s_storm_defs[storm_id];
2916 		u32 addr;
2917 
2918 		if (dev_data->block_in_reset[storm->block_id] && dump)
2919 			continue;
2920 
2921 		addr =
2922 		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2923 				    SEM_FAST_REG_STALLED);
2924 		offset += qed_grc_dump_reg_entry(p_hwfn,
2925 						 p_ptt,
2926 						 dump_buf + offset,
2927 						 dump,
2928 						 addr,
2929 						 1,
2930 						 false);
2931 		num_reg_entries++;
2932 	}
2933 
2934 	/* Write header */
2935 	if (dump)
2936 		qed_grc_dump_regs_hdr(dump_buf,
2937 				      true,
2938 				      num_reg_entries, "eng", -1, NULL, NULL);
2939 
2940 	return offset;
2941 }
2942 
2943 /* Dumps registers that can't be represented in the debug arrays */
2944 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2945 				     struct qed_ptt *p_ptt,
2946 				     u32 *dump_buf, bool dump)
2947 {
2948 	u32 offset = 0, addr;
2949 
2950 	offset += qed_grc_dump_regs_hdr(dump_buf,
2951 					dump, 2, "eng", -1, NULL, NULL);
2952 
2953 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO (R/TDIF_REG_DEBUG_ERROR_INFO_SIZE
2954 	 * dwords each; every 8th register is skipped).
2955 	 */
2956 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2957 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2958 					      p_ptt,
2959 					      dump_buf + offset,
2960 					      dump,
2961 					      addr,
2962 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2963 					      7,
2964 					      1);
2965 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2966 	offset +=
2967 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2968 					p_ptt,
2969 					dump_buf + offset,
2970 					dump,
2971 					addr,
2972 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2973 					7,
2974 					1);
2975 
2976 	return offset;
2977 }
2978 
2979 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2980  * dwords. The following parameters are dumped:
2981  * - name:	   dumped only if it's not NULL.
2982  * - addr:	   in dwords, dumped only if name is NULL.
2983  * - len:	   in dwords, always dumped.
2984  * - width:	   dumped if it's not zero.
2985  * - packed:	   dumped only if it's not false.
2986  * - mem_group:	   always dumped.
2987  * - is_storm:	   true only if the memory is related to a Storm.
2988  * - storm_letter: valid only if is_storm is true.
2989  *
2990  */
2991 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2992 				u32 *dump_buf,
2993 				bool dump,
2994 				const char *name,
2995 				u32 addr,
2996 				u32 len,
2997 				u32 bit_width,
2998 				bool packed,
2999 				const char *mem_group,
3000 				bool is_storm, char storm_letter)
3001 {
3002 	u8 num_params = 3;
3003 	u32 offset = 0;
3004 	char buf[64];
3005 
3006 	if (!len)
3007 		DP_NOTICE(p_hwfn,
3008 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3009 
3010 	if (bit_width)
3011 		num_params++;
3012 	if (packed)
3013 		num_params++;
3014 
3015 	/* Dump section header */
3016 	offset += qed_dump_section_hdr(dump_buf + offset,
3017 				       dump, "grc_mem", num_params);
3018 
3019 	if (name) {
3020 		/* Dump name */
3021 		if (is_storm) {
3022 			strcpy(buf, "?STORM_");
3023 			buf[0] = storm_letter;
3024 			strcpy(buf + strlen(buf), name);
3025 		} else {
3026 			strcpy(buf, name);
3027 		}
3028 
3029 		offset += qed_dump_str_param(dump_buf + offset,
3030 					     dump, "name", buf);
3031 	} else {
3032 		/* Dump address */
3033 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3034 
3035 		offset += qed_dump_num_param(dump_buf + offset,
3036 					     dump, "addr", addr_in_bytes);
3037 	}
3038 
3039 	/* Dump len */
3040 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3041 
3042 	/* Dump bit width */
3043 	if (bit_width)
3044 		offset += qed_dump_num_param(dump_buf + offset,
3045 					     dump, "width", bit_width);
3046 
3047 	/* Dump packed */
3048 	if (packed)
3049 		offset += qed_dump_num_param(dump_buf + offset,
3050 					     dump, "packed", 1);
3051 
3052 	/* Dump reg type */
3053 	if (is_storm) {
3054 		strcpy(buf, "?STORM_");
3055 		buf[0] = storm_letter;
3056 		strcpy(buf + strlen(buf), mem_group);
3057 	} else {
3058 		strcpy(buf, mem_group);
3059 	}
3060 
3061 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3062 
3063 	return offset;
3064 }
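
/* Illustrative example (not part of the driver): when is_storm is set, the
 * '?' in the "?STORM_" prefix above is overwritten with the Storm letter, so
 * for a Storm whose letter is 'T' a memory named "CONN_AG_CTX" is emitted as
 * "TSTORM_CONN_AG_CTX", and its "type" param receives the same prefix.
 */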
3065 
3066 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3067  * Returns the dumped size in dwords.
3068  * The addr and len arguments are specified in dwords.
3069  */
3070 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3071 			    struct qed_ptt *p_ptt,
3072 			    u32 *dump_buf,
3073 			    bool dump,
3074 			    const char *name,
3075 			    u32 addr,
3076 			    u32 len,
3077 			    bool wide_bus,
3078 			    u32 bit_width,
3079 			    bool packed,
3080 			    const char *mem_group,
3081 			    bool is_storm, char storm_letter)
3082 {
3083 	u32 offset = 0;
3084 
3085 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3086 				       dump_buf + offset,
3087 				       dump,
3088 				       name,
3089 				       addr,
3090 				       len,
3091 				       bit_width,
3092 				       packed,
3093 				       mem_group, is_storm, storm_letter);
3094 	offset += qed_grc_dump_addr_range(p_hwfn,
3095 					  p_ptt,
3096 					  dump_buf + offset,
3097 					  dump, addr, len, wide_bus);
3098 
3099 	return offset;
3100 }
3101 
3102 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3103 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3104 				    struct qed_ptt *p_ptt,
3105 				    struct dbg_array input_mems_arr,
3106 				    u32 *dump_buf, bool dump)
3107 {
3108 	u32 i, offset = 0, input_offset = 0;
3109 	bool mode_match = true;
3110 
3111 	while (input_offset < input_mems_arr.size_in_dwords) {
3112 		const struct dbg_dump_cond_hdr *cond_hdr;
3113 		u16 modes_buf_offset;
3114 		u32 num_entries;
3115 		bool eval_mode;
3116 
3117 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3118 			   &input_mems_arr.ptr[input_offset++];
3119 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3120 
3121 		/* Check required mode */
3122 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3123 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3124 		if (eval_mode) {
3125 			modes_buf_offset =
3126 				GET_FIELD(cond_hdr->mode.data,
3127 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3128 			mode_match = qed_is_mode_match(p_hwfn,
3129 						       &modes_buf_offset);
3130 		}
3131 
3132 		if (!mode_match) {
3133 			input_offset += cond_hdr->data_size;
3134 			continue;
3135 		}
3136 
3137 		for (i = 0; i < num_entries;
3138 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3139 			const struct dbg_dump_mem *mem =
3140 				(const struct dbg_dump_mem *)
3141 				&input_mems_arr.ptr[input_offset];
3142 			u8 mem_group_id = GET_FIELD(mem->dword0,
3143 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3144 			bool is_storm = false, mem_wide_bus;
3145 			enum dbg_grc_params grc_param;
3146 			char storm_letter = 'a';
3147 			enum block_id block_id;
3148 			u32 mem_addr, mem_len;
3149 
3150 			if (mem_group_id >= MEM_GROUPS_NUM) {
3151 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3152 				return 0;
3153 			}
3154 
3155 			block_id = (enum block_id)cond_hdr->block_id;
3156 			if (!qed_grc_is_mem_included(p_hwfn,
3157 						     block_id,
3158 						     mem_group_id))
3159 				continue;
3160 
3161 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3162 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3163 			mem_wide_bus = GET_FIELD(mem->dword1,
3164 						 DBG_DUMP_MEM_WIDE_BUS);
3165 
3166 			/* Update memory length for CCFC/TCFC memories
3167 			 * according to number of LCIDs/LTIDs.
3168 			 */
3169 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3170 				if (mem_len % MAX_LCIDS) {
3171 					DP_NOTICE(p_hwfn,
3172 						  "Invalid CCFC connection memory size\n");
3173 					return 0;
3174 				}
3175 
3176 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3177 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3178 					  (mem_len / MAX_LCIDS);
3179 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3180 				if (mem_len % MAX_LTIDS) {
3181 					DP_NOTICE(p_hwfn,
3182 						  "Invalid TCFC task memory size\n");
3183 					return 0;
3184 				}
3185 
3186 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3187 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3188 					  (mem_len / MAX_LTIDS);
3189 			}
3190 
3191 			/* If memory is associated with Storm, update Storm
3192 			 * details.
3193 			 */
3194 			if (s_block_defs
3195 			    [cond_hdr->block_id]->associated_to_storm) {
3196 				is_storm = true;
3197 				storm_letter =
3198 				    s_storm_defs[s_block_defs
3199 						 [cond_hdr->block_id]->
3200 						 storm_id].letter;
3201 			}
3202 
3203 			/* Dump memory */
3204 			offset += qed_grc_dump_mem(p_hwfn,
3205 						p_ptt,
3206 						dump_buf + offset,
3207 						dump,
3208 						NULL,
3209 						mem_addr,
3210 						mem_len,
3211 						mem_wide_bus,
3212 						0,
3213 						false,
3214 						s_mem_group_names[mem_group_id],
3215 						is_storm,
3216 						storm_letter);
3217 		}
3218 	}
3219 
3220 	return offset;
3221 }
3222 
3223 /* Dumps GRC memories according to the input array dump_mem.
3224  * Returns the dumped size in dwords.
3225  */
3226 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3227 				 struct qed_ptt *p_ptt,
3228 				 u32 *dump_buf, bool dump)
3229 {
3230 	u32 offset = 0, input_offset = 0;
3231 
3232 	while (input_offset <
3233 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3234 		const struct dbg_dump_split_hdr *split_hdr;
3235 		struct dbg_array curr_input_mems_arr;
3236 		u32 split_data_size;
3237 		u8 split_type_id;
3238 
3239 		split_hdr = (const struct dbg_dump_split_hdr *)
3240 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3241 		split_type_id =
3242 			GET_FIELD(split_hdr->hdr,
3243 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3244 		split_data_size =
3245 			GET_FIELD(split_hdr->hdr,
3246 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3247 		curr_input_mems_arr.ptr =
3248 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3249 		curr_input_mems_arr.size_in_dwords = split_data_size;
3250 
3251 		switch (split_type_id) {
3252 		case SPLIT_TYPE_NONE:
3253 			offset += qed_grc_dump_mem_entries(p_hwfn,
3254 							   p_ptt,
3255 							   curr_input_mems_arr,
3256 							   dump_buf + offset,
3257 							   dump);
3258 			break;
3259 
3260 		default:
3261 			DP_NOTICE(p_hwfn,
3262 				  "Dumping split memories is currently not supported\n");
3263 			break;
3264 		}
3265 
3266 		input_offset += split_data_size;
3267 	}
3268 
3269 	return offset;
3270 }
3271 
3272 /* Dumps GRC context data for the specified Storm.
3273  * Returns the dumped size in dwords.
3274  * The lid_size argument is specified in quad-regs.
3275  */
3276 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3277 				 struct qed_ptt *p_ptt,
3278 				 u32 *dump_buf,
3279 				 bool dump,
3280 				 const char *name,
3281 				 u32 num_lids,
3282 				 u32 lid_size,
3283 				 u32 rd_reg_addr,
3284 				 u8 storm_id)
3285 {
3286 	struct storm_defs *storm = &s_storm_defs[storm_id];
3287 	u32 i, lid, total_size, offset = 0;
3288 
3289 	if (!lid_size)
3290 		return 0;
3291 
3292 	lid_size *= BYTES_IN_DWORD;
3293 	total_size = num_lids * lid_size;
3294 
3295 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3296 				       dump_buf + offset,
3297 				       dump,
3298 				       name,
3299 				       0,
3300 				       total_size,
3301 				       lid_size * 32,
3302 				       false, name, true, storm->letter);
3303 
3304 	if (!dump)
3305 		return offset + total_size;
3306 
3307 	/* Dump context data */
3308 	for (lid = 0; lid < num_lids; lid++) {
3309 		for (i = 0; i < lid_size; i++, offset++) {
3310 			qed_wr(p_hwfn,
3311 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3312 			*(dump_buf + offset) = qed_rd(p_hwfn,
3313 						      p_ptt, rd_reg_addr);
3314 		}
3315 	}
3316 
3317 	return offset;
3318 }
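
/* Illustrative note (not part of the driver): context data is read through an
 * indirect window. For each logical ID (lid), the loop above writes
 * ((dword_index << 9) | lid) to the Storm's cm_ctx_wr_addr register to select
 * a context dword, then reads it back from rd_reg_addr. With lid_size given
 * in quad-regs, each lid therefore contributes lid_size * 4 payload dwords.
 */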
3319 
3320 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3321 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3322 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3323 {
3324 	enum dbg_grc_params grc_param;
3325 	u32 offset = 0;
3326 	u8 storm_id;
3327 
3328 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3329 		struct storm_defs *storm = &s_storm_defs[storm_id];
3330 
3331 		if (!qed_grc_is_storm_included(p_hwfn,
3332 					       (enum dbg_storms)storm_id))
3333 			continue;
3334 
3335 		/* Dump Conn AG context */
3336 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3337 		offset +=
3338 			qed_grc_dump_ctx_data(p_hwfn,
3339 					      p_ptt,
3340 					      dump_buf + offset,
3341 					      dump,
3342 					      "CONN_AG_CTX",
3343 					      qed_grc_get_param(p_hwfn,
3344 								grc_param),
3345 					      storm->cm_conn_ag_ctx_lid_size,
3346 					      storm->cm_conn_ag_ctx_rd_addr,
3347 					      storm_id);
3348 
3349 		/* Dump Conn ST context */
3350 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3351 		offset +=
3352 			qed_grc_dump_ctx_data(p_hwfn,
3353 					      p_ptt,
3354 					      dump_buf + offset,
3355 					      dump,
3356 					      "CONN_ST_CTX",
3357 					      qed_grc_get_param(p_hwfn,
3358 								grc_param),
3359 					      storm->cm_conn_st_ctx_lid_size,
3360 					      storm->cm_conn_st_ctx_rd_addr,
3361 					      storm_id);
3362 
3363 		/* Dump Task AG context */
3364 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3365 		offset +=
3366 			qed_grc_dump_ctx_data(p_hwfn,
3367 					      p_ptt,
3368 					      dump_buf + offset,
3369 					      dump,
3370 					      "TASK_AG_CTX",
3371 					      qed_grc_get_param(p_hwfn,
3372 								grc_param),
3373 					      storm->cm_task_ag_ctx_lid_size,
3374 					      storm->cm_task_ag_ctx_rd_addr,
3375 					      storm_id);
3376 
3377 		/* Dump Task ST context */
3378 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3379 		offset +=
3380 			qed_grc_dump_ctx_data(p_hwfn,
3381 					      p_ptt,
3382 					      dump_buf + offset,
3383 					      dump,
3384 					      "TASK_ST_CTX",
3385 					      qed_grc_get_param(p_hwfn,
3386 								grc_param),
3387 					      storm->cm_task_st_ctx_lid_size,
3388 					      storm->cm_task_st_ctx_rd_addr,
3389 					      storm_id);
3390 	}
3391 
3392 	return offset;
3393 }
3394 
3395 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3396 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3397 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3398 {
3399 	char buf[10] = "IOR_SET_?";
3400 	u32 addr, offset = 0;
3401 	u8 storm_id, set_id;
3402 
3403 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3404 		struct storm_defs *storm = &s_storm_defs[storm_id];
3405 
3406 		if (!qed_grc_is_storm_included(p_hwfn,
3407 					       (enum dbg_storms)storm_id))
3408 			continue;
3409 
3410 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3411 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3412 					       SEM_FAST_REG_STORM_REG_FILE) +
3413 			       IOR_SET_OFFSET(set_id);
3414 			buf[strlen(buf) - 1] = '0' + set_id;
3415 			offset += qed_grc_dump_mem(p_hwfn,
3416 						   p_ptt,
3417 						   dump_buf + offset,
3418 						   dump,
3419 						   buf,
3420 						   addr,
3421 						   IORS_PER_SET,
3422 						   false,
3423 						   32,
3424 						   false,
3425 						   "ior",
3426 						   true,
3427 						   storm->letter);
3428 		}
3429 	}
3430 
3431 	return offset;
3432 }
3433 
3434 /* Dump VFC CAM. Returns the dumped size in dwords. */
3435 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3436 				struct qed_ptt *p_ptt,
3437 				u32 *dump_buf, bool dump, u8 storm_id)
3438 {
3439 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3440 	struct storm_defs *storm = &s_storm_defs[storm_id];
3441 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3442 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3443 	u32 row, i, offset = 0;
3444 
3445 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3446 				       dump_buf + offset,
3447 				       dump,
3448 				       "vfc_cam",
3449 				       0,
3450 				       total_size,
3451 				       256,
3452 				       false, "vfc_cam", true, storm->letter);
3453 
3454 	if (!dump)
3455 		return offset + total_size;
3456 
3457 	/* Prepare CAM address */
3458 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3459 
3460 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3461 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3462 		/* Write VFC CAM command */
3463 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3464 		ARR_REG_WR(p_hwfn,
3465 			   p_ptt,
3466 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3467 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3468 
3469 		/* Write VFC CAM address */
3470 		ARR_REG_WR(p_hwfn,
3471 			   p_ptt,
3472 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3473 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3474 
3475 		/* Read VFC CAM read response */
3476 		ARR_REG_RD(p_hwfn,
3477 			   p_ptt,
3478 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3479 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3480 	}
3481 
3482 	return offset;
3483 }
3484 
3485 /* Dump VFC RAM. Returns the dumped size in dwords. */
3486 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3487 				struct qed_ptt *p_ptt,
3488 				u32 *dump_buf,
3489 				bool dump,
3490 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3491 {
3492 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3493 	struct storm_defs *storm = &s_storm_defs[storm_id];
3494 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3495 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3496 	u32 row, i, offset = 0;
3497 
3498 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3499 				       dump_buf + offset,
3500 				       dump,
3501 				       ram_defs->mem_name,
3502 				       0,
3503 				       total_size,
3504 				       256,
3505 				       false,
3506 				       ram_defs->type_name,
3507 				       true, storm->letter);
3508 
3509 	/* Prepare RAM address */
3510 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3511 
3512 	if (!dump)
3513 		return offset + total_size;
3514 
3515 	for (row = ram_defs->base_row;
3516 	     row < ram_defs->base_row + ram_defs->num_rows;
3517 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3518 		/* Write VFC RAM command */
3519 		ARR_REG_WR(p_hwfn,
3520 			   p_ptt,
3521 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3522 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3523 
3524 		/* Write VFC RAM address */
3525 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3526 		ARR_REG_WR(p_hwfn,
3527 			   p_ptt,
3528 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3529 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3530 
3531 		/* Read VFC RAM read response */
3532 		ARR_REG_RD(p_hwfn,
3533 			   p_ptt,
3534 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3535 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3536 	}
3537 
3538 	return offset;
3539 }
3540 
3541 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3542 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3543 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3544 {
3545 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3546 	u8 storm_id, i;
3547 	u32 offset = 0;
3548 
3549 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3550 		if (!qed_grc_is_storm_included(p_hwfn,
3551 					       (enum dbg_storms)storm_id) ||
3552 		    !s_storm_defs[storm_id].has_vfc ||
3553 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3554 		     PLATFORM_ASIC))
3555 			continue;
3556 
3557 		/* Read CAM */
3558 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3559 					       p_ptt,
3560 					       dump_buf + offset,
3561 					       dump, storm_id);
3562 
3563 		/* Read RAM */
3564 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3565 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3566 						       p_ptt,
3567 						       dump_buf + offset,
3568 						       dump,
3569 						       storm_id,
3570 						       &s_vfc_ram_defs[i]);
3571 	}
3572 
3573 	return offset;
3574 }
3575 
3576 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3577 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3578 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3579 {
3580 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3581 	u32 offset = 0;
3582 	u8 rss_mem_id;
3583 
3584 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3585 		u32 rss_addr, num_entries, total_dwords;
3586 		struct rss_mem_defs *rss_defs;
3587 		u32 addr, num_dwords_to_read;
3588 		bool packed;
3589 
3590 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3591 		rss_addr = rss_defs->addr;
3592 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3593 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3594 		packed = (rss_defs->entry_width == 16);
3595 
3596 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3597 					       dump_buf + offset,
3598 					       dump,
3599 					       rss_defs->mem_name,
3600 					       0,
3601 					       total_dwords,
3602 					       rss_defs->entry_width,
3603 					       packed,
3604 					       rss_defs->type_name, false, 0);
3605 
3606 		/* Dump RSS data */
3607 		if (!dump) {
3608 			offset += total_dwords;
3609 			continue;
3610 		}
3611 
3612 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3613 		while (total_dwords) {
3614 			num_dwords_to_read = min_t(u32,
3615 						   RSS_REG_RSS_RAM_DATA_SIZE,
3616 						   total_dwords);
3617 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3618 			offset += qed_grc_dump_addr_range(p_hwfn,
3619 							  p_ptt,
3620 							  dump_buf + offset,
3621 							  dump,
3622 							  addr,
3623 							  num_dwords_to_read,
3624 							  false);
3625 			total_dwords -= num_dwords_to_read;
3626 			rss_addr++;
3627 		}
3628 	}
3629 
3630 	return offset;
3631 }
3632 
3633 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3634 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3635 				struct qed_ptt *p_ptt,
3636 				u32 *dump_buf, bool dump, u8 big_ram_id)
3637 {
3638 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3639 	u32 block_size, ram_size, offset = 0, reg_val, i;
3640 	char mem_name[12] = "???_BIG_RAM";
3641 	char type_name[8] = "???_RAM";
3642 	struct big_ram_defs *big_ram;
3643 
3644 	big_ram = &s_big_ram_defs[big_ram_id];
3645 	ram_size = big_ram->ram_size[dev_data->chip_id];
3646 
3647 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3648 	block_size = reg_val &
3649 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3650 									 : 128;
3651 
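	/* Overwrite the "???" placeholder with the 3-char Big RAM instance name */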
3652 	strncpy(type_name, big_ram->instance_name,
3653 		strlen(big_ram->instance_name));
3654 	strncpy(mem_name, big_ram->instance_name,
3655 		strlen(big_ram->instance_name));
3656 
3657 	/* Dump memory header */
3658 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3659 				       dump_buf + offset,
3660 				       dump,
3661 				       mem_name,
3662 				       0,
3663 				       ram_size,
3664 				       block_size * 8,
3665 				       false, type_name, false, 0);
3666 
3667 	/* Read and dump Big RAM data */
3668 	if (!dump)
3669 		return offset + ram_size;
3670 
3671 	/* Dump Big RAM */
3672 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3673 	     i++) {
3674 		u32 addr, len;
3675 
3676 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3677 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3678 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3679 		offset += qed_grc_dump_addr_range(p_hwfn,
3680 						  p_ptt,
3681 						  dump_buf + offset,
3682 						  dump,
3683 						  addr,
3684 						  len,
3685 						  false);
3686 	}
3687 
3688 	return offset;
3689 }
3690 
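/* Dumps GRC MCP data. Returns the dumped size in dwords. */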
3691 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3692 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3693 {
3694 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3695 	u32 offset = 0, addr;
3696 	bool halted = false;
3697 
3698 	/* Halt MCP */
3699 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3700 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3701 		if (!halted)
3702 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3703 	}
3704 
3705 	/* Dump MCP scratchpad */
3706 	offset += qed_grc_dump_mem(p_hwfn,
3707 				   p_ptt,
3708 				   dump_buf + offset,
3709 				   dump,
3710 				   NULL,
3711 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3712 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3713 				   false, 0, false, "MCP", false, 0);
3714 
3715 	/* Dump MCP cpu_reg_file */
3716 	offset += qed_grc_dump_mem(p_hwfn,
3717 				   p_ptt,
3718 				   dump_buf + offset,
3719 				   dump,
3720 				   NULL,
3721 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3722 				   MCP_REG_CPU_REG_FILE_SIZE,
3723 				   false, 0, false, "MCP", false, 0);
3724 
3725 	/* Dump MCP registers */
3726 	block_enable[BLOCK_MCP] = true;
3727 	offset += qed_grc_dump_registers(p_hwfn,
3728 					 p_ptt,
3729 					 dump_buf + offset,
3730 					 dump, block_enable, "block", "MCP");
3731 
3732 	/* Dump required non-MCP registers */
3733 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3734 					dump, 1, "eng", -1, "block", "MCP");
3735 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3736 	offset += qed_grc_dump_reg_entry(p_hwfn,
3737 					 p_ptt,
3738 					 dump_buf + offset,
3739 					 dump,
3740 					 addr,
3741 					 1,
3742 					 false);
3743 
3744 	/* Release MCP */
3745 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3746 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3747 
3748 	return offset;
3749 }
3750 
3751 /* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
3752 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3753 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3754 {
3755 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3756 	char mem_name[32];
3757 	u8 phy_id;
3758 
3759 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3760 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3761 		struct phy_defs *phy_defs;
3762 		u8 *bytes_buf;
3763 
3764 		phy_defs = &s_phy_defs[phy_id];
3765 		addr_lo_addr = phy_defs->base_addr +
3766 			       phy_defs->tbus_addr_lo_addr;
3767 		addr_hi_addr = phy_defs->base_addr +
3768 			       phy_defs->tbus_addr_hi_addr;
3769 		data_lo_addr = phy_defs->base_addr +
3770 			       phy_defs->tbus_data_lo_addr;
3771 		data_hi_addr = phy_defs->base_addr +
3772 			       phy_defs->tbus_data_hi_addr;
3773 
3774 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3775 			     phy_defs->phy_name) < 0)
3776 			DP_NOTICE(p_hwfn,
3777 				  "Unexpected debug error: invalid PHY memory name\n");
3778 
3779 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3780 					       dump_buf + offset,
3781 					       dump,
3782 					       mem_name,
3783 					       0,
3784 					       PHY_DUMP_SIZE_DWORDS,
3785 					       16, true, mem_name, false, 0);
3786 
3787 		if (!dump) {
3788 			offset += PHY_DUMP_SIZE_DWORDS;
3789 			continue;
3790 		}
3791 
3792 		bytes_buf = (u8 *)(dump_buf + offset);
3793 		for (tbus_hi_offset = 0;
3794 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3795 		     tbus_hi_offset++) {
3796 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3797 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3798 			     tbus_lo_offset++) {
3799 				qed_wr(p_hwfn,
3800 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3801 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3802 							    p_ptt,
3803 							    data_lo_addr);
3804 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3805 							    p_ptt,
3806 							    data_hi_addr);
3807 			}
3808 		}
3809 
3810 		offset += PHY_DUMP_SIZE_DWORDS;
3811 	}
3812 
3813 	return offset;
3814 }
3815 
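/* Configures the debug line selection for the specified block: sets the line
 * ID and the enable, right-shift, force-valid and force-frame masks.
 */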
3816 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3817 				struct qed_ptt *p_ptt,
3818 				enum block_id block_id,
3819 				u8 line_id,
3820 				u8 enable_mask,
3821 				u8 right_shift,
3822 				u8 force_valid_mask, u8 force_frame_mask)
3823 {
3824 	struct block_defs *block = s_block_defs[block_id];
3825 
3826 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3827 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3828 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3829 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3830 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3831 }
3832 
3833 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3834 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3835 				     struct qed_ptt *p_ptt,
3836 				     u32 *dump_buf, bool dump)
3837 {
3838 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3839 	u32 block_id, line_id, offset = 0;
3840 
3841 	/* Don't dump static debug if a debug bus recording is in progress */
3842 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3843 		return 0;
3844 
3845 	if (dump) {
3846 		/* Disable all blocks' debug output */
3847 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3848 			struct block_defs *block = s_block_defs[block_id];
3849 
3850 			if (block->dbg_client_id[dev_data->chip_id] !=
3851 			    MAX_DBG_BUS_CLIENTS)
3852 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3853 				       0);
3854 		}
3855 
3856 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3857 		qed_bus_set_framing_mode(p_hwfn,
3858 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3859 		qed_wr(p_hwfn,
3860 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3861 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3862 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3863 	}
3864 
3865 	/* Dump all static debug lines for each relevant block */
3866 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3867 		struct block_defs *block = s_block_defs[block_id];
3868 		struct dbg_bus_block *block_desc;
3869 		u32 block_dwords, addr, len;
3870 		u8 dbg_client_id;
3871 
3872 		if (block->dbg_client_id[dev_data->chip_id] ==
3873 		    MAX_DBG_BUS_CLIENTS)
3874 			continue;
3875 
3876 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3877 						    (enum block_id)block_id);
3878 		block_dwords = NUM_DBG_LINES(block_desc) *
3879 			       STATIC_DEBUG_LINE_DWORDS;
3880 
3881 		/* Dump static section params */
3882 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3883 					       dump_buf + offset,
3884 					       dump,
3885 					       block->name,
3886 					       0,
3887 					       block_dwords,
3888 					       32, false, "STATIC", false, 0);
3889 
3890 		if (!dump) {
3891 			offset += block_dwords;
3892 			continue;
3893 		}
3894 
3895 		/* If all lines are invalid - dump zeros */
3896 		if (dev_data->block_in_reset[block_id]) {
3897 			memset(dump_buf + offset, 0,
3898 			       DWORDS_TO_BYTES(block_dwords));
3899 			offset += block_dwords;
3900 			continue;
3901 		}
3902 
3903 		/* Enable block's client */
3904 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3905 		qed_bus_enable_clients(p_hwfn,
3906 				       p_ptt,
3907 				       BIT(dbg_client_id));
3908 
3909 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3910 		len = STATIC_DEBUG_LINE_DWORDS;
3911 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3912 		     line_id++) {
3913 			/* Configure debug line ID */
3914 			qed_config_dbg_line(p_hwfn,
3915 					    p_ptt,
3916 					    (enum block_id)block_id,
3917 					    (u8)line_id, 0xf, 0, 0, 0);
3918 
3919 			/* Read debug line info */
3920 			offset += qed_grc_dump_addr_range(p_hwfn,
3921 							  p_ptt,
3922 							  dump_buf + offset,
3923 							  dump,
3924 							  addr,
3925 							  len,
3926 							  true);
3927 		}
3928 
3929 		/* Disable block's client and debug output */
3930 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3931 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3932 	}
3933 
3934 	if (dump) {
3935 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3936 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3937 	}
3938 
3939 	return offset;
3940 }
3941 
3942 /* Performs GRC Dump to the specified buffer.
3943  * Returns the dumped size in dwords.
3944  */
3945 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3946 				    struct qed_ptt *p_ptt,
3947 				    u32 *dump_buf,
3948 				    bool dump, u32 *num_dumped_dwords)
3949 {
3950 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3951 	bool parities_masked = false;
3952 	u8 i, port_mode = 0;
3953 	u32 offset = 0;
3954 
3955 	*num_dumped_dwords = 0;
3956 
3957 	if (dump) {
3958 		/* Find port mode */
3959 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3960 		case 0:
3961 			port_mode = 1;
3962 			break;
3963 		case 1:
3964 			port_mode = 2;
3965 			break;
3966 		case 2:
3967 			port_mode = 4;
3968 			break;
3969 		}
3970 
3971 		/* Update reset state */
3972 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3973 	}
3974 
3975 	/* Dump global params */
3976 	offset += qed_dump_common_global_params(p_hwfn,
3977 						p_ptt,
3978 						dump_buf + offset, dump, 4);
3979 	offset += qed_dump_str_param(dump_buf + offset,
3980 				     dump, "dump-type", "grc-dump");
3981 	offset += qed_dump_num_param(dump_buf + offset,
3982 				     dump,
3983 				     "num-lcids",
3984 				     qed_grc_get_param(p_hwfn,
3985 						DBG_GRC_PARAM_NUM_LCIDS));
3986 	offset += qed_dump_num_param(dump_buf + offset,
3987 				     dump,
3988 				     "num-ltids",
3989 				     qed_grc_get_param(p_hwfn,
3990 						DBG_GRC_PARAM_NUM_LTIDS));
3991 	offset += qed_dump_num_param(dump_buf + offset,
3992 				     dump, "num-ports", port_mode);
3993 
3994 	/* Dump reset registers (dumped before taking blocks out of reset) */
3995 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3996 		offset += qed_grc_dump_reset_regs(p_hwfn,
3997 						  p_ptt,
3998 						  dump_buf + offset, dump);
3999 
4000 	/* Take all blocks out of reset (using reset registers) */
4001 	if (dump) {
4002 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4003 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4004 	}
4005 
4006 	/* Disable all parities using MFW command */
4007 	if (dump &&
4008 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4009 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4010 		if (!parities_masked) {
4011 			DP_NOTICE(p_hwfn,
4012 				  "Failed to mask parities using MFW\n");
4013 			if (qed_grc_get_param
4014 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4015 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4016 		}
4017 	}
4018 
4019 	/* Dump modified registers (dumped before modifying them) */
4020 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4021 		offset += qed_grc_dump_modified_regs(p_hwfn,
4022 						     p_ptt,
4023 						     dump_buf + offset, dump);
4024 
4025 	/* Stall storms */
4026 	if (dump &&
4027 	    (qed_grc_is_included(p_hwfn,
4028 				 DBG_GRC_PARAM_DUMP_IOR) ||
4029 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4030 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4031 
4032 	/* Dump all regs */
4033 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4034 		bool block_enable[MAX_BLOCK_ID];
4035 
4036 		/* Dump all blocks except MCP */
4037 		for (i = 0; i < MAX_BLOCK_ID; i++)
4038 			block_enable[i] = true;
4039 		block_enable[BLOCK_MCP] = false;
4040 		offset += qed_grc_dump_registers(p_hwfn,
4041 						 p_ptt,
4042 						 dump_buf +
4043 						 offset,
4044 						 dump,
4045 						 block_enable, NULL, NULL);
4046 
4047 		/* Dump special registers */
4048 		offset += qed_grc_dump_special_regs(p_hwfn,
4049 						    p_ptt,
4050 						    dump_buf + offset, dump);
4051 	}
4052 
4053 	/* Dump memories */
4054 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4055 
4056 	/* Dump MCP */
4057 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4058 		offset += qed_grc_dump_mcp(p_hwfn,
4059 					   p_ptt, dump_buf + offset, dump);
4060 
4061 	/* Dump context */
4062 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4063 		offset += qed_grc_dump_ctx(p_hwfn,
4064 					   p_ptt, dump_buf + offset, dump);
4065 
4066 	/* Dump RSS memories */
4067 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4068 		offset += qed_grc_dump_rss(p_hwfn,
4069 					   p_ptt, dump_buf + offset, dump);
4070 
4071 	/* Dump Big RAM */
4072 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4073 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4074 			offset += qed_grc_dump_big_ram(p_hwfn,
4075 						       p_ptt,
4076 						       dump_buf + offset,
4077 						       dump, i);
4078 
4079 	/* Dump IORs */
4080 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4081 		offset += qed_grc_dump_iors(p_hwfn,
4082 					    p_ptt, dump_buf + offset, dump);
4083 
4084 	/* Dump VFC */
4085 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4086 		offset += qed_grc_dump_vfc(p_hwfn,
4087 					   p_ptt, dump_buf + offset, dump);
4088 
4089 	/* Dump PHY tbus */
4090 	if (qed_grc_is_included(p_hwfn,
4091 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4092 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4093 		offset += qed_grc_dump_phy(p_hwfn,
4094 					   p_ptt, dump_buf + offset, dump);
4095 
4096 	/* Dump static debug data */
4097 	if (qed_grc_is_included(p_hwfn,
4098 				DBG_GRC_PARAM_DUMP_STATIC) &&
4099 	    dev_data->bus.state == DBG_BUS_STATE_IDLE)
4100 		offset += qed_grc_dump_static_debug(p_hwfn,
4101 						    p_ptt,
4102 						    dump_buf + offset, dump);
4103 
4104 	/* Dump last section */
4105 	offset += qed_dump_last_section(dump_buf, offset, dump);
4106 
4107 	if (dump) {
4108 		/* Unstall storms */
4109 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4110 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4111 
4112 		/* Clear parity status */
4113 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4114 
4115 		/* Enable all parities using MFW command */
4116 		if (parities_masked)
4117 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4118 	}
4119 
4120 	*num_dumped_dwords = offset;
4121 
4122 	return DBG_STATUS_OK;
4123 }
4124 
4125 /* Writes the specified failing Idle Check rule to the specified buffer.
4126  * Returns the dumped size in dwords.
4127  */
4128 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4129 				     struct qed_ptt *p_ptt,
4130 				     u32 *dump_buf,
4132 				     bool dump,
4133 				     u16 rule_id,
4134 				     const struct dbg_idle_chk_rule *rule,
4135 				     u16 fail_entry_id, u32 *cond_reg_values)
4136 {
4137 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4138 	const struct dbg_idle_chk_cond_reg *cond_regs;
4139 	const struct dbg_idle_chk_info_reg *info_regs;
4140 	u32 i, next_reg_offset = 0, offset = 0;
4141 	struct dbg_idle_chk_result_hdr *hdr;
4142 	const union dbg_idle_chk_reg *regs;
4143 	u8 reg_id;
4144 
4145 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4146 	regs = &((const union dbg_idle_chk_reg *)
4147 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4148 	cond_regs = &regs[0].cond_reg;
4149 	info_regs = &regs[rule->num_cond_regs].info_reg;
4150 
4151 	/* Dump rule data */
4152 	if (dump) {
4153 		memset(hdr, 0, sizeof(*hdr));
4154 		hdr->rule_id = rule_id;
4155 		hdr->mem_entry_id = fail_entry_id;
4156 		hdr->severity = rule->severity;
4157 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4158 	}
4159 
4160 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4161 
4162 	/* Dump condition register values */
4163 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4164 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4165 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4166 
4167 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4168 			  (dump_buf + offset);
4169 
4170 		/* Write register header */
4171 		if (!dump) {
4172 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4173 			    reg->entry_size;
4174 			continue;
4175 		}
4176 
4177 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4178 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4179 		reg_hdr->start_entry = reg->start_entry;
4180 		reg_hdr->size = reg->entry_size;
4181 		SET_FIELD(reg_hdr->data,
4182 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4183 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4184 		SET_FIELD(reg_hdr->data,
4185 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4186 
4187 		/* Write register values */
4188 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4189 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4190 	}
4191 
4192 	/* Dump info register values */
4193 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4194 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4195 		u32 block_id;
4196 
4197 		/* Check if register's block is in reset */
4198 		if (!dump) {
4199 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4200 			continue;
4201 		}
4202 
4203 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4204 		if (block_id >= MAX_BLOCK_ID) {
4205 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4206 			return 0;
4207 		}
4208 
4209 		if (!dev_data->block_in_reset[block_id]) {
4210 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4211 			bool wide_bus, eval_mode, mode_match = true;
4212 			u16 modes_buf_offset;
4213 			u32 addr;
4214 
4215 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4216 				  (dump_buf + offset);
4217 
4218 			/* Check mode */
4219 			eval_mode = GET_FIELD(reg->mode.data,
4220 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4221 			if (eval_mode) {
4222 				modes_buf_offset =
4223 				    GET_FIELD(reg->mode.data,
4224 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4225 				mode_match =
4226 					qed_is_mode_match(p_hwfn,
4227 							  &modes_buf_offset);
4228 			}
4229 
4230 			if (!mode_match)
4231 				continue;
4232 
4233 			addr = GET_FIELD(reg->data,
4234 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4235 			wide_bus = GET_FIELD(reg->data,
4236 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4237 
4238 			/* Write register header */
4239 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4240 			hdr->num_dumped_info_regs++;
4241 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4242 			reg_hdr->size = reg->size;
4243 			SET_FIELD(reg_hdr->data,
4244 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4245 				  rule->num_cond_regs + reg_id);
4246 
4247 			/* Write register values */
4248 			offset += qed_grc_dump_addr_range(p_hwfn,
4249 							  p_ptt,
4250 							  dump_buf + offset,
4251 							  dump,
4252 							  addr,
4253 							  reg->size, wide_bus);
4254 		}
4255 	}
4256 
4257 	return offset;
4258 }
4259 
4260 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4261 static u32
4262 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4263 			       u32 *dump_buf, bool dump,
4264 			       const struct dbg_idle_chk_rule *input_rules,
4265 			       u32 num_input_rules, u32 *num_failing_rules)
4266 {
4267 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4268 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4269 	u32 i, offset = 0;
4270 	u16 entry_id;
4271 	u8 reg_id;
4272 
4273 	*num_failing_rules = 0;
4274 
4275 	for (i = 0; i < num_input_rules; i++) {
4276 		const struct dbg_idle_chk_cond_reg *cond_regs;
4277 		const struct dbg_idle_chk_rule *rule;
4278 		const union dbg_idle_chk_reg *regs;
4279 		u16 num_reg_entries = 1;
4280 		bool check_rule = true;
4281 		const u32 *imm_values;
4282 
4283 		rule = &input_rules[i];
4284 		regs = &((const union dbg_idle_chk_reg *)
4285 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4286 			[rule->reg_offset];
4287 		cond_regs = &regs[0].cond_reg;
4288 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4289 			     [rule->imm_offset];
4290 
4291 		/* Check if all condition register blocks are out of reset, and
4292 		 * find maximal number of entries (all condition registers that
4293 		 * are memories must have the same size, which is > 1).
4294 		 */
4295 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4296 		     reg_id++) {
4297 			u32 block_id =
4298 				GET_FIELD(cond_regs[reg_id].data,
4299 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4300 
4301 			if (block_id >= MAX_BLOCK_ID) {
4302 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4303 				return 0;
4304 			}
4305 
4306 			check_rule = !dev_data->block_in_reset[block_id];
4307 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4308 				num_reg_entries = cond_regs[reg_id].num_entries;
4309 		}
4310 
4311 		if (!check_rule && dump)
4312 			continue;
4313 
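		/* When not dumping, estimate the worst-case size (all entries fail) */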
4314 		if (!dump) {
4315 			u32 entry_dump_size =
4316 				qed_idle_chk_dump_failure(p_hwfn,
4317 							  p_ptt,
4318 							  dump_buf + offset,
4319 							  false,
4320 							  rule->rule_id,
4321 							  rule,
4322 							  0,
4323 							  NULL);
4324 
4325 			offset += num_reg_entries * entry_dump_size;
4326 			(*num_failing_rules) += num_reg_entries;
4327 			continue;
4328 		}
4329 
4330 		/* Go over all register entries (number of entries is the same
4331 		 * for all condition registers).
4332 		 */
4333 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4334 			u32 next_reg_offset = 0;
4335 
4336 			/* Read current entry of all condition registers */
4337 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4338 			     reg_id++) {
4339 				const struct dbg_idle_chk_cond_reg *reg =
4340 					&cond_regs[reg_id];
4341 				u32 padded_entry_size, addr;
4342 				bool wide_bus;
4343 
4344 				/* Find GRC address (if it's a memory, the
4345 				 * address of the specific entry is calculated).
4346 				 */
4347 				addr = GET_FIELD(reg->data,
4348 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4349 				wide_bus =
4350 				    GET_FIELD(reg->data,
4351 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4352 				if (reg->num_entries > 1 ||
4353 				    reg->start_entry > 0) {
4354 					padded_entry_size =
4355 					   reg->entry_size > 1 ?
4356 					   roundup_pow_of_two(reg->entry_size) :
4357 					   1;
4358 					addr += (reg->start_entry + entry_id) *
4359 						padded_entry_size;
4360 				}
4361 
4362 				/* Read registers */
4363 				if (next_reg_offset + reg->entry_size >=
4364 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4365 					DP_NOTICE(p_hwfn,
4366 						  "idle check registers entry is too large\n");
4367 					return 0;
4368 				}
4369 
4370 				next_reg_offset +=
4371 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4372 							    cond_reg_values +
4373 							    next_reg_offset,
4374 							    dump, addr,
4375 							    reg->entry_size,
4376 							    wide_bus);
4377 			}
4378 
4379 			/* Call rule condition function.
4380 			 * If it returns true, it's a failure.
4381 			 */
4382 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4383 							imm_values)) {
4384 				offset += qed_idle_chk_dump_failure(p_hwfn,
4385 							p_ptt,
4386 							dump_buf + offset,
4387 							dump,
4388 							rule->rule_id,
4389 							rule,
4390 							entry_id,
4391 							cond_reg_values);
4392 				(*num_failing_rules)++;
4393 			}
4394 		}
4395 	}
4396 
4397 	return offset;
4398 }
4399 
4400 /* Performs Idle Check Dump to the specified buffer.
4401  * Returns the dumped size in dwords.
4402  */
4403 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4404 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4405 {
4406 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4407 	u32 num_failing_rules = 0;
4408 
4409 	/* Dump global params */
4410 	offset += qed_dump_common_global_params(p_hwfn,
4411 						p_ptt,
4412 						dump_buf + offset, dump, 1);
4413 	offset += qed_dump_str_param(dump_buf + offset,
4414 				     dump, "dump-type", "idle-chk");
4415 
4416 	/* Dump idle check section header with a single parameter */
4417 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4418 	num_failing_rules_offset = offset;
4419 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4420 
4421 	while (input_offset <
4422 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4423 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4424 			(const struct dbg_idle_chk_cond_hdr *)
4425 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4426 			[input_offset++];
4427 		bool eval_mode, mode_match = true;
4428 		u32 curr_failing_rules;
4429 		u16 modes_buf_offset;
4430 
4431 		/* Check mode */
4432 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4433 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4434 		if (eval_mode) {
4435 			modes_buf_offset =
4436 				GET_FIELD(cond_hdr->mode.data,
4437 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4438 			mode_match = qed_is_mode_match(p_hwfn,
4439 						       &modes_buf_offset);
4440 		}
4441 
4442 		if (mode_match) {
4443 			offset +=
4444 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4445 				p_ptt,
4446 				dump_buf + offset,
4447 				dump,
4448 				(const struct dbg_idle_chk_rule *)
4449 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4450 				ptr[input_offset],
4451 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4452 				&curr_failing_rules);
4453 			num_failing_rules += curr_failing_rules;
4454 		}
4455 
4456 		input_offset += cond_hdr->data_size;
4457 	}
4458 
4459 	/* Overwrite num_rules parameter */
4460 	if (dump)
4461 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4462 				   dump, "num_rules", num_failing_rules);
4463 
4464 	/* Dump last section */
4465 	offset += qed_dump_last_section(dump_buf, offset, dump);
4466 
4467 	return offset;
4468 }
4469 
4470 /* Finds the meta data image in NVRAM */
4471 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4472 					    struct qed_ptt *p_ptt,
4473 					    u32 image_type,
4474 					    u32 *nvram_offset_bytes,
4475 					    u32 *nvram_size_bytes)
4476 {
4477 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4478 	struct mcp_file_att file_att;
4479 	int nvm_result;
4480 
4481 	/* Call NVRAM get file command */
4482 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4483 					p_ptt,
4484 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4485 					image_type,
4486 					&ret_mcp_resp,
4487 					&ret_mcp_param,
4488 					&ret_txn_size, (u32 *)&file_att);
4489 
4490 	/* Check response */
4491 	if (nvm_result ||
4492 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4493 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4494 
4495 	/* Update return values */
4496 	*nvram_offset_bytes = file_att.nvm_start_addr;
4497 	*nvram_size_bytes = file_att.len;
4498 
4499 	DP_VERBOSE(p_hwfn,
4500 		   QED_MSG_DEBUG,
4501 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4502 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4503 
4504 	/* Check alignment */
4505 	if (*nvram_size_bytes & 0x3)
4506 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4507 
4508 	return DBG_STATUS_OK;
4509 }
4510 
4511 /* Reads data from NVRAM */
4512 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4513 				      struct qed_ptt *p_ptt,
4514 				      u32 nvram_offset_bytes,
4515 				      u32 nvram_size_bytes, u32 *ret_buf)
4516 {
4517 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4518 	s32 bytes_left = nvram_size_bytes;
4519 	u32 read_offset = 0;
4520 
4521 	DP_VERBOSE(p_hwfn,
4522 		   QED_MSG_DEBUG,
4523 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4524 		   nvram_size_bytes);
4525 
4526 	do {
4527 		bytes_to_copy =
4528 		    (bytes_left >
4529 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4530 
4531 		/* Call NVRAM read command */
4532 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4533 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4534 				       (nvram_offset_bytes +
4535 					read_offset) |
4536 				       (bytes_to_copy <<
4537 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4538 				       &ret_mcp_resp, &ret_mcp_param,
4539 				       &ret_read_size,
4540 				       (u32 *)((u8 *)ret_buf + read_offset)))
4541 			return DBG_STATUS_NVRAM_READ_FAILED;
4542 
4543 		/* Check response */
4544 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4545 			return DBG_STATUS_NVRAM_READ_FAILED;
4546 
4547 		/* Update read offset */
4548 		read_offset += ret_read_size;
4549 		bytes_left -= ret_read_size;
4550 	} while (bytes_left > 0);
4551 
4552 	return DBG_STATUS_OK;
4553 }
4554 
4555 /* Get info on the MCP Trace data in the scratchpad:
4556  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4557  * - trace_data_size (OUT): trace data size in bytes (without the header)
4558  */
4559 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4560 						   struct qed_ptt *p_ptt,
4561 						   u32 *trace_data_grc_addr,
4562 						   u32 *trace_data_size)
4563 {
4564 	u32 spad_trace_offsize, signature;
4565 
4566 	/* Read trace section offsize structure from MCP scratchpad */
4567 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4568 
4569 	/* Extract trace section address from offsize (in scratchpad) */
4570 	*trace_data_grc_addr =
4571 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4572 
4573 	/* Read signature from MCP trace section */
4574 	signature = qed_rd(p_hwfn, p_ptt,
4575 			   *trace_data_grc_addr +
4576 			   offsetof(struct mcp_trace, signature));
4577 
4578 	if (signature != MFW_TRACE_SIGNATURE)
4579 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4580 
4581 	/* Read trace size from MCP trace section */
4582 	*trace_data_size = qed_rd(p_hwfn,
4583 				  p_ptt,
4584 				  *trace_data_grc_addr +
4585 				  offsetof(struct mcp_trace, size));
4586 
4587 	return DBG_STATUS_OK;
4588 }
4589 
4590 /* Reads MCP trace meta data image from NVRAM
4591  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4592  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4593  *			      loaded from file).
4594  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4595  */
4596 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4597 						   struct qed_ptt *p_ptt,
4598 						   u32 trace_data_size_bytes,
4599 						   u32 *running_bundle_id,
4600 						   u32 *trace_meta_offset,
4601 						   u32 *trace_meta_size)
4602 {
4603 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4604 
4605 	/* Read MCP trace section offsize structure from MCP scratchpad */
4606 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4607 
4608 	/* Find running bundle ID */
4609 	running_mfw_addr =
4610 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4611 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4612 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4613 	if (*running_bundle_id > 1)
4614 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4615 
4616 	/* Find image in NVRAM */
4617 	nvram_image_type =
4618 	    (*running_bundle_id ==
4619 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4620 	return qed_find_nvram_image(p_hwfn,
4621 				    p_ptt,
4622 				    nvram_image_type,
4623 				    trace_meta_offset, trace_meta_size);
4624 }
4625 
4626 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4627 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4628 					       struct qed_ptt *p_ptt,
4629 					       u32 nvram_offset_in_bytes,
4630 					       u32 size_in_bytes, u32 *buf)
4631 {
4632 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4633 	enum dbg_status status;
4634 	u32 signature;
4635 
4636 	/* Read meta data from NVRAM */
4637 	status = qed_nvram_read(p_hwfn,
4638 				p_ptt,
4639 				nvram_offset_in_bytes, size_in_bytes, buf);
4640 	if (status != DBG_STATUS_OK)
4641 		return status;
4642 
4643 	/* Extract and check first signature */
4644 	signature = qed_read_unaligned_dword(byte_buf);
4645 	byte_buf += sizeof(signature);
4646 	if (signature != NVM_MAGIC_VALUE)
4647 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4648 
4649 	/* Extract number of modules */
4650 	modules_num = *(byte_buf++);
4651 
4652 	/* Skip all modules */
4653 	for (i = 0; i < modules_num; i++) {
4654 		module_len = *(byte_buf++);
4655 		byte_buf += module_len;
4656 	}
4657 
4658 	/* Extract and check second signature */
4659 	signature = qed_read_unaligned_dword(byte_buf);
4660 	byte_buf += sizeof(signature);
4661 	if (signature != NVM_MAGIC_VALUE)
4662 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4663 
4664 	return DBG_STATUS_OK;
4665 }
4666 
4667 /* Dump MCP Trace */
4668 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4669 					  struct qed_ptt *p_ptt,
4670 					  u32 *dump_buf,
4671 					  bool dump, u32 *num_dumped_dwords)
4672 {
4673 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4674 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4675 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4676 	enum dbg_status status;
4677 	bool mcp_access;
4678 	int halted = 0;
4679 
4680 	*num_dumped_dwords = 0;
4681 
4682 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4683 
4684 	/* Get trace data info */
4685 	status = qed_mcp_trace_get_data_info(p_hwfn,
4686 					     p_ptt,
4687 					     &trace_data_grc_addr,
4688 					     &trace_data_size_bytes);
4689 	if (status != DBG_STATUS_OK)
4690 		return status;
4691 
4692 	/* Dump global params */
4693 	offset += qed_dump_common_global_params(p_hwfn,
4694 						p_ptt,
4695 						dump_buf + offset, dump, 1);
4696 	offset += qed_dump_str_param(dump_buf + offset,
4697 				     dump, "dump-type", "mcp-trace");
4698 
4699 	/* Halt MCP while reading from scratchpad so the read data will be
4700 	 * consistent. If halt fails, MCP trace is taken anyway, with a small
4701 	 * risk that it may be corrupt.
4702 	 */
4703 	if (dump && mcp_access) {
4704 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4705 		if (!halted)
4706 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4707 	}
4708 
4709 	/* Find trace data size */
4710 	trace_data_size_dwords =
4711 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4712 			 BYTES_IN_DWORD);
4713 
4714 	/* Dump trace data section header and param */
4715 	offset += qed_dump_section_hdr(dump_buf + offset,
4716 				       dump, "mcp_trace_data", 1);
4717 	offset += qed_dump_num_param(dump_buf + offset,
4718 				     dump, "size", trace_data_size_dwords);
4719 
4720 	/* Read trace data from scratchpad into dump buffer */
4721 	offset += qed_grc_dump_addr_range(p_hwfn,
4722 					  p_ptt,
4723 					  dump_buf + offset,
4724 					  dump,
4725 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4726 					  trace_data_size_dwords, false);
4727 
4728 	/* Resume MCP (only if halt succeeded) */
4729 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4730 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4731 
4732 	/* Dump trace meta section header */
4733 	offset += qed_dump_section_hdr(dump_buf + offset,
4734 				       dump, "mcp_trace_meta", 1);
4735 
4736 	/* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
4737 	if (mcp_access) {
4738 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4739 						     p_ptt,
4740 						     trace_data_size_bytes,
4741 						     &running_bundle_id,
4742 						     &trace_meta_offset_bytes,
4743 						     &trace_meta_size_bytes);
4744 		if (status == DBG_STATUS_OK)
4745 			trace_meta_size_dwords =
4746 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4747 	}
4748 
4749 	/* Dump trace meta size param */
4750 	offset += qed_dump_num_param(dump_buf + offset,
4751 				     dump, "size", trace_meta_size_dwords);
4752 
4753 	/* Read trace meta image into dump buffer */
4754 	if (dump && trace_meta_size_dwords)
4755 		status = qed_mcp_trace_read_meta(p_hwfn,
4756 						 p_ptt,
4757 						 trace_meta_offset_bytes,
4758 						 trace_meta_size_bytes,
4759 						 dump_buf + offset);
4760 	if (status == DBG_STATUS_OK)
4761 		offset += trace_meta_size_dwords;
4762 
4763 	/* Dump last section */
4764 	offset += qed_dump_last_section(dump_buf, offset, dump);
4765 
4766 	*num_dumped_dwords = offset;
4767 
4768 	/* If no MCP access, indicate that the dump doesn't contain the meta
4769 	 * data from NVRAM.
4770 	 */
4771 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4772 }
4773 
4774 /* Dump GRC FIFO */
4775 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4776 					 struct qed_ptt *p_ptt,
4777 					 u32 *dump_buf,
4778 					 bool dump, u32 *num_dumped_dwords)
4779 {
4780 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4781 	bool fifo_has_data;
4782 
4783 	*num_dumped_dwords = 0;
4784 
4785 	/* Dump global params */
4786 	offset += qed_dump_common_global_params(p_hwfn,
4787 						p_ptt,
4788 						dump_buf + offset, dump, 1);
4789 	offset += qed_dump_str_param(dump_buf + offset,
4790 				     dump, "dump-type", "reg-fifo");
4791 
4792 	/* Dump fifo data section header and param. The size param is 0 for
4793 	 * now, and is overwritten after reading the FIFO.
4794 	 */
4795 	offset += qed_dump_section_hdr(dump_buf + offset,
4796 				       dump, "reg_fifo_data", 1);
4797 	size_param_offset = offset;
4798 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4799 
4800 	if (!dump) {
4801 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4802 		 * test how much data is available, except for reading it.
4803 		 */
4804 		offset += REG_FIFO_DEPTH_DWORDS;
4805 		goto out;
4806 	}
4807 
4808 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4809 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4810 
4811 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4812 	 * and must be accessed atomically. Test for dwords_read not passing
4813 	 * buffer size since more entries could be added to the buffer as we are
4814 	 * emptying it.
4815 	 */
4816 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4817 	len = REG_FIFO_ELEMENT_DWORDS;
4818 	for (dwords_read = 0;
4819 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4820 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4821 		offset += qed_grc_dump_addr_range(p_hwfn,
4822 						  p_ptt,
4823 						  dump_buf + offset,
4824 						  true,
4825 						  addr,
4826 						  len,
4827 						  true);
4828 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4829 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4830 	}
4831 
4832 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4833 			   dwords_read);
4834 out:
4835 	/* Dump last section */
4836 	offset += qed_dump_last_section(dump_buf, offset, dump);
4837 
4838 	*num_dumped_dwords = offset;
4839 
4840 	return DBG_STATUS_OK;
4841 }
4842 
4843 /* Dump IGU FIFO */
4844 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4845 					 struct qed_ptt *p_ptt,
4846 					 u32 *dump_buf,
4847 					 bool dump, u32 *num_dumped_dwords)
4848 {
4849 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4850 	bool fifo_has_data;
4851 
4852 	*num_dumped_dwords = 0;
4853 
4854 	/* Dump global params */
4855 	offset += qed_dump_common_global_params(p_hwfn,
4856 						p_ptt,
4857 						dump_buf + offset, dump, 1);
4858 	offset += qed_dump_str_param(dump_buf + offset,
4859 				     dump, "dump-type", "igu-fifo");
4860 
4861 	/* Dump fifo data section header and param. The size param is 0 for
4862 	 * now, and is overwritten after reading the FIFO.
4863 	 */
4864 	offset += qed_dump_section_hdr(dump_buf + offset,
4865 				       dump, "igu_fifo_data", 1);
4866 	size_param_offset = offset;
4867 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4868 
4869 	if (!dump) {
4870 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4871 		 * test how much data is available, except for reading it.
4872 		 */
4873 		offset += IGU_FIFO_DEPTH_DWORDS;
4874 		goto out;
4875 	}
4876 
4877 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4878 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4879 
4880 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4881 	 * and must be accessed atomically. Test for dwords_read not passing
4882 	 * buffer size since more entries could be added to the buffer as we are
4883 	 * emptying it.
4884 	 */
4885 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4886 	len = IGU_FIFO_ELEMENT_DWORDS;
4887 	for (dwords_read = 0;
4888 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4889 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4890 		offset += qed_grc_dump_addr_range(p_hwfn,
4891 						  p_ptt,
4892 						  dump_buf + offset,
4893 						  true,
4894 						  addr,
4895 						  len,
4896 						  true);
4897 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4898 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4899 	}
4900 
4901 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4902 			   dwords_read);
4903 out:
4904 	/* Dump last section */
4905 	offset += qed_dump_last_section(dump_buf, offset, dump);
4906 
4907 	*num_dumped_dwords = offset;
4908 
4909 	return DBG_STATUS_OK;
4910 }
4911 
4912 /* Protection Override dump */
4913 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4914 						    struct qed_ptt *p_ptt,
4915 						    u32 *dump_buf,
4916 						    bool dump,
4917 						    u32 *num_dumped_dwords)
4918 {
4919 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4920 
4921 	*num_dumped_dwords = 0;
4922 
4923 	/* Dump global params */
4924 	offset += qed_dump_common_global_params(p_hwfn,
4925 						p_ptt,
4926 						dump_buf + offset, dump, 1);
4927 	offset += qed_dump_str_param(dump_buf + offset,
4928 				     dump, "dump-type", "protection-override");
4929 
4930 	/* Dump data section header and param. The size param is 0 for now,
4931 	 * and is overwritten after reading the data.
4932 	 */
4933 	offset += qed_dump_section_hdr(dump_buf + offset,
4934 				       dump, "protection_override_data", 1);
4935 	size_param_offset = offset;
4936 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4937 
4938 	if (!dump) {
4939 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4940 		goto out;
4941 	}
4942 
4943 	/* Add override window info to buffer */
4944 	override_window_dwords =
4945 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4946 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4947 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4948 	offset += qed_grc_dump_addr_range(p_hwfn,
4949 					  p_ptt,
4950 					  dump_buf + offset,
4951 					  true,
4952 					  addr,
4953 					  override_window_dwords,
4954 					  true);
4955 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4956 			   override_window_dwords);
4957 out:
4958 	/* Dump last section */
4959 	offset += qed_dump_last_section(dump_buf, offset, dump);
4960 
4961 	*num_dumped_dwords = offset;
4962 
4963 	return DBG_STATUS_OK;
4964 }
4965 
4966 /* Performs FW Asserts Dump to the specified buffer.
4967  * Returns the dumped size in dwords.
4968  */
4969 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4970 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4971 {
4972 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4973 	struct fw_asserts_ram_section *asserts;
4974 	char storm_letter_str[2] = "?";
4975 	struct fw_info fw_info;
4976 	u32 offset = 0;
4977 	u8 storm_id;
4978 
4979 	/* Dump global params */
4980 	offset += qed_dump_common_global_params(p_hwfn,
4981 						p_ptt,
4982 						dump_buf + offset, dump, 1);
4983 	offset += qed_dump_str_param(dump_buf + offset,
4984 				     dump, "dump-type", "fw-asserts");
4985 
4986 	/* Dump FW Asserts data for each Storm */
4987 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4988 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4989 		struct storm_defs *storm = &s_storm_defs[storm_id];
4990 		u32 last_list_idx, addr;
4991 
4992 		if (dev_data->block_in_reset[storm->block_id])
4993 			continue;
4994 
4995 		/* Read FW info for the current Storm */
4996 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4997 
4998 		asserts = &fw_info.fw_asserts_section;
4999 
5000 		/* Dump FW Asserts section header and params */
5001 		storm_letter_str[0] = storm->letter;
5002 		offset += qed_dump_section_hdr(dump_buf + offset,
5003 					       dump, "fw_asserts", 2);
5004 		offset += qed_dump_str_param(dump_buf + offset,
5005 					     dump, "storm", storm_letter_str);
5006 		offset += qed_dump_num_param(dump_buf + offset,
5007 					     dump,
5008 					     "size",
5009 					     asserts->list_element_dword_size);
5010 
5011 		/* Read and dump FW Asserts data */
5012 		if (!dump) {
5013 			offset += asserts->list_element_dword_size;
5014 			continue;
5015 		}
5016 
5017 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5018 			SEM_FAST_REG_INT_RAM +
5019 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5020 		next_list_idx_addr = fw_asserts_section_addr +
5021 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5022 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5023 		last_list_idx = (next_list_idx > 0 ?
5024 				 next_list_idx :
5025 				 asserts->list_num_elements) - 1;
5026 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5027 		       asserts->list_dword_offset +
5028 		       last_list_idx * asserts->list_element_dword_size;
5029 		offset +=
5030 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5031 					    dump_buf + offset,
5032 					    dump, addr,
5033 					    asserts->list_element_dword_size,
5034 					    false);
5035 	}
5036 
5037 	/* Dump last section */
5038 	offset += qed_dump_last_section(dump_buf, offset, dump);
5039 
5040 	return offset;
5041 }
5042 
5043 /***************************** Public Functions *******************************/
5044 
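/* Splits the binary debug data buffer into the debug arrays (s_dbg_arrays) */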
5045 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5046 {
5047 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5048 	u8 buf_id;
5049 
5050 	/* convert binary data to debug arrays */
5051 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5052 		s_dbg_arrays[buf_id].ptr =
5053 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5054 		s_dbg_arrays[buf_id].size_in_dwords =
5055 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5056 	}
5057 
5058 	return DBG_STATUS_OK;
5059 }
5060 
5061 /* Assign default GRC param values */
5062 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5063 {
5064 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5065 	u32 i;
5066 
5067 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5068 		dev_data->grc.param_val[i] =
5069 		    s_grc_param_defs[i].default_val[dev_data->chip_id];
5070 }
5071 
5072 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5073 					      struct qed_ptt *p_ptt,
5074 					      u32 *buf_size)
5075 {
5076 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5077 
5078 	*buf_size = 0;
5079 
5080 	if (status != DBG_STATUS_OK)
5081 		return status;
5082 
5083 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5084 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5085 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5086 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5087 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5088 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5089 
5090 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5091 }
5092 
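/* Minimal caller sketch (assumes the caller allocates the dword buffer itself,
 * e.g. with vzalloc, and has already acquired a PTT):
 *
 *	u32 size, dumped;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size) != DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size * sizeof(u32));
 *	if (buf) {
 *		qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size, &dumped);
 *		vfree(buf);
 *	}
 */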
5093 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5094 				 struct qed_ptt *p_ptt,
5095 				 u32 *dump_buf,
5096 				 u32 buf_size_in_dwords,
5097 				 u32 *num_dumped_dwords)
5098 {
5099 	u32 needed_buf_size_in_dwords;
5100 	enum dbg_status status;
5101 
5102 	*num_dumped_dwords = 0;
5103 
5104 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5105 					       p_ptt,
5106 					       &needed_buf_size_in_dwords);
5107 	if (status != DBG_STATUS_OK)
5108 		return status;
5109 
5110 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5111 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5112 
5113 	/* GRC Dump */
5114 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5115 
5116 	/* Revert GRC params to their default */
5117 	qed_dbg_grc_set_params_default(p_hwfn);
5118 
5119 	return status;
5120 }
5121 
5122 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5123 						   struct qed_ptt *p_ptt,
5124 						   u32 *buf_size)
5125 {
5126 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5127 	struct idle_chk_data *idle_chk;
5128 	enum dbg_status status;
5129 
5130 	idle_chk = &dev_data->idle_chk;
5131 	*buf_size = 0;
5132 
5133 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5134 	if (status != DBG_STATUS_OK)
5135 		return status;
5136 
5137 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5138 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5139 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5140 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5141 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5142 
5143 	if (!idle_chk->buf_size_set) {
5144 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5145 						       p_ptt, NULL, false);
5146 		idle_chk->buf_size_set = true;
5147 	}
5148 
5149 	*buf_size = idle_chk->buf_size;
5150 
5151 	return DBG_STATUS_OK;
5152 }
5153 
5154 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5155 				      struct qed_ptt *p_ptt,
5156 				      u32 *dump_buf,
5157 				      u32 buf_size_in_dwords,
5158 				      u32 *num_dumped_dwords)
5159 {
5160 	u32 needed_buf_size_in_dwords;
5161 	enum dbg_status status;
5162 
5163 	*num_dumped_dwords = 0;
5164 
5165 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5166 						    p_ptt,
5167 						    &needed_buf_size_in_dwords);
5168 	if (status != DBG_STATUS_OK)
5169 		return status;
5170 
5171 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5172 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5173 
5174 	/* Update reset state */
5175 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5176 
5177 	/* Idle Check Dump */
5178 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5179 
5180 	/* Revert GRC params to their default */
5181 	qed_dbg_grc_set_params_default(p_hwfn);
5182 
5183 	return DBG_STATUS_OK;
5184 }
5185 
5186 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5187 						    struct qed_ptt *p_ptt,
5188 						    u32 *buf_size)
5189 {
5190 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5191 
5192 	*buf_size = 0;
5193 
5194 	if (status != DBG_STATUS_OK)
5195 		return status;
5196 
5197 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5198 }
5199 
5200 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5201 				       struct qed_ptt *p_ptt,
5202 				       u32 *dump_buf,
5203 				       u32 buf_size_in_dwords,
5204 				       u32 *num_dumped_dwords)
5205 {
5206 	u32 needed_buf_size_in_dwords;
5207 	enum dbg_status status;
5208 
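	/* A DBG_STATUS_NVRAM_GET_IMAGE_FAILED status is tolerated here: when MCP
	 * access is disabled, the MCP trace data is still dumped, only without
	 * the NVRAM meta data.
	 */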
5209 	status =
5210 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5211 						    p_ptt,
5212 						    &needed_buf_size_in_dwords);
5213 	if (status != DBG_STATUS_OK && status !=
5214 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5215 		return status;
5216 
5217 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5218 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5219 
5220 	/* Update reset state */
5221 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5222 
5223 	/* Perform dump */
5224 	status = qed_mcp_trace_dump(p_hwfn,
5225 				    p_ptt, dump_buf, true, num_dumped_dwords);
5226 
5227 	/* Revert GRC params to their default */
5228 	qed_dbg_grc_set_params_default(p_hwfn);
5229 
5230 	return status;
5231 }
5232 
5233 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5234 						   struct qed_ptt *p_ptt,
5235 						   u32 *buf_size)
5236 {
5237 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5238 
5239 	*buf_size = 0;
5240 
5241 	if (status != DBG_STATUS_OK)
5242 		return status;
5243 
5244 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5245 }
5246 
5247 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5248 				      struct qed_ptt *p_ptt,
5249 				      u32 *dump_buf,
5250 				      u32 buf_size_in_dwords,
5251 				      u32 *num_dumped_dwords)
5252 {
5253 	u32 needed_buf_size_in_dwords;
5254 	enum dbg_status status;
5255 
5256 	*num_dumped_dwords = 0;
5257 
5258 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5259 						    p_ptt,
5260 						    &needed_buf_size_in_dwords);
5261 	if (status != DBG_STATUS_OK)
5262 		return status;
5263 
5264 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5265 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5266 
5267 	/* Update reset state */
5268 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5269 
5270 	status = qed_reg_fifo_dump(p_hwfn,
5271 				   p_ptt, dump_buf, true, num_dumped_dwords);
5272 
5273 	/* Revert GRC params to their default */
5274 	qed_dbg_grc_set_params_default(p_hwfn);
5275 
5276 	return status;
5277 }
5278 
5279 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5280 						   struct qed_ptt *p_ptt,
5281 						   u32 *buf_size)
5282 {
5283 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5284 
5285 	*buf_size = 0;
5286 
5287 	if (status != DBG_STATUS_OK)
5288 		return status;
5289 
5290 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5291 }
5292 
5293 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5294 				      struct qed_ptt *p_ptt,
5295 				      u32 *dump_buf,
5296 				      u32 buf_size_in_dwords,
5297 				      u32 *num_dumped_dwords)
5298 {
5299 	u32 needed_buf_size_in_dwords;
5300 	enum dbg_status status;
5301 
5302 	*num_dumped_dwords = 0;
5303 
5304 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5305 						    p_ptt,
5306 						    &needed_buf_size_in_dwords);
5307 	if (status != DBG_STATUS_OK)
5308 		return status;
5309 
5310 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5311 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5312 
5313 	/* Update reset state */
5314 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5315 
5316 	status = qed_igu_fifo_dump(p_hwfn,
5317 				   p_ptt, dump_buf, true, num_dumped_dwords);
5318 	/* Revert GRC params to their default */
5319 	qed_dbg_grc_set_params_default(p_hwfn);
5320 
5321 	return status;
5322 }
5323 
5324 enum dbg_status
5325 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5326 					      struct qed_ptt *p_ptt,
5327 					      u32 *buf_size)
5328 {
5329 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5330 
5331 	*buf_size = 0;
5332 
5333 	if (status != DBG_STATUS_OK)
5334 		return status;
5335 
5336 	return qed_protection_override_dump(p_hwfn,
5337 					    p_ptt, NULL, false, buf_size);
5338 }
5339 
5340 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5341 						 struct qed_ptt *p_ptt,
5342 						 u32 *dump_buf,
5343 						 u32 buf_size_in_dwords,
5344 						 u32 *num_dumped_dwords)
5345 {
5346 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5347 	enum dbg_status status;
5348 
5349 	*num_dumped_dwords = 0;
5350 
5351 	status =
5352 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5353 							      p_ptt,
5354 							      p_size);
5355 	if (status != DBG_STATUS_OK)
5356 		return status;
5357 
5358 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5359 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5360 
5361 	/* Update reset state */
5362 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5363 
5364 	status = qed_protection_override_dump(p_hwfn,
5365 					      p_ptt,
5366 					      dump_buf,
5367 					      true, num_dumped_dwords);
5368 
5369 	/* Revert GRC params to their default */
5370 	qed_dbg_grc_set_params_default(p_hwfn);
5371 
5372 	return status;
5373 }
5374 
5375 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5376 						     struct qed_ptt *p_ptt,
5377 						     u32 *buf_size)
5378 {
5379 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5380 
5381 	*buf_size = 0;
5382 
5383 	if (status != DBG_STATUS_OK)
5384 		return status;
5385 
5386 	/* Update reset state */
5387 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5388 
5389 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5390 
5391 	return DBG_STATUS_OK;
5392 }
5393 
5394 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5395 					struct qed_ptt *p_ptt,
5396 					u32 *dump_buf,
5397 					u32 buf_size_in_dwords,
5398 					u32 *num_dumped_dwords)
5399 {
5400 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5401 	enum dbg_status status;
5402 
5403 	*num_dumped_dwords = 0;
5404 
5405 	status =
5406 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5407 						     p_ptt,
5408 						     p_size);
5409 	if (status != DBG_STATUS_OK)
5410 		return status;
5411 
5412 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5413 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5414 
5415 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5416 
5417 	/* Revert GRC params to their default */
5418 	qed_dbg_grc_set_params_default(p_hwfn);
5419 
5420 	return DBG_STATUS_OK;
5421 }
5422 
5423 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5424 				  struct qed_ptt *p_ptt,
5425 				  enum block_id block_id,
5426 				  enum dbg_attn_type attn_type,
5427 				  bool clear_status,
5428 				  struct dbg_attn_block_result *results)
5429 {
5430 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5431 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5432 	const struct dbg_attn_reg *attn_reg_arr;
5433 
5434 	if (status != DBG_STATUS_OK)
5435 		return status;
5436 
5437 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5438 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5439 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5440 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5441 
5442 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5443 					       attn_type, &num_attn_regs);
5444 
5445 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5446 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5447 		struct dbg_attn_reg_result *reg_result;
5448 		u32 sts_addr, sts_val;
5449 		u16 modes_buf_offset;
5450 		bool eval_mode;
5451 
5452 		/* Check mode */
5453 		eval_mode = GET_FIELD(reg_data->mode.data,
5454 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5455 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5456 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5457 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5458 			continue;
5459 
5460 		/* Mode match - read attention status register */
5461 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5462 					   reg_data->sts_clr_address :
5463 					   GET_FIELD(reg_data->data,
5464 						     DBG_ATTN_REG_STS_ADDRESS));
5465 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5466 		if (!sts_val)
5467 			continue;
5468 
5469 		/* Non-zero attention status - add to results */
5470 		reg_result = &results->reg_results[num_result_regs];
5471 		SET_FIELD(reg_result->data,
5472 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5473 		SET_FIELD(reg_result->data,
5474 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5475 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5476 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5477 		reg_result->sts_val = sts_val;
5478 		reg_result->mask_val = qed_rd(p_hwfn,
5479 					      p_ptt,
5480 					      DWORDS_TO_BYTES
5481 					      (reg_data->mask_address));
5482 		num_result_regs++;
5483 	}
5484 
5485 	results->block_id = (u8)block_id;
5486 	results->names_offset =
5487 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5488 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5489 	SET_FIELD(results->data,
5490 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5491 
5492 	return DBG_STATUS_OK;
5493 }
5494 
5495 /******************************* Data Types **********************************/
5496 
5497 struct block_info {
5498 	const char *name;
5499 	enum block_id id;
5500 };
5501 
5502 struct mcp_trace_format {
5503 	u32 data;
5504 #define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
5505 #define MCP_TRACE_FORMAT_MODULE_SHIFT	0
5506 #define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
5507 #define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
5508 #define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
5509 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
5510 #define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
5511 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
5512 #define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
5513 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
5514 #define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
5515 #define MCP_TRACE_FORMAT_LEN_SHIFT	24
5516 
5517 	char *format_str;
5518 };
5519 
5520 /* Meta data structure, generated by a perl script during MFW build. Therefore,
5521  * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
5522  * script.
5523  */
5524 struct mcp_trace_meta {
5525 	u32 modules_num;
5526 	char **modules;
5527 	u32 formats_num;
5528 	struct mcp_trace_format *formats;
5529 };
5530 
5531 /* REG fifo element */
5532 struct reg_fifo_element {
5533 	u64 data;
5534 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5535 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5536 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5537 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5538 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5539 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5540 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5541 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5542 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5543 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5544 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5545 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5546 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5547 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5548 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5549 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5550 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5551 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5552 };
5553 
5554 /* IGU fifo element */
5555 struct igu_fifo_element {
5556 	u32 dword0;
5557 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5558 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5559 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5560 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5561 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5562 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5563 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5564 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5565 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5566 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5567 	u32 dword1;
5568 	u32 dword2;
5569 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5570 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5571 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5572 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5573 	u32 reserved;
5574 };
5575 
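/* Illustrative sketch (hypothetical helper, not in the original source): the
 * IGU_FIFO_ELEMENT_DWORD12_* fields above are defined over the 64-bit
 * concatenation of dword2 (high part) and dword1 (low part). The IGU FIFO
 * parser further below builds that value inline; a helper doing the same:
 */
static inline u64 example_igu_fifo_dword12(const struct igu_fifo_element *e)
{
	return ((u64)e->dword2 << 32) | e->dword1;
}
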
5576 struct igu_fifo_wr_data {
5577 	u32 data;
5578 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5579 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5580 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5581 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5582 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5583 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5584 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5585 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5586 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5587 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5588 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5589 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5590 };
5591 
5592 struct igu_fifo_cleanup_wr_data {
5593 	u32 data;
5594 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5595 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5596 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5597 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5598 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5599 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5600 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5601 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5602 };
5603 
5604 /* Protection override element */
5605 struct protection_override_element {
5606 	u64 data;
5607 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5608 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5609 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5610 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5611 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5612 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5613 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5614 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5615 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5616 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5617 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5618 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5619 };
5620 
5621 enum igu_fifo_sources {
5622 	IGU_SRC_PXP0,
5623 	IGU_SRC_PXP1,
5624 	IGU_SRC_PXP2,
5625 	IGU_SRC_PXP3,
5626 	IGU_SRC_PXP4,
5627 	IGU_SRC_PXP5,
5628 	IGU_SRC_PXP6,
5629 	IGU_SRC_PXP7,
5630 	IGU_SRC_CAU,
5631 	IGU_SRC_ATTN,
5632 	IGU_SRC_GRC
5633 };
5634 
5635 enum igu_fifo_addr_types {
5636 	IGU_ADDR_TYPE_MSIX_MEM,
5637 	IGU_ADDR_TYPE_WRITE_PBA,
5638 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5639 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5640 	IGU_ADDR_TYPE_READ_INT,
5641 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5642 	IGU_ADDR_TYPE_RESERVED
5643 };
5644 
5645 struct igu_fifo_addr_data {
5646 	u16 start_addr;
5647 	u16 end_addr;
5648 	char *desc;
5649 	char *vf_desc;
5650 	enum igu_fifo_addr_types type;
5651 };
5652 
5653 /******************************** Constants **********************************/
5654 
5655 #define MAX_MSG_LEN				1024
5656 
5657 #define MCP_TRACE_MAX_MODULE_LEN		8
5658 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5659 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5660 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5661 
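/* Illustrative sketch (hypothetical helper, not in the original source):
 * MCP_TRACE_FORMAT_PARAM_WIDTH evaluates to 2, i.e. each of the up-to-3
 * parameter sizes occupies a 2-bit field in the format dword, starting at
 * MCP_TRACE_FORMAT_P1_SIZE_SHIFT, and the value 3 encodes a 4-byte parameter
 * (see the MCP trace parsing loop further below). A decode helper:
 */
static inline u8 example_mcp_trace_param_size(u32 format_data, u8 param_idx)
{
	u32 mask = MCP_TRACE_FORMAT_P1_SIZE_MASK <<
		   (param_idx * MCP_TRACE_FORMAT_PARAM_WIDTH);
	u8 shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT +
		   param_idx * MCP_TRACE_FORMAT_PARAM_WIDTH;
	u8 size = (u8)((format_data & mask) >> shift);

	return size == 3 ? 4 : size;
}
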
5662 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5663 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5664 
5665 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5666 
5667 /***************************** Constant Arrays *******************************/
5668 
5669 struct user_dbg_array {
5670 	const u32 *ptr;
5671 	u32 size_in_dwords;
5672 };
5673 
5674 /* Debug arrays */
5675 static struct user_dbg_array
5676 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5677 
5678 /* Block names array */
5679 static struct block_info s_block_info_arr[] = {
5680 	{"grc", BLOCK_GRC},
5681 	{"miscs", BLOCK_MISCS},
5682 	{"misc", BLOCK_MISC},
5683 	{"dbu", BLOCK_DBU},
5684 	{"pglue_b", BLOCK_PGLUE_B},
5685 	{"cnig", BLOCK_CNIG},
5686 	{"cpmu", BLOCK_CPMU},
5687 	{"ncsi", BLOCK_NCSI},
5688 	{"opte", BLOCK_OPTE},
5689 	{"bmb", BLOCK_BMB},
5690 	{"pcie", BLOCK_PCIE},
5691 	{"mcp", BLOCK_MCP},
5692 	{"mcp2", BLOCK_MCP2},
5693 	{"pswhst", BLOCK_PSWHST},
5694 	{"pswhst2", BLOCK_PSWHST2},
5695 	{"pswrd", BLOCK_PSWRD},
5696 	{"pswrd2", BLOCK_PSWRD2},
5697 	{"pswwr", BLOCK_PSWWR},
5698 	{"pswwr2", BLOCK_PSWWR2},
5699 	{"pswrq", BLOCK_PSWRQ},
5700 	{"pswrq2", BLOCK_PSWRQ2},
5701 	{"pglcs", BLOCK_PGLCS},
5702 	{"ptu", BLOCK_PTU},
5703 	{"dmae", BLOCK_DMAE},
5704 	{"tcm", BLOCK_TCM},
5705 	{"mcm", BLOCK_MCM},
5706 	{"ucm", BLOCK_UCM},
5707 	{"xcm", BLOCK_XCM},
5708 	{"ycm", BLOCK_YCM},
5709 	{"pcm", BLOCK_PCM},
5710 	{"qm", BLOCK_QM},
5711 	{"tm", BLOCK_TM},
5712 	{"dorq", BLOCK_DORQ},
5713 	{"brb", BLOCK_BRB},
5714 	{"src", BLOCK_SRC},
5715 	{"prs", BLOCK_PRS},
5716 	{"tsdm", BLOCK_TSDM},
5717 	{"msdm", BLOCK_MSDM},
5718 	{"usdm", BLOCK_USDM},
5719 	{"xsdm", BLOCK_XSDM},
5720 	{"ysdm", BLOCK_YSDM},
5721 	{"psdm", BLOCK_PSDM},
5722 	{"tsem", BLOCK_TSEM},
5723 	{"msem", BLOCK_MSEM},
5724 	{"usem", BLOCK_USEM},
5725 	{"xsem", BLOCK_XSEM},
5726 	{"ysem", BLOCK_YSEM},
5727 	{"psem", BLOCK_PSEM},
5728 	{"rss", BLOCK_RSS},
5729 	{"tmld", BLOCK_TMLD},
5730 	{"muld", BLOCK_MULD},
5731 	{"yuld", BLOCK_YULD},
5732 	{"xyld", BLOCK_XYLD},
5733 	{"ptld", BLOCK_PTLD},
5734 	{"ypld", BLOCK_YPLD},
5735 	{"prm", BLOCK_PRM},
5736 	{"pbf_pb1", BLOCK_PBF_PB1},
5737 	{"pbf_pb2", BLOCK_PBF_PB2},
5738 	{"rpb", BLOCK_RPB},
5739 	{"btb", BLOCK_BTB},
5740 	{"pbf", BLOCK_PBF},
5741 	{"rdif", BLOCK_RDIF},
5742 	{"tdif", BLOCK_TDIF},
5743 	{"cdu", BLOCK_CDU},
5744 	{"ccfc", BLOCK_CCFC},
5745 	{"tcfc", BLOCK_TCFC},
5746 	{"igu", BLOCK_IGU},
5747 	{"cau", BLOCK_CAU},
5748 	{"rgfs", BLOCK_RGFS},
5749 	{"rgsrc", BLOCK_RGSRC},
5750 	{"tgfs", BLOCK_TGFS},
5751 	{"tgsrc", BLOCK_TGSRC},
5752 	{"umac", BLOCK_UMAC},
5753 	{"xmac", BLOCK_XMAC},
5754 	{"dbg", BLOCK_DBG},
5755 	{"nig", BLOCK_NIG},
5756 	{"wol", BLOCK_WOL},
5757 	{"bmbn", BLOCK_BMBN},
5758 	{"ipc", BLOCK_IPC},
5759 	{"nwm", BLOCK_NWM},
5760 	{"nws", BLOCK_NWS},
5761 	{"ms", BLOCK_MS},
5762 	{"phy_pcie", BLOCK_PHY_PCIE},
5763 	{"led", BLOCK_LED},
5764 	{"avs_wrap", BLOCK_AVS_WRAP},
5765 	{"pxpreqbus", BLOCK_PXPREQBUS},
5766 	{"misc_aeu", BLOCK_MISC_AEU},
5767 	{"bar0_map", BLOCK_BAR0_MAP}
5768 };
5769 
5770 /* Status string array */
5771 static const char * const s_status_str[] = {
5772 	/* DBG_STATUS_OK */
5773 	"Operation completed successfully",
5774 
5775 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5776 	"Debug application version wasn't set",
5777 
5778 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5779 	"Unsupported debug application version",
5780 
5781 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5782 	"The debug block wasn't reset since the last recording",
5783 
5784 	/* DBG_STATUS_INVALID_ARGS */
5785 	"Invalid arguments",
5786 
5787 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5788 	"The debug output was already set",
5789 
5790 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5791 	"Invalid PCI buffer size",
5792 
5793 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5794 	"PCI buffer allocation failed",
5795 
5796 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5797 	"A PCI buffer wasn't allocated",
5798 
5799 	/* DBG_STATUS_TOO_MANY_INPUTS */
5800 	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5801 
5802 	/* DBG_STATUS_INPUT_OVERLAP */
5803 	"Overlapping debug bus inputs",
5804 
5805 	/* DBG_STATUS_HW_ONLY_RECORDING */
5806 	"Cannot record Storm data since the entire recording cycle is used by HW",
5807 
5808 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5809 	"The Storm was already enabled",
5810 
5811 	/* DBG_STATUS_STORM_NOT_ENABLED */
5812 	"The specified Storm wasn't enabled",
5813 
5814 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5815 	"The block was already enabled",
5816 
5817 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5818 	"The specified block wasn't enabled",
5819 
5820 	/* DBG_STATUS_NO_INPUT_ENABLED */
5821 	"No input was enabled for recording",
5822 
5823 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5824 	"Filters and triggers are not allowed when recording in 64b units",
5825 
5826 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5827 	"The filter was already enabled",
5828 
5829 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5830 	"The trigger was already enabled",
5831 
5832 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5833 	"The trigger wasn't enabled",
5834 
5835 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5836 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5837 
5838 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5839 	"Cannot add more than 3 trigger states",
5840 
5841 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5842 	"Cannot add more than 4 constraints per filter or trigger state",
5843 
5844 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5845 	"The recording wasn't started",
5846 
5847 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5848 	"A trigger was configured, but it didn't trigger",
5849 
5850 	/* DBG_STATUS_NO_DATA_RECORDED */
5851 	"No data was recorded",
5852 
5853 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5854 	"Dump buffer is too small",
5855 
5856 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5857 	"Dumped data is not aligned to chunks",
5858 
5859 	/* DBG_STATUS_UNKNOWN_CHIP */
5860 	"Unknown chip",
5861 
5862 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5863 	"Failed allocating virtual memory",
5864 
5865 	/* DBG_STATUS_BLOCK_IN_RESET */
5866 	"The input block is in reset",
5867 
5868 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5869 	"Invalid MCP trace signature found in NVRAM",
5870 
5871 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5872 	"Invalid bundle ID found in NVRAM",
5873 
5874 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5875 	"Failed getting NVRAM image",
5876 
5877 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5878 	"NVRAM image is not dword-aligned",
5879 
5880 	/* DBG_STATUS_NVRAM_READ_FAILED */
5881 	"Failed reading from NVRAM",
5882 
5883 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5884 	"Idle check parsing failed",
5885 
5886 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5887 	"MCP Trace data is corrupt",
5888 
5889 	/* DBG_STATUS_MCP_TRACE_NO_META */
5890 	"Dump doesn't contain meta data - it must be provided in an image file",
5891 
5892 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5893 	"Failed to halt MCP",
5894 
5895 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5896 	"Failed to resume MCP after halt",
5897 
5898 	/* DBG_STATUS_RESERVED2 */
5899 	"Reserved debug status - shouldn't be returned",
5900 
5901 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5902 	"Failed to empty SEMI sync FIFO",
5903 
5904 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5905 	"IGU FIFO data is corrupt",
5906 
5907 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5908 	"MCP failed to mask parities",
5909 
5910 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5911 	"FW Asserts parsing failed",
5912 
5913 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5914 	"GRC FIFO data is corrupt",
5915 
5916 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5917 	"Protection Override data is corrupt",
5918 
5919 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5920 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5921 
5922 	/* DBG_STATUS_FILTER_BUG */
5923 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5924 
5925 	/* DBG_STATUS_NON_MATCHING_LINES */
5926 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5927 
5928 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5929 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5930 
5931 	/* DBG_STATUS_DBG_BUS_IN_USE */
5932 	"The debug bus is in use"
5933 };
5934 
5935 /* Idle check severity names array */
5936 static const char * const s_idle_chk_severity_str[] = {
5937 	"Error",
5938 	"Error if no traffic",
5939 	"Warning"
5940 };
5941 
5942 /* MCP Trace level names array */
5943 static const char * const s_mcp_trace_level_str[] = {
5944 	"ERROR",
5945 	"TRACE",
5946 	"DEBUG"
5947 };
5948 
5949 /* Access type names array */
5950 static const char * const s_access_strs[] = {
5951 	"read",
5952 	"write"
5953 };
5954 
5955 /* Privilege type names array */
5956 static const char * const s_privilege_strs[] = {
5957 	"VF",
5958 	"PDA",
5959 	"HV",
5960 	"UA"
5961 };
5962 
5963 /* Protection type names array */
5964 static const char * const s_protection_strs[] = {
5965 	"(default)",
5966 	"(default)",
5967 	"(default)",
5968 	"(default)",
5969 	"override VF",
5970 	"override PDA",
5971 	"override HV",
5972 	"override UA"
5973 };
5974 
5975 /* Master type names array */
5976 static const char * const s_master_strs[] = {
5977 	"???",
5978 	"pxp",
5979 	"mcp",
5980 	"msdm",
5981 	"psdm",
5982 	"ysdm",
5983 	"usdm",
5984 	"tsdm",
5985 	"xsdm",
5986 	"dbu",
5987 	"dmae",
5988 	"???",
5989 	"???",
5990 	"???",
5991 	"???",
5992 	"???"
5993 };
5994 
5995 /* REG FIFO error messages array */
5996 static const char * const s_reg_fifo_error_strs[] = {
5997 	"grc timeout",
5998 	"address doesn't belong to any block",
5999 	"reserved address in block or write to read-only address",
6000 	"privilege/protection mismatch",
6001 	"path isolation error"
6002 };
6003 
6004 /* IGU FIFO sources array */
6005 static const char * const s_igu_fifo_source_strs[] = {
6006 	"TSTORM",
6007 	"MSTORM",
6008 	"USTORM",
6009 	"XSTORM",
6010 	"YSTORM",
6011 	"PSTORM",
6012 	"PCIE",
6013 	"NIG_QM_PBF",
6014 	"CAU",
6015 	"ATTN",
6016 	"GRC",
6017 };
6018 
6019 /* IGU FIFO error messages */
6020 static const char * const s_igu_fifo_error_strs[] = {
6021 	"no error",
6022 	"length error",
6023 	"function disabled",
6024 	"VF sent command to attention address",
6025 	"host sent prod update command",
6026 	"read of during-interrupt register while in MIMD mode",
6027 	"access to PXP BAR reserved address",
6028 	"producer update command to attention index",
6029 	"unknown error",
6030 	"SB index not valid",
6031 	"SB relative index and FID not found",
6032 	"FID does not match",
6033 	"command with error flag asserted (PCI error or CAU discard)",
6034 	"VF sent cleanup and RF cleanup is disabled",
6035 	"cleanup command on type bigger than 4"
6036 };
6037 
6038 /* IGU FIFO address data */
6039 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6040 	{0x0, 0x101, "MSI-X Memory", NULL,
6041 	 IGU_ADDR_TYPE_MSIX_MEM},
6042 	{0x102, 0x1ff, "reserved", NULL,
6043 	 IGU_ADDR_TYPE_RESERVED},
6044 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6045 	 IGU_ADDR_TYPE_WRITE_PBA},
6046 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6047 	 IGU_ADDR_TYPE_WRITE_PBA},
6048 	{0x202, 0x202, "Write PBA[128]", "reserved",
6049 	 IGU_ADDR_TYPE_WRITE_PBA},
6050 	{0x203, 0x3ff, "reserved", NULL,
6051 	 IGU_ADDR_TYPE_RESERVED},
6052 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6053 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6054 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6055 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6056 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6057 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6058 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6059 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6060 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6061 	 IGU_ADDR_TYPE_READ_INT},
6062 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6063 	 IGU_ADDR_TYPE_READ_INT},
6064 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6065 	 IGU_ADDR_TYPE_READ_INT},
6066 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6067 	 IGU_ADDR_TYPE_READ_INT},
6068 	{0x5f7, 0x5ff, "reserved", NULL,
6069 	 IGU_ADDR_TYPE_RESERVED},
6070 	{0x600, 0x7ff, "Producer update", NULL,
6071 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6072 };
6073 
6074 /******************************** Variables **********************************/
6075 
6076 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
6077  * (e.g. due to no NVRAM access).
6078  */
6079 static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
6080 
6081 /* Temporary buffer, used for print size calculations */
6082 static char s_temp_buf[MAX_MSG_LEN];
6083 
6084 /**************************** Private Functions ******************************/
6085 
6086 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6087 {
6088 	return (a + b) % size;
6089 }
6090 
6091 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6092 {
6093 	return (size + a - b) % size;
6094 }
6095 
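/* Illustrative note (example values, not from the original source): with a
 * cyclic buffer of size 1024, qed_cyclic_add(1022, 5, 1024) wraps around to 3,
 * and qed_cyclic_sub(3, 1022, 1024) gives back the distance of 5 bytes.
 */
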
6096 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6097  * bytes) and returns them as a dword value. The specified buffer offset is
6098  * updated.
6099  */
6100 static u32 qed_read_from_cyclic_buf(void *buf,
6101 				    u32 *offset,
6102 				    u32 buf_size, u8 num_bytes_to_read)
6103 {
6104 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6105 	u32 val = 0;
6106 
6107 	val_ptr = (u8 *)&val;
6108 
6109 	for (i = 0; i < num_bytes_to_read; i++) {
6110 		val_ptr[i] = bytes_buf[*offset];
6111 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6112 	}
6113 
6114 	return val;
6115 }
6116 
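/* Illustrative note (not in the original source): the bytes are copied into
 * the returned dword starting from its lowest-addressed byte, so on a
 * little-endian host the first byte read from the cyclic buffer becomes the
 * least significant byte of the returned value. Reads that start near the end
 * of the buffer wrap transparently via qed_cyclic_add().
 */
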
6117 /* Reads and returns the next byte from the specified buffer.
6118  * The specified buffer offset is updated.
6119  */
6120 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6121 {
6122 	return ((u8 *)buf)[(*offset)++];
6123 }
6124 
6125 /* Reads and returns the next dword from the specified buffer.
6126  * The specified buffer offset is updated.
6127  */
6128 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6129 {
6130 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6131 
6132 	*offset += 4;
6133 
6134 	return dword_val;
6135 }
6136 
6137 /* Reads the next string from the specified buffer, and copies it to the
6138  * specified pointer. The specified buffer offset is updated.
6139  */
6140 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6141 {
6142 	const char *source_str = &((const char *)buf)[*offset];
6143 
6144 	strncpy(dest, source_str, size);
6145 	dest[size - 1] = '\0';
6146 	*offset += size;
6147 }
6148 
6149 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6150  * If the specified buffer is NULL, a temporary buffer pointer is returned.
6151  */
6152 static char *qed_get_buf_ptr(void *buf, u32 offset)
6153 {
6154 	return buf ? (char *)buf + offset : s_temp_buf;
6155 }
6156 
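/* Illustrative note (not in the original source): when the results buffer is
 * NULL, every sprintf() in the parsers below lands in s_temp_buf, so a parser
 * can be run once with results_buf == NULL purely to accumulate
 * results_offset (i.e. to compute the required buffer size) and a second time
 * to actually print.
 */
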
6157 /* Reads a param from the specified buffer. Returns the number of dwords read.
6158  * If the returned param_str_val is NULL, the param is numeric and its value is
6159  * returned in param_num_val.
6160  * Otherwise, the param is a string and its pointer is returned in param_str_val.
6161  */
6162 static u32 qed_read_param(u32 *dump_buf,
6163 			  const char **param_name,
6164 			  const char **param_str_val, u32 *param_num_val)
6165 {
6166 	char *char_buf = (char *)dump_buf;
6167 	size_t offset = 0;
6168 
6169 	/* Extract param name */
6170 	*param_name = char_buf;
6171 	offset += strlen(*param_name) + 1;
6172 
6173 	/* Check param type */
6174 	if (*(char_buf + offset++)) {
6175 		/* String param */
6176 		*param_str_val = char_buf + offset;
6177 		*param_num_val = 0;
6178 		offset += strlen(*param_str_val) + 1;
6179 		if (offset & 0x3)
6180 			offset += (4 - (offset & 0x3));
6181 	} else {
6182 		/* Numeric param */
6183 		*param_str_val = NULL;
6184 		if (offset & 0x3)
6185 			offset += (4 - (offset & 0x3));
6186 		*param_num_val = *(u32 *)(char_buf + offset);
6187 		offset += 4;
6188 	}
6189 
6190 	return offset / 4;
6191 }
6192 
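/* Illustrative example of the encoding handled by qed_read_param() above (the
 * bytes are hypothetical, not taken from a real dump): a numeric param "size"
 * with value 0x20 would be laid out as
 *
 *   's' 'i' 'z' 'e' '\0'  0x00  <pad to dword alignment>  0x20 0x00 0x00 0x00
 *
 * (3 dwords in total), while a string param "name" = "grc" would be
 *
 *   'n' 'a' 'm' 'e' '\0'  0x01  'g' 'r' 'c' '\0'  <pad to dword alignment>
 *
 * where the byte after the name's terminator is the type indicator: zero for
 * a numeric param, non-zero for a string param.
 */
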
6193 /* Reads a section header from the specified buffer.
6194  * Returns the number of dwords read.
6195  */
6196 static u32 qed_read_section_hdr(u32 *dump_buf,
6197 				const char **section_name,
6198 				u32 *num_section_params)
6199 {
6200 	const char *param_str_val;
6201 
6202 	return qed_read_param(dump_buf,
6203 			      section_name, &param_str_val, num_section_params);
6204 }
6205 
6206 /* Reads section params from the specified buffer and prints them to the results
6207  * buffer. Returns the number of dwords read.
6208  */
6209 static u32 qed_print_section_params(u32 *dump_buf,
6210 				    u32 num_section_params,
6211 				    char *results_buf, u32 *num_chars_printed)
6212 {
6213 	u32 i, dump_offset = 0, results_offset = 0;
6214 
6215 	for (i = 0; i < num_section_params; i++) {
6216 		const char *param_name, *param_str_val;
6217 		u32 param_num_val = 0;
6218 
6219 		dump_offset += qed_read_param(dump_buf + dump_offset,
6220 					      &param_name,
6221 					      &param_str_val, &param_num_val);
6222 
6223 		if (param_str_val)
6224 			results_offset +=
6225 				sprintf(qed_get_buf_ptr(results_buf,
6226 							results_offset),
6227 					"%s: %s\n", param_name, param_str_val);
6228 		else if (strcmp(param_name, "fw-timestamp"))
6229 			results_offset +=
6230 				sprintf(qed_get_buf_ptr(results_buf,
6231 							results_offset),
6232 					"%s: %d\n", param_name, param_num_val);
6233 	}
6234 
6235 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6236 				  "\n");
6237 
6238 	*num_chars_printed = results_offset;
6239 
6240 	return dump_offset;
6241 }
6242 
6243 /* Parses the idle check rules and returns the number of characters printed.
6244  * In case of parsing error, returns 0.
6245  */
6246 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6247 					 u32 *dump_buf_end,
6248 					 u32 num_rules,
6249 					 bool print_fw_idle_chk,
6250 					 char *results_buf,
6251 					 u32 *num_errors, u32 *num_warnings)
6252 {
6253 	/* Offset in results_buf in bytes */
6254 	u32 results_offset = 0;
6255 
6256 	u32 rule_idx;
6257 	u16 i, j;
6258 
6259 	*num_errors = 0;
6260 	*num_warnings = 0;
6261 
6262 	/* Go over dumped results */
6263 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6264 	     rule_idx++) {
6265 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6266 		struct dbg_idle_chk_result_hdr *hdr;
6267 		const char *parsing_str, *lsi_msg;
6268 		u32 parsing_str_offset;
6269 		bool has_fw_msg;
6270 		u8 curr_reg_id;
6271 
6272 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6273 		rule_parsing_data =
6274 			(const struct dbg_idle_chk_rule_parsing_data *)
6275 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6276 			ptr[hdr->rule_id];
6277 		parsing_str_offset =
6278 			GET_FIELD(rule_parsing_data->data,
6279 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6280 		has_fw_msg =
6281 			GET_FIELD(rule_parsing_data->data,
6282 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6283 		parsing_str =
6284 			&((const char *)
6285 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6286 			[parsing_str_offset];
6287 		lsi_msg = parsing_str;
6288 		curr_reg_id = 0;
6289 
6290 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6291 			return 0;
6292 
6293 		/* Skip rule header */
6294 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6295 
6296 		/* Update errors/warnings count */
6297 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6298 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6299 			(*num_errors)++;
6300 		else
6301 			(*num_warnings)++;
6302 
6303 		/* Print rule severity */
6304 		results_offset +=
6305 		    sprintf(qed_get_buf_ptr(results_buf,
6306 					    results_offset), "%s: ",
6307 			    s_idle_chk_severity_str[hdr->severity]);
6308 
6309 		/* Print rule message */
6310 		if (has_fw_msg)
6311 			parsing_str += strlen(parsing_str) + 1;
6312 		results_offset +=
6313 		    sprintf(qed_get_buf_ptr(results_buf,
6314 					    results_offset), "%s.",
6315 			    has_fw_msg &&
6316 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6317 		parsing_str += strlen(parsing_str) + 1;
6318 
6319 		/* Print register values */
6320 		results_offset +=
6321 		    sprintf(qed_get_buf_ptr(results_buf,
6322 					    results_offset), " Registers:");
6323 		for (i = 0;
6324 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6325 		     i++) {
6326 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6327 			bool is_mem;
6328 			u8 reg_id;
6329 
6330 			reg_hdr =
6331 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6332 			is_mem = GET_FIELD(reg_hdr->data,
6333 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6334 			reg_id = GET_FIELD(reg_hdr->data,
6335 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6336 
6337 			/* Skip reg header */
6338 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6339 
6340 			/* Skip register names until the required reg_id is
6341 			 * reached.
6342 			 */
6343 			for (; reg_id > curr_reg_id;
6344 			     curr_reg_id++,
6345 			     parsing_str += strlen(parsing_str) + 1);
6346 
6347 			results_offset +=
6348 			    sprintf(qed_get_buf_ptr(results_buf,
6349 						    results_offset), " %s",
6350 				    parsing_str);
6351 			if (i < hdr->num_dumped_cond_regs && is_mem)
6352 				results_offset +=
6353 				    sprintf(qed_get_buf_ptr(results_buf,
6354 							    results_offset),
6355 					    "[%d]", hdr->mem_entry_id +
6356 					    reg_hdr->start_entry);
6357 			results_offset +=
6358 			    sprintf(qed_get_buf_ptr(results_buf,
6359 						    results_offset), "=");
6360 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6361 				results_offset +=
6362 				    sprintf(qed_get_buf_ptr(results_buf,
6363 							    results_offset),
6364 					    "0x%x", *dump_buf);
6365 				if (j < reg_hdr->size - 1)
6366 					results_offset +=
6367 					    sprintf(qed_get_buf_ptr
6368 						    (results_buf,
6369 						     results_offset), ",");
6370 			}
6371 		}
6372 
6373 		results_offset +=
6374 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6375 	}
6376 
6377 	/* Check if end of dump buffer was exceeded */
6378 	if (dump_buf > dump_buf_end)
6379 		return 0;
6380 
6381 	return results_offset;
6382 }
6383 
6384 /* Parses an idle check dump buffer.
6385  * If results_buf is not NULL, the idle check results are printed to it.
6386  * In any case, the required results buffer size is assigned to
6387  * parsed_results_bytes.
6388  * The parsing status is returned.
6389  */
6390 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6391 					       u32 num_dumped_dwords,
6392 					       char *results_buf,
6393 					       u32 *parsed_results_bytes,
6394 					       u32 *num_errors,
6395 					       u32 *num_warnings)
6396 {
6397 	const char *section_name, *param_name, *param_str_val;
6398 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6399 	u32 num_section_params = 0, num_rules;
6400 
6401 	/* Offset in results_buf in bytes */
6402 	u32 results_offset = 0;
6403 
6404 	*parsed_results_bytes = 0;
6405 	*num_errors = 0;
6406 	*num_warnings = 0;
6407 
6408 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6409 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6410 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6411 
6412 	/* Read global_params section */
6413 	dump_buf += qed_read_section_hdr(dump_buf,
6414 					 &section_name, &num_section_params);
6415 	if (strcmp(section_name, "global_params"))
6416 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6417 
6418 	/* Print global params */
6419 	dump_buf += qed_print_section_params(dump_buf,
6420 					     num_section_params,
6421 					     results_buf, &results_offset);
6422 
6423 	/* Read idle_chk section */
6424 	dump_buf += qed_read_section_hdr(dump_buf,
6425 					 &section_name, &num_section_params);
6426 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6427 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6428 	dump_buf += qed_read_param(dump_buf,
6429 				   &param_name, &param_str_val, &num_rules);
6430 	if (strcmp(param_name, "num_rules"))
6431 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6432 
6433 	if (num_rules) {
6434 		u32 rules_print_size;
6435 
6436 		/* Print FW output */
6437 		results_offset +=
6438 		    sprintf(qed_get_buf_ptr(results_buf,
6439 					    results_offset),
6440 			    "FW_IDLE_CHECK:\n");
6441 		rules_print_size =
6442 			qed_parse_idle_chk_dump_rules(dump_buf,
6443 						      dump_buf_end,
6444 						      num_rules,
6445 						      true,
6446 						      results_buf ?
6447 						      results_buf +
6448 						      results_offset :
6449 						      NULL,
6450 						      num_errors,
6451 						      num_warnings);
6452 		results_offset += rules_print_size;
6453 		if (!rules_print_size)
6454 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6455 
6456 		/* Print LSI output */
6457 		results_offset +=
6458 		    sprintf(qed_get_buf_ptr(results_buf,
6459 					    results_offset),
6460 			    "\nLSI_IDLE_CHECK:\n");
6461 		rules_print_size =
6462 			qed_parse_idle_chk_dump_rules(dump_buf,
6463 						      dump_buf_end,
6464 						      num_rules,
6465 						      false,
6466 						      results_buf ?
6467 						      results_buf +
6468 						      results_offset :
6469 						      NULL,
6470 						      num_errors,
6471 						      num_warnings);
6472 		results_offset += rules_print_size;
6473 		if (!rules_print_size)
6474 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6475 	}
6476 
6477 	/* Print errors/warnings count */
6478 	if (*num_errors)
6479 		results_offset +=
6480 		    sprintf(qed_get_buf_ptr(results_buf,
6481 					    results_offset),
6482 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6483 			    *num_errors, *num_warnings);
6484 	else if (*num_warnings)
6485 		results_offset +=
6486 		    sprintf(qed_get_buf_ptr(results_buf,
6487 					    results_offset),
6488 			    "\nIdle Check completed successfully (with %d warnings)\n",
6489 			    *num_warnings);
6490 	else
6491 		results_offset +=
6492 		    sprintf(qed_get_buf_ptr(results_buf,
6493 					    results_offset),
6494 			    "\nIdle Check completed successfully\n");
6495 
6496 	/* Add 1 for string NULL termination */
6497 	*parsed_results_bytes = results_offset + 1;
6498 
6499 	return DBG_STATUS_OK;
6500 }
6501 
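/* Illustrative note (section and param names taken from the checks above,
 * layout otherwise paraphrased): the idle check dump parsed by the function
 * above is expected to contain a "global_params" section (free-form
 * name/value params), followed by an "idle_chk" section whose single
 * "num_rules" param gives the number of rule results that follow. The same
 * rule data is then parsed twice, once printing the FW message
 * (print_fw_idle_chk == true) and once the LSI message, which is why the
 * output contains both FW_IDLE_CHECK and LSI_IDLE_CHECK blocks.
 */
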
6502 /* Frees the specified MCP Trace meta data */
6503 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6504 				    struct mcp_trace_meta *meta)
6505 {
6506 	u32 i;
6507 
6508 	/* Release modules */
6509 	if (meta->modules) {
6510 		for (i = 0; i < meta->modules_num; i++)
6511 			kfree(meta->modules[i]);
6512 		kfree(meta->modules);
6513 	}
6514 
6515 	/* Release formats */
6516 	if (meta->formats) {
6517 		for (i = 0; i < meta->formats_num; i++)
6518 			kfree(meta->formats[i].format_str);
6519 		kfree(meta->formats);
6520 	}
6521 }
6522 
6523 /* Allocates and fills MCP Trace meta data based on the specified meta data
6524  * dump buffer.
6525  * Returns debug status code.
6526  */
6527 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6528 						const u32 *meta_buf,
6529 						struct mcp_trace_meta *meta)
6530 {
6531 	u8 *meta_buf_bytes = (u8 *)meta_buf;
6532 	u32 offset = 0, signature, i;
6533 
6534 	memset(meta, 0, sizeof(*meta));
6535 
6536 	/* Read first signature */
6537 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6538 	if (signature != NVM_MAGIC_VALUE)
6539 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6540 
6541 	/* Read no. of modules and allocate memory for their pointers */
6542 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6543 	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6544 	if (!meta->modules)
6545 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6546 
6547 	/* Allocate and read all module strings */
6548 	for (i = 0; i < meta->modules_num; i++) {
6549 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6550 
6551 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6552 		if (!(*(meta->modules + i))) {
6553 			/* Update number of modules to be released */
6554 			meta->modules_num = i ? i - 1 : 0;
6555 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6556 		}
6557 
6558 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6559 				      *(meta->modules + i));
6560 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6561 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6562 	}
6563 
6564 	/* Read second signature */
6565 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6566 	if (signature != NVM_MAGIC_VALUE)
6567 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6568 
6569 	/* Read number of formats and allocate memory for all formats */
6570 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6571 	meta->formats = kzalloc(meta->formats_num *
6572 				sizeof(struct mcp_trace_format),
6573 				GFP_KERNEL);
6574 	if (!meta->formats)
6575 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6576 
6577 	/* Allocate and read all strings */
6578 	for (i = 0; i < meta->formats_num; i++) {
6579 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6580 		u8 format_len;
6581 
6582 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6583 							   &offset);
6584 		format_len =
6585 		    (format_ptr->data &
6586 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6587 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6588 		if (!format_ptr->format_str) {
6589 			/* Update number of formats to be released */
6590 			meta->formats_num = i ? i - 1 : 0;
6591 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6592 		}
6593 
6594 		qed_read_str_from_buf(meta_buf_bytes,
6595 				      &offset,
6596 				      format_len, format_ptr->format_str);
6597 	}
6598 
6599 	return DBG_STATUS_OK;
6600 }
6601 
6602 /* Parses an MCP Trace dump buffer.
6603  * If results_buf is not NULL, the MCP Trace results are printed to it.
6604  * In any case, the required results buffer size is assigned to
6605  * parsed_results_bytes.
6606  * The parsing status is returned.
6607  */
6608 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6609 						u32 *dump_buf,
6610 						char *results_buf,
6611 						u32 *parsed_results_bytes)
6612 {
6613 	u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
6614 	u32 param_mask, param_shift, param_num_val, num_section_params;
6615 	const char *section_name, *param_name, *param_str_val;
6616 	u32 offset, results_offset = 0;
6617 	struct mcp_trace_meta meta;
6618 	struct mcp_trace *trace;
6619 	enum dbg_status status;
6620 	const u32 *meta_buf;
6621 	u8 *trace_buf;
6622 
6623 	*parsed_results_bytes = 0;
6624 
6625 	/* Read global_params section */
6626 	dump_buf += qed_read_section_hdr(dump_buf,
6627 					 &section_name, &num_section_params);
6628 	if (strcmp(section_name, "global_params"))
6629 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6630 
6631 	/* Print global params */
6632 	dump_buf += qed_print_section_params(dump_buf,
6633 					     num_section_params,
6634 					     results_buf, &results_offset);
6635 
6636 	/* Read trace_data section */
6637 	dump_buf += qed_read_section_hdr(dump_buf,
6638 					 &section_name, &num_section_params);
6639 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6640 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6641 	dump_buf += qed_read_param(dump_buf,
6642 				   &param_name, &param_str_val, &param_num_val);
6643 	if (strcmp(param_name, "size"))
6644 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6645 	trace_data_dwords = param_num_val;
6646 
6647 	/* Prepare trace info */
6648 	trace = (struct mcp_trace *)dump_buf;
6649 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6650 	offset = trace->trace_oldest;
6651 	end_offset = trace->trace_prod;
6652 	bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
6653 	dump_buf += trace_data_dwords;
6654 
6655 	/* Read meta_data section */
6656 	dump_buf += qed_read_section_hdr(dump_buf,
6657 					 &section_name, &num_section_params);
6658 	if (strcmp(section_name, "mcp_trace_meta"))
6659 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6660 	dump_buf += qed_read_param(dump_buf,
6661 				   &param_name, &param_str_val, &param_num_val);
6662 	if (strcmp(param_name, "size"))
6663 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6664 	trace_meta_dwords = param_num_val;
6665 
6666 	/* Choose meta data buffer */
6667 	if (!trace_meta_dwords) {
6668 		/* Dump doesn't include meta data */
6669 		if (!s_mcp_trace_meta.ptr)
6670 			return DBG_STATUS_MCP_TRACE_NO_META;
6671 		meta_buf = s_mcp_trace_meta.ptr;
6672 	} else {
6673 		/* Dump includes meta data */
6674 		meta_buf = dump_buf;
6675 	}
6676 
6677 	/* Allocate meta data memory */
6678 	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
6679 	if (status != DBG_STATUS_OK)
6680 		goto free_mem;
6681 
6682 	/* Ignore the level and modules masks - just print everything that is
6683 	 * already in the buffer.
6684 	 */
6685 	while (bytes_left) {
6686 		struct mcp_trace_format *format_ptr;
6687 		u8 format_level, format_module;
6688 		u32 params[3] = { 0, 0, 0 };
6689 		u32 header, format_idx, i;
6690 
6691 		if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
6692 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6693 			goto free_mem;
6694 		}
6695 
6696 		header = qed_read_from_cyclic_buf(trace_buf,
6697 						  &offset,
6698 						  trace->size,
6699 						  MFW_TRACE_ENTRY_SIZE);
6700 		bytes_left -= MFW_TRACE_ENTRY_SIZE;
6701 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6702 
6703 		/* Skip message if its index doesn't exist in the meta data */
6704 		if (format_idx >= meta.formats_num) {
6705 			u8 format_size =
6706 			    (u8)((header &
6707 				  MFW_TRACE_PRM_SIZE_MASK) >>
6708 				 MFW_TRACE_PRM_SIZE_SHIFT);
6709 
6710 			if (bytes_left < format_size) {
6711 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6712 				goto free_mem;
6713 			}
6714 
6715 			offset = qed_cyclic_add(offset,
6716 						format_size, trace->size);
6717 			bytes_left -= format_size;
6718 			continue;
6719 		}
6720 
6721 		format_ptr = &meta.formats[format_idx];
6722 
6723 		for (i = 0,
6724 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6725 		     MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6726 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6727 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6728 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6729 			/* Extract param size (0..3) */
6730 			u8 param_size =
6731 			    (u8)((format_ptr->data &
6732 				  param_mask) >> param_shift);
6733 
6734 			/* If the param size is zero, there are no other
6735 			 * parameters.
6736 			 */
6737 			if (!param_size)
6738 				break;
6739 
6740 			/* Size is encoded using 2 bits, where 3 is used to
6741 			 * encode 4.
6742 			 */
6743 			if (param_size == 3)
6744 				param_size = 4;
6745 
6746 			if (bytes_left < param_size) {
6747 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6748 				goto free_mem;
6749 			}
6750 
6751 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6752 							     &offset,
6753 							     trace->size,
6754 							     param_size);
6755 
6756 			bytes_left -= param_size;
6757 		}
6758 
6759 		format_level =
6760 		    (u8)((format_ptr->data &
6761 			  MCP_TRACE_FORMAT_LEVEL_MASK) >>
6762 			 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6763 		format_module =
6764 		    (u8)((format_ptr->data &
6765 			  MCP_TRACE_FORMAT_MODULE_MASK) >>
6766 			 MCP_TRACE_FORMAT_MODULE_SHIFT);
6767 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
6768 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6769 			goto free_mem;
6770 		}
6771 
6772 		/* Print current message to results buffer */
6773 		results_offset +=
6774 		    sprintf(qed_get_buf_ptr(results_buf,
6775 					    results_offset), "%s %-8s: ",
6776 			    s_mcp_trace_level_str[format_level],
6777 			    meta.modules[format_module]);
6778 		results_offset +=
6779 		    sprintf(qed_get_buf_ptr(results_buf,
6780 					    results_offset),
6781 			    format_ptr->format_str, params[0], params[1],
6782 			    params[2]);
6783 	}
6784 
6785 free_mem:
6786 	*parsed_results_bytes = results_offset + 1;
6787 	qed_mcp_trace_free_meta(p_hwfn, &meta);
6788 	return status;
6789 }
6790 
6791 /* Parses a Reg FIFO dump buffer.
6792  * If results_buf is not NULL, the Reg FIFO results are printed to it.
6793  * In any case, the required results buffer size is assigned to
6794  * parsed_results_bytes.
6795  * The parsing status is returned.
6796  */
6797 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6798 					       char *results_buf,
6799 					       u32 *parsed_results_bytes)
6800 {
6801 	const char *section_name, *param_name, *param_str_val;
6802 	u32 param_num_val, num_section_params, num_elements;
6803 	struct reg_fifo_element *elements;
6804 	u8 i, j, err_val, vf_val;
6805 	u32 results_offset = 0;
6806 	char vf_str[4];
6807 
6808 	/* Read global_params section */
6809 	dump_buf += qed_read_section_hdr(dump_buf,
6810 					 &section_name, &num_section_params);
6811 	if (strcmp(section_name, "global_params"))
6812 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6813 
6814 	/* Print global params */
6815 	dump_buf += qed_print_section_params(dump_buf,
6816 					     num_section_params,
6817 					     results_buf, &results_offset);
6818 
6819 	/* Read reg_fifo_data section */
6820 	dump_buf += qed_read_section_hdr(dump_buf,
6821 					 &section_name, &num_section_params);
6822 	if (strcmp(section_name, "reg_fifo_data"))
6823 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6824 	dump_buf += qed_read_param(dump_buf,
6825 				   &param_name, &param_str_val, &param_num_val);
6826 	if (strcmp(param_name, "size"))
6827 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6828 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6829 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6830 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6831 	elements = (struct reg_fifo_element *)dump_buf;
6832 
6833 	/* Decode elements */
6834 	for (i = 0; i < num_elements; i++) {
6835 		bool err_printed = false;
6836 
6837 		/* Discover if element belongs to a VF or a PF */
6838 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6839 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6840 			sprintf(vf_str, "%s", "N/A");
6841 		else
6842 			sprintf(vf_str, "%d", vf_val);
6843 
6844 		/* Add parsed element to parsed buffer */
6845 		results_offset +=
6846 		    sprintf(qed_get_buf_ptr(results_buf,
6847 					    results_offset),
6848 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6849 			    elements[i].data,
6850 			    (u32)GET_FIELD(elements[i].data,
6851 					   REG_FIFO_ELEMENT_ADDRESS) *
6852 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6853 			    s_access_strs[GET_FIELD(elements[i].data,
6854 						    REG_FIFO_ELEMENT_ACCESS)],
6855 			    (u32)GET_FIELD(elements[i].data,
6856 					   REG_FIFO_ELEMENT_PF),
6857 			    vf_str,
6858 			    (u32)GET_FIELD(elements[i].data,
6859 					   REG_FIFO_ELEMENT_PORT),
6860 			    s_privilege_strs[GET_FIELD(elements[i].data,
6861 						REG_FIFO_ELEMENT_PRIVILEGE)],
6862 			    s_protection_strs[GET_FIELD(elements[i].data,
6863 						REG_FIFO_ELEMENT_PROTECTION)],
6864 			    s_master_strs[GET_FIELD(elements[i].data,
6865 						REG_FIFO_ELEMENT_MASTER)]);
6866 
6867 		/* Print errors */
6868 		for (j = 0,
6869 		     err_val = GET_FIELD(elements[i].data,
6870 					 REG_FIFO_ELEMENT_ERROR);
6871 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6872 		     j++, err_val >>= 1) {
6873 			if (err_val & 0x1) {
6874 				if (err_printed)
6875 					results_offset +=
6876 					    sprintf(qed_get_buf_ptr
6877 						    (results_buf,
6878 						     results_offset), ", ");
6879 				results_offset +=
6880 				    sprintf(qed_get_buf_ptr
6881 					    (results_buf, results_offset), "%s",
6882 					    s_reg_fifo_error_strs[j]);
6883 				err_printed = true;
6884 			}
6885 		}
6886 
6887 		results_offset +=
6888 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6889 	}
6890 
6891 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6892 						  results_offset),
6893 				  "fifo contained %d elements", num_elements);
6894 
6895 	/* Add 1 for string NULL termination */
6896 	*parsed_results_bytes = results_offset + 1;
6897 
6898 	return DBG_STATUS_OK;
6899 }
6900 
6901 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6902 						  *element,
6903 						  char *results_buf,
6904 						  u32 *results_offset)
6905 {
6906 	const struct igu_fifo_addr_data *found_addr = NULL;
6907 	u8 source, err_type, i, is_cleanup;
6908 	char parsed_addr_data[32];
6909 	char parsed_wr_data[256];
6910 	u32 wr_data, prod_cons;
6911 	bool is_wr_cmd, is_pf;
6912 	u16 cmd_addr;
6913 	u64 dword12;
6914 
6915 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6916 	 * FIFO element.
6917 	 */
6918 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
6919 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6920 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6921 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6922 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6923 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6924 
6925 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6926 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6927 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6928 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6929 
6930 	/* Find address data */
6931 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6932 		const struct igu_fifo_addr_data *curr_addr =
6933 			&s_igu_fifo_addr_data[i];
6934 
6935 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6936 		    curr_addr->end_addr)
6937 			found_addr = curr_addr;
6938 	}
6939 
6940 	if (!found_addr)
6941 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6942 
6943 	/* Prepare parsed address data */
6944 	switch (found_addr->type) {
6945 	case IGU_ADDR_TYPE_MSIX_MEM:
6946 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6947 		break;
6948 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
6949 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6950 		sprintf(parsed_addr_data,
6951 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
6952 		break;
6953 	default:
6954 		parsed_addr_data[0] = '\0';
6955 	}
6956 
6957 	if (!is_wr_cmd) {
6958 		parsed_wr_data[0] = '\0';
6959 		goto out;
6960 	}
6961 
6962 	/* Prepare parsed write data */
6963 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6964 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6965 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6966 
6967 	if (source == IGU_SRC_ATTN) {
6968 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
6969 	} else {
6970 		if (is_cleanup) {
6971 			u8 cleanup_val, cleanup_type;
6972 
6973 			cleanup_val =
6974 				GET_FIELD(wr_data,
6975 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6976 			cleanup_type =
6977 			    GET_FIELD(wr_data,
6978 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6979 
6980 			sprintf(parsed_wr_data,
6981 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6982 				cleanup_val ? "set" : "clear",
6983 				cleanup_type);
6984 		} else {
6985 			u8 update_flag, en_dis_int_for_sb, segment;
6986 			u8 timer_mask;
6987 
6988 			update_flag = GET_FIELD(wr_data,
6989 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
6990 			en_dis_int_for_sb =
6991 				GET_FIELD(wr_data,
6992 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6993 			segment = GET_FIELD(wr_data,
6994 					    IGU_FIFO_WR_DATA_SEGMENT);
6995 			timer_mask = GET_FIELD(wr_data,
6996 					       IGU_FIFO_WR_DATA_TIMER_MASK);
6997 
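			/* en_dis_int_for_sb encoding, as decoded by the
			 * ternary expression below:
			 * 0 - enable, 1 - disable, any other value - nop.
			 */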
6998 			sprintf(parsed_wr_data,
6999 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7000 				prod_cons,
7001 				update_flag ? "update" : "nop",
7002 				en_dis_int_for_sb ?
7003 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7004 				"enable",
7005 				segment ? "attn" : "regular",
7006 				timer_mask);
7007 		}
7008 	}
7009 out:
7010 	/* Add parsed element to parsed buffer */
7011 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7012 						   *results_offset),
7013 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7014 				   element->dword2, element->dword1,
7015 				   element->dword0,
7016 				   is_pf ? "pf" : "vf",
7017 				   GET_FIELD(element->dword0,
7018 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7019 				   s_igu_fifo_source_strs[source],
7020 				   is_wr_cmd ? "wr" : "rd",
7021 				   cmd_addr,
7022 				   (!is_pf && found_addr->vf_desc)
7023 				   ? found_addr->vf_desc
7024 				   : found_addr->desc,
7025 				   parsed_addr_data,
7026 				   parsed_wr_data,
7027 				   s_igu_fifo_error_strs[err_type]);
7028 
7029 	return DBG_STATUS_OK;
7030 }
7031 
7032 /* Parses an IGU FIFO dump buffer.
7033  * If results_buf is not NULL, the IGU FIFO results are printed to it.
7034  * In any case, the required results buffer size is assigned to
7035  * parsed_results_bytes.
7036  * The parsing status is returned.
7037  */
7038 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7039 					       char *results_buf,
7040 					       u32 *parsed_results_bytes)
7041 {
7042 	const char *section_name, *param_name, *param_str_val;
7043 	u32 param_num_val, num_section_params, num_elements;
7044 	struct igu_fifo_element *elements;
7045 	enum dbg_status status;
7046 	u32 results_offset = 0;
7047 	u8 i;
7048 
7049 	/* Read global_params section */
7050 	dump_buf += qed_read_section_hdr(dump_buf,
7051 					 &section_name, &num_section_params);
7052 	if (strcmp(section_name, "global_params"))
7053 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7054 
7055 	/* Print global params */
7056 	dump_buf += qed_print_section_params(dump_buf,
7057 					     num_section_params,
7058 					     results_buf, &results_offset);
7059 
7060 	/* Read igu_fifo_data section */
7061 	dump_buf += qed_read_section_hdr(dump_buf,
7062 					 &section_name, &num_section_params);
7063 	if (strcmp(section_name, "igu_fifo_data"))
7064 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7065 	dump_buf += qed_read_param(dump_buf,
7066 				   &param_name, &param_str_val, &param_num_val);
7067 	if (strcmp(param_name, "size"))
7068 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7069 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7070 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7071 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7072 	elements = (struct igu_fifo_element *)dump_buf;
7073 
7074 	/* Decode elements */
7075 	for (i = 0; i < num_elements; i++) {
7076 		status = qed_parse_igu_fifo_element(&elements[i],
7077 						    results_buf,
7078 						    &results_offset);
7079 		if (status != DBG_STATUS_OK)
7080 			return status;
7081 	}
7082 
7083 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7084 						  results_offset),
7085 				  "fifo contained %d elements", num_elements);
7086 
7087 	/* Add 1 for string NULL termination */
7088 	*parsed_results_bytes = results_offset + 1;
7089 
7090 	return DBG_STATUS_OK;
7091 }
7092 
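/* Illustrative sketch (not part of the driver): the contract described above
 * qed_parse_igu_fifo_dump() implies a two-pass usage - call once with a NULL
 * results buffer to learn the required size, allocate, then call again to
 * produce the text. The function name is hypothetical and the block is
 * compiled out.
 */
#if 0
static enum dbg_status example_print_igu_fifo_dump(u32 *dump_buf)
{
	u32 needed_bytes = 0;
	enum dbg_status rc;
	char *buf;

	/* First pass: only computes the required buffer size */
	rc = qed_parse_igu_fifo_dump(dump_buf, NULL, &needed_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	buf = vzalloc(needed_bytes);
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Second pass: same call with a real buffer produces the text */
	rc = qed_parse_igu_fifo_dump(dump_buf, buf, &needed_bytes);
	if (rc == DBG_STATUS_OK)
		pr_info("%s\n", buf);

	vfree(buf);
	return rc;
}
#endif
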
7093 static enum dbg_status
7094 qed_parse_protection_override_dump(u32 *dump_buf,
7095 				   char *results_buf,
7096 				   u32 *parsed_results_bytes)
7097 {
7098 	const char *section_name, *param_name, *param_str_val;
7099 	u32 param_num_val, num_section_params, num_elements;
7100 	struct protection_override_element *elements;
7101 	u32 results_offset = 0;
7102 	u8 i;
7103 
7104 	/* Read global_params section */
7105 	dump_buf += qed_read_section_hdr(dump_buf,
7106 					 &section_name, &num_section_params);
7107 	if (strcmp(section_name, "global_params"))
7108 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7109 
7110 	/* Print global params */
7111 	dump_buf += qed_print_section_params(dump_buf,
7112 					     num_section_params,
7113 					     results_buf, &results_offset);
7114 
7115 	/* Read protection_override_data section */
7116 	dump_buf += qed_read_section_hdr(dump_buf,
7117 					 &section_name, &num_section_params);
7118 	if (strcmp(section_name, "protection_override_data"))
7119 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7120 	dump_buf += qed_read_param(dump_buf,
7121 				   &param_name, &param_str_val, &param_num_val);
7122 	if (strcmp(param_name, "size"))
7123 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7124 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7125 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7126 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7127 	elements = (struct protection_override_element *)dump_buf;
7128 
7129 	/* Decode elements */
7130 	for (i = 0; i < num_elements; i++) {
7131 		u32 address = GET_FIELD(elements[i].data,
7132 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7133 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7134 
7135 		results_offset +=
7136 		    sprintf(qed_get_buf_ptr(results_buf,
7137 					    results_offset),
7138 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7139 			    i, address,
7140 			    (u32)GET_FIELD(elements[i].data,
7141 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7142 			    (u32)GET_FIELD(elements[i].data,
7143 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7144 			    (u32)GET_FIELD(elements[i].data,
7145 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7146 			    s_protection_strs[GET_FIELD(elements[i].data,
7147 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7148 			    s_protection_strs[GET_FIELD(elements[i].data,
7149 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7150 	}
7151 
7152 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7153 						  results_offset),
7154 				  "protection override contained %d elements",
7155 				  num_elements);
7156 
7157 	/* Add 1 for string NULL termination */
7158 	*parsed_results_bytes = results_offset + 1;
7159 
7160 	return DBG_STATUS_OK;
7161 }
7162 
7163 /* Parses a FW Asserts dump buffer.
7164  * If results_buf is not NULL, the FW Asserts results are printed to it.
7165  * In any case, the required results buffer size is assigned to
7166  * parsed_results_bytes.
7167  * The parsing status is returned.
7168  */
7169 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7170 						 char *results_buf,
7171 						 u32 *parsed_results_bytes)
7172 {
7173 	u32 num_section_params, param_num_val, i, results_offset = 0;
7174 	const char *param_name, *param_str_val, *section_name;
7175 	bool last_section_found = false;
7176 
7177 	*parsed_results_bytes = 0;
7178 
7179 	/* Read global_params section */
7180 	dump_buf += qed_read_section_hdr(dump_buf,
7181 					 &section_name, &num_section_params);
7182 	if (strcmp(section_name, "global_params"))
7183 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7184 
7185 	/* Print global params */
7186 	dump_buf += qed_print_section_params(dump_buf,
7187 					     num_section_params,
7188 					     results_buf, &results_offset);
7189 
7190 	while (!last_section_found) {
7191 		dump_buf += qed_read_section_hdr(dump_buf,
7192 						 &section_name,
7193 						 &num_section_params);
7194 		if (!strcmp(section_name, "fw_asserts")) {
7195 			/* Extract params */
7196 			const char *storm_letter = NULL;
7197 			u32 storm_dump_size = 0;
7198 
7199 			for (i = 0; i < num_section_params; i++) {
7200 				dump_buf += qed_read_param(dump_buf,
7201 							   &param_name,
7202 							   &param_str_val,
7203 							   &param_num_val);
7204 				if (!strcmp(param_name, "storm"))
7205 					storm_letter = param_str_val;
7206 				else if (!strcmp(param_name, "size"))
7207 					storm_dump_size = param_num_val;
7208 				else
7209 					return
7210 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7211 			}
7212 
7213 			if (!storm_letter || !storm_dump_size)
7214 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7215 
7216 			/* Print data */
7217 			results_offset +=
7218 			    sprintf(qed_get_buf_ptr(results_buf,
7219 						    results_offset),
7220 				    "\n%sSTORM_ASSERT: size=%d\n",
7221 				    storm_letter, storm_dump_size);
7222 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7223 				results_offset +=
7224 				    sprintf(qed_get_buf_ptr(results_buf,
7225 							    results_offset),
7226 					    "%08x\n", *dump_buf);
7227 		} else if (!strcmp(section_name, "last")) {
7228 			last_section_found = true;
7229 		} else {
7230 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7231 		}
7232 	}
7233 
7234 	/* Add 1 for string NULL termination */
7235 	*parsed_results_bytes = results_offset + 1;
7236 
7237 	return DBG_STATUS_OK;
7238 }
7239 
7240 /***************************** Public Functions *******************************/
7241 
7242 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7243 {
7244 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7245 	u8 buf_id;
7246 
7247 	/* Convert binary data to debug arrays */
7248 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
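	/* Illustrative note (not from the original source): each
	 * bin_buffer_hdr entry holds a byte offset and length relative to
	 * bin_ptr, so the debug arrays below simply alias slices of the
	 * binary blob (sizes converted to dwords); nothing is copied.
	 */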
7249 		s_user_dbg_arrays[buf_id].ptr =
7250 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7251 		s_user_dbg_arrays[buf_id].size_in_dwords =
7252 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7253 	}
7254 
7255 	return DBG_STATUS_OK;
7256 }
7257 
7258 const char *qed_dbg_get_status_str(enum dbg_status status)
7259 {
7260 	return (status <
7261 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7262 }
7263 
7264 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7265 						  u32 *dump_buf,
7266 						  u32 num_dumped_dwords,
7267 						  u32 *results_buf_size)
7268 {
7269 	u32 num_errors, num_warnings;
7270 
7271 	return qed_parse_idle_chk_dump(dump_buf,
7272 				       num_dumped_dwords,
7273 				       NULL,
7274 				       results_buf_size,
7275 				       &num_errors, &num_warnings);
7276 }
7277 
7278 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7279 					   u32 *dump_buf,
7280 					   u32 num_dumped_dwords,
7281 					   char *results_buf,
7282 					   u32 *num_errors,
7283 					   u32 *num_warnings)
7284 {
7285 	u32 parsed_buf_size;
7286 
7287 	return qed_parse_idle_chk_dump(dump_buf,
7288 				       num_dumped_dwords,
7289 				       results_buf,
7290 				       &parsed_buf_size,
7291 				       num_errors, num_warnings);
7292 }
7293 
7294 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7295 {
7296 	s_mcp_trace_meta.ptr = data;
7297 	s_mcp_trace_meta.size_in_dwords = size;
7298 }
7299 
7300 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7301 						   u32 *dump_buf,
7302 						   u32 num_dumped_dwords,
7303 						   u32 *results_buf_size)
7304 {
7305 	return qed_parse_mcp_trace_dump(p_hwfn,
7306 					dump_buf, NULL, results_buf_size);
7307 }
7308 
7309 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7310 					    u32 *dump_buf,
7311 					    u32 num_dumped_dwords,
7312 					    char *results_buf)
7313 {
7314 	u32 parsed_buf_size;
7315 
7316 	return qed_parse_mcp_trace_dump(p_hwfn,
7317 					dump_buf,
7318 					results_buf, &parsed_buf_size);
7319 }
7320 
7321 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7322 						  u32 *dump_buf,
7323 						  u32 num_dumped_dwords,
7324 						  u32 *results_buf_size)
7325 {
7326 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7327 }
7328 
7329 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7330 					   u32 *dump_buf,
7331 					   u32 num_dumped_dwords,
7332 					   char *results_buf)
7333 {
7334 	u32 parsed_buf_size;
7335 
7336 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7337 }
7338 
7339 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7340 						  u32 *dump_buf,
7341 						  u32 num_dumped_dwords,
7342 						  u32 *results_buf_size)
7343 {
7344 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7345 }
7346 
7347 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7348 					   u32 *dump_buf,
7349 					   u32 num_dumped_dwords,
7350 					   char *results_buf)
7351 {
7352 	u32 parsed_buf_size;
7353 
7354 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7355 }
7356 
7357 enum dbg_status
7358 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7359 					     u32 *dump_buf,
7360 					     u32 num_dumped_dwords,
7361 					     u32 *results_buf_size)
7362 {
7363 	return qed_parse_protection_override_dump(dump_buf,
7364 						  NULL, results_buf_size);
7365 }
7366 
7367 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7368 						      u32 *dump_buf,
7369 						      u32 num_dumped_dwords,
7370 						      char *results_buf)
7371 {
7372 	u32 parsed_buf_size;
7373 
7374 	return qed_parse_protection_override_dump(dump_buf,
7375 						  results_buf,
7376 						  &parsed_buf_size);
7377 }
7378 
7379 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7380 						    u32 *dump_buf,
7381 						    u32 num_dumped_dwords,
7382 						    u32 *results_buf_size)
7383 {
7384 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7385 }
7386 
7387 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7388 					     u32 *dump_buf,
7389 					     u32 num_dumped_dwords,
7390 					     char *results_buf)
7391 {
7392 	u32 parsed_buf_size;
7393 
7394 	return qed_parse_fw_asserts_dump(dump_buf,
7395 					 results_buf, &parsed_buf_size);
7396 }
7397 
7398 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7399 				   struct dbg_attn_block_result *results)
7400 {
7401 	struct user_dbg_array *block_attn, *pstrings;
7402 	const u32 *block_attn_name_offsets;
7403 	enum dbg_attn_type attn_type;
7404 	const char *block_name;
7405 	u8 num_regs, i, j;
7406 
7407 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7408 	attn_type = (enum dbg_attn_type)
7409 		    GET_FIELD(results->data,
7410 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7411 	block_name = s_block_info_arr[results->block_id].name;
7412 
7413 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7414 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7415 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7416 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7417 
7418 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7419 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7420 
7421 	/* Go over registers with a non-zero attention status */
7422 	for (i = 0; i < num_regs; i++) {
7423 		struct dbg_attn_bit_mapping *bit_mapping;
7424 		struct dbg_attn_reg_result *reg_result;
7425 		u8 num_reg_attn, bit_idx = 0;
7426 
7427 		reg_result = &results->reg_results[i];
7428 		num_reg_attn = GET_FIELD(reg_result->data,
7429 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7430 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7431 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7432 				block_attn->ptr)[reg_result->block_attn_offset];
7433 
7434 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7435 
7436 		/* Go over attention status bits */
7437 		for (j = 0; j < num_reg_attn; j++) {
7438 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7439 						     DBG_ATTN_BIT_MAPPING_VAL);
7440 			const char *attn_name, *attn_type_str, *masked_str;
7441 			u32 attn_name_offset, sts_addr;
7442 
7443 			/* Check if bit mask should be advanced (due to unused
7444 			 * bits).
7445 			 */
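			/* Illustrative example: a mapping entry with the
			 * unused-bit flag set and val = 3 simply advances
			 * bit_idx by 3 without reporting anything.
			 */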
7446 			if (GET_FIELD(bit_mapping[j].data,
7447 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7448 				bit_idx += (u8)attn_idx_val;
7449 				continue;
7450 			}
7451 
7452 			/* Check current bit index */
7453 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7454 				bit_idx++;
7455 				continue;
7456 			}
7457 
7458 			/* Find attention name */
7459 			attn_name_offset =
7460 				block_attn_name_offsets[attn_idx_val];
7461 			attn_name = &((const char *)
7462 				      pstrings->ptr)[attn_name_offset];
7463 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7464 					"Interrupt" : "Parity";
7465 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7466 				     " [masked]" : "";
7467 			sts_addr = GET_FIELD(reg_result->data,
7468 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7469 			DP_NOTICE(p_hwfn,
7470 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7471 				  block_name, attn_type_str, attn_name,
7472 				  sts_addr, bit_idx, masked_str);
7473 
7474 			bit_idx++;
7475 		}
7476 	}
7477 
7478 	return DBG_STATUS_OK;
7479 }
7480 
7481 /* Wrapper for unifying the idle_chk and mcp_trace APIs */
7482 static enum dbg_status
7483 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7484 				   u32 *dump_buf,
7485 				   u32 num_dumped_dwords,
7486 				   char *results_buf)
7487 {
7488 	u32 num_errors, num_warnings;
7489 
7490 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7491 					  results_buf, &num_errors,
7492 					  &num_warnings);
7493 }
7494 
7495 /* Feature meta data lookup table */
7496 static struct {
7497 	char *name;
7498 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7499 				    struct qed_ptt *p_ptt, u32 *size);
7500 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7501 					struct qed_ptt *p_ptt, u32 *dump_buf,
7502 					u32 buf_size, u32 *dumped_dwords);
7503 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7504 					 u32 *dump_buf, u32 num_dumped_dwords,
7505 					 char *results_buf);
7506 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7507 					    u32 *dump_buf,
7508 					    u32 num_dumped_dwords,
7509 					    u32 *results_buf_size);
7510 } qed_features_lookup[] = {
7511 	{
7512 	"grc", qed_dbg_grc_get_dump_buf_size,
7513 		    qed_dbg_grc_dump, NULL, NULL}, {
7514 	"idle_chk",
7515 		    qed_dbg_idle_chk_get_dump_buf_size,
7516 		    qed_dbg_idle_chk_dump,
7517 		    qed_print_idle_chk_results_wrapper,
7518 		    qed_get_idle_chk_results_buf_size}, {
7519 	"mcp_trace",
7520 		    qed_dbg_mcp_trace_get_dump_buf_size,
7521 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7522 		    qed_get_mcp_trace_results_buf_size}, {
7523 	"reg_fifo",
7524 		    qed_dbg_reg_fifo_get_dump_buf_size,
7525 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7526 		    qed_get_reg_fifo_results_buf_size}, {
7527 	"igu_fifo",
7528 		    qed_dbg_igu_fifo_get_dump_buf_size,
7529 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7530 		    qed_get_igu_fifo_results_buf_size}, {
7531 	"protection_override",
7532 		    qed_dbg_protection_override_get_dump_buf_size,
7533 		    qed_dbg_protection_override_dump,
7534 		    qed_print_protection_override_results,
7535 		    qed_get_protection_override_results_buf_size}, {
7536 	"fw_asserts",
7537 		    qed_dbg_fw_asserts_get_dump_buf_size,
7538 		    qed_dbg_fw_asserts_dump,
7539 		    qed_print_fw_asserts_results,
7540 		    qed_get_fw_asserts_results_buf_size},};
7541 
7542 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7543 {
7544 	u32 i, precision = 80;
7545 
7546 	if (!p_text_buf)
7547 		return;
7548 
7549 	pr_notice("\n%.*s", precision, p_text_buf);
7550 	for (i = precision; i < text_size; i += precision)
7551 		pr_cont("%.*s", precision, p_text_buf + i);
7552 	pr_cont("\n");
7553 }
7554 
7555 #define QED_RESULTS_BUF_MIN_SIZE 16
7556 /* Generic function for decoding debug feature info */
7557 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7558 				      enum qed_dbg_features feature_idx)
7559 {
7560 	struct qed_dbg_feature *feature =
7561 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7562 	u32 text_size_bytes, null_char_pos, i;
7563 	enum dbg_status rc;
7564 	char *text_buf;
7565 
7566 	/* Check if feature supports formatting capability */
7567 	if (!qed_features_lookup[feature_idx].results_buf_size)
7568 		return DBG_STATUS_OK;
7569 
7570 	/* Obtain size of formatted output */
7571 	rc = qed_features_lookup[feature_idx].
7572 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7573 				 feature->dumped_dwords, &text_size_bytes);
7574 	if (rc != DBG_STATUS_OK)
7575 		return rc;
7576 
7577 	/* Make sure that the allocated size is a multiple of a dword (4 bytes) */
7578 	null_char_pos = text_size_bytes - 1;
7579 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
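	/* Illustrative example: a 10-byte result (NUL at offset 9) is
	 * rounded up to 12 bytes by the statement above; offsets 9..11 are
	 * all overwritten with '\n' further below.
	 */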
7580 
7581 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7582 		DP_NOTICE(p_hwfn->cdev,
7583 			  "formatted size of feature was too small %d. Aborting\n",
7584 			  text_size_bytes);
7585 		return DBG_STATUS_INVALID_ARGS;
7586 	}
7587 
7588 	/* Allocate temp text buf */
7589 	text_buf = vzalloc(text_size_bytes);
7590 	if (!text_buf)
7591 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7592 
7593 	/* Decode feature opcodes to string on temp buf */
7594 	rc = qed_features_lookup[feature_idx].
7595 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7596 			      feature->dumped_dwords, text_buf);
7597 	if (rc != DBG_STATUS_OK) {
7598 		vfree(text_buf);
7599 		return rc;
7600 	}
7601 
7602 	/* Replace the original null character with a '\n' character.
7603 	 * The bytes that were added as a result of the dword alignment are also
7604 	 * padded with '\n' characters.
7605 	 */
7606 	for (i = null_char_pos; i < text_size_bytes; i++)
7607 		text_buf[i] = '\n';
7608 
7609 	/* Dump printable feature to log */
7610 	if (p_hwfn->cdev->dbg_params.print_data)
7611 		qed_dbg_print_feature(text_buf, text_size_bytes);
7612 
7613 	/* Free the old dump_buf and point the dump_buf to the newly allocated
7614 	 * and formatted text buffer.
7615 	 */
7616 	vfree(feature->dump_buf);
7617 	feature->dump_buf = text_buf;
7618 	feature->buf_size = text_size_bytes;
7619 	feature->dumped_dwords = text_size_bytes / 4;
7620 	return rc;
7621 }
7622 
7623 /* Generic function for performing the dump of a debug feature. */
7624 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7625 				    struct qed_ptt *p_ptt,
7626 				    enum qed_dbg_features feature_idx)
7627 {
7628 	struct qed_dbg_feature *feature =
7629 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7630 	u32 buf_size_dwords;
7631 	enum dbg_status rc;
7632 
7633 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7634 		  qed_features_lookup[feature_idx].name);
7635 
7636 	/* If dump_buf was already allocated, free it first (this can happen if
7637 	 * a dump was requested but the file was never read).
7638 	 * We can't reuse the buffer as-is since its size may have changed.
7639 	 */
7640 	if (feature->dump_buf) {
7641 		vfree(feature->dump_buf);
7642 		feature->dump_buf = NULL;
7643 	}
7644 
7645 	/* Get buffer size from hsi, allocate accordingly, and perform the
7646 	 * dump.
7647 	 */
7648 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7649 						       &buf_size_dwords);
7650 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7651 		return rc;
7652 	feature->buf_size = buf_size_dwords * sizeof(u32);
7653 	feature->dump_buf = vmalloc(feature->buf_size);
7654 	if (!feature->dump_buf)
7655 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7656 
7657 	rc = qed_features_lookup[feature_idx].
7658 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7659 			     feature->buf_size / sizeof(u32),
7660 			     &feature->dumped_dwords);
7661 
7662 	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7663 	 * In this case the buffer holds valid binary data, but we won't be able
7664 	 * to parse it (since parsing relies on data in NVRAM which is only
7665 	 * accessible when the MFW is responsive). Skip the formatting but return
7666 	 * success so that the binary data is still provided.
7667 	 */
7668 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7669 		return DBG_STATUS_OK;
7670 
7671 	if (rc != DBG_STATUS_OK)
7672 		return rc;
7673 
7674 	/* Format output */
7675 	rc = format_feature(p_hwfn, feature_idx);
7676 	return rc;
7677 }
7678 
7679 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7680 {
7681 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7682 }
7683 
7684 int qed_dbg_grc_size(struct qed_dev *cdev)
7685 {
7686 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7687 }
7688 
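/* Illustrative caller sketch (not part of the driver; the function name is
 * hypothetical and the block is compiled out): the *_size() helpers are
 * intended to be called first so a caller can size the buffer passed to the
 * matching dump function, e.g. qed_dbg_grc_size()/qed_dbg_grc() shown here.
 */
#if 0
static int example_collect_grc(struct qed_dev *cdev)
{
	u32 dumped_bytes = 0;
	void *buf;
	int size, rc;

	size = qed_dbg_grc_size(cdev);
	if (size <= 0)
		return -EINVAL;

	buf = vzalloc(size);
	if (!buf)
		return -ENOMEM;

	rc = qed_dbg_grc(cdev, buf, &dumped_bytes);
	/* ... consume the first dumped_bytes bytes of buf ... */

	vfree(buf);
	return rc;
}
#endif
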
7689 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7690 {
7691 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7692 			       num_dumped_bytes);
7693 }
7694 
7695 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7696 {
7697 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7698 }
7699 
7700 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7701 {
7702 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7703 			       num_dumped_bytes);
7704 }
7705 
7706 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7707 {
7708 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7709 }
7710 
7711 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7712 {
7713 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7714 			       num_dumped_bytes);
7715 }
7716 
7717 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7718 {
7719 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7720 }
7721 
7722 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7723 				u32 *num_dumped_bytes)
7724 {
7725 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7726 			       num_dumped_bytes);
7727 }
7728 
7729 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7730 {
7731 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7732 }
7733 
7734 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7735 		       u32 *num_dumped_bytes)
7736 {
7737 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7738 			       num_dumped_bytes);
7739 }
7740 
7741 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7742 {
7743 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7744 }
7745 
7746 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7747 		      u32 *num_dumped_bytes)
7748 {
7749 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7750 			       num_dumped_bytes);
7751 }
7752 
7753 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7754 {
7755 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7756 }
7757 
7758 /* Defines the number of bytes allocated for recording the length of a debugfs
7759  * feature buffer.
7760  */
7761 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7762 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7763 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7764 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7765 enum debug_print_features {
7766 	OLD_MODE = 0,
7767 	IDLE_CHK = 1,
7768 	GRC_DUMP = 2,
7769 	MCP_TRACE = 3,
7770 	REG_FIFO = 4,
7771 	PROTECTION_OVERRIDE = 5,
7772 	IGU_FIFO = 6,
7773 	PHY = 7,
7774 	FW_ASSERTS = 8,
7775 };
7776 
7777 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7778 				   int engine, u32 feature_size, u8 omit_engine)
7779 {
7780 	/* Insert the engine, feature and omit_engine flag into the header and
7781 	 * combine them with the feature size.
7782 	 */
7783 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7784 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7785 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7786 }
7787 
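/* Illustrative sketch (not part of the driver): extracting the fields back
 * out of a header built by qed_calc_regdump_header(). The 24-bit size field
 * and the assumed widths of the other fields are inferred from the shifts
 * above; the function name is hypothetical and the block is compiled out.
 */
#if 0
static void example_decode_regdump_header(u32 hdr, u32 *feature_size,
					  u8 *feature, u8 *engine)
{
	*feature_size = hdr & 0xffffff;	/* bits 0..23, size in bytes */
	*feature = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
	*engine = (hdr >> REGDUMP_HEADER_ENGINE_SHIFT) & 0x1;
}
#endif
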
7788 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7789 {
7790 	u8 cur_engine, omit_engine = 0, org_engine;
7791 	u32 offset = 0, feature_size;
7792 	int rc;
7793 
7794 	if (cdev->num_hwfns == 1)
7795 		omit_engine = 1;
7796 
7797 	org_engine = qed_get_debug_engine(cdev);
7798 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7799 		/* Collect idle_chks and grcDump for each hw function */
7800 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7801 			   "obtaining idle_chk and grcdump for current engine\n");
7802 		qed_set_debug_engine(cdev, cur_engine);
7803 
7804 		/* First idle_chk */
7805 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7806 				      REGDUMP_HEADER_SIZE, &feature_size);
7807 		if (!rc) {
7808 			*(u32 *)((u8 *)buffer + offset) =
7809 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7810 						    feature_size, omit_engine);
7811 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7812 		} else {
7813 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7814 		}
7815 
7816 		/* Second idle_chk */
7817 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7818 				      REGDUMP_HEADER_SIZE, &feature_size);
7819 		if (!rc) {
7820 			*(u32 *)((u8 *)buffer + offset) =
7821 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7822 						    feature_size, omit_engine);
7823 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7824 		} else {
7825 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7826 		}
7827 
7828 		/* reg_fifo dump */
7829 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7830 				      REGDUMP_HEADER_SIZE, &feature_size);
7831 		if (!rc) {
7832 			*(u32 *)((u8 *)buffer + offset) =
7833 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
7834 						    feature_size, omit_engine);
7835 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7836 		} else {
7837 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7838 		}
7839 
7840 		/* igu_fifo dump */
7841 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7842 				      REGDUMP_HEADER_SIZE, &feature_size);
7843 		if (!rc) {
7844 			*(u32 *)((u8 *)buffer + offset) =
7845 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
7846 						    feature_size, omit_engine);
7847 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7848 		} else {
7849 			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
7850 		}
7851 
7852 		/* protection_override dump */
7853 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7854 						 REGDUMP_HEADER_SIZE,
7855 						 &feature_size);
7856 		if (!rc) {
7857 			*(u32 *)((u8 *)buffer + offset) =
7858 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
7859 						    cur_engine,
7860 						    feature_size, omit_engine);
7861 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7862 		} else {
7863 			DP_ERR(cdev,
7864 			       "qed_dbg_protection_override failed. rc = %d\n",
7865 			       rc);
7866 		}
7867 
7868 		/* fw_asserts dump */
7869 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7870 					REGDUMP_HEADER_SIZE, &feature_size);
7871 		if (!rc) {
7872 			*(u32 *)((u8 *)buffer + offset) =
7873 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7874 						    feature_size, omit_engine);
7875 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7876 		} else {
7877 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7878 			       rc);
7879 		}
7880 
7881 		/* GRC dump - must be last because when the MCP is stuck it
7882 		 * will clutter the idle_chk, reg_fifo, ... dumps.
7883 		 */
7884 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7885 				 REGDUMP_HEADER_SIZE, &feature_size);
7886 		if (!rc) {
7887 			*(u32 *)((u8 *)buffer + offset) =
7888 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
7889 						    feature_size, omit_engine);
7890 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7891 		} else {
7892 			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
7893 		}
7894 	}
7895 
7896 	/* mcp_trace */
7897 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7898 			       REGDUMP_HEADER_SIZE, &feature_size);
7899 	if (!rc) {
7900 		*(u32 *)((u8 *)buffer + offset) =
7901 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
7902 					    feature_size, omit_engine);
7903 		offset += (feature_size + REGDUMP_HEADER_SIZE);
7904 	} else {
7905 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7906 	}
7907 
7908 	qed_set_debug_engine(cdev, org_engine);
7909 
7910 	return 0;
7911 }
7912 
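/* Resulting buffer layout (descriptive note, not from the original source):
 * qed_dbg_all_data() produces a sequence of [32-bit header][feature data]
 * records - per engine two idle_chk dumps, reg_fifo, igu_fifo,
 * protection_override, fw_asserts and grc, followed by one engine-common
 * mcp_trace record. qed_dbg_all_data_size() below sums exactly these sizes.
 */
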
7913 int qed_dbg_all_data_size(struct qed_dev *cdev)
7914 {
7915 	u8 cur_engine, org_engine;
7916 	u32 regs_len = 0;
7917 
7918 	org_engine = qed_get_debug_engine(cdev);
7919 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7920 		/* Engine specific */
7921 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7922 			   "calculating idle_chk and grcdump register length for current engine\n");
7923 		qed_set_debug_engine(cdev, cur_engine);
7924 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7925 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7926 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7927 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7928 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7929 			    REGDUMP_HEADER_SIZE +
7930 			    qed_dbg_protection_override_size(cdev) +
7931 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7932 	}
7933 
7934 	/* Engine common */
7935 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7936 	qed_set_debug_engine(cdev, org_engine);
7937 
7938 	return regs_len;
7939 }
7940 
7941 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
7942 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
7943 {
7944 	struct qed_hwfn *p_hwfn =
7945 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7946 	struct qed_dbg_feature *qed_feature =
7947 		&cdev->dbg_params.features[feature];
7948 	enum dbg_status dbg_rc;
7949 	struct qed_ptt *p_ptt;
7950 	int rc = 0;
7951 
7952 	/* Acquire ptt */
7953 	p_ptt = qed_ptt_acquire(p_hwfn);
7954 	if (!p_ptt)
7955 		return -EINVAL;
7956 
7957 	/* Get dump */
7958 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
7959 	if (dbg_rc != DBG_STATUS_OK) {
7960 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
7961 			   qed_dbg_get_status_str(dbg_rc));
7962 		*num_dumped_bytes = 0;
7963 		rc = -EINVAL;
7964 		goto out;
7965 	}
7966 
7967 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
7968 		   "copying debugfs feature to external buffer\n");
7969 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
7970 	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
7971 			    4;
7972 
7973 out:
7974 	qed_ptt_release(p_hwfn, p_ptt);
7975 	return rc;
7976 }
7977 
7978 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
7979 {
7980 	struct qed_hwfn *p_hwfn =
7981 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7982 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
7983 	struct qed_dbg_feature *qed_feature =
7984 		&cdev->dbg_params.features[feature];
7985 	u32 buf_size_dwords;
7986 	enum dbg_status rc;
7987 
7988 	if (!p_ptt)
7989 		return -EINVAL;
7990 
7991 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
7992 						   &buf_size_dwords);
7993 	if (rc != DBG_STATUS_OK)
7994 		buf_size_dwords = 0;
7995 
7996 	qed_ptt_release(p_hwfn, p_ptt);
7997 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
7998 	return qed_feature->buf_size;
7999 }
8000 
8001 u8 qed_get_debug_engine(struct qed_dev *cdev)
8002 {
8003 	return cdev->dbg_params.engine_for_debug;
8004 }
8005 
8006 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8007 {
8008 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8009 		   engine_number);
8010 	cdev->dbg_params.engine_for_debug = engine_number;
8011 }
8012 
8013 void qed_dbg_pf_init(struct qed_dev *cdev)
8014 {
8015 	const u8 *dbg_values;
8016 
8017 	/* The debug values follow the init values; their byte offset within
8018 	 * the firmware file is given by the file's first dword.
8019 	 */
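	/* Illustrative example: if the firmware file's first dword is
	 * 0x1000, the debug values start 0x1000 bytes into the file data.
	 */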
8020 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8021 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8022 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8023 }
8024 
8025 void qed_dbg_pf_exit(struct qed_dev *cdev)
8026 {
8027 	struct qed_dbg_feature *feature = NULL;
8028 	enum qed_dbg_features feature_idx;
8029 
8030 	/* Debug features' buffers may still be allocated if a debug feature
8031 	 * was used but dump wasn't called.
8032 	 */
8033 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8034 		feature = &cdev->dbg_params.features[feature_idx];
8035 		if (feature->dump_buf) {
8036 			vfree(feature->dump_buf);
8037 			feature->dump_buf = NULL;
8038 		}
8039 	}
8040 }
8041