1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Memory groups enum */
19 enum mem_groups {
20 	MEM_GROUP_PXP_MEM,
21 	MEM_GROUP_DMAE_MEM,
22 	MEM_GROUP_CM_MEM,
23 	MEM_GROUP_QM_MEM,
24 	MEM_GROUP_DORQ_MEM,
25 	MEM_GROUP_BRB_RAM,
26 	MEM_GROUP_BRB_MEM,
27 	MEM_GROUP_PRS_MEM,
28 	MEM_GROUP_IOR,
29 	MEM_GROUP_BTB_RAM,
30 	MEM_GROUP_CONN_CFC_MEM,
31 	MEM_GROUP_TASK_CFC_MEM,
32 	MEM_GROUP_CAU_PI,
33 	MEM_GROUP_CAU_MEM,
34 	MEM_GROUP_PXP_ILT,
35 	MEM_GROUP_TM_MEM,
36 	MEM_GROUP_SDM_MEM,
37 	MEM_GROUP_PBUF,
38 	MEM_GROUP_RAM,
39 	MEM_GROUP_MULD_MEM,
40 	MEM_GROUP_BTB_MEM,
41 	MEM_GROUP_RDIF_CTX,
42 	MEM_GROUP_TDIF_CTX,
43 	MEM_GROUP_CFC_MEM,
44 	MEM_GROUP_IGU_MEM,
45 	MEM_GROUP_IGU_MSIX,
46 	MEM_GROUP_CAU_SB,
47 	MEM_GROUP_BMB_RAM,
48 	MEM_GROUP_BMB_MEM,
49 	MEM_GROUPS_NUM
50 };
51 
52 /* Memory group names */
53 static const char * const s_mem_group_names[] = {
54 	"PXP_MEM",
55 	"DMAE_MEM",
56 	"CM_MEM",
57 	"QM_MEM",
58 	"DORQ_MEM",
59 	"BRB_RAM",
60 	"BRB_MEM",
61 	"PRS_MEM",
62 	"IOR",
63 	"BTB_RAM",
64 	"CONN_CFC_MEM",
65 	"TASK_CFC_MEM",
66 	"CAU_PI",
67 	"CAU_MEM",
68 	"PXP_ILT",
69 	"TM_MEM",
70 	"SDM_MEM",
71 	"PBUF",
72 	"RAM",
73 	"MULD_MEM",
74 	"BTB_MEM",
75 	"RDIF_CTX",
76 	"TDIF_CTX",
77 	"CFC_MEM",
78 	"IGU_MEM",
79 	"IGU_MSIX",
80 	"CAU_SB",
81 	"BMB_RAM",
82 	"BMB_MEM",
83 };
84 
85 /* Idle check conditions */
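/* Each condition callback below receives the register values read for an
 * idle check rule (r[]) and the rule's immediate constants (imm[]), and
 * returns non-zero when the condition holds (the rule is then reported as
 * a failure by the idle check dump).
 */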
86 
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91 
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96 
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 	return (r[0] & imm[0]) != imm[1];
100 }
101 
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 	return ((r[0] & imm[0]) >> imm[1]) !=
105 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107 
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112 
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 	return (r[0] & ~imm[0]) != imm[1];
116 }
117 
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 	return (r[0] & ~r[1]) != imm[0];
121 }
122 
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 	return r[0] != imm[0];
126 }
127 
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 	return r[0] != r[1] && r[2] == imm[0];
131 }
132 
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 	return r[0] != r[1] && r[2] > imm[0];
136 }
137 
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 	return r[0] != r[1];
141 }
142 
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 	return r[0] & imm[0];
146 }
147 
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 	return r[0] < (r[1] - imm[0]);
151 }
152 
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 	return r[0] > imm[0];
156 }
157 
158 /* Array of Idle Check conditions */
159 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
160 	cond0,
161 	cond1,
162 	cond2,
163 	cond3,
164 	cond4,
165 	cond5,
166 	cond6,
167 	cond7,
168 	cond8,
169 	cond9,
170 	cond10,
171 	cond11,
172 	cond12,
173 	cond13,
174 };
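/* Note: cond_arr is indexed by the condition ID encoded in the binary idle
 * check rule data, so the callbacks are listed here in ID order rather than
 * in their definition order above. Illustrative use:
 * cond_arr[rule->cond_id](reg_values, imm_values).
 */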
175 
176 /******************************* Data Types **********************************/
177 
178 enum platform_ids {
179 	PLATFORM_ASIC,
180 	PLATFORM_RESERVED,
181 	PLATFORM_RESERVED2,
182 	PLATFORM_RESERVED3,
183 	MAX_PLATFORM_IDS
184 };
185 
186 struct chip_platform_defs {
187 	u8 num_ports;
188 	u8 num_pfs;
189 	u8 num_vfs;
190 };
191 
192 /* Chip constant definitions */
193 struct chip_defs {
194 	const char *name;
195 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
196 };
197 
198 /* Platform constant definitions */
199 struct platform_defs {
200 	const char *name;
201 	u32 delay_factor;
202 	u32 dmae_thresh;
203 	u32 log_thresh;
204 };
205 
206 /* Storm constant definitions.
207  * Addresses are in bytes, sizes are in quad-regs.
208  */
209 struct storm_defs {
210 	char letter;
211 	enum block_id block_id;
212 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
213 	bool has_vfc;
214 	u32 sem_fast_mem_addr;
215 	u32 sem_frame_mode_addr;
216 	u32 sem_slow_enable_addr;
217 	u32 sem_slow_mode_addr;
218 	u32 sem_slow_mode1_conf_addr;
219 	u32 sem_sync_dbg_empty_addr;
220 	u32 sem_slow_dbg_empty_addr;
221 	u32 cm_ctx_wr_addr;
222 	u32 cm_conn_ag_ctx_lid_size;
223 	u32 cm_conn_ag_ctx_rd_addr;
224 	u32 cm_conn_st_ctx_lid_size;
225 	u32 cm_conn_st_ctx_rd_addr;
226 	u32 cm_task_ag_ctx_lid_size;
227 	u32 cm_task_ag_ctx_rd_addr;
228 	u32 cm_task_st_ctx_lid_size;
229 	u32 cm_task_st_ctx_rd_addr;
230 };
231 
232 /* Block constant definitions */
233 struct block_defs {
234 	const char *name;
235 	bool exists[MAX_CHIP_IDS];
236 	bool associated_to_storm;
237 
238 	/* Valid only if associated_to_storm is true */
239 	u32 storm_id;
240 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
241 	u32 dbg_select_addr;
242 	u32 dbg_enable_addr;
243 	u32 dbg_shift_addr;
244 	u32 dbg_force_valid_addr;
245 	u32 dbg_force_frame_addr;
246 	bool has_reset_bit;
247 
248 	/* If true, block is taken out of reset before dump */
249 	bool unreset;
250 	enum dbg_reset_regs reset_reg;
251 
252 	/* Bit offset in reset register */
253 	u8 reset_bit_offset;
254 };
255 
256 /* Reset register definitions */
257 struct reset_reg_defs {
258 	u32 addr;
259 	bool exists[MAX_CHIP_IDS];
260 	u32 unreset_val[MAX_CHIP_IDS];
261 };
262 
263 struct grc_param_defs {
264 	u32 default_val[MAX_CHIP_IDS];
265 	u32 min;
266 	u32 max;
267 	bool is_preset;
268 	bool is_persistent;
269 	u32 exclude_all_preset_val;
270 	u32 crash_preset_val;
271 };
272 
273 /* Address is in 128b units. Width is in bits. */
274 struct rss_mem_defs {
275 	const char *mem_name;
276 	const char *type_name;
277 	u32 addr;
278 	u32 entry_width;
279 	u32 num_entries[MAX_CHIP_IDS];
280 };
281 
282 struct vfc_ram_defs {
283 	const char *mem_name;
284 	const char *type_name;
285 	u32 base_row;
286 	u32 num_rows;
287 };
288 
289 struct big_ram_defs {
290 	const char *instance_name;
291 	enum mem_groups mem_group_id;
292 	enum mem_groups ram_mem_group_id;
293 	enum dbg_grc_params grc_param;
294 	u32 addr_reg_addr;
295 	u32 data_reg_addr;
296 	u32 is_256b_reg_addr;
297 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
298 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
299 };
300 
301 struct phy_defs {
302 	const char *phy_name;
303 
304 	/* PHY base GRC address */
305 	u32 base_addr;
306 
307 	/* Relative address of indirect TBUS address register (bits 0..7) */
308 	u32 tbus_addr_lo_addr;
309 
310 	/* Relative address of indirect TBUS address register (bits 8..10) */
311 	u32 tbus_addr_hi_addr;
312 
313 	/* Relative address of indirect TBUS data register (bits 0..7) */
314 	u32 tbus_data_lo_addr;
315 
316 	/* Relative address of indirect TBUS data register (bits 8..11) */
317 	u32 tbus_data_hi_addr;
318 };
319 
320 /******************************** Constants **********************************/
321 
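/* Upper bounds for the DBG_GRC_PARAM_NUM_LCIDS / DBG_GRC_PARAM_NUM_LTIDS
 * parameters (number of logical connection / task IDs considered when
 * dumping CM contexts).
 */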
322 #define MAX_LCIDS			320
323 #define MAX_LTIDS			320
324 
325 #define NUM_IOR_SETS			2
326 #define IORS_PER_SET			176
327 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
328 
329 #define BYTES_IN_DWORD			sizeof(u32)
330 
331 /* In the macros below, size and offset are specified in bits */
332 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
333 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
334 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
335 #define FIELD_DWORD_OFFSET(type, field) \
336 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
337 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
338 #define FIELD_BIT_MASK(type, field) \
339 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
340 	 FIELD_DWORD_SHIFT(type, field))
341 
342 #define SET_VAR_FIELD(var, type, field, val) \
343 	do { \
344 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
345 		(~FIELD_BIT_MASK(type, field));	\
346 		var[FIELD_DWORD_OFFSET(type, field)] |= \
347 		(val) << FIELD_DWORD_SHIFT(type, field); \
348 	} while (0)
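/* Illustrative example of the field macros above, using the VFC CAM command
 * layout defined later in this file (VFC_CAM_CMD_ROW_OFFSET = 48,
 * VFC_CAM_CMD_ROW_SIZE = 9):
 *
 *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * clears mask 0x01ff0000 in cam_cmd[1] (dword 48 / 32, shift 48 % 32 = 16)
 * and ORs in (row << 16).
 */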
349 
350 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
351 	do { \
352 		for (i = 0; i < (arr_size); i++) \
353 			qed_wr(dev, ptt, addr,	(arr)[i]); \
354 	} while (0)
355 
356 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
357 	do { \
358 		for (i = 0; i < (arr_size); i++) \
359 			(arr)[i] = qed_rd(dev, ptt, addr); \
360 	} while (0)
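/* Note: both macros above rely on a loop index 'i' declared in the calling
 * scope, and they access the same register address on every iteration
 * (typically a wide or indirect register interface rather than an array in
 * GRC space).
 */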
361 
362 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
363 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
364 
365 /* Extra lines include a signature line + optional latency events line */
366 #define NUM_EXTRA_DBG_LINES(block_desc) \
367 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
368 #define NUM_DBG_LINES(block_desc) \
369 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
370 
371 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
372 #define RAM_LINES_TO_BYTES(lines) \
373 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
374 
375 #define REG_DUMP_LEN_SHIFT		24
376 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
377 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
378 
379 #define IDLE_CHK_RULE_SIZE_DWORDS \
380 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
381 
382 #define IDLE_CHK_RESULT_HDR_DWORDS \
383 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
384 
385 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
386 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
387 
388 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
389 
390 /* The sizes and offsets below are specified in bits */
391 #define VFC_CAM_CMD_STRUCT_SIZE		64
392 #define VFC_CAM_CMD_ROW_OFFSET		48
393 #define VFC_CAM_CMD_ROW_SIZE		9
394 #define VFC_CAM_ADDR_STRUCT_SIZE	16
395 #define VFC_CAM_ADDR_OP_OFFSET		0
396 #define VFC_CAM_ADDR_OP_SIZE		4
397 #define VFC_CAM_RESP_STRUCT_SIZE	256
398 #define VFC_RAM_ADDR_STRUCT_SIZE	16
399 #define VFC_RAM_ADDR_OP_OFFSET		0
400 #define VFC_RAM_ADDR_OP_SIZE		2
401 #define VFC_RAM_ADDR_ROW_OFFSET		2
402 #define VFC_RAM_ADDR_ROW_SIZE		10
403 #define VFC_RAM_RESP_STRUCT_SIZE	256
404 
405 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
406 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
407 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
408 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
409 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
410 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
411 
412 #define NUM_VFC_RAM_TYPES		4
413 
414 #define VFC_CAM_NUM_ROWS		512
415 
416 #define VFC_OPCODE_CAM_RD		14
417 #define VFC_OPCODE_RAM_RD		0
418 
419 #define NUM_RSS_MEM_TYPES		5
420 
421 #define NUM_BIG_RAM_TYPES		3
422 #define BIG_RAM_NAME_LEN		3
423 
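/* Each TBUS address yields two data bytes (lo + hi), so the PHY dump holds
 * NUM_PHY_TBUS_ADDRESSES * 2 bytes, i.e. NUM_PHY_TBUS_ADDRESSES / 2 dwords.
 */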
424 #define NUM_PHY_TBUS_ADDRESSES		2048
425 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
426 
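/* Byte offset added to a reset register address when writing the per-chip
 * unreset values (used when taking blocks out of reset before dumping).
 */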
427 #define RESET_REG_UNRESET_OFFSET	4
428 
429 #define STALL_DELAY_MS			500
430 
431 #define STATIC_DEBUG_LINE_DWORDS	9
432 
433 #define NUM_COMMON_GLOBAL_PARAMS	8
434 
435 #define FW_IMG_MAIN			1
436 
437 #define REG_FIFO_ELEMENT_DWORDS		2
438 #define REG_FIFO_DEPTH_ELEMENTS		32
439 #define REG_FIFO_DEPTH_DWORDS \
440 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
441 
442 #define IGU_FIFO_ELEMENT_DWORDS		4
443 #define IGU_FIFO_DEPTH_ELEMENTS		64
444 #define IGU_FIFO_DEPTH_DWORDS \
445 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
446 
447 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
448 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
449 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
450 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
451 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
452 
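/* GRC address of the MCP scratchpad entry that holds the offset/size
 * (offsize) word of the MCP trace section.
 */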
453 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
454 	(MCP_REG_SCRATCH + \
455 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
456 
457 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
458 #define EMPTY_FW_IMAGE_STR		"???????????????"
459 
460 /***************************** Constant Arrays *******************************/
461 
462 struct dbg_array {
463 	const u32 *ptr;
464 	u32 size_in_dwords;
465 };
466 
467 /* Debug arrays */
468 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
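/* Note: the debug arrays are empty at compile time; they are populated at
 * runtime from the binary debug data buffer registered via
 * qed_dbg_set_bin_ptr().
 */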
469 
470 /* Chip constant definitions array */
471 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
472 	{ "bb",
473 	  {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
474 	   {0, 0, 0},
475 	   {0, 0, 0},
476 	   {0, 0, 0} } },
477 	{ "ah",
478 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
479 	   {0, 0, 0},
480 	   {0, 0, 0},
481 	   {0, 0, 0} } },
482 	{ "reserved",
483 	   {{0, 0, 0},
484 	   {0, 0, 0},
485 	   {0, 0, 0},
486 	   {0, 0, 0} } }
487 };
488 
489 /* Storm constant definitions array */
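/* Note: indexed by storm ID (DBG_TSTORM_ID, DBG_MSTORM_ID, DBG_USTORM_ID,
 * DBG_XSTORM_ID, DBG_YSTORM_ID, DBG_PSTORM_ID); the entry order below must
 * match that enum.
 */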
490 static struct storm_defs s_storm_defs[] = {
491 	/* Tstorm */
492 	{'T', BLOCK_TSEM,
493 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
494 	  DBG_BUS_CLIENT_RBCT}, true,
495 	 TSEM_REG_FAST_MEMORY,
496 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
497 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
498 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
499 	 TCM_REG_CTX_RBC_ACCS,
500 	 4, TCM_REG_AGG_CON_CTX,
501 	 16, TCM_REG_SM_CON_CTX,
502 	 2, TCM_REG_AGG_TASK_CTX,
503 	 4, TCM_REG_SM_TASK_CTX},
504 
505 	/* Mstorm */
506 	{'M', BLOCK_MSEM,
507 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
508 	  DBG_BUS_CLIENT_RBCM}, false,
509 	 MSEM_REG_FAST_MEMORY,
510 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
511 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
512 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
513 	 MCM_REG_CTX_RBC_ACCS,
514 	 1, MCM_REG_AGG_CON_CTX,
515 	 10, MCM_REG_SM_CON_CTX,
516 	 2, MCM_REG_AGG_TASK_CTX,
517 	 7, MCM_REG_SM_TASK_CTX},
518 
519 	/* Ustorm */
520 	{'U', BLOCK_USEM,
521 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
522 	  DBG_BUS_CLIENT_RBCU}, false,
523 	 USEM_REG_FAST_MEMORY,
524 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
525 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
526 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
527 	 UCM_REG_CTX_RBC_ACCS,
528 	 2, UCM_REG_AGG_CON_CTX,
529 	 13, UCM_REG_SM_CON_CTX,
530 	 3, UCM_REG_AGG_TASK_CTX,
531 	 3, UCM_REG_SM_TASK_CTX},
532 
533 	/* Xstorm */
534 	{'X', BLOCK_XSEM,
535 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
536 	  DBG_BUS_CLIENT_RBCX}, false,
537 	 XSEM_REG_FAST_MEMORY,
538 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
539 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
540 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
541 	 XCM_REG_CTX_RBC_ACCS,
542 	 9, XCM_REG_AGG_CON_CTX,
543 	 15, XCM_REG_SM_CON_CTX,
544 	 0, 0,
545 	 0, 0},
546 
547 	/* Ystorm */
548 	{'Y', BLOCK_YSEM,
549 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
550 	  DBG_BUS_CLIENT_RBCY}, false,
551 	 YSEM_REG_FAST_MEMORY,
552 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
553 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
554 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
555 	 YCM_REG_CTX_RBC_ACCS,
556 	 2, YCM_REG_AGG_CON_CTX,
557 	 3, YCM_REG_SM_CON_CTX,
558 	 2, YCM_REG_AGG_TASK_CTX,
559 	 12, YCM_REG_SM_TASK_CTX},
560 
561 	/* Pstorm */
562 	{'P', BLOCK_PSEM,
563 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
564 	  DBG_BUS_CLIENT_RBCS}, true,
565 	 PSEM_REG_FAST_MEMORY,
566 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
567 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
568 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
569 	 PCM_REG_CTX_RBC_ACCS,
570 	 0, 0,
571 	 10, PCM_REG_SM_CON_CTX,
572 	 0, 0,
573 	 0, 0}
574 };
575 
576 /* Block definitions array */
577 
578 static struct block_defs block_grc_defs = {
579 	"grc",
580 	{true, true, true}, false, 0,
581 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
582 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
583 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
584 	GRC_REG_DBG_FORCE_FRAME,
585 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
586 };
587 
588 static struct block_defs block_miscs_defs = {
589 	"miscs", {true, true, true}, false, 0,
590 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
591 	0, 0, 0, 0, 0,
592 	false, false, MAX_DBG_RESET_REGS, 0
593 };
594 
595 static struct block_defs block_misc_defs = {
596 	"misc", {true, true, true}, false, 0,
597 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
598 	0, 0, 0, 0, 0,
599 	false, false, MAX_DBG_RESET_REGS, 0
600 };
601 
602 static struct block_defs block_dbu_defs = {
603 	"dbu", {true, true, true}, false, 0,
604 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
605 	0, 0, 0, 0, 0,
606 	false, false, MAX_DBG_RESET_REGS, 0
607 };
608 
609 static struct block_defs block_pglue_b_defs = {
610 	"pglue_b",
611 	{true, true, true}, false, 0,
612 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
613 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
614 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
615 	PGLUE_B_REG_DBG_FORCE_FRAME,
616 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
617 };
618 
619 static struct block_defs block_cnig_defs = {
620 	"cnig",
621 	{true, true, true}, false, 0,
622 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
623 	 DBG_BUS_CLIENT_RBCW},
624 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
625 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
626 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
627 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
628 };
629 
630 static struct block_defs block_cpmu_defs = {
631 	"cpmu", {true, true, true}, false, 0,
632 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
633 	0, 0, 0, 0, 0,
634 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
635 };
636 
637 static struct block_defs block_ncsi_defs = {
638 	"ncsi",
639 	{true, true, true}, false, 0,
640 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
641 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
642 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
643 	NCSI_REG_DBG_FORCE_FRAME,
644 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
645 };
646 
647 static struct block_defs block_opte_defs = {
648 	"opte", {true, true, false}, false, 0,
649 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
650 	0, 0, 0, 0, 0,
651 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
652 };
653 
654 static struct block_defs block_bmb_defs = {
655 	"bmb",
656 	{true, true, true}, false, 0,
657 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
658 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
659 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
660 	BMB_REG_DBG_FORCE_FRAME,
661 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
662 };
663 
664 static struct block_defs block_pcie_defs = {
665 	"pcie",
666 	{true, true, true}, false, 0,
667 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
668 	 DBG_BUS_CLIENT_RBCH},
669 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
670 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
671 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
672 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
673 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
674 	false, false, MAX_DBG_RESET_REGS, 0
675 };
676 
677 static struct block_defs block_mcp_defs = {
678 	"mcp", {true, true, true}, false, 0,
679 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
680 	0, 0, 0, 0, 0,
681 	false, false, MAX_DBG_RESET_REGS, 0
682 };
683 
684 static struct block_defs block_mcp2_defs = {
685 	"mcp2",
686 	{true, true, true}, false, 0,
687 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
688 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
689 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
690 	MCP2_REG_DBG_FORCE_FRAME,
691 	false, false, MAX_DBG_RESET_REGS, 0
692 };
693 
694 static struct block_defs block_pswhst_defs = {
695 	"pswhst",
696 	{true, true, true}, false, 0,
697 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
698 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
699 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
700 	PSWHST_REG_DBG_FORCE_FRAME,
701 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
702 };
703 
704 static struct block_defs block_pswhst2_defs = {
705 	"pswhst2",
706 	{true, true, true}, false, 0,
707 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
708 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
709 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
710 	PSWHST2_REG_DBG_FORCE_FRAME,
711 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
712 };
713 
714 static struct block_defs block_pswrd_defs = {
715 	"pswrd",
716 	{true, true, true}, false, 0,
717 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
718 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
719 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
720 	PSWRD_REG_DBG_FORCE_FRAME,
721 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
722 };
723 
724 static struct block_defs block_pswrd2_defs = {
725 	"pswrd2",
726 	{true, true, true}, false, 0,
727 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
728 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
729 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
730 	PSWRD2_REG_DBG_FORCE_FRAME,
731 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
732 };
733 
734 static struct block_defs block_pswwr_defs = {
735 	"pswwr",
736 	{true, true, true}, false, 0,
737 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
738 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
739 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
740 	PSWWR_REG_DBG_FORCE_FRAME,
741 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
742 };
743 
744 static struct block_defs block_pswwr2_defs = {
745 	"pswwr2", {true, true, true}, false, 0,
746 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
747 	0, 0, 0, 0, 0,
748 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
749 };
750 
751 static struct block_defs block_pswrq_defs = {
752 	"pswrq",
753 	{true, true, true}, false, 0,
754 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
755 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
756 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
757 	PSWRQ_REG_DBG_FORCE_FRAME,
758 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
759 };
760 
761 static struct block_defs block_pswrq2_defs = {
762 	"pswrq2",
763 	{true, true, true}, false, 0,
764 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
765 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
766 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
767 	PSWRQ2_REG_DBG_FORCE_FRAME,
768 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
769 };
770 
771 static struct block_defs block_pglcs_defs = {
772 	"pglcs",
773 	{true, true, true}, false, 0,
774 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
775 	 DBG_BUS_CLIENT_RBCH},
776 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
777 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
778 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
779 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
780 };
781 
782 static struct block_defs block_ptu_defs = {
783 	"ptu",
784 	{true, true, true}, false, 0,
785 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
786 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
787 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
788 	PTU_REG_DBG_FORCE_FRAME,
789 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
790 };
791 
792 static struct block_defs block_dmae_defs = {
793 	"dmae",
794 	{true, true, true}, false, 0,
795 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
796 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
797 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
798 	DMAE_REG_DBG_FORCE_FRAME,
799 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
800 };
801 
802 static struct block_defs block_tcm_defs = {
803 	"tcm",
804 	{true, true, true}, true, DBG_TSTORM_ID,
805 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
806 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
807 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
808 	TCM_REG_DBG_FORCE_FRAME,
809 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
810 };
811 
812 static struct block_defs block_mcm_defs = {
813 	"mcm",
814 	{true, true, true}, true, DBG_MSTORM_ID,
815 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
816 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
817 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
818 	MCM_REG_DBG_FORCE_FRAME,
819 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
820 };
821 
822 static struct block_defs block_ucm_defs = {
823 	"ucm",
824 	{true, true, true}, true, DBG_USTORM_ID,
825 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
826 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
827 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
828 	UCM_REG_DBG_FORCE_FRAME,
829 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
830 };
831 
832 static struct block_defs block_xcm_defs = {
833 	"xcm",
834 	{true, true, true}, true, DBG_XSTORM_ID,
835 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
836 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
837 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
838 	XCM_REG_DBG_FORCE_FRAME,
839 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
840 };
841 
842 static struct block_defs block_ycm_defs = {
843 	"ycm",
844 	{true, true, true}, true, DBG_YSTORM_ID,
845 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
846 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
847 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
848 	YCM_REG_DBG_FORCE_FRAME,
849 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
850 };
851 
852 static struct block_defs block_pcm_defs = {
853 	"pcm",
854 	{true, true, true}, true, DBG_PSTORM_ID,
855 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
856 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
857 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
858 	PCM_REG_DBG_FORCE_FRAME,
859 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
860 };
861 
862 static struct block_defs block_qm_defs = {
863 	"qm",
864 	{true, true, true}, false, 0,
865 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
866 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
867 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
868 	QM_REG_DBG_FORCE_FRAME,
869 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
870 };
871 
872 static struct block_defs block_tm_defs = {
873 	"tm",
874 	{true, true, true}, false, 0,
875 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
876 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
877 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
878 	TM_REG_DBG_FORCE_FRAME,
879 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
880 };
881 
882 static struct block_defs block_dorq_defs = {
883 	"dorq",
884 	{true, true, true}, false, 0,
885 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
886 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
887 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
888 	DORQ_REG_DBG_FORCE_FRAME,
889 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
890 };
891 
892 static struct block_defs block_brb_defs = {
893 	"brb",
894 	{true, true, true}, false, 0,
895 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
896 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
897 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
898 	BRB_REG_DBG_FORCE_FRAME,
899 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
900 };
901 
902 static struct block_defs block_src_defs = {
903 	"src",
904 	{true, true, true}, false, 0,
905 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
906 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
907 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
908 	SRC_REG_DBG_FORCE_FRAME,
909 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
910 };
911 
912 static struct block_defs block_prs_defs = {
913 	"prs",
914 	{true, true, true}, false, 0,
915 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
916 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
917 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
918 	PRS_REG_DBG_FORCE_FRAME,
919 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
920 };
921 
922 static struct block_defs block_tsdm_defs = {
923 	"tsdm",
924 	{true, true, true}, true, DBG_TSTORM_ID,
925 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
926 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
927 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
928 	TSDM_REG_DBG_FORCE_FRAME,
929 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
930 };
931 
932 static struct block_defs block_msdm_defs = {
933 	"msdm",
934 	{true, true, true}, true, DBG_MSTORM_ID,
935 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
936 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
937 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
938 	MSDM_REG_DBG_FORCE_FRAME,
939 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
940 };
941 
942 static struct block_defs block_usdm_defs = {
943 	"usdm",
944 	{true, true, true}, true, DBG_USTORM_ID,
945 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
946 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
947 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
948 	USDM_REG_DBG_FORCE_FRAME,
949 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
950 };
951 
952 static struct block_defs block_xsdm_defs = {
953 	"xsdm",
954 	{true, true, true}, true, DBG_XSTORM_ID,
955 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
956 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
957 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
958 	XSDM_REG_DBG_FORCE_FRAME,
959 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
960 };
961 
962 static struct block_defs block_ysdm_defs = {
963 	"ysdm",
964 	{true, true, true}, true, DBG_YSTORM_ID,
965 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
966 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
967 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
968 	YSDM_REG_DBG_FORCE_FRAME,
969 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
970 };
971 
972 static struct block_defs block_psdm_defs = {
973 	"psdm",
974 	{true, true, true}, true, DBG_PSTORM_ID,
975 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
976 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
977 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
978 	PSDM_REG_DBG_FORCE_FRAME,
979 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
980 };
981 
982 static struct block_defs block_tsem_defs = {
983 	"tsem",
984 	{true, true, true}, true, DBG_TSTORM_ID,
985 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
986 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
987 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
988 	TSEM_REG_DBG_FORCE_FRAME,
989 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
990 };
991 
992 static struct block_defs block_msem_defs = {
993 	"msem",
994 	{true, true, true}, true, DBG_MSTORM_ID,
995 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
996 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
997 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
998 	MSEM_REG_DBG_FORCE_FRAME,
999 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
1000 };
1001 
1002 static struct block_defs block_usem_defs = {
1003 	"usem",
1004 	{true, true, true}, true, DBG_USTORM_ID,
1005 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1006 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1007 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1008 	USEM_REG_DBG_FORCE_FRAME,
1009 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1010 };
1011 
1012 static struct block_defs block_xsem_defs = {
1013 	"xsem",
1014 	{true, true, true}, true, DBG_XSTORM_ID,
1015 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1016 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1017 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1018 	XSEM_REG_DBG_FORCE_FRAME,
1019 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1020 };
1021 
1022 static struct block_defs block_ysem_defs = {
1023 	"ysem",
1024 	{true, true, true}, true, DBG_YSTORM_ID,
1025 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1026 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1027 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1028 	YSEM_REG_DBG_FORCE_FRAME,
1029 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1030 };
1031 
1032 static struct block_defs block_psem_defs = {
1033 	"psem",
1034 	{true, true, true}, true, DBG_PSTORM_ID,
1035 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1036 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1037 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1038 	PSEM_REG_DBG_FORCE_FRAME,
1039 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1040 };
1041 
1042 static struct block_defs block_rss_defs = {
1043 	"rss",
1044 	{true, true, true}, false, 0,
1045 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1046 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1047 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1048 	RSS_REG_DBG_FORCE_FRAME,
1049 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1050 };
1051 
1052 static struct block_defs block_tmld_defs = {
1053 	"tmld",
1054 	{true, true, true}, false, 0,
1055 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1056 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1057 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1058 	TMLD_REG_DBG_FORCE_FRAME,
1059 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1060 };
1061 
1062 static struct block_defs block_muld_defs = {
1063 	"muld",
1064 	{true, true, true}, false, 0,
1065 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1066 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1067 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1068 	MULD_REG_DBG_FORCE_FRAME,
1069 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1070 };
1071 
1072 static struct block_defs block_yuld_defs = {
1073 	"yuld",
1074 	{true, true, false}, false, 0,
1075 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1076 	 MAX_DBG_BUS_CLIENTS},
1077 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1078 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1079 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1080 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1081 	15
1082 };
1083 
1084 static struct block_defs block_xyld_defs = {
1085 	"xyld",
1086 	{true, true, true}, false, 0,
1087 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1088 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1089 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1090 	XYLD_REG_DBG_FORCE_FRAME,
1091 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1092 };
1093 
1094 static struct block_defs block_ptld_defs = {
1095 	"ptld",
1096 	{false, false, true}, false, 0,
1097 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1098 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1099 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1100 	PTLD_REG_DBG_FORCE_FRAME_E5,
1101 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1102 	28
1103 };
1104 
1105 static struct block_defs block_ypld_defs = {
1106 	"ypld",
1107 	{false, false, true}, false, 0,
1108 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1109 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1110 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1111 	YPLD_REG_DBG_FORCE_FRAME_E5,
1112 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1113 	27
1114 };
1115 
1116 static struct block_defs block_prm_defs = {
1117 	"prm",
1118 	{true, true, true}, false, 0,
1119 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1120 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1121 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1122 	PRM_REG_DBG_FORCE_FRAME,
1123 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1124 };
1125 
1126 static struct block_defs block_pbf_pb1_defs = {
1127 	"pbf_pb1",
1128 	{true, true, true}, false, 0,
1129 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1130 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1131 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1132 	PBF_PB1_REG_DBG_FORCE_FRAME,
1133 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1134 	11
1135 };
1136 
1137 static struct block_defs block_pbf_pb2_defs = {
1138 	"pbf_pb2",
1139 	{true, true, true}, false, 0,
1140 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1141 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1142 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1143 	PBF_PB2_REG_DBG_FORCE_FRAME,
1144 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1145 	12
1146 };
1147 
1148 static struct block_defs block_rpb_defs = {
1149 	"rpb",
1150 	{true, true, true}, false, 0,
1151 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1152 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1153 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1154 	RPB_REG_DBG_FORCE_FRAME,
1155 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1156 };
1157 
1158 static struct block_defs block_btb_defs = {
1159 	"btb",
1160 	{true, true, true}, false, 0,
1161 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1162 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1163 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1164 	BTB_REG_DBG_FORCE_FRAME,
1165 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1166 };
1167 
1168 static struct block_defs block_pbf_defs = {
1169 	"pbf",
1170 	{true, true, true}, false, 0,
1171 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1172 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1173 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1174 	PBF_REG_DBG_FORCE_FRAME,
1175 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1176 };
1177 
1178 static struct block_defs block_rdif_defs = {
1179 	"rdif",
1180 	{true, true, true}, false, 0,
1181 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1182 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1183 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1184 	RDIF_REG_DBG_FORCE_FRAME,
1185 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1186 };
1187 
1188 static struct block_defs block_tdif_defs = {
1189 	"tdif",
1190 	{true, true, true}, false, 0,
1191 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1192 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1193 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1194 	TDIF_REG_DBG_FORCE_FRAME,
1195 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1196 };
1197 
1198 static struct block_defs block_cdu_defs = {
1199 	"cdu",
1200 	{true, true, true}, false, 0,
1201 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1202 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1203 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1204 	CDU_REG_DBG_FORCE_FRAME,
1205 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1206 };
1207 
1208 static struct block_defs block_ccfc_defs = {
1209 	"ccfc",
1210 	{true, true, true}, false, 0,
1211 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1212 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1213 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1214 	CCFC_REG_DBG_FORCE_FRAME,
1215 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1216 };
1217 
1218 static struct block_defs block_tcfc_defs = {
1219 	"tcfc",
1220 	{true, true, true}, false, 0,
1221 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1222 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1223 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1224 	TCFC_REG_DBG_FORCE_FRAME,
1225 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1226 };
1227 
1228 static struct block_defs block_igu_defs = {
1229 	"igu",
1230 	{true, true, true}, false, 0,
1231 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1232 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1233 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1234 	IGU_REG_DBG_FORCE_FRAME,
1235 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1236 };
1237 
1238 static struct block_defs block_cau_defs = {
1239 	"cau",
1240 	{true, true, true}, false, 0,
1241 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1242 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1243 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1244 	CAU_REG_DBG_FORCE_FRAME,
1245 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1246 };
1247 
1248 static struct block_defs block_rgfs_defs = {
1249 	"rgfs", {false, false, true}, false, 0,
1250 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1251 	0, 0, 0, 0, 0,
1252 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1253 };
1254 
1255 static struct block_defs block_rgsrc_defs = {
1256 	"rgsrc",
1257 	{false, false, true}, false, 0,
1258 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1259 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1260 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1261 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1262 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1263 	30
1264 };
1265 
1266 static struct block_defs block_tgfs_defs = {
1267 	"tgfs", {false, false, true}, false, 0,
1268 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1269 	0, 0, 0, 0, 0,
1270 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1271 };
1272 
1273 static struct block_defs block_tgsrc_defs = {
1274 	"tgsrc",
1275 	{false, false, true}, false, 0,
1276 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1277 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1278 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1279 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1280 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1281 	31
1282 };
1283 
1284 static struct block_defs block_umac_defs = {
1285 	"umac",
1286 	{true, true, true}, false, 0,
1287 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1288 	 DBG_BUS_CLIENT_RBCZ},
1289 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1290 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1291 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1292 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1293 };
1294 
1295 static struct block_defs block_xmac_defs = {
1296 	"xmac", {true, false, false}, false, 0,
1297 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1298 	0, 0, 0, 0, 0,
1299 	false, false, MAX_DBG_RESET_REGS, 0
1300 };
1301 
1302 static struct block_defs block_dbg_defs = {
1303 	"dbg", {true, true, true}, false, 0,
1304 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1305 	0, 0, 0, 0, 0,
1306 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1307 };
1308 
1309 static struct block_defs block_nig_defs = {
1310 	"nig",
1311 	{true, true, true}, false, 0,
1312 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1313 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1314 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1315 	NIG_REG_DBG_FORCE_FRAME,
1316 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1317 };
1318 
1319 static struct block_defs block_wol_defs = {
1320 	"wol",
1321 	{false, true, true}, false, 0,
1322 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1323 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1324 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1325 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1326 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1327 };
1328 
1329 static struct block_defs block_bmbn_defs = {
1330 	"bmbn",
1331 	{false, true, true}, false, 0,
1332 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1333 	 DBG_BUS_CLIENT_RBCB},
1334 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1335 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1336 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1337 	false, false, MAX_DBG_RESET_REGS, 0
1338 };
1339 
1340 static struct block_defs block_ipc_defs = {
1341 	"ipc", {true, true, true}, false, 0,
1342 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1343 	0, 0, 0, 0, 0,
1344 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1345 };
1346 
1347 static struct block_defs block_nwm_defs = {
1348 	"nwm",
1349 	{false, true, true}, false, 0,
1350 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1351 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1352 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1353 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1354 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1355 };
1356 
1357 static struct block_defs block_nws_defs = {
1358 	"nws",
1359 	{false, true, true}, false, 0,
1360 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1361 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1362 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1363 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1364 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1365 };
1366 
1367 static struct block_defs block_ms_defs = {
1368 	"ms",
1369 	{false, true, true}, false, 0,
1370 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1371 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1372 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1373 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1374 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1375 };
1376 
1377 static struct block_defs block_phy_pcie_defs = {
1378 	"phy_pcie",
1379 	{false, true, true}, false, 0,
1380 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1381 	 DBG_BUS_CLIENT_RBCH},
1382 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1383 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1384 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1385 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1386 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1387 	false, false, MAX_DBG_RESET_REGS, 0
1388 };
1389 
1390 static struct block_defs block_led_defs = {
1391 	"led", {false, true, true}, false, 0,
1392 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1393 	0, 0, 0, 0, 0,
1394 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1395 };
1396 
1397 static struct block_defs block_avs_wrap_defs = {
1398 	"avs_wrap", {false, true, false}, false, 0,
1399 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1400 	0, 0, 0, 0, 0,
1401 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1402 };
1403 
1404 static struct block_defs block_pxpreqbus_defs = {
1405 	"pxpreqbus", {false, false, false}, false, 0,
1406 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1407 	0, 0, 0, 0, 0,
1408 	false, false, MAX_DBG_RESET_REGS, 0
1409 };
1410 
1411 static struct block_defs block_misc_aeu_defs = {
1412 	"misc_aeu", {true, true, true}, false, 0,
1413 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1414 	0, 0, 0, 0, 0,
1415 	false, false, MAX_DBG_RESET_REGS, 0
1416 };
1417 
1418 static struct block_defs block_bar0_map_defs = {
1419 	"bar0_map", {true, true, true}, false, 0,
1420 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1421 	0, 0, 0, 0, 0,
1422 	false, false, MAX_DBG_RESET_REGS, 0
1423 };
1424 
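/* Note: indexed by enum block_id; the entry order below must match that
 * enum.
 */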
1425 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1426 	&block_grc_defs,
1427 	&block_miscs_defs,
1428 	&block_misc_defs,
1429 	&block_dbu_defs,
1430 	&block_pglue_b_defs,
1431 	&block_cnig_defs,
1432 	&block_cpmu_defs,
1433 	&block_ncsi_defs,
1434 	&block_opte_defs,
1435 	&block_bmb_defs,
1436 	&block_pcie_defs,
1437 	&block_mcp_defs,
1438 	&block_mcp2_defs,
1439 	&block_pswhst_defs,
1440 	&block_pswhst2_defs,
1441 	&block_pswrd_defs,
1442 	&block_pswrd2_defs,
1443 	&block_pswwr_defs,
1444 	&block_pswwr2_defs,
1445 	&block_pswrq_defs,
1446 	&block_pswrq2_defs,
1447 	&block_pglcs_defs,
1448 	&block_dmae_defs,
1449 	&block_ptu_defs,
1450 	&block_tcm_defs,
1451 	&block_mcm_defs,
1452 	&block_ucm_defs,
1453 	&block_xcm_defs,
1454 	&block_ycm_defs,
1455 	&block_pcm_defs,
1456 	&block_qm_defs,
1457 	&block_tm_defs,
1458 	&block_dorq_defs,
1459 	&block_brb_defs,
1460 	&block_src_defs,
1461 	&block_prs_defs,
1462 	&block_tsdm_defs,
1463 	&block_msdm_defs,
1464 	&block_usdm_defs,
1465 	&block_xsdm_defs,
1466 	&block_ysdm_defs,
1467 	&block_psdm_defs,
1468 	&block_tsem_defs,
1469 	&block_msem_defs,
1470 	&block_usem_defs,
1471 	&block_xsem_defs,
1472 	&block_ysem_defs,
1473 	&block_psem_defs,
1474 	&block_rss_defs,
1475 	&block_tmld_defs,
1476 	&block_muld_defs,
1477 	&block_yuld_defs,
1478 	&block_xyld_defs,
1479 	&block_ptld_defs,
1480 	&block_ypld_defs,
1481 	&block_prm_defs,
1482 	&block_pbf_pb1_defs,
1483 	&block_pbf_pb2_defs,
1484 	&block_rpb_defs,
1485 	&block_btb_defs,
1486 	&block_pbf_defs,
1487 	&block_rdif_defs,
1488 	&block_tdif_defs,
1489 	&block_cdu_defs,
1490 	&block_ccfc_defs,
1491 	&block_tcfc_defs,
1492 	&block_igu_defs,
1493 	&block_cau_defs,
1494 	&block_rgfs_defs,
1495 	&block_rgsrc_defs,
1496 	&block_tgfs_defs,
1497 	&block_tgsrc_defs,
1498 	&block_umac_defs,
1499 	&block_xmac_defs,
1500 	&block_dbg_defs,
1501 	&block_nig_defs,
1502 	&block_wol_defs,
1503 	&block_bmbn_defs,
1504 	&block_ipc_defs,
1505 	&block_nwm_defs,
1506 	&block_nws_defs,
1507 	&block_ms_defs,
1508 	&block_phy_pcie_defs,
1509 	&block_led_defs,
1510 	&block_avs_wrap_defs,
1511 	&block_pxpreqbus_defs,
1512 	&block_misc_aeu_defs,
1513 	&block_bar0_map_defs,
1514 };
1515 
1516 static struct platform_defs s_platform_defs[] = {
1517 	{"asic", 1, 256, 32768},
1518 	{"reserved", 0, 0, 0},
1519 	{"reserved2", 0, 0, 0},
1520 	{"reserved3", 0, 0, 0}
1521 };
1522 
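/* GRC parameter definitions array, indexed by enum dbg_grc_params. Each
 * entry follows struct grc_param_defs: {default value per chip}, min, max,
 * is_preset, is_persistent, exclude_all_preset_val, crash_preset_val.
 */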
1523 static struct grc_param_defs s_grc_param_defs[] = {
1524 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1525 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1526 
1527 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1528 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1529 
1530 	/* DBG_GRC_PARAM_DUMP_USTORM */
1531 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1532 
1533 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1534 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1535 
1536 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1537 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1538 
1539 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1540 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1541 
1542 	/* DBG_GRC_PARAM_DUMP_REGS */
1543 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1544 
1545 	/* DBG_GRC_PARAM_DUMP_RAM */
1546 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1547 
1548 	/* DBG_GRC_PARAM_DUMP_PBUF */
1549 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1550 
1551 	/* DBG_GRC_PARAM_DUMP_IOR */
1552 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1553 
1554 	/* DBG_GRC_PARAM_DUMP_VFC */
1555 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1556 
1557 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1558 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1559 
1560 	/* DBG_GRC_PARAM_DUMP_ILT */
1561 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1562 
1563 	/* DBG_GRC_PARAM_DUMP_RSS */
1564 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1565 
1566 	/* DBG_GRC_PARAM_DUMP_CAU */
1567 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1568 
1569 	/* DBG_GRC_PARAM_DUMP_QM */
1570 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1571 
1572 	/* DBG_GRC_PARAM_DUMP_MCP */
1573 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1574 
1575 	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1576 	{{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1577 
1578 	/* DBG_GRC_PARAM_DUMP_CFC */
1579 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1580 
1581 	/* DBG_GRC_PARAM_DUMP_IGU */
1582 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1583 
1584 	/* DBG_GRC_PARAM_DUMP_BRB */
1585 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1586 
1587 	/* DBG_GRC_PARAM_DUMP_BTB */
1588 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1589 
1590 	/* DBG_GRC_PARAM_DUMP_BMB */
1591 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1592 
1593 	/* DBG_GRC_PARAM_DUMP_NIG */
1594 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1595 
1596 	/* DBG_GRC_PARAM_DUMP_MULD */
1597 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1598 
1599 	/* DBG_GRC_PARAM_DUMP_PRS */
1600 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1601 
1602 	/* DBG_GRC_PARAM_DUMP_DMAE */
1603 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1604 
1605 	/* DBG_GRC_PARAM_DUMP_TM */
1606 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1607 
1608 	/* DBG_GRC_PARAM_DUMP_SDM */
1609 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1610 
1611 	/* DBG_GRC_PARAM_DUMP_DIF */
1612 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1613 
1614 	/* DBG_GRC_PARAM_DUMP_STATIC */
1615 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1616 
1617 	/* DBG_GRC_PARAM_UNSTALL */
1618 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1619 
1620 	/* DBG_GRC_PARAM_NUM_LCIDS */
1621 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1622 	 MAX_LCIDS, MAX_LCIDS},
1623 
1624 	/* DBG_GRC_PARAM_NUM_LTIDS */
1625 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1626 	 MAX_LTIDS, MAX_LTIDS},
1627 
1628 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1629 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1630 
1631 	/* DBG_GRC_PARAM_CRASH */
1632 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1633 
1634 	/* DBG_GRC_PARAM_PARITY_SAFE */
1635 	{{0, 0, 0}, 0, 1, false, false, 1, 0},
1636 
1637 	/* DBG_GRC_PARAM_DUMP_CM */
1638 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1639 
1640 	/* DBG_GRC_PARAM_DUMP_PHY */
1641 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1642 
1643 	/* DBG_GRC_PARAM_NO_MCP */
1644 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1645 
1646 	/* DBG_GRC_PARAM_NO_FW_VER */
1647 	{{0, 0, 0}, 0, 1, false, false, 0, 0}
1648 };
1649 
1650 static struct rss_mem_defs s_rss_mem_defs[] = {
1651 	{ "rss_mem_cid", "rss_cid", 0, 32,
1652 	  {256, 320, 512} },
1653 
1654 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1655 	  {128, 208, 257} },
1656 
1657 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1658 	  {128, 208, 257} },
1659 
1660 	{ "rss_mem_info", "rss_info", 3072, 16,
1661 	  {128, 208, 256} },
1662 
1663 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1664 	  {16384, 26624, 32768} }
1665 };
1666 
1667 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1668 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1669 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1670 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1671 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1672 };
1673 
1674 static struct big_ram_defs s_big_ram_defs[] = {
1675 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1676 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1677 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1678 	  {153600, 180224, 282624} },
1679 
1680 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1681 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1682 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1683 	  {92160, 117760, 168960} },
1684 
1685 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1686 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1687 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1688 	  {36864, 36864, 36864} }
1689 };
1690 
1691 static struct reset_reg_defs s_reset_regs_defs[] = {
1692 	/* DBG_RESET_REG_MISCS_PL_UA */
1693 	{ MISCS_REG_RESET_PL_UA,
1694 	  {true, true, true}, {0x0, 0x0, 0x0} },
1695 
1696 	/* DBG_RESET_REG_MISCS_PL_HV */
1697 	{ MISCS_REG_RESET_PL_HV,
1698 	  {true, true, true}, {0x0, 0x400, 0x600} },
1699 
1700 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1701 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1702 	  {false, true, true}, {0x0, 0x0, 0x0} },
1703 
1704 	/* DBG_RESET_REG_MISC_PL_UA */
1705 	{ MISC_REG_RESET_PL_UA,
1706 	  {true, true, true}, {0x0, 0x0, 0x0} },
1707 
1708 	/* DBG_RESET_REG_MISC_PL_HV */
1709 	{ MISC_REG_RESET_PL_HV,
1710 	  {true, true, true}, {0x0, 0x0, 0x0} },
1711 
1712 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1713 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1714 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1715 
1716 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1717 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1718 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1719 
1720 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1721 	{ MISC_REG_RESET_PL_PDA_VAUX,
1722 	  {true, true, true}, {0x2, 0x2, 0x2} },
1723 };
1724 
1725 static struct phy_defs s_phy_defs[] = {
1726 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1727 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1728 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1729 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1730 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1731 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1732 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1733 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1734 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1735 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1736 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1737 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1738 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1739 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1740 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1741 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1742 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1743 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1744 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1745 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1746 };
1747 
1748 /**************************** Private Functions ******************************/
1749 
/* Reads and returns a single dword from the specified unaligned buffer.
 * The dword is copied out with memcpy to avoid unaligned access faults.
 */
1751 static u32 qed_read_unaligned_dword(u8 *buf)
1752 {
1753 	u32 dword;
1754 
1755 	memcpy((u8 *)&dword, buf, sizeof(dword));
1756 	return dword;
1757 }
1758 
1759 /* Returns the value of the specified GRC param */
1760 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1761 			     enum dbg_grc_params grc_param)
1762 {
1763 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1764 
1765 	return dev_data->grc.param_val[grc_param];
1766 }
1767 
1768 /* Initializes the GRC parameters */
1769 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1770 {
1771 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1772 
1773 	if (!dev_data->grc.params_initialized) {
1774 		qed_dbg_grc_set_params_default(p_hwfn);
1775 		dev_data->grc.params_initialized = 1;
1776 	}
1777 }
1778 
1779 /* Initializes debug data for the specified device */
1780 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1781 					struct qed_ptt *p_ptt)
1782 {
1783 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1784 
1785 	if (dev_data->initialized)
1786 		return DBG_STATUS_OK;
1787 
1788 	if (QED_IS_K2(p_hwfn->cdev)) {
1789 		dev_data->chip_id = CHIP_K2;
1790 		dev_data->mode_enable[MODE_K2] = 1;
1791 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1792 		dev_data->chip_id = CHIP_BB;
1793 		dev_data->mode_enable[MODE_BB] = 1;
1794 	} else {
1795 		return DBG_STATUS_UNKNOWN_CHIP;
1796 	}
1797 
1798 	dev_data->platform_id = PLATFORM_ASIC;
1799 	dev_data->mode_enable[MODE_ASIC] = 1;
1800 
1801 	/* Initializes the GRC parameters */
1802 	qed_dbg_grc_init_params(p_hwfn);
1803 
1804 	dev_data->use_dmae = true;
1805 	dev_data->num_regs_read = 0;
1806 	dev_data->initialized = 1;
1807 
1808 	return DBG_STATUS_OK;
1809 }
1810 
1811 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1812 						    enum block_id block_id)
1813 {
1814 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1815 
1816 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1817 						       MAX_CHIP_IDS +
1818 						       dev_data->chip_id];
1819 }
1820 
1821 /* Reads the FW info structure for the specified Storm from the chip,
1822  * and writes it to the specified fw_info pointer.
1823  */
1824 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1825 			     struct qed_ptt *p_ptt,
1826 			     u8 storm_id, struct fw_info *fw_info)
1827 {
1828 	struct storm_defs *storm = &s_storm_defs[storm_id];
1829 	struct fw_info_location fw_info_location;
1830 	u32 addr, i, *dest;
1831 
1832 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1833 	memset(fw_info, 0, sizeof(*fw_info));
1834 
1835 	/* Read first the address that points to fw_info location.
1836 	 * The address is located in the last line of the Storm RAM.
1837 	 */
1838 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1839 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1840 	       sizeof(fw_info_location);
1841 	dest = (u32 *)&fw_info_location;
1842 
1843 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1844 	     i++, addr += BYTES_IN_DWORD)
1845 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1846 
1847 	/* Read FW version info from Storm RAM */
1848 	if (fw_info_location.size > 0 && fw_info_location.size <=
1849 	    sizeof(*fw_info)) {
1850 		addr = fw_info_location.grc_addr;
1851 		dest = (u32 *)fw_info;
1852 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1853 		     i++, addr += BYTES_IN_DWORD)
1854 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1855 	}
1856 }
1857 
1858 /* Dumps the specified string to the specified buffer.
1859  * Returns the dumped size in bytes.
1860  */
1861 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1862 {
1863 	if (dump)
1864 		strcpy(dump_buf, str);
1865 
1866 	return (u32)strlen(str) + 1;
1867 }
1868 
1869 /* Dumps zeros to align the specified buffer to dwords.
1870  * Returns the dumped size in bytes.
1871  */
1872 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1873 {
1874 	u8 offset_in_dword, align_size;
1875 
1876 	offset_in_dword = (u8)(byte_offset & 0x3);
1877 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1878 
1879 	if (dump && align_size)
1880 		memset(dump_buf, 0, align_size);
1881 
1882 	return align_size;
1883 }
1884 
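/* Dump params are written as a packed, dword-aligned stream: the param name
 * as a NUL-terminated string, a single type byte (1 = string, 0 = numeric),
 * then the value - either another NUL-terminated string or, after zero
 * padding to the next dword boundary, a raw value dword.
 * Illustrative example (not taken from a real dump): the numeric param
 * ("pci-func", 0) is encoded as "pci-func\0", a 0x00 type byte, two padding
 * bytes and a single value dword.
 */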
1885 /* Writes the specified string param to the specified buffer.
1886  * Returns the dumped size in dwords.
1887  */
1888 static u32 qed_dump_str_param(u32 *dump_buf,
1889 			      bool dump,
1890 			      const char *param_name, const char *param_val)
1891 {
1892 	char *char_buf = (char *)dump_buf;
1893 	u32 offset = 0;
1894 
1895 	/* Dump param name */
1896 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1897 
1898 	/* Indicate a string param value */
1899 	if (dump)
1900 		*(char_buf + offset) = 1;
1901 	offset++;
1902 
1903 	/* Dump param value */
1904 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1905 
1906 	/* Align buffer to next dword */
1907 	offset += qed_dump_align(char_buf + offset, dump, offset);
1908 
1909 	return BYTES_TO_DWORDS(offset);
1910 }
1911 
1912 /* Writes the specified numeric param to the specified buffer.
1913  * Returns the dumped size in dwords.
1914  */
1915 static u32 qed_dump_num_param(u32 *dump_buf,
1916 			      bool dump, const char *param_name, u32 param_val)
1917 {
1918 	char *char_buf = (char *)dump_buf;
1919 	u32 offset = 0;
1920 
1921 	/* Dump param name */
1922 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1923 
1924 	/* Indicate a numeric param value */
1925 	if (dump)
1926 		*(char_buf + offset) = 0;
1927 	offset++;
1928 
1929 	/* Align buffer to next dword */
1930 	offset += qed_dump_align(char_buf + offset, dump, offset);
1931 
1932 	/* Dump param value (and change offset from bytes to dwords) */
1933 	offset = BYTES_TO_DWORDS(offset);
1934 	if (dump)
1935 		*(dump_buf + offset) = param_val;
1936 	offset++;
1937 
1938 	return offset;
1939 }
1940 
1941 /* Reads the FW version and writes it as a param to the specified buffer.
1942  * Returns the dumped size in dwords.
1943  */
1944 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1945 				 struct qed_ptt *p_ptt,
1946 				 u32 *dump_buf, bool dump)
1947 {
1948 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1949 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1950 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1951 	struct fw_info fw_info = { {0}, {0} };
1952 	u32 offset = 0;
1953 
1954 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1955 		/* Read FW image/version from PRAM in a non-reset SEMI */
1956 		bool found = false;
1957 		u8 storm_id;
1958 
1959 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1960 		     storm_id++) {
1961 			struct storm_defs *storm = &s_storm_defs[storm_id];
1962 
1963 			/* Read FW version/image */
1964 			if (dev_data->block_in_reset[storm->block_id])
1965 				continue;
1966 
1967 			/* Read FW info for the current Storm */
1968 			qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1969 
1970 			/* Create FW version/image strings */
1971 			if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1972 				     "%d_%d_%d_%d", fw_info.ver.num.major,
1973 				     fw_info.ver.num.minor, fw_info.ver.num.rev,
1974 				     fw_info.ver.num.eng) < 0)
1975 				DP_NOTICE(p_hwfn,
1976 					  "Unexpected debug error: invalid FW version string\n");
1977 			switch (fw_info.ver.image_id) {
1978 			case FW_IMG_MAIN:
1979 				strcpy(fw_img_str, "main");
1980 				break;
1981 			default:
1982 				strcpy(fw_img_str, "unknown");
1983 				break;
1984 			}
1985 
1986 			found = true;
1987 		}
1988 	}
1989 
1990 	/* Dump FW version, image and timestamp */
1991 	offset += qed_dump_str_param(dump_buf + offset,
1992 				     dump, "fw-version", fw_ver_str);
1993 	offset += qed_dump_str_param(dump_buf + offset,
1994 				     dump, "fw-image", fw_img_str);
1995 	offset += qed_dump_num_param(dump_buf + offset,
1996 				     dump,
1997 				     "fw-timestamp", fw_info.ver.timestamp);
1998 
1999 	return offset;
2000 }
2001 
2002 /* Reads the MFW version and writes it as a param to the specified buffer.
2003  * Returns the dumped size in dwords.
2004  */
2005 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2006 				  struct qed_ptt *p_ptt,
2007 				  u32 *dump_buf, bool dump)
2008 {
2009 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2010 
2011 	if (dump &&
2012 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2013 		u32 global_section_offsize, global_section_addr, mfw_ver;
2014 		u32 public_data_addr, global_section_offsize_addr;
2015 
2016 		/* Find MCP public data GRC address. Needs to be ORed with
2017 		 * MCP_REG_SCRATCH due to a HW bug.
2018 		 */
2019 		public_data_addr = qed_rd(p_hwfn,
2020 					  p_ptt,
2021 					  MISC_REG_SHARED_MEM_ADDR) |
2022 				   MCP_REG_SCRATCH;
2023 
2024 		/* Find MCP public global section offset */
2025 		global_section_offsize_addr = public_data_addr +
2026 					      offsetof(struct mcp_public_data,
2027 						       sections) +
2028 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2029 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2030 						global_section_offsize_addr);
2031 		global_section_addr =
2032 			MCP_REG_SCRATCH +
2033 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2034 
2035 		/* Read MFW version from MCP public global section */
2036 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2037 				 global_section_addr +
2038 				 offsetof(struct public_global, mfw_ver));
2039 
2040 		/* Dump MFW version param */
2041 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2042 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2043 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2044 			DP_NOTICE(p_hwfn,
2045 				  "Unexpected debug error: invalid MFW version string\n");
2046 	}
2047 
2048 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2049 }
2050 
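/* A section header is itself just a numeric param whose name is the section
 * name and whose value is the number of params that follow it, e.g. the
 * "global_params" header written by qed_dump_common_global_params() below.
 */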
2051 /* Writes a section header to the specified buffer.
2052  * Returns the dumped size in dwords.
2053  */
2054 static u32 qed_dump_section_hdr(u32 *dump_buf,
2055 				bool dump, const char *name, u32 num_params)
2056 {
2057 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2058 }
2059 
2060 /* Writes the common global params to the specified buffer.
2061  * Returns the dumped size in dwords.
2062  */
2063 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2064 					 struct qed_ptt *p_ptt,
2065 					 u32 *dump_buf,
2066 					 bool dump,
2067 					 u8 num_specific_global_params)
2068 {
2069 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2070 	u32 offset = 0;
2071 	u8 num_params;
2072 
2073 	/* Dump global params section header */
2074 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2075 	offset += qed_dump_section_hdr(dump_buf + offset,
2076 				       dump, "global_params", num_params);
2077 
2078 	/* Store params */
2079 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2080 	offset += qed_dump_mfw_ver_param(p_hwfn,
2081 					 p_ptt, dump_buf + offset, dump);
2082 	offset += qed_dump_num_param(dump_buf + offset,
2083 				     dump, "tools-version", TOOLS_VERSION);
2084 	offset += qed_dump_str_param(dump_buf + offset,
2085 				     dump,
2086 				     "chip",
2087 				     s_chip_defs[dev_data->chip_id].name);
2088 	offset += qed_dump_str_param(dump_buf + offset,
2089 				     dump,
2090 				     "platform",
2091 				     s_platform_defs[dev_data->platform_id].
2092 				     name);
2093 	offset +=
2094 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2095 			       p_hwfn->abs_pf_id);
2096 
2097 	return offset;
2098 }
2099 
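/* The dword written after the "last" section header is ~crc32(0xffffffff,
 * buf, len), i.e. the standard CRC-32 of all dwords dumped so far (including
 * the "last" header itself), so that parsing tools can verify the dump's
 * integrity.
 */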
2100 /* Writes the "last" section (including CRC) to the specified buffer at the
2101  * given offset. Returns the dumped size in dwords.
2102  */
2103 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2104 {
2105 	u32 start_offset = offset;
2106 
2107 	/* Dump CRC section header */
2108 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2109 
2110 	/* Calculate CRC32 and add it to the dword after the "last" section */
2111 	if (dump)
2112 		*(dump_buf + offset) = ~crc32(0xffffffff,
2113 					      (u8 *)dump_buf,
2114 					      DWORDS_TO_BYTES(offset));
2115 
2116 	offset++;
2117 
2118 	return offset - start_offset;
2119 }
2120 
/* Updates the reset state of all blocks */
2122 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2123 					  struct qed_ptt *p_ptt)
2124 {
2125 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2126 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2127 	u32 i;
2128 
2129 	/* Read reset registers */
2130 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2131 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2132 			reg_val[i] = qed_rd(p_hwfn,
2133 					    p_ptt, s_reset_regs_defs[i].addr);
2134 
2135 	/* Check if blocks are in reset */
2136 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2137 		struct block_defs *block = s_block_defs[i];
2138 
2139 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2140 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2141 	}
2142 }
2143 
2144 /* Enable / disable the Debug block */
2145 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2146 				     struct qed_ptt *p_ptt, bool enable)
2147 {
2148 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2149 }
2150 
2151 /* Resets the Debug block */
2152 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2153 				    struct qed_ptt *p_ptt)
2154 {
2155 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2156 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2157 
2158 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2159 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2160 	new_reset_reg_val =
2161 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2162 
2163 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2164 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2165 }
2166 
2167 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2168 				     struct qed_ptt *p_ptt,
2169 				     enum dbg_bus_frame_modes mode)
2170 {
2171 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2172 }
2173 
2174 /* Enable / disable Debug Bus clients according to the specified mask
2175  * (1 = enable, 0 = disable).
2176  */
2177 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2178 				   struct qed_ptt *p_ptt, u32 client_mask)
2179 {
2180 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2181 }
2182 
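/* Recursively evaluates the modes tree at *modes_buf_offset and returns true
 * if the currently enabled modes satisfy it. The tree is a byte stream in
 * prefix notation: values below MAX_INIT_MODE_OPS are operators (NOT takes
 * one operand, OR/AND take two), any other value is a leaf that tests
 * mode_enable[value - MAX_INIT_MODE_OPS]. Illustrative (hypothetical)
 * encoding: {INIT_MODE_OP_AND, leaf(MODE_ASIC), leaf(MODE_K2)} evaluates to
 * "ASIC && K2".
 */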
2183 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2184 {
2185 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2186 	bool arg1, arg2;
2187 	const u32 *ptr;
2188 	u8 tree_val;
2189 
2190 	/* Get next element from modes tree buffer */
2191 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2192 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2193 
2194 	switch (tree_val) {
2195 	case INIT_MODE_OP_NOT:
2196 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2197 	case INIT_MODE_OP_OR:
2198 	case INIT_MODE_OP_AND:
2199 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2200 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2201 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2202 							arg2) : (arg1 && arg2);
2203 	default:
2204 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2205 	}
2206 }
2207 
2208 /* Returns true if the specified entity (indicated by GRC param) should be
2209  * included in the dump, false otherwise.
2210  */
2211 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2212 				enum dbg_grc_params grc_param)
2213 {
2214 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2215 }
2216 
/* Returns true if the specified Storm should be included in the dump, false
2218  * otherwise.
2219  */
2220 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2221 				      enum dbg_storms storm)
2222 {
2223 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2224 }
2225 
2226 /* Returns true if the specified memory should be included in the dump, false
2227  * otherwise.
2228  */
2229 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2230 				    enum block_id block_id, u8 mem_group_id)
2231 {
2232 	struct block_defs *block = s_block_defs[block_id];
2233 	u8 i;
2234 
2235 	/* Check Storm match */
2236 	if (block->associated_to_storm &&
2237 	    !qed_grc_is_storm_included(p_hwfn,
2238 				       (enum dbg_storms)block->storm_id))
2239 		return false;
2240 
2241 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2242 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2243 
2244 		if (mem_group_id == big_ram->mem_group_id ||
2245 		    mem_group_id == big_ram->ram_mem_group_id)
2246 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2247 	}
2248 
2249 	switch (mem_group_id) {
2250 	case MEM_GROUP_PXP_ILT:
2251 	case MEM_GROUP_PXP_MEM:
2252 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2253 	case MEM_GROUP_RAM:
2254 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2255 	case MEM_GROUP_PBUF:
2256 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2257 	case MEM_GROUP_CAU_MEM:
2258 	case MEM_GROUP_CAU_SB:
2259 	case MEM_GROUP_CAU_PI:
2260 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2261 	case MEM_GROUP_QM_MEM:
2262 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2263 	case MEM_GROUP_CFC_MEM:
2264 	case MEM_GROUP_CONN_CFC_MEM:
2265 	case MEM_GROUP_TASK_CFC_MEM:
2266 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2267 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2268 	case MEM_GROUP_IGU_MEM:
2269 	case MEM_GROUP_IGU_MSIX:
2270 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2271 	case MEM_GROUP_MULD_MEM:
2272 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2273 	case MEM_GROUP_PRS_MEM:
2274 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2275 	case MEM_GROUP_DMAE_MEM:
2276 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2277 	case MEM_GROUP_TM_MEM:
2278 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2279 	case MEM_GROUP_SDM_MEM:
2280 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2281 	case MEM_GROUP_TDIF_CTX:
2282 	case MEM_GROUP_RDIF_CTX:
2283 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2284 	case MEM_GROUP_CM_MEM:
2285 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2286 	case MEM_GROUP_IOR:
2287 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2288 	default:
2289 		return true;
2290 	}
2291 }
2292 
/* Stalls or un-stalls all Storms */
2294 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2295 				 struct qed_ptt *p_ptt, bool stall)
2296 {
2297 	u32 reg_addr;
2298 	u8 storm_id;
2299 
2300 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2301 		if (!qed_grc_is_storm_included(p_hwfn,
2302 					       (enum dbg_storms)storm_id))
2303 			continue;
2304 
2305 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2306 		    SEM_FAST_REG_STALL_0_BB_K2;
2307 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2308 	}
2309 
2310 	msleep(STALL_DELAY_MS);
2311 }
2312 
2313 /* Takes all blocks out of reset */
2314 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2315 				   struct qed_ptt *p_ptt)
2316 {
2317 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2318 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2319 	u32 block_id, i;
2320 
2321 	/* Fill reset regs values */
2322 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2323 		struct block_defs *block = s_block_defs[block_id];
2324 
2325 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2326 		    block->unreset)
2327 			reg_val[block->reset_reg] |=
2328 			    BIT(block->reset_bit_offset);
2329 	}
2330 
2331 	/* Write reset registers */
2332 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2333 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2334 			continue;
2335 
2336 		reg_val[i] |=
2337 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2338 
2339 		if (reg_val[i])
2340 			qed_wr(p_hwfn,
2341 			       p_ptt,
2342 			       s_reset_regs_defs[i].addr +
2343 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2344 	}
2345 }
2346 
2347 /* Returns the attention block data of the specified block */
2348 static const struct dbg_attn_block_type_data *
2349 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2350 {
2351 	const struct dbg_attn_block *base_attn_block_arr =
2352 		(const struct dbg_attn_block *)
2353 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2354 
2355 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2356 }
2357 
2358 /* Returns the attention registers of the specified block */
2359 static const struct dbg_attn_reg *
2360 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2361 			u8 *num_attn_regs)
2362 {
2363 	const struct dbg_attn_block_type_data *block_type_data =
2364 		qed_get_block_attn_data(block_id, attn_type);
2365 
2366 	*num_attn_regs = block_type_data->num_regs;
2367 
2368 	return &((const struct dbg_attn_reg *)
2369 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2370 							  regs_offset];
2371 }
2372 
2373 /* For each block, clear the status of all parities */
2374 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2375 				   struct qed_ptt *p_ptt)
2376 {
2377 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2378 	const struct dbg_attn_reg *attn_reg_arr;
2379 	u8 reg_idx, num_attn_regs;
2380 	u32 block_id;
2381 
2382 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2383 		if (dev_data->block_in_reset[block_id])
2384 			continue;
2385 
2386 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2387 						       ATTN_TYPE_PARITY,
2388 						       &num_attn_regs);
2389 
2390 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2391 			const struct dbg_attn_reg *reg_data =
2392 				&attn_reg_arr[reg_idx];
2393 			u16 modes_buf_offset;
2394 			bool eval_mode;
2395 
2396 			/* Check mode */
2397 			eval_mode = GET_FIELD(reg_data->mode.data,
2398 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2399 			modes_buf_offset =
2400 				GET_FIELD(reg_data->mode.data,
2401 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2402 
2403 			/* If Mode match: clear parity status */
2404 			if (!eval_mode ||
2405 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2406 				qed_rd(p_hwfn, p_ptt,
2407 				       DWORDS_TO_BYTES(reg_data->
2408 						       sts_clr_address));
2409 		}
2410 	}
2411 }
2412 
2413 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2414  * The following parameters are dumped:
2415  * - count:	 no. of dumped entries
2416  * - split:	 split type
2417  * - id:	 split ID (dumped only if split_id >= 0)
 * - param_name: user parameter name and value (dumped only if both
 *		 param_name and param_val are not NULL).
2420  */
2421 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2422 				 bool dump,
2423 				 u32 num_reg_entries,
2424 				 const char *split_type,
2425 				 int split_id,
2426 				 const char *param_name, const char *param_val)
2427 {
2428 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2429 	u32 offset = 0;
2430 
2431 	offset += qed_dump_section_hdr(dump_buf + offset,
2432 				       dump, "grc_regs", num_params);
2433 	offset += qed_dump_num_param(dump_buf + offset,
2434 				     dump, "count", num_reg_entries);
2435 	offset += qed_dump_str_param(dump_buf + offset,
2436 				     dump, "split", split_type);
2437 	if (split_id >= 0)
2438 		offset += qed_dump_num_param(dump_buf + offset,
2439 					     dump, "id", split_id);
2440 	if (param_name && param_val)
2441 		offset += qed_dump_str_param(dump_buf + offset,
2442 					     dump, param_name, param_val);
2443 
2444 	return offset;
2445 }
2446 
2447 /* Reads the specified registers into the specified buffer.
2448  * The addr and len arguments are specified in dwords.
2449  */
2450 void qed_read_regs(struct qed_hwfn *p_hwfn,
2451 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2452 {
2453 	u32 i;
2454 
2455 	for (i = 0; i < len; i++)
2456 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2457 }
2458 
2459 /* Dumps the GRC registers in the specified address range.
2460  * Returns the dumped size in dwords.
2461  * The addr and len arguments are specified in dwords.
2462  */
2463 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2464 				   struct qed_ptt *p_ptt,
2465 				   u32 *dump_buf,
2466 				   bool dump, u32 addr, u32 len, bool wide_bus)
2467 {
2468 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2469 
2470 	if (!dump)
2471 		return len;
2472 
2473 	/* Print log if needed */
2474 	dev_data->num_regs_read += len;
2475 	if (dev_data->num_regs_read >=
2476 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2477 		DP_VERBOSE(p_hwfn,
2478 			   QED_MSG_DEBUG,
2479 			   "Dumping %d registers...\n",
2480 			   dev_data->num_regs_read);
2481 		dev_data->num_regs_read = 0;
2482 	}
2483 
2484 	/* Try reading using DMAE */
2485 	if (dev_data->use_dmae &&
2486 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2487 	     wide_bus)) {
2488 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2489 				       (u64)(uintptr_t)(dump_buf), len, 0))
2490 			return len;
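		/* DMAE read failed - disable DMAE for the rest of this dump
		 * and fall back to slower GRC window reads.
		 */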
2491 		dev_data->use_dmae = 0;
2492 		DP_VERBOSE(p_hwfn,
2493 			   QED_MSG_DEBUG,
2494 			   "Failed reading from chip using DMAE, using GRC instead\n");
2495 	}
2496 
2497 	/* Read registers */
2498 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2499 
2500 	return len;
2501 }
2502 
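/* Each dumped register entry starts with a single header dword packing the
 * GRC address (in dwords) in its low bits and the entry length (in dwords)
 * from bit REG_DUMP_LEN_SHIFT upwards, followed by the register data itself.
 */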
2503 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2504  * The addr and len arguments are specified in dwords.
2505  */
2506 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2507 				      bool dump, u32 addr, u32 len)
2508 {
2509 	if (dump)
2510 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2511 
2512 	return 1;
2513 }
2514 
2515 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2516  * The addr and len arguments are specified in dwords.
2517  */
2518 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2519 				  struct qed_ptt *p_ptt,
2520 				  u32 *dump_buf,
2521 				  bool dump, u32 addr, u32 len, bool wide_bus)
2522 {
2523 	u32 offset = 0;
2524 
2525 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2526 	offset += qed_grc_dump_addr_range(p_hwfn,
2527 					  p_ptt,
2528 					  dump_buf + offset,
2529 					  dump, addr, len, wide_bus);
2530 
2531 	return offset;
2532 }
2533 
2534 /* Dumps GRC registers sequence with skip cycle.
2535  * Returns the dumped size in dwords.
2536  * - addr:	start GRC address in dwords
2537  * - total_len:	total no. of dwords to dump
2538  * - read_len:	no. consecutive dwords to read
2539  * - skip_len:	no. of dwords to skip (and fill with zeros)
2540  */
2541 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2542 				       struct qed_ptt *p_ptt,
2543 				       u32 *dump_buf,
2544 				       bool dump,
2545 				       u32 addr,
2546 				       u32 total_len,
2547 				       u32 read_len, u32 skip_len)
2548 {
2549 	u32 offset = 0, reg_offset = 0;
2550 
2551 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2552 
2553 	if (!dump)
2554 		return offset + total_len;
2555 
2556 	while (reg_offset < total_len) {
2557 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2558 
2559 		offset += qed_grc_dump_addr_range(p_hwfn,
2560 						  p_ptt,
2561 						  dump_buf + offset,
2562 						  dump, addr, curr_len, false);
2563 		reg_offset += curr_len;
2564 		addr += curr_len;
2565 
2566 		if (reg_offset < total_len) {
			curr_len = min_t(u32, skip_len, total_len - reg_offset);
2568 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2569 			offset += curr_len;
2570 			reg_offset += curr_len;
2571 			addr += curr_len;
2572 		}
2573 	}
2574 
2575 	return offset;
2576 }
2577 
/* Dumps GRC register entries. Returns the dumped size in dwords. */
2579 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2580 				     struct qed_ptt *p_ptt,
2581 				     struct dbg_array input_regs_arr,
2582 				     u32 *dump_buf,
2583 				     bool dump,
2584 				     bool block_enable[MAX_BLOCK_ID],
2585 				     u32 *num_dumped_reg_entries)
2586 {
2587 	u32 i, offset = 0, input_offset = 0;
2588 	bool mode_match = true;
2589 
2590 	*num_dumped_reg_entries = 0;
2591 
2592 	while (input_offset < input_regs_arr.size_in_dwords) {
2593 		const struct dbg_dump_cond_hdr *cond_hdr =
2594 		    (const struct dbg_dump_cond_hdr *)
2595 		    &input_regs_arr.ptr[input_offset++];
2596 		u16 modes_buf_offset;
2597 		bool eval_mode;
2598 
2599 		/* Check mode/block */
2600 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2601 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2602 		if (eval_mode) {
2603 			modes_buf_offset =
2604 				GET_FIELD(cond_hdr->mode.data,
2605 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2606 			mode_match = qed_is_mode_match(p_hwfn,
2607 						       &modes_buf_offset);
2608 		}
2609 
2610 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2611 			input_offset += cond_hdr->data_size;
2612 			continue;
2613 		}
2614 
2615 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2616 			const struct dbg_dump_reg *reg =
2617 			    (const struct dbg_dump_reg *)
2618 			    &input_regs_arr.ptr[input_offset];
2619 			u32 addr, len;
2620 			bool wide_bus;
2621 
2622 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2623 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2624 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2625 			offset += qed_grc_dump_reg_entry(p_hwfn,
2626 							 p_ptt,
2627 							 dump_buf + offset,
2628 							 dump,
2629 							 addr,
2630 							 len,
2631 							 wide_bus);
2632 			(*num_dumped_reg_entries)++;
2633 		}
2634 	}
2635 
2636 	return offset;
2637 }
2638 
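/* The registers header is written in two passes: first with dump=false, only
 * to compute its size so that the entries can be dumped right after it, and
 * again at the end with the actual number of dumped entries.
 */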
/* Dumps the GRC registers of a single split (header + register entries).
 * Returns the dumped size in dwords.
 */
2640 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2641 				   struct qed_ptt *p_ptt,
2642 				   struct dbg_array input_regs_arr,
2643 				   u32 *dump_buf,
2644 				   bool dump,
2645 				   bool block_enable[MAX_BLOCK_ID],
2646 				   const char *split_type_name,
2647 				   u32 split_id,
2648 				   const char *param_name,
2649 				   const char *param_val)
2650 {
2651 	u32 num_dumped_reg_entries, offset;
2652 
2653 	/* Calculate register dump header size (and skip it for now) */
2654 	offset = qed_grc_dump_regs_hdr(dump_buf,
2655 				       false,
2656 				       0,
2657 				       split_type_name,
2658 				       split_id, param_name, param_val);
2659 
2660 	/* Dump registers */
2661 	offset += qed_grc_dump_regs_entries(p_hwfn,
2662 					    p_ptt,
2663 					    input_regs_arr,
2664 					    dump_buf + offset,
2665 					    dump,
2666 					    block_enable,
2667 					    &num_dumped_reg_entries);
2668 
2669 	/* Write register dump header */
2670 	if (dump && num_dumped_reg_entries > 0)
2671 		qed_grc_dump_regs_hdr(dump_buf,
2672 				      dump,
2673 				      num_dumped_reg_entries,
2674 				      split_type_name,
2675 				      split_id, param_name, param_val);
2676 
2677 	return num_dumped_reg_entries > 0 ? offset : 0;
2678 }
2679 
2680 /* Dumps registers according to the input registers array. Returns the dumped
2681  * size in dwords.
2682  */
2683 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2684 				  struct qed_ptt *p_ptt,
2685 				  u32 *dump_buf,
2686 				  bool dump,
2687 				  bool block_enable[MAX_BLOCK_ID],
2688 				  const char *param_name, const char *param_val)
2689 {
2690 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2691 	struct chip_platform_defs *chip_platform;
2692 	u32 offset = 0, input_offset = 0;
2693 	struct chip_defs *chip;
2694 	u8 port_id, pf_id, vf_id;
2695 	u16 fid;
2696 
2697 	chip = &s_chip_defs[dev_data->chip_id];
2698 	chip_platform = &chip->per_platform[dev_data->platform_id];
2699 
2700 	while (input_offset <
2701 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2702 		const struct dbg_dump_split_hdr *split_hdr;
2703 		struct dbg_array curr_input_regs_arr;
2704 		u32 split_data_size;
2705 		u8 split_type_id;
2706 
2707 		split_hdr =
2708 			(const struct dbg_dump_split_hdr *)
2709 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2710 		split_type_id =
2711 			GET_FIELD(split_hdr->hdr,
2712 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2713 		split_data_size =
2714 			GET_FIELD(split_hdr->hdr,
2715 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2716 		curr_input_regs_arr.ptr =
2717 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2718 		curr_input_regs_arr.size_in_dwords = split_data_size;
2719 
2720 		switch (split_type_id) {
2721 		case SPLIT_TYPE_NONE:
2722 			offset += qed_grc_dump_split_data(p_hwfn,
2723 							  p_ptt,
2724 							  curr_input_regs_arr,
2725 							  dump_buf + offset,
2726 							  dump,
2727 							  block_enable,
2728 							  "eng",
2729 							  (u32)(-1),
2730 							  param_name,
2731 							  param_val);
2732 			break;
2733 
2734 		case SPLIT_TYPE_PORT:
2735 			for (port_id = 0; port_id < chip_platform->num_ports;
2736 			     port_id++) {
2737 				if (dump)
2738 					qed_port_pretend(p_hwfn, p_ptt,
2739 							 port_id);
2740 				offset +=
2741 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2742 							    curr_input_regs_arr,
2743 							    dump_buf + offset,
2744 							    dump, block_enable,
2745 							    "port", port_id,
2746 							    param_name,
2747 							    param_val);
2748 			}
2749 			break;
2750 
2751 		case SPLIT_TYPE_PF:
2752 		case SPLIT_TYPE_PORT_PF:
2753 			for (pf_id = 0; pf_id < chip_platform->num_pfs;
2754 			     pf_id++) {
2755 				u8 pfid_shift =
2756 					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2757 
2758 				if (dump) {
2759 					fid = pf_id << pfid_shift;
2760 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2761 				}
2762 
2763 				offset +=
2764 				    qed_grc_dump_split_data(p_hwfn,
2765 							    p_ptt,
2766 							    curr_input_regs_arr,
2767 							    dump_buf + offset,
2768 							    dump,
2769 							    block_enable,
2770 							    "pf",
2771 							    pf_id,
2772 							    param_name,
2773 							    param_val);
2774 			}
2775 			break;
2776 
2777 		case SPLIT_TYPE_VF:
2778 			for (vf_id = 0; vf_id < chip_platform->num_vfs;
2779 			     vf_id++) {
2780 				u8 vfvalid_shift =
2781 					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2782 				u8 vfid_shift =
2783 					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2784 
2785 				if (dump) {
2786 					fid = BIT(vfvalid_shift) |
2787 					      (vf_id << vfid_shift);
2788 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2789 				}
2790 
2791 				offset +=
2792 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2793 							    curr_input_regs_arr,
2794 							    dump_buf + offset,
2795 							    dump, block_enable,
2796 							    "vf", vf_id,
2797 							    param_name,
2798 							    param_val);
2799 			}
2800 			break;
2801 
2802 		default:
2803 			break;
2804 		}
2805 
2806 		input_offset += split_data_size;
2807 	}
2808 
2809 	/* Pretend to original PF */
2810 	if (dump) {
2811 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2812 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2813 	}
2814 
2815 	return offset;
2816 }
2817 
2818 /* Dump reset registers. Returns the dumped size in dwords. */
2819 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2820 				   struct qed_ptt *p_ptt,
2821 				   u32 *dump_buf, bool dump)
2822 {
2823 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2824 	u32 i, offset = 0, num_regs = 0;
2825 
2826 	/* Calculate header size */
2827 	offset += qed_grc_dump_regs_hdr(dump_buf,
2828 					false, 0, "eng", -1, NULL, NULL);
2829 
2830 	/* Write reset registers */
2831 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2832 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2833 			continue;
2834 
2835 		offset += qed_grc_dump_reg_entry(p_hwfn,
2836 						 p_ptt,
2837 						 dump_buf + offset,
2838 						 dump,
2839 						 BYTES_TO_DWORDS
2840 						 (s_reset_regs_defs[i].addr), 1,
2841 						 false);
2842 		num_regs++;
2843 	}
2844 
2845 	/* Write header */
2846 	if (dump)
2847 		qed_grc_dump_regs_hdr(dump_buf,
2848 				      true, num_regs, "eng", -1, NULL, NULL);
2849 
2850 	return offset;
2851 }
2852 
2853 /* Dump registers that are modified during GRC Dump and therefore must be
2854  * dumped first. Returns the dumped size in dwords.
2855  */
2856 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2857 				      struct qed_ptt *p_ptt,
2858 				      u32 *dump_buf, bool dump)
2859 {
2860 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2861 	u32 block_id, offset = 0, num_reg_entries = 0;
2862 	const struct dbg_attn_reg *attn_reg_arr;
2863 	u8 storm_id, reg_idx, num_attn_regs;
2864 
2865 	/* Calculate header size */
2866 	offset += qed_grc_dump_regs_hdr(dump_buf,
2867 					false, 0, "eng", -1, NULL, NULL);
2868 
2869 	/* Write parity registers */
2870 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2871 		if (dev_data->block_in_reset[block_id] && dump)
2872 			continue;
2873 
2874 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2875 						       ATTN_TYPE_PARITY,
2876 						       &num_attn_regs);
2877 
2878 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2879 			const struct dbg_attn_reg *reg_data =
2880 				&attn_reg_arr[reg_idx];
2881 			u16 modes_buf_offset;
2882 			bool eval_mode;
2883 			u32 addr;
2884 
2885 			/* Check mode */
2886 			eval_mode = GET_FIELD(reg_data->mode.data,
2887 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2888 			modes_buf_offset =
2889 				GET_FIELD(reg_data->mode.data,
2890 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2891 			if (eval_mode &&
2892 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2893 				continue;
2894 
2895 			/* Mode match: read & dump registers */
2896 			addr = reg_data->mask_address;
2897 			offset += qed_grc_dump_reg_entry(p_hwfn,
2898 							 p_ptt,
2899 							 dump_buf + offset,
2900 							 dump,
2901 							 addr,
2902 							 1, false);
2903 			addr = GET_FIELD(reg_data->data,
2904 					 DBG_ATTN_REG_STS_ADDRESS);
2905 			offset += qed_grc_dump_reg_entry(p_hwfn,
2906 							 p_ptt,
2907 							 dump_buf + offset,
2908 							 dump,
2909 							 addr,
2910 							 1, false);
2911 			num_reg_entries += 2;
2912 		}
2913 	}
2914 
2915 	/* Write Storm stall status registers */
2916 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2917 		struct storm_defs *storm = &s_storm_defs[storm_id];
2918 		u32 addr;
2919 
2920 		if (dev_data->block_in_reset[storm->block_id] && dump)
2921 			continue;
2922 
2923 		addr =
2924 		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2925 				    SEM_FAST_REG_STALLED);
2926 		offset += qed_grc_dump_reg_entry(p_hwfn,
2927 						 p_ptt,
2928 						 dump_buf + offset,
2929 						 dump,
2930 						 addr,
2931 						 1,
2932 						 false);
2933 		num_reg_entries++;
2934 	}
2935 
2936 	/* Write header */
2937 	if (dump)
2938 		qed_grc_dump_regs_hdr(dump_buf,
2939 				      true,
2940 				      num_reg_entries, "eng", -1, NULL, NULL);
2941 
2942 	return offset;
2943 }
2944 
2945 /* Dumps registers that can't be represented in the debug arrays */
2946 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2947 				     struct qed_ptt *p_ptt,
2948 				     u32 *dump_buf, bool dump)
2949 {
2950 	u32 offset = 0, addr;
2951 
2952 	offset += qed_grc_dump_regs_hdr(dump_buf,
2953 					dump, 2, "eng", -1, NULL, NULL);
2954 
	/* Dump RDIF/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is skipped
	 * and zero-filled).
	 */
2958 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2959 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2960 					      p_ptt,
2961 					      dump_buf + offset,
2962 					      dump,
2963 					      addr,
2964 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2965 					      7,
2966 					      1);
2967 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2968 	offset +=
2969 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2970 					p_ptt,
2971 					dump_buf + offset,
2972 					dump,
2973 					addr,
2974 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2975 					7,
2976 					1);
2977 
2978 	return offset;
2979 }
2980 
2981 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2982  * dwords. The following parameters are dumped:
2983  * - name:	   dumped only if it's not NULL.
2984  * - addr:	   in dwords, dumped only if name is NULL.
2985  * - len:	   in dwords, always dumped.
2986  * - width:	   dumped if it's not zero.
2987  * - packed:	   dumped only if it's not false.
2988  * - mem_group:	   always dumped.
2989  * - is_storm:	   true only if the memory is related to a Storm.
2990  * - storm_letter: valid only if is_storm is true.
2991  *
2992  */
2993 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2994 				u32 *dump_buf,
2995 				bool dump,
2996 				const char *name,
2997 				u32 addr,
2998 				u32 len,
2999 				u32 bit_width,
3000 				bool packed,
3001 				const char *mem_group,
3002 				bool is_storm, char storm_letter)
3003 {
3004 	u8 num_params = 3;
3005 	u32 offset = 0;
3006 	char buf[64];
3007 
3008 	if (!len)
3009 		DP_NOTICE(p_hwfn,
3010 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3011 
3012 	if (bit_width)
3013 		num_params++;
3014 	if (packed)
3015 		num_params++;
3016 
3017 	/* Dump section header */
3018 	offset += qed_dump_section_hdr(dump_buf + offset,
3019 				       dump, "grc_mem", num_params);
3020 
3021 	if (name) {
3022 		/* Dump name */
3023 		if (is_storm) {
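			/* Replace the '?' placeholder with the Storm letter,
			 * e.g. "TSTORM_" followed by the memory name.
			 */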
3024 			strcpy(buf, "?STORM_");
3025 			buf[0] = storm_letter;
3026 			strcpy(buf + strlen(buf), name);
3027 		} else {
3028 			strcpy(buf, name);
3029 		}
3030 
3031 		offset += qed_dump_str_param(dump_buf + offset,
3032 					     dump, "name", buf);
3033 	} else {
3034 		/* Dump address */
3035 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3036 
3037 		offset += qed_dump_num_param(dump_buf + offset,
3038 					     dump, "addr", addr_in_bytes);
3039 	}
3040 
3041 	/* Dump len */
3042 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3043 
3044 	/* Dump bit width */
3045 	if (bit_width)
3046 		offset += qed_dump_num_param(dump_buf + offset,
3047 					     dump, "width", bit_width);
3048 
3049 	/* Dump packed */
3050 	if (packed)
3051 		offset += qed_dump_num_param(dump_buf + offset,
3052 					     dump, "packed", 1);
3053 
3054 	/* Dump reg type */
3055 	if (is_storm) {
3056 		strcpy(buf, "?STORM_");
3057 		buf[0] = storm_letter;
3058 		strcpy(buf + strlen(buf), mem_group);
3059 	} else {
3060 		strcpy(buf, mem_group);
3061 	}
3062 
3063 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3064 
3065 	return offset;
3066 }
3067 
3068 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3069  * Returns the dumped size in dwords.
3070  * The addr and len arguments are specified in dwords.
3071  */
3072 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3073 			    struct qed_ptt *p_ptt,
3074 			    u32 *dump_buf,
3075 			    bool dump,
3076 			    const char *name,
3077 			    u32 addr,
3078 			    u32 len,
3079 			    bool wide_bus,
3080 			    u32 bit_width,
3081 			    bool packed,
3082 			    const char *mem_group,
3083 			    bool is_storm, char storm_letter)
3084 {
3085 	u32 offset = 0;
3086 
3087 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3088 				       dump_buf + offset,
3089 				       dump,
3090 				       name,
3091 				       addr,
3092 				       len,
3093 				       bit_width,
3094 				       packed,
3095 				       mem_group, is_storm, storm_letter);
3096 	offset += qed_grc_dump_addr_range(p_hwfn,
3097 					  p_ptt,
3098 					  dump_buf + offset,
3099 					  dump, addr, len, wide_bus);
3100 
3101 	return offset;
3102 }
3103 
/* Dumps GRC memory entries. Returns the dumped size in dwords. */
3105 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3106 				    struct qed_ptt *p_ptt,
3107 				    struct dbg_array input_mems_arr,
3108 				    u32 *dump_buf, bool dump)
3109 {
3110 	u32 i, offset = 0, input_offset = 0;
3111 	bool mode_match = true;
3112 
3113 	while (input_offset < input_mems_arr.size_in_dwords) {
3114 		const struct dbg_dump_cond_hdr *cond_hdr;
3115 		u16 modes_buf_offset;
3116 		u32 num_entries;
3117 		bool eval_mode;
3118 
3119 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3120 			   &input_mems_arr.ptr[input_offset++];
3121 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3122 
3123 		/* Check required mode */
3124 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3125 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3126 		if (eval_mode) {
3127 			modes_buf_offset =
3128 				GET_FIELD(cond_hdr->mode.data,
3129 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3130 			mode_match = qed_is_mode_match(p_hwfn,
3131 						       &modes_buf_offset);
3132 		}
3133 
3134 		if (!mode_match) {
3135 			input_offset += cond_hdr->data_size;
3136 			continue;
3137 		}
3138 
3139 		for (i = 0; i < num_entries;
3140 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3141 			const struct dbg_dump_mem *mem =
3142 				(const struct dbg_dump_mem *)
3143 				&input_mems_arr.ptr[input_offset];
3144 			u8 mem_group_id = GET_FIELD(mem->dword0,
3145 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3146 			bool is_storm = false, mem_wide_bus;
3147 			enum dbg_grc_params grc_param;
3148 			char storm_letter = 'a';
3149 			enum block_id block_id;
3150 			u32 mem_addr, mem_len;
3151 
3152 			if (mem_group_id >= MEM_GROUPS_NUM) {
3153 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3154 				return 0;
3155 			}
3156 
3157 			block_id = (enum block_id)cond_hdr->block_id;
3158 			if (!qed_grc_is_mem_included(p_hwfn,
3159 						     block_id,
3160 						     mem_group_id))
3161 				continue;
3162 
3163 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3164 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3165 			mem_wide_bus = GET_FIELD(mem->dword1,
3166 						 DBG_DUMP_MEM_WIDE_BUS);
3167 
3168 			/* Update memory length for CCFC/TCFC memories
3169 			 * according to number of LCIDs/LTIDs.
3170 			 */
3171 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3172 				if (mem_len % MAX_LCIDS) {
3173 					DP_NOTICE(p_hwfn,
3174 						  "Invalid CCFC connection memory size\n");
3175 					return 0;
3176 				}
3177 
3178 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3179 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3180 					  (mem_len / MAX_LCIDS);
3181 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3182 				if (mem_len % MAX_LTIDS) {
3183 					DP_NOTICE(p_hwfn,
3184 						  "Invalid TCFC task memory size\n");
3185 					return 0;
3186 				}
3187 
3188 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3189 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3190 					  (mem_len / MAX_LTIDS);
3191 			}
3192 
3193 			/* If memory is associated with Storm, update Storm
3194 			 * details.
3195 			 */
3196 			if (s_block_defs
3197 			    [cond_hdr->block_id]->associated_to_storm) {
3198 				is_storm = true;
3199 				storm_letter =
3200 				    s_storm_defs[s_block_defs
3201 						 [cond_hdr->block_id]->
3202 						 storm_id].letter;
3203 			}
3204 
3205 			/* Dump memory */
3206 			offset += qed_grc_dump_mem(p_hwfn,
3207 						p_ptt,
3208 						dump_buf + offset,
3209 						dump,
3210 						NULL,
3211 						mem_addr,
3212 						mem_len,
3213 						mem_wide_bus,
3214 						0,
3215 						false,
3216 						s_mem_group_names[mem_group_id],
3217 						is_storm,
3218 						storm_letter);
3219 		}
3220 	}
3221 
3222 	return offset;
3223 }
3224 
3225 /* Dumps GRC memories according to the input array dump_mem.
3226  * Returns the dumped size in dwords.
3227  */
3228 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3229 				 struct qed_ptt *p_ptt,
3230 				 u32 *dump_buf, bool dump)
3231 {
3232 	u32 offset = 0, input_offset = 0;
3233 
3234 	while (input_offset <
3235 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3236 		const struct dbg_dump_split_hdr *split_hdr;
3237 		struct dbg_array curr_input_mems_arr;
3238 		u32 split_data_size;
3239 		u8 split_type_id;
3240 
3241 		split_hdr = (const struct dbg_dump_split_hdr *)
3242 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3243 		split_type_id =
3244 			GET_FIELD(split_hdr->hdr,
3245 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3246 		split_data_size =
3247 			GET_FIELD(split_hdr->hdr,
3248 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3249 		curr_input_mems_arr.ptr =
3250 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3251 		curr_input_mems_arr.size_in_dwords = split_data_size;
3252 
3253 		switch (split_type_id) {
3254 		case SPLIT_TYPE_NONE:
3255 			offset += qed_grc_dump_mem_entries(p_hwfn,
3256 							   p_ptt,
3257 							   curr_input_mems_arr,
3258 							   dump_buf + offset,
3259 							   dump);
3260 			break;
3261 
3262 		default:
3263 			DP_NOTICE(p_hwfn,
3264 				  "Dumping split memories is currently not supported\n");
3265 			break;
3266 		}
3267 
3268 		input_offset += split_data_size;
3269 	}
3270 
3271 	return offset;
3272 }
3273 
3274 /* Dumps GRC context data for the specified Storm.
3275  * Returns the dumped size in dwords.
3276  * The lid_size argument is specified in quad-regs.
3277  */
3278 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3279 				 struct qed_ptt *p_ptt,
3280 				 u32 *dump_buf,
3281 				 bool dump,
3282 				 const char *name,
3283 				 u32 num_lids,
3284 				 u32 lid_size,
3285 				 u32 rd_reg_addr,
3286 				 u8 storm_id)
3287 {
3288 	struct storm_defs *storm = &s_storm_defs[storm_id];
3289 	u32 i, lid, total_size, offset = 0;
3290 
3291 	if (!lid_size)
3292 		return 0;
3293 
3294 	lid_size *= BYTES_IN_DWORD;
3295 	total_size = num_lids * lid_size;
3296 
3297 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3298 				       dump_buf + offset,
3299 				       dump,
3300 				       name,
3301 				       0,
3302 				       total_size,
3303 				       lid_size * 32,
3304 				       false, name, true, storm->letter);
3305 
3306 	if (!dump)
3307 		return offset + total_size;
3308 
3309 	/* Dump context data */
3310 	for (lid = 0; lid < num_lids; lid++) {
3311 		for (i = 0; i < lid_size; i++, offset++) {
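			/* Select dword i of context lid via the Storm's CM
			 * context write-address register, then read it back
			 * from rd_reg_addr.
			 */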
3312 			qed_wr(p_hwfn,
3313 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3314 			*(dump_buf + offset) = qed_rd(p_hwfn,
3315 						      p_ptt, rd_reg_addr);
3316 		}
3317 	}
3318 
3319 	return offset;
3320 }
3321 
3322 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3323 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3324 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3325 {
3326 	enum dbg_grc_params grc_param;
3327 	u32 offset = 0;
3328 	u8 storm_id;
3329 
3330 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3331 		struct storm_defs *storm = &s_storm_defs[storm_id];
3332 
3333 		if (!qed_grc_is_storm_included(p_hwfn,
3334 					       (enum dbg_storms)storm_id))
3335 			continue;
3336 
3337 		/* Dump Conn AG context size */
3338 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3339 		offset +=
3340 			qed_grc_dump_ctx_data(p_hwfn,
3341 					      p_ptt,
3342 					      dump_buf + offset,
3343 					      dump,
3344 					      "CONN_AG_CTX",
3345 					      qed_grc_get_param(p_hwfn,
3346 								grc_param),
3347 					      storm->cm_conn_ag_ctx_lid_size,
3348 					      storm->cm_conn_ag_ctx_rd_addr,
3349 					      storm_id);
3350 
3351 		/* Dump Conn ST context size */
3352 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3353 		offset +=
3354 			qed_grc_dump_ctx_data(p_hwfn,
3355 					      p_ptt,
3356 					      dump_buf + offset,
3357 					      dump,
3358 					      "CONN_ST_CTX",
3359 					      qed_grc_get_param(p_hwfn,
3360 								grc_param),
3361 					      storm->cm_conn_st_ctx_lid_size,
3362 					      storm->cm_conn_st_ctx_rd_addr,
3363 					      storm_id);
3364 
3365 		/* Dump Task AG context size */
3366 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3367 		offset +=
3368 			qed_grc_dump_ctx_data(p_hwfn,
3369 					      p_ptt,
3370 					      dump_buf + offset,
3371 					      dump,
3372 					      "TASK_AG_CTX",
3373 					      qed_grc_get_param(p_hwfn,
3374 								grc_param),
3375 					      storm->cm_task_ag_ctx_lid_size,
3376 					      storm->cm_task_ag_ctx_rd_addr,
3377 					      storm_id);
3378 
3379 		/* Dump Task ST context size */
3380 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3381 		offset +=
3382 			qed_grc_dump_ctx_data(p_hwfn,
3383 					      p_ptt,
3384 					      dump_buf + offset,
3385 					      dump,
3386 					      "TASK_ST_CTX",
3387 					      qed_grc_get_param(p_hwfn,
3388 								grc_param),
3389 					      storm->cm_task_st_ctx_lid_size,
3390 					      storm->cm_task_st_ctx_rd_addr,
3391 					      storm_id);
3392 	}
3393 
3394 	return offset;
3395 }
3396 
/* Dumps the GRC IOR data. Returns the dumped size in dwords. */
3398 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3399 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3400 {
3401 	char buf[10] = "IOR_SET_?";
3402 	u32 addr, offset = 0;
3403 	u8 storm_id, set_id;
3404 
3405 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3406 		struct storm_defs *storm = &s_storm_defs[storm_id];
3407 
3408 		if (!qed_grc_is_storm_included(p_hwfn,
3409 					       (enum dbg_storms)storm_id))
3410 			continue;
3411 
3412 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3413 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3414 					       SEM_FAST_REG_STORM_REG_FILE) +
3415 			       IOR_SET_OFFSET(set_id);
3416 			buf[strlen(buf) - 1] = '0' + set_id;
3417 			offset += qed_grc_dump_mem(p_hwfn,
3418 						   p_ptt,
3419 						   dump_buf + offset,
3420 						   dump,
3421 						   buf,
3422 						   addr,
3423 						   IORS_PER_SET,
3424 						   false,
3425 						   32,
3426 						   false,
3427 						   "ior",
3428 						   true,
3429 						   storm->letter);
3430 		}
3431 	}
3432 
3433 	return offset;
3434 }
3435 
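/* The VFC CAM/RAM dumps below use an indirect read protocol through the
 * Storm's SEM-fast interface: the command dwords are written to
 * SEM_FAST_REG_VFC_DATA_WR, the address dwords to SEM_FAST_REG_VFC_ADDR, and
 * the response is then read back from SEM_FAST_REG_VFC_DATA_RD, one CAM/RAM
 * row per iteration.
 */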
3436 /* Dump VFC CAM. Returns the dumped size in dwords. */
3437 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3438 				struct qed_ptt *p_ptt,
3439 				u32 *dump_buf, bool dump, u8 storm_id)
3440 {
3441 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3442 	struct storm_defs *storm = &s_storm_defs[storm_id];
3443 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3444 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3445 	u32 row, i, offset = 0;
3446 
3447 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3448 				       dump_buf + offset,
3449 				       dump,
3450 				       "vfc_cam",
3451 				       0,
3452 				       total_size,
3453 				       256,
3454 				       false, "vfc_cam", true, storm->letter);
3455 
3456 	if (!dump)
3457 		return offset + total_size;
3458 
3459 	/* Prepare CAM address */
3460 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3461 
3462 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3463 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3464 		/* Write VFC CAM command */
3465 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3466 		ARR_REG_WR(p_hwfn,
3467 			   p_ptt,
3468 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3469 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3470 
3471 		/* Write VFC CAM address */
3472 		ARR_REG_WR(p_hwfn,
3473 			   p_ptt,
3474 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3475 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3476 
3477 		/* Read VFC CAM read response */
3478 		ARR_REG_RD(p_hwfn,
3479 			   p_ptt,
3480 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3481 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3482 	}
3483 
3484 	return offset;
3485 }
3486 
3487 /* Dump VFC RAM. Returns the dumped size in dwords. */
3488 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3489 				struct qed_ptt *p_ptt,
3490 				u32 *dump_buf,
3491 				bool dump,
3492 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3493 {
3494 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3495 	struct storm_defs *storm = &s_storm_defs[storm_id];
3496 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3497 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3498 	u32 row, i, offset = 0;
3499 
3500 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3501 				       dump_buf + offset,
3502 				       dump,
3503 				       ram_defs->mem_name,
3504 				       0,
3505 				       total_size,
3506 				       256,
3507 				       false,
3508 				       ram_defs->type_name,
3509 				       true, storm->letter);
3510 
3511 	/* Prepare RAM address */
3512 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3513 
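	/* When only computing the dump size, skip the VFC reads and account
	 * for all rows.
	 */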
3514 	if (!dump)
3515 		return offset + total_size;
3516 
3517 	for (row = ram_defs->base_row;
3518 	     row < ram_defs->base_row + ram_defs->num_rows;
3519 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3520 		/* Write VFC RAM command */
3521 		ARR_REG_WR(p_hwfn,
3522 			   p_ptt,
3523 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3524 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3525 
3526 		/* Write VFC RAM address */
3527 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3528 		ARR_REG_WR(p_hwfn,
3529 			   p_ptt,
3530 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3531 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3532 
3533 		/* Read VFC RAM read response */
3534 		ARR_REG_RD(p_hwfn,
3535 			   p_ptt,
3536 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3537 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3538 	}
3539 
3540 	return offset;
3541 }
3542 
3543 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3544 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3545 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3546 {
3547 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3548 	u8 storm_id, i;
3549 	u32 offset = 0;
3550 
3551 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3552 		if (!qed_grc_is_storm_included(p_hwfn,
3553 					       (enum dbg_storms)storm_id) ||
3554 		    !s_storm_defs[storm_id].has_vfc ||
3555 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3556 		     PLATFORM_ASIC))
3557 			continue;
3558 
3559 		/* Read CAM */
3560 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3561 					       p_ptt,
3562 					       dump_buf + offset,
3563 					       dump, storm_id);
3564 
3565 		/* Read RAM */
3566 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3567 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3568 						       p_ptt,
3569 						       dump_buf + offset,
3570 						       dump,
3571 						       storm_id,
3572 						       &s_vfc_ram_defs[i]);
3573 	}
3574 
3575 	return offset;
3576 }
3577 
3578 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3579 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3580 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3581 {
3582 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3583 	u32 offset = 0;
3584 	u8 rss_mem_id;
3585 
3586 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3587 		u32 rss_addr, num_entries, total_dwords;
3588 		struct rss_mem_defs *rss_defs;
3589 		u32 addr, num_dwords_to_read;
3590 		bool packed;
3591 
3592 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3593 		rss_addr = rss_defs->addr;
3594 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3595 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3596 		packed = (rss_defs->entry_width == 16);
3597 
3598 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3599 					       dump_buf + offset,
3600 					       dump,
3601 					       rss_defs->mem_name,
3602 					       0,
3603 					       total_dwords,
3604 					       rss_defs->entry_width,
3605 					       packed,
3606 					       rss_defs->type_name, false, 0);
3607 
3608 		/* Dump RSS data */
3609 		if (!dump) {
3610 			offset += total_dwords;
3611 			continue;
3612 		}
3613 
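		/* The RSS RAM is read through an address/data register pair:
		 * write the line address, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords from the data register.
		 */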
3614 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3615 		while (total_dwords) {
3616 			num_dwords_to_read = min_t(u32,
3617 						   RSS_REG_RSS_RAM_DATA_SIZE,
3618 						   total_dwords);
3619 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3620 			offset += qed_grc_dump_addr_range(p_hwfn,
3621 							  p_ptt,
3622 							  dump_buf + offset,
3623 							  dump,
3624 							  addr,
3625 							  num_dwords_to_read,
3626 							  false);
3627 			total_dwords -= num_dwords_to_read;
3628 			rss_addr++;
3629 		}
3630 	}
3631 
3632 	return offset;
3633 }
3634 
3635 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3636 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3637 				struct qed_ptt *p_ptt,
3638 				u32 *dump_buf, bool dump, u8 big_ram_id)
3639 {
3640 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3641 	u32 block_size, ram_size, offset = 0, reg_val, i;
3642 	char mem_name[12] = "???_BIG_RAM";
3643 	char type_name[8] = "???_RAM";
3644 	struct big_ram_defs *big_ram;
3645 
3646 	big_ram = &s_big_ram_defs[big_ram_id];
3647 	ram_size = big_ram->ram_size[dev_data->chip_id];
3648 
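	/* A per-chip bit in the is_256b register selects the Big RAM block
	 * size (256 or 128).
	 */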
3649 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
	block_size = (reg_val &
		      BIT(big_ram->is_256b_bit_offset[dev_data->chip_id])) ?
		      256 : 128;
3653 
3654 	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3655 	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3656 
3657 	/* Dump memory header */
3658 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3659 				       dump_buf + offset,
3660 				       dump,
3661 				       mem_name,
3662 				       0,
3663 				       ram_size,
3664 				       block_size * 8,
3665 				       false, type_name, false, 0);
3666 
	/* If not dumping, only account for the Big RAM size */
	if (!dump)
		return offset + ram_size;
3670 
3671 	/* Dump Big RAM */
3672 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3673 	     i++) {
3674 		u32 addr, len;
3675 
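		/* Write the chunk index to the Big RAM address register, then
		 * read one chunk through the data register.
		 */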
3676 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3677 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3678 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3679 		offset += qed_grc_dump_addr_range(p_hwfn,
3680 						  p_ptt,
3681 						  dump_buf + offset,
3682 						  dump,
3683 						  addr,
3684 						  len,
3685 						  false);
3686 	}
3687 
3688 	return offset;
3689 }
3690 
3691 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3692 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3693 {
3694 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3695 	u32 offset = 0, addr;
3696 	bool halted = false;
3697 
	/* Halt MCP so its scratchpad and register file can be read
	 * consistently (skipped when MCP access is disabled).
	 */
3699 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3700 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3701 		if (!halted)
3702 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3703 	}
3704 
3705 	/* Dump MCP scratchpad */
3706 	offset += qed_grc_dump_mem(p_hwfn,
3707 				   p_ptt,
3708 				   dump_buf + offset,
3709 				   dump,
3710 				   NULL,
3711 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3712 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3713 				   false, 0, false, "MCP", false, 0);
3714 
3715 	/* Dump MCP cpu_reg_file */
3716 	offset += qed_grc_dump_mem(p_hwfn,
3717 				   p_ptt,
3718 				   dump_buf + offset,
3719 				   dump,
3720 				   NULL,
3721 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3722 				   MCP_REG_CPU_REG_FILE_SIZE,
3723 				   false, 0, false, "MCP", false, 0);
3724 
3725 	/* Dump MCP registers */
3726 	block_enable[BLOCK_MCP] = true;
3727 	offset += qed_grc_dump_registers(p_hwfn,
3728 					 p_ptt,
3729 					 dump_buf + offset,
3730 					 dump, block_enable, "block", "MCP");
3731 
3732 	/* Dump required non-MCP registers */
3733 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3734 					dump, 1, "eng", -1, "block", "MCP");
3735 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3736 	offset += qed_grc_dump_reg_entry(p_hwfn,
3737 					 p_ptt,
3738 					 dump_buf + offset,
3739 					 dump,
3740 					 addr,
3741 					 1,
3742 					 false);
3743 
3744 	/* Release MCP */
3745 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3746 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3747 
3748 	return offset;
3749 }
3750 
/* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
3752 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3753 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3754 {
3755 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3756 	char mem_name[32];
3757 	u8 phy_id;
3758 
3759 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3760 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3761 		struct phy_defs *phy_defs;
3762 		u8 *bytes_buf;
3763 
3764 		phy_defs = &s_phy_defs[phy_id];
3765 		addr_lo_addr = phy_defs->base_addr +
3766 			       phy_defs->tbus_addr_lo_addr;
3767 		addr_hi_addr = phy_defs->base_addr +
3768 			       phy_defs->tbus_addr_hi_addr;
3769 		data_lo_addr = phy_defs->base_addr +
3770 			       phy_defs->tbus_data_lo_addr;
3771 		data_hi_addr = phy_defs->base_addr +
3772 			       phy_defs->tbus_data_hi_addr;
3773 
3774 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3775 			     phy_defs->phy_name) < 0)
3776 			DP_NOTICE(p_hwfn,
3777 				  "Unexpected debug error: invalid PHY memory name\n");
3778 
3779 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3780 					       dump_buf + offset,
3781 					       dump,
3782 					       mem_name,
3783 					       0,
3784 					       PHY_DUMP_SIZE_DWORDS,
3785 					       16, true, mem_name, false, 0);
3786 
3787 		if (!dump) {
3788 			offset += PHY_DUMP_SIZE_DWORDS;
3789 			continue;
3790 		}
3791 
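		/* Each tbus address (high:low) yields two data bytes, read
		 * from the low and high data registers.
		 */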
3792 		bytes_buf = (u8 *)(dump_buf + offset);
3793 		for (tbus_hi_offset = 0;
3794 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3795 		     tbus_hi_offset++) {
3796 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3797 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3798 			     tbus_lo_offset++) {
3799 				qed_wr(p_hwfn,
3800 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3801 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3802 							    p_ptt,
3803 							    data_lo_addr);
3804 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3805 							    p_ptt,
3806 							    data_hi_addr);
3807 			}
3808 		}
3809 
3810 		offset += PHY_DUMP_SIZE_DWORDS;
3811 	}
3812 
3813 	return offset;
3814 }
3815 
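/* Configures a debug line for the specified block: selects the line and
 * programs the enable, shift and force valid/frame masks.
 */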
3816 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3817 				struct qed_ptt *p_ptt,
3818 				enum block_id block_id,
3819 				u8 line_id,
3820 				u8 enable_mask,
3821 				u8 right_shift,
3822 				u8 force_valid_mask, u8 force_frame_mask)
3823 {
3824 	struct block_defs *block = s_block_defs[block_id];
3825 
3826 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3827 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3828 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3829 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3830 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3831 }
3832 
3833 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3834 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3835 				     struct qed_ptt *p_ptt,
3836 				     u32 *dump_buf, bool dump)
3837 {
3838 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3839 	u32 block_id, line_id, offset = 0;
3840 
3841 	/* Don't dump static debug if a debug bus recording is in progress */
3842 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3843 		return 0;
3844 
3845 	if (dump) {
3846 		/* Disable all blocks debug output */
3847 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3848 			struct block_defs *block = s_block_defs[block_id];
3849 
3850 			if (block->dbg_client_id[dev_data->chip_id] !=
3851 			    MAX_DBG_BUS_CLIENTS)
3852 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3853 				       0);
3854 		}
3855 
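		/* Reset the debug block, select 8HW_0ST framing and route the
		 * debug output to the internal buffer before enabling it.
		 */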
3856 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3857 		qed_bus_set_framing_mode(p_hwfn,
3858 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3859 		qed_wr(p_hwfn,
3860 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3861 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3862 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3863 	}
3864 
3865 	/* Dump all static debug lines for each relevant block */
3866 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3867 		struct block_defs *block = s_block_defs[block_id];
3868 		struct dbg_bus_block *block_desc;
3869 		u32 block_dwords, addr, len;
3870 		u8 dbg_client_id;
3871 
3872 		if (block->dbg_client_id[dev_data->chip_id] ==
3873 		    MAX_DBG_BUS_CLIENTS)
3874 			continue;
3875 
3876 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3877 						    (enum block_id)block_id);
3878 		block_dwords = NUM_DBG_LINES(block_desc) *
3879 			       STATIC_DEBUG_LINE_DWORDS;
3880 
3881 		/* Dump static section params */
3882 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3883 					       dump_buf + offset,
3884 					       dump,
3885 					       block->name,
3886 					       0,
3887 					       block_dwords,
3888 					       32, false, "STATIC", false, 0);
3889 
3890 		if (!dump) {
3891 			offset += block_dwords;
3892 			continue;
3893 		}
3894 
		/* If the block is in reset, all lines are invalid - dump
		 * zeros.
		 */
3896 		if (dev_data->block_in_reset[block_id]) {
3897 			memset(dump_buf + offset, 0,
3898 			       DWORDS_TO_BYTES(block_dwords));
3899 			offset += block_dwords;
3900 			continue;
3901 		}
3902 
3903 		/* Enable block's client */
3904 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3905 		qed_bus_enable_clients(p_hwfn,
3906 				       p_ptt,
3907 				       BIT(dbg_client_id));
3908 
3909 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3910 		len = STATIC_DEBUG_LINE_DWORDS;
3911 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3912 		     line_id++) {
3913 			/* Configure debug line ID */
3914 			qed_config_dbg_line(p_hwfn,
3915 					    p_ptt,
3916 					    (enum block_id)block_id,
3917 					    (u8)line_id, 0xf, 0, 0, 0);
3918 
3919 			/* Read debug line info */
3920 			offset += qed_grc_dump_addr_range(p_hwfn,
3921 							  p_ptt,
3922 							  dump_buf + offset,
3923 							  dump,
3924 							  addr,
3925 							  len,
3926 							  true);
3927 		}
3928 
3929 		/* Disable block's client and debug output */
3930 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3931 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3932 	}
3933 
3934 	if (dump) {
3935 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3936 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3937 	}
3938 
3939 	return offset;
3940 }
3941 
/* Performs GRC Dump to the specified buffer.
 * Returns a dbg_status code; the dumped size in dwords is returned in
 * num_dumped_dwords.
 */
3945 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3946 				    struct qed_ptt *p_ptt,
3947 				    u32 *dump_buf,
3948 				    bool dump, u32 *num_dumped_dwords)
3949 {
3950 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3951 	bool parities_masked = false;
3952 	u8 i, port_mode = 0;
3953 	u32 offset = 0;
3954 
3955 	*num_dumped_dwords = 0;
3956 
3957 	if (dump) {
3958 		/* Find port mode */
3959 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3960 		case 0:
3961 			port_mode = 1;
3962 			break;
3963 		case 1:
3964 			port_mode = 2;
3965 			break;
3966 		case 2:
3967 			port_mode = 4;
3968 			break;
3969 		}
3970 
3971 		/* Update reset state */
3972 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3973 	}
3974 
3975 	/* Dump global params */
3976 	offset += qed_dump_common_global_params(p_hwfn,
3977 						p_ptt,
3978 						dump_buf + offset, dump, 4);
3979 	offset += qed_dump_str_param(dump_buf + offset,
3980 				     dump, "dump-type", "grc-dump");
3981 	offset += qed_dump_num_param(dump_buf + offset,
3982 				     dump,
3983 				     "num-lcids",
3984 				     qed_grc_get_param(p_hwfn,
3985 						DBG_GRC_PARAM_NUM_LCIDS));
3986 	offset += qed_dump_num_param(dump_buf + offset,
3987 				     dump,
3988 				     "num-ltids",
3989 				     qed_grc_get_param(p_hwfn,
3990 						DBG_GRC_PARAM_NUM_LTIDS));
3991 	offset += qed_dump_num_param(dump_buf + offset,
3992 				     dump, "num-ports", port_mode);
3993 
	/* Dump reset registers (dumped before taking blocks out of reset) */
3995 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3996 		offset += qed_grc_dump_reset_regs(p_hwfn,
3997 						  p_ptt,
3998 						  dump_buf + offset, dump);
3999 
4000 	/* Take all blocks out of reset (using reset registers) */
4001 	if (dump) {
4002 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4003 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4004 	}
4005 
4006 	/* Disable all parities using MFW command */
4007 	if (dump &&
4008 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4009 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4010 		if (!parities_masked) {
4011 			DP_NOTICE(p_hwfn,
4012 				  "Failed to mask parities using MFW\n");
			if (qed_grc_get_param(p_hwfn,
					      DBG_GRC_PARAM_PARITY_SAFE))
4015 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4016 		}
4017 	}
4018 
4019 	/* Dump modified registers (dumped before modifying them) */
4020 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4021 		offset += qed_grc_dump_modified_regs(p_hwfn,
4022 						     p_ptt,
4023 						     dump_buf + offset, dump);
4024 
4025 	/* Stall storms */
4026 	if (dump &&
4027 	    (qed_grc_is_included(p_hwfn,
4028 				 DBG_GRC_PARAM_DUMP_IOR) ||
4029 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4030 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4031 
	/* Dump all regs */
4033 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4034 		bool block_enable[MAX_BLOCK_ID];
4035 
4036 		/* Dump all blocks except MCP */
4037 		for (i = 0; i < MAX_BLOCK_ID; i++)
4038 			block_enable[i] = true;
4039 		block_enable[BLOCK_MCP] = false;
4040 		offset += qed_grc_dump_registers(p_hwfn,
4041 						 p_ptt,
4042 						 dump_buf +
4043 						 offset,
4044 						 dump,
4045 						 block_enable, NULL, NULL);
4046 
4047 		/* Dump special registers */
4048 		offset += qed_grc_dump_special_regs(p_hwfn,
4049 						    p_ptt,
4050 						    dump_buf + offset, dump);
4051 	}
4052 
4053 	/* Dump memories */
4054 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4055 
4056 	/* Dump MCP */
4057 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4058 		offset += qed_grc_dump_mcp(p_hwfn,
4059 					   p_ptt, dump_buf + offset, dump);
4060 
4061 	/* Dump context */
4062 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4063 		offset += qed_grc_dump_ctx(p_hwfn,
4064 					   p_ptt, dump_buf + offset, dump);
4065 
4066 	/* Dump RSS memories */
4067 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4068 		offset += qed_grc_dump_rss(p_hwfn,
4069 					   p_ptt, dump_buf + offset, dump);
4070 
4071 	/* Dump Big RAM */
4072 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4073 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4074 			offset += qed_grc_dump_big_ram(p_hwfn,
4075 						       p_ptt,
4076 						       dump_buf + offset,
4077 						       dump, i);
4078 
4079 	/* Dump IORs */
4080 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4081 		offset += qed_grc_dump_iors(p_hwfn,
4082 					    p_ptt, dump_buf + offset, dump);
4083 
4084 	/* Dump VFC */
4085 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4086 		offset += qed_grc_dump_vfc(p_hwfn,
4087 					   p_ptt, dump_buf + offset, dump);
4088 
4089 	/* Dump PHY tbus */
4090 	if (qed_grc_is_included(p_hwfn,
4091 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4092 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4093 		offset += qed_grc_dump_phy(p_hwfn,
4094 					   p_ptt, dump_buf + offset, dump);
4095 
	/* Dump static debug data */
4097 	if (qed_grc_is_included(p_hwfn,
4098 				DBG_GRC_PARAM_DUMP_STATIC) &&
4099 	    dev_data->bus.state == DBG_BUS_STATE_IDLE)
4100 		offset += qed_grc_dump_static_debug(p_hwfn,
4101 						    p_ptt,
4102 						    dump_buf + offset, dump);
4103 
4104 	/* Dump last section */
4105 	offset += qed_dump_last_section(dump_buf, offset, dump);
4106 
4107 	if (dump) {
4108 		/* Unstall storms */
4109 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4110 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4111 
4112 		/* Clear parity status */
4113 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4114 
4115 		/* Enable all parities using MFW command */
4116 		if (parities_masked)
4117 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4118 	}
4119 
4120 	*num_dumped_dwords = offset;
4121 
4122 	return DBG_STATUS_OK;
4123 }
4124 
4125 /* Writes the specified failing Idle Check rule to the specified buffer.
4126  * Returns the dumped size in dwords.
4127  */
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf,
				     bool dump,
				     u16 rule_id,
				     const struct dbg_idle_chk_rule *rule,
				     u16 fail_entry_id, u32 *cond_reg_values)
4136 {
4137 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4138 	const struct dbg_idle_chk_cond_reg *cond_regs;
4139 	const struct dbg_idle_chk_info_reg *info_regs;
4140 	u32 i, next_reg_offset = 0, offset = 0;
4141 	struct dbg_idle_chk_result_hdr *hdr;
4142 	const union dbg_idle_chk_reg *regs;
4143 	u8 reg_id;
4144 
4145 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4146 	regs = &((const union dbg_idle_chk_reg *)
4147 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4148 	cond_regs = &regs[0].cond_reg;
4149 	info_regs = &regs[rule->num_cond_regs].info_reg;
4150 
4151 	/* Dump rule data */
4152 	if (dump) {
4153 		memset(hdr, 0, sizeof(*hdr));
4154 		hdr->rule_id = rule_id;
4155 		hdr->mem_entry_id = fail_entry_id;
4156 		hdr->severity = rule->severity;
4157 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4158 	}
4159 
4160 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4161 
4162 	/* Dump condition register values */
4163 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4164 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4165 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4166 
4167 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4168 			  (dump_buf + offset);
4169 
		/* If not dumping, only account for the register header and
		 * data.
		 */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
			    reg->entry_size;
			continue;
		}

		/* Write register header */
4177 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4178 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4179 		reg_hdr->start_entry = reg->start_entry;
4180 		reg_hdr->size = reg->entry_size;
4181 		SET_FIELD(reg_hdr->data,
4182 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4183 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4184 		SET_FIELD(reg_hdr->data,
4185 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4186 
4187 		/* Write register values */
4188 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4189 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4190 	}
4191 
4192 	/* Dump info register values */
4193 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4194 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4195 		u32 block_id;
4196 
		/* If not dumping, only account for the register header and
		 * data.
		 */
4198 		if (!dump) {
4199 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4200 			continue;
4201 		}
4202 
4203 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4204 		if (block_id >= MAX_BLOCK_ID) {
4205 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4206 			return 0;
4207 		}
4208 
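		/* Dump the info register only if its block is out of reset */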
4209 		if (!dev_data->block_in_reset[block_id]) {
4210 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4211 			bool wide_bus, eval_mode, mode_match = true;
4212 			u16 modes_buf_offset;
4213 			u32 addr;
4214 
4215 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4216 				  (dump_buf + offset);
4217 
4218 			/* Check mode */
4219 			eval_mode = GET_FIELD(reg->mode.data,
4220 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4221 			if (eval_mode) {
4222 				modes_buf_offset =
4223 				    GET_FIELD(reg->mode.data,
4224 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4225 				mode_match =
4226 					qed_is_mode_match(p_hwfn,
4227 							  &modes_buf_offset);
4228 			}
4229 
4230 			if (!mode_match)
4231 				continue;
4232 
4233 			addr = GET_FIELD(reg->data,
4234 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4235 			wide_bus = GET_FIELD(reg->data,
4236 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4237 
4238 			/* Write register header */
4239 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4240 			hdr->num_dumped_info_regs++;
4241 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4242 			reg_hdr->size = reg->size;
4243 			SET_FIELD(reg_hdr->data,
4244 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4245 				  rule->num_cond_regs + reg_id);
4246 
4247 			/* Write register values */
4248 			offset += qed_grc_dump_addr_range(p_hwfn,
4249 							  p_ptt,
4250 							  dump_buf + offset,
4251 							  dump,
4252 							  addr,
4253 							  reg->size, wide_bus);
4254 		}
4255 	}
4256 
4257 	return offset;
4258 }
4259 
4260 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4261 static u32
4262 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4263 			       u32 *dump_buf, bool dump,
4264 			       const struct dbg_idle_chk_rule *input_rules,
4265 			       u32 num_input_rules, u32 *num_failing_rules)
4266 {
4267 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4268 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4269 	u32 i, offset = 0;
4270 	u16 entry_id;
4271 	u8 reg_id;
4272 
4273 	*num_failing_rules = 0;
4274 
4275 	for (i = 0; i < num_input_rules; i++) {
4276 		const struct dbg_idle_chk_cond_reg *cond_regs;
4277 		const struct dbg_idle_chk_rule *rule;
4278 		const union dbg_idle_chk_reg *regs;
4279 		u16 num_reg_entries = 1;
4280 		bool check_rule = true;
4281 		const u32 *imm_values;
4282 
4283 		rule = &input_rules[i];
4284 		regs = &((const union dbg_idle_chk_reg *)
4285 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4286 			[rule->reg_offset];
4287 		cond_regs = &regs[0].cond_reg;
4288 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4289 			     [rule->imm_offset];
4290 
4291 		/* Check if all condition register blocks are out of reset, and
4292 		 * find maximal number of entries (all condition registers that
4293 		 * are memories must have the same size, which is > 1).
4294 		 */
4295 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4296 		     reg_id++) {
4297 			u32 block_id =
4298 				GET_FIELD(cond_regs[reg_id].data,
4299 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4300 
4301 			if (block_id >= MAX_BLOCK_ID) {
4302 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4303 				return 0;
4304 			}
4305 
4306 			check_rule = !dev_data->block_in_reset[block_id];
4307 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4308 				num_reg_entries = cond_regs[reg_id].num_entries;
4309 		}
4310 
4311 		if (!check_rule && dump)
4312 			continue;
4313 
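		/* When only computing the dump size, assume every entry of
		 * this rule fails.
		 */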
4314 		if (!dump) {
4315 			u32 entry_dump_size =
4316 				qed_idle_chk_dump_failure(p_hwfn,
4317 							  p_ptt,
4318 							  dump_buf + offset,
4319 							  false,
4320 							  rule->rule_id,
4321 							  rule,
4322 							  0,
4323 							  NULL);
4324 
4325 			offset += num_reg_entries * entry_dump_size;
4326 			(*num_failing_rules) += num_reg_entries;
4327 			continue;
4328 		}
4329 
4330 		/* Go over all register entries (number of entries is the same
4331 		 * for all condition registers).
4332 		 */
4333 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4334 			u32 next_reg_offset = 0;
4335 
4336 			/* Read current entry of all condition registers */
4337 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4338 			     reg_id++) {
4339 				const struct dbg_idle_chk_cond_reg *reg =
4340 					&cond_regs[reg_id];
4341 				u32 padded_entry_size, addr;
4342 				bool wide_bus;
4343 
4344 				/* Find GRC address (if it's a memory, the
4345 				 * address of the specific entry is calculated).
4346 				 */
4347 				addr = GET_FIELD(reg->data,
4348 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4349 				wide_bus =
4350 				    GET_FIELD(reg->data,
4351 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4352 				if (reg->num_entries > 1 ||
4353 				    reg->start_entry > 0) {
4354 					padded_entry_size =
4355 					   reg->entry_size > 1 ?
4356 					   roundup_pow_of_two(reg->entry_size) :
4357 					   1;
4358 					addr += (reg->start_entry + entry_id) *
4359 						padded_entry_size;
4360 				}
4361 
4362 				/* Read registers */
4363 				if (next_reg_offset + reg->entry_size >=
4364 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4365 					DP_NOTICE(p_hwfn,
4366 						  "idle check registers entry is too large\n");
4367 					return 0;
4368 				}
4369 
4370 				next_reg_offset +=
4371 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4372 							    cond_reg_values +
4373 							    next_reg_offset,
4374 							    dump, addr,
4375 							    reg->entry_size,
4376 							    wide_bus);
4377 			}
4378 
4379 			/* Call rule condition function.
4380 			 * If returns true, it's a failure.
4381 			 */
4382 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4383 							imm_values)) {
4384 				offset += qed_idle_chk_dump_failure(p_hwfn,
4385 							p_ptt,
4386 							dump_buf + offset,
4387 							dump,
4388 							rule->rule_id,
4389 							rule,
4390 							entry_id,
4391 							cond_reg_values);
4392 				(*num_failing_rules)++;
4393 			}
4394 		}
4395 	}
4396 
4397 	return offset;
4398 }
4399 
4400 /* Performs Idle Check Dump to the specified buffer.
4401  * Returns the dumped size in dwords.
4402  */
4403 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4404 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4405 {
4406 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4407 	u32 num_failing_rules = 0;
4408 
4409 	/* Dump global params */
4410 	offset += qed_dump_common_global_params(p_hwfn,
4411 						p_ptt,
4412 						dump_buf + offset, dump, 1);
4413 	offset += qed_dump_str_param(dump_buf + offset,
4414 				     dump, "dump-type", "idle-chk");
4415 
4416 	/* Dump idle check section header with a single parameter */
4417 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4418 	num_failing_rules_offset = offset;
4419 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4420 
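	/* Go over the idle check rules; each group starts with a condition
	 * header followed by its rules.
	 */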
4421 	while (input_offset <
4422 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4423 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4424 			(const struct dbg_idle_chk_cond_hdr *)
4425 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4426 			[input_offset++];
4427 		bool eval_mode, mode_match = true;
4428 		u32 curr_failing_rules;
4429 		u16 modes_buf_offset;
4430 
4431 		/* Check mode */
4432 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4433 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4434 		if (eval_mode) {
4435 			modes_buf_offset =
4436 				GET_FIELD(cond_hdr->mode.data,
4437 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4438 			mode_match = qed_is_mode_match(p_hwfn,
4439 						       &modes_buf_offset);
4440 		}
4441 
4442 		if (mode_match) {
4443 			offset +=
4444 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4445 				p_ptt,
4446 				dump_buf + offset,
4447 				dump,
4448 				(const struct dbg_idle_chk_rule *)
				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES]
				.ptr[input_offset],
4451 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4452 				&curr_failing_rules);
4453 			num_failing_rules += curr_failing_rules;
4454 		}
4455 
4456 		input_offset += cond_hdr->data_size;
4457 	}
4458 
4459 	/* Overwrite num_rules parameter */
4460 	if (dump)
4461 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4462 				   dump, "num_rules", num_failing_rules);
4463 
4464 	/* Dump last section */
4465 	offset += qed_dump_last_section(dump_buf, offset, dump);
4466 
4467 	return offset;
4468 }
4469 
4470 /* Finds the meta data image in NVRAM */
4471 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4472 					    struct qed_ptt *p_ptt,
4473 					    u32 image_type,
4474 					    u32 *nvram_offset_bytes,
4475 					    u32 *nvram_size_bytes)
4476 {
4477 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4478 	struct mcp_file_att file_att;
4479 	int nvm_result;
4480 
4481 	/* Call NVRAM get file command */
4482 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4483 					p_ptt,
4484 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4485 					image_type,
4486 					&ret_mcp_resp,
4487 					&ret_mcp_param,
4488 					&ret_txn_size, (u32 *)&file_att);
4489 
4490 	/* Check response */
4491 	if (nvm_result ||
4492 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4493 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4494 
4495 	/* Update return values */
4496 	*nvram_offset_bytes = file_att.nvm_start_addr;
4497 	*nvram_size_bytes = file_att.len;
4498 
4499 	DP_VERBOSE(p_hwfn,
4500 		   QED_MSG_DEBUG,
4501 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4502 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4503 
4504 	/* Check alignment */
4505 	if (*nvram_size_bytes & 0x3)
4506 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4507 
4508 	return DBG_STATUS_OK;
4509 }
4510 
4511 /* Reads data from NVRAM */
4512 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4513 				      struct qed_ptt *p_ptt,
4514 				      u32 nvram_offset_bytes,
4515 				      u32 nvram_size_bytes, u32 *ret_buf)
4516 {
4517 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4518 	s32 bytes_left = nvram_size_bytes;
4519 	u32 read_offset = 0;
4520 
4521 	DP_VERBOSE(p_hwfn,
4522 		   QED_MSG_DEBUG,
4523 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4524 		   nvram_size_bytes);
4525 
4526 	do {
		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ?
				MCP_DRV_NVM_BUF_LEN : bytes_left;
4530 
4531 		/* Call NVRAM read command */
4532 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4533 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4534 				       (nvram_offset_bytes +
4535 					read_offset) |
4536 				       (bytes_to_copy <<
4537 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4538 				       &ret_mcp_resp, &ret_mcp_param,
4539 				       &ret_read_size,
4540 				       (u32 *)((u8 *)ret_buf + read_offset)))
4541 			return DBG_STATUS_NVRAM_READ_FAILED;
4542 
4543 		/* Check response */
4544 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4545 			return DBG_STATUS_NVRAM_READ_FAILED;
4546 
4547 		/* Update read offset */
4548 		read_offset += ret_read_size;
4549 		bytes_left -= ret_read_size;
4550 	} while (bytes_left > 0);
4551 
4552 	return DBG_STATUS_OK;
4553 }
4554 
4555 /* Get info on the MCP Trace data in the scratchpad:
4556  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4557  * - trace_data_size (OUT): trace data size in bytes (without the header)
4558  */
4559 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4560 						   struct qed_ptt *p_ptt,
4561 						   u32 *trace_data_grc_addr,
4562 						   u32 *trace_data_size)
4563 {
4564 	u32 spad_trace_offsize, signature;
4565 
4566 	/* Read trace section offsize structure from MCP scratchpad */
4567 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4568 
4569 	/* Extract trace section address from offsize (in scratchpad) */
4570 	*trace_data_grc_addr =
4571 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4572 
4573 	/* Read signature from MCP trace section */
4574 	signature = qed_rd(p_hwfn, p_ptt,
4575 			   *trace_data_grc_addr +
4576 			   offsetof(struct mcp_trace, signature));
4577 
4578 	if (signature != MFW_TRACE_SIGNATURE)
4579 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4580 
4581 	/* Read trace size from MCP trace section */
4582 	*trace_data_size = qed_rd(p_hwfn,
4583 				  p_ptt,
4584 				  *trace_data_grc_addr +
4585 				  offsetof(struct mcp_trace, size));
4586 
4587 	return DBG_STATUS_OK;
4588 }
4589 
4590 /* Reads MCP trace meta data image from NVRAM
4591  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4592  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4593  *			      loaded from file).
4594  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4595  */
4596 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4597 						   struct qed_ptt *p_ptt,
4598 						   u32 trace_data_size_bytes,
4599 						   u32 *running_bundle_id,
4600 						   u32 *trace_meta_offset,
4601 						   u32 *trace_meta_size)
4602 {
4603 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4604 
4605 	/* Read MCP trace section offsize structure from MCP scratchpad */
4606 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4607 
4608 	/* Find running bundle ID */
4609 	running_mfw_addr =
4610 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4611 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4612 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4613 	if (*running_bundle_id > 1)
4614 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4615 
4616 	/* Find image in NVRAM */
	nvram_image_type = (*running_bundle_id == DIR_ID_1) ?
			   NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4620 	return qed_find_nvram_image(p_hwfn,
4621 				    p_ptt,
4622 				    nvram_image_type,
4623 				    trace_meta_offset, trace_meta_size);
4624 }
4625 
4626 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4627 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4628 					       struct qed_ptt *p_ptt,
4629 					       u32 nvram_offset_in_bytes,
4630 					       u32 size_in_bytes, u32 *buf)
4631 {
4632 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4633 	enum dbg_status status;
4634 	u32 signature;
4635 
4636 	/* Read meta data from NVRAM */
4637 	status = qed_nvram_read(p_hwfn,
4638 				p_ptt,
4639 				nvram_offset_in_bytes, size_in_bytes, buf);
4640 	if (status != DBG_STATUS_OK)
4641 		return status;
4642 
4643 	/* Extract and check first signature */
4644 	signature = qed_read_unaligned_dword(byte_buf);
4645 	byte_buf += sizeof(signature);
4646 	if (signature != NVM_MAGIC_VALUE)
4647 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4648 
4649 	/* Extract number of modules */
4650 	modules_num = *(byte_buf++);
4651 
4652 	/* Skip all modules */
4653 	for (i = 0; i < modules_num; i++) {
4654 		module_len = *(byte_buf++);
4655 		byte_buf += module_len;
4656 	}
4657 
4658 	/* Extract and check second signature */
4659 	signature = qed_read_unaligned_dword(byte_buf);
4660 	byte_buf += sizeof(signature);
4661 	if (signature != NVM_MAGIC_VALUE)
4662 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4663 
4664 	return DBG_STATUS_OK;
4665 }
4666 
4667 /* Dump MCP Trace */
4668 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4669 					  struct qed_ptt *p_ptt,
4670 					  u32 *dump_buf,
4671 					  bool dump, u32 *num_dumped_dwords)
4672 {
4673 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4674 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4675 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4676 	enum dbg_status status;
4677 	bool mcp_access;
	bool halted = false;
4679 
4680 	*num_dumped_dwords = 0;
4681 
4682 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4683 
4684 	/* Get trace data info */
4685 	status = qed_mcp_trace_get_data_info(p_hwfn,
4686 					     p_ptt,
4687 					     &trace_data_grc_addr,
4688 					     &trace_data_size_bytes);
4689 	if (status != DBG_STATUS_OK)
4690 		return status;
4691 
4692 	/* Dump global params */
4693 	offset += qed_dump_common_global_params(p_hwfn,
4694 						p_ptt,
4695 						dump_buf + offset, dump, 1);
4696 	offset += qed_dump_str_param(dump_buf + offset,
4697 				     dump, "dump-type", "mcp-trace");
4698 
	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. If halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
4703 	if (dump && mcp_access) {
4704 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4705 		if (!halted)
4706 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4707 	}
4708 
4709 	/* Find trace data size */
4710 	trace_data_size_dwords =
4711 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4712 			 BYTES_IN_DWORD);
4713 
4714 	/* Dump trace data section header and param */
4715 	offset += qed_dump_section_hdr(dump_buf + offset,
4716 				       dump, "mcp_trace_data", 1);
4717 	offset += qed_dump_num_param(dump_buf + offset,
4718 				     dump, "size", trace_data_size_dwords);
4719 
4720 	/* Read trace data from scratchpad into dump buffer */
4721 	offset += qed_grc_dump_addr_range(p_hwfn,
4722 					  p_ptt,
4723 					  dump_buf + offset,
4724 					  dump,
4725 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4726 					  trace_data_size_dwords, false);
4727 
4728 	/* Resume MCP (only if halt succeeded) */
4729 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4730 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4731 
4732 	/* Dump trace meta section header */
4733 	offset += qed_dump_section_hdr(dump_buf + offset,
4734 				       dump, "mcp_trace_meta", 1);
4735 
4736 	/* If MCP Trace meta size parameter was set, use it.
4737 	 * Otherwise, read trace meta.
4738 	 * trace_meta_size_bytes is dword-aligned.
4739 	 */
4740 	trace_meta_size_bytes =
4741 		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4742 	if ((!trace_meta_size_bytes || dump) && mcp_access) {
4743 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4744 						     p_ptt,
4745 						     trace_data_size_bytes,
4746 						     &running_bundle_id,
4747 						     &trace_meta_offset_bytes,
4748 						     &trace_meta_size_bytes);
4749 		if (status == DBG_STATUS_OK)
4750 			trace_meta_size_dwords =
4751 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4752 	}
4753 
4754 	/* Dump trace meta size param */
4755 	offset += qed_dump_num_param(dump_buf + offset,
4756 				     dump, "size", trace_meta_size_dwords);
4757 
4758 	/* Read trace meta image into dump buffer */
4759 	if (dump && trace_meta_size_dwords)
4760 		status = qed_mcp_trace_read_meta(p_hwfn,
4761 						 p_ptt,
4762 						 trace_meta_offset_bytes,
4763 						 trace_meta_size_bytes,
4764 						 dump_buf + offset);
4765 	if (status == DBG_STATUS_OK)
4766 		offset += trace_meta_size_dwords;
4767 
4768 	/* Dump last section */
4769 	offset += qed_dump_last_section(dump_buf, offset, dump);
4770 
4771 	*num_dumped_dwords = offset;
4772 
	/* If no MCP access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
4776 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4777 }
4778 
4779 /* Dump GRC FIFO */
4780 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4781 					 struct qed_ptt *p_ptt,
4782 					 u32 *dump_buf,
4783 					 bool dump, u32 *num_dumped_dwords)
4784 {
4785 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4786 	bool fifo_has_data;
4787 
4788 	*num_dumped_dwords = 0;
4789 
4790 	/* Dump global params */
4791 	offset += qed_dump_common_global_params(p_hwfn,
4792 						p_ptt,
4793 						dump_buf + offset, dump, 1);
4794 	offset += qed_dump_str_param(dump_buf + offset,
4795 				     dump, "dump-type", "reg-fifo");
4796 
4797 	/* Dump fifo data section header and param. The size param is 0 for
4798 	 * now, and is overwritten after reading the FIFO.
4799 	 */
4800 	offset += qed_dump_section_hdr(dump_buf + offset,
4801 				       dump, "reg_fifo_data", 1);
4802 	size_param_offset = offset;
4803 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4804 
4805 	if (!dump) {
4806 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4807 		 * test how much data is available, except for reading it.
4808 		 */
4809 		offset += REG_FIFO_DEPTH_DWORDS;
4810 		goto out;
4811 	}
4812 
4813 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4814 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4815 
4816 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4817 	 * and must be accessed atomically. Test for dwords_read not passing
4818 	 * buffer size since more entries could be added to the buffer as we are
4819 	 * emptying it.
4820 	 */
4821 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4822 	len = REG_FIFO_ELEMENT_DWORDS;
4823 	for (dwords_read = 0;
4824 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4825 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4826 		offset += qed_grc_dump_addr_range(p_hwfn,
4827 						  p_ptt,
4828 						  dump_buf + offset,
4829 						  true,
4830 						  addr,
4831 						  len,
4832 						  true);
4833 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4834 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4835 	}
4836 
4837 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4838 			   dwords_read);
4839 out:
4840 	/* Dump last section */
4841 	offset += qed_dump_last_section(dump_buf, offset, dump);
4842 
4843 	*num_dumped_dwords = offset;
4844 
4845 	return DBG_STATUS_OK;
4846 }
4847 
4848 /* Dump IGU FIFO */
4849 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4850 					 struct qed_ptt *p_ptt,
4851 					 u32 *dump_buf,
4852 					 bool dump, u32 *num_dumped_dwords)
4853 {
4854 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4855 	bool fifo_has_data;
4856 
4857 	*num_dumped_dwords = 0;
4858 
4859 	/* Dump global params */
4860 	offset += qed_dump_common_global_params(p_hwfn,
4861 						p_ptt,
4862 						dump_buf + offset, dump, 1);
4863 	offset += qed_dump_str_param(dump_buf + offset,
4864 				     dump, "dump-type", "igu-fifo");
4865 
4866 	/* Dump fifo data section header and param. The size param is 0 for
4867 	 * now, and is overwritten after reading the FIFO.
4868 	 */
4869 	offset += qed_dump_section_hdr(dump_buf + offset,
4870 				       dump, "igu_fifo_data", 1);
4871 	size_param_offset = offset;
4872 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4873 
4874 	if (!dump) {
4875 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4876 		 * test how much data is available, except for reading it.
4877 		 */
4878 		offset += IGU_FIFO_DEPTH_DWORDS;
4879 		goto out;
4880 	}
4881 
4882 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4883 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4884 
4885 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4886 	 * and must be accessed atomically. Test for dwords_read not passing
4887 	 * buffer size since more entries could be added to the buffer as we are
4888 	 * emptying it.
4889 	 */
4890 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4891 	len = IGU_FIFO_ELEMENT_DWORDS;
4892 	for (dwords_read = 0;
4893 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4894 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4895 		offset += qed_grc_dump_addr_range(p_hwfn,
4896 						  p_ptt,
4897 						  dump_buf + offset,
4898 						  true,
4899 						  addr,
4900 						  len,
4901 						  true);
4902 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4903 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4904 	}
4905 
4906 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4907 			   dwords_read);
4908 out:
4909 	/* Dump last section */
4910 	offset += qed_dump_last_section(dump_buf, offset, dump);
4911 
4912 	*num_dumped_dwords = offset;
4913 
4914 	return DBG_STATUS_OK;
4915 }
4916 
4917 /* Protection Override dump */
4918 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4919 						    struct qed_ptt *p_ptt,
4920 						    u32 *dump_buf,
4921 						    bool dump,
4922 						    u32 *num_dumped_dwords)
4923 {
4924 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4925 
4926 	*num_dumped_dwords = 0;
4927 
4928 	/* Dump global params */
4929 	offset += qed_dump_common_global_params(p_hwfn,
4930 						p_ptt,
4931 						dump_buf + offset, dump, 1);
4932 	offset += qed_dump_str_param(dump_buf + offset,
4933 				     dump, "dump-type", "protection-override");
4934 
4935 	/* Dump data section header and param. The size param is 0 for now,
4936 	 * and is overwritten after reading the data.
4937 	 */
4938 	offset += qed_dump_section_hdr(dump_buf + offset,
4939 				       dump, "protection_override_data", 1);
4940 	size_param_offset = offset;
4941 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4942 
4943 	if (!dump) {
4944 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4945 		goto out;
4946 	}
4947 
4948 	/* Add override window info to buffer */
4949 	override_window_dwords =
4950 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4951 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4952 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4953 	offset += qed_grc_dump_addr_range(p_hwfn,
4954 					  p_ptt,
4955 					  dump_buf + offset,
4956 					  true,
4957 					  addr,
4958 					  override_window_dwords,
4959 					  true);
4960 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4961 			   override_window_dwords);
4962 out:
4963 	/* Dump last section */
4964 	offset += qed_dump_last_section(dump_buf, offset, dump);
4965 
4966 	*num_dumped_dwords = offset;
4967 
4968 	return DBG_STATUS_OK;
4969 }
4970 
4971 /* Performs FW Asserts Dump to the specified buffer.
4972  * Returns the dumped size in dwords.
4973  */
4974 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4975 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4976 {
4977 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4978 	struct fw_asserts_ram_section *asserts;
4979 	char storm_letter_str[2] = "?";
4980 	struct fw_info fw_info;
4981 	u32 offset = 0;
4982 	u8 storm_id;
4983 
4984 	/* Dump global params */
4985 	offset += qed_dump_common_global_params(p_hwfn,
4986 						p_ptt,
4987 						dump_buf + offset, dump, 1);
4988 	offset += qed_dump_str_param(dump_buf + offset,
4989 				     dump, "dump-type", "fw-asserts");
4990 
4991 	/* Find Storm dump size */
4992 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4993 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4994 		struct storm_defs *storm = &s_storm_defs[storm_id];
4995 		u32 last_list_idx, addr;
4996 
4997 		if (dev_data->block_in_reset[storm->block_id])
4998 			continue;
4999 
5000 		/* Read FW info for the current Storm */
5001 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5002 
5003 		asserts = &fw_info.fw_asserts_section;
5004 
5005 		/* Dump FW Asserts section header and params */
5006 		storm_letter_str[0] = storm->letter;
5007 		offset += qed_dump_section_hdr(dump_buf + offset,
5008 					       dump, "fw_asserts", 2);
5009 		offset += qed_dump_str_param(dump_buf + offset,
5010 					     dump, "storm", storm_letter_str);
5011 		offset += qed_dump_num_param(dump_buf + offset,
5012 					     dump,
5013 					     "size",
5014 					     asserts->list_element_dword_size);
5015 
5016 		/* Read and dump FW Asserts data */
5017 		if (!dump) {
5018 			offset += asserts->list_element_dword_size;
5019 			continue;
5020 		}
5021 
5022 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5023 			SEM_FAST_REG_INT_RAM +
5024 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5025 		next_list_idx_addr = fw_asserts_section_addr +
5026 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5027 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
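		/* Dump the entry before next_list_idx, wrapping to the last
		 * list element when next_list_idx is 0.
		 */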
5028 		last_list_idx = (next_list_idx > 0 ?
5029 				 next_list_idx :
5030 				 asserts->list_num_elements) - 1;
5031 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5032 		       asserts->list_dword_offset +
5033 		       last_list_idx * asserts->list_element_dword_size;
5034 		offset +=
5035 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5036 					    dump_buf + offset,
5037 					    dump, addr,
5038 					    asserts->list_element_dword_size,
5039 					    false);
5040 	}
5041 
5042 	/* Dump last section */
5043 	offset += qed_dump_last_section(dump_buf, offset, dump);
5044 
5045 	return offset;
5046 }
5047 
5048 /***************************** Public Functions *******************************/
5049 
5050 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5051 {
5052 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5053 	u8 buf_id;
5054 
	/* Convert binary data to debug arrays */
5056 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5057 		s_dbg_arrays[buf_id].ptr =
5058 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5059 		s_dbg_arrays[buf_id].size_in_dwords =
5060 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5061 	}
5062 
5063 	return DBG_STATUS_OK;
5064 }
5065 
5066 /* Assign default GRC param values */
5067 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5068 {
5069 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5070 	u32 i;
5071 
5072 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5073 		if (!s_grc_param_defs[i].is_persistent)
5074 			dev_data->grc.param_val[i] =
5075 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5076 }
5077 
5078 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5079 					      struct qed_ptt *p_ptt,
5080 					      u32 *buf_size)
5081 {
5082 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5083 
5084 	*buf_size = 0;
5085 
5086 	if (status != DBG_STATUS_OK)
5087 		return status;
5088 
5089 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5090 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5091 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5092 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5093 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5094 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5095 
5096 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5097 }
5098 
5099 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5100 				 struct qed_ptt *p_ptt,
5101 				 u32 *dump_buf,
5102 				 u32 buf_size_in_dwords,
5103 				 u32 *num_dumped_dwords)
5104 {
5105 	u32 needed_buf_size_in_dwords;
5106 	enum dbg_status status;
5107 
5108 	*num_dumped_dwords = 0;
5109 
5110 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5111 					       p_ptt,
5112 					       &needed_buf_size_in_dwords);
5113 	if (status != DBG_STATUS_OK)
5114 		return status;
5115 
5116 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5117 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5118 
5119 	/* GRC Dump */
5120 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5121 
5122 	/* Revert GRC params to their default */
5123 	qed_dbg_grc_set_params_default(p_hwfn);
5124 
5125 	return status;
5126 }
5127 
5128 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5129 						   struct qed_ptt *p_ptt,
5130 						   u32 *buf_size)
5131 {
5132 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5133 	struct idle_chk_data *idle_chk;
5134 	enum dbg_status status;
5135 
5136 	idle_chk = &dev_data->idle_chk;
5137 	*buf_size = 0;
5138 
5139 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5140 	if (status != DBG_STATUS_OK)
5141 		return status;
5142 
5143 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5144 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5145 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5146 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5147 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5148 
5149 	if (!idle_chk->buf_size_set) {
5150 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5151 						       p_ptt, NULL, false);
5152 		idle_chk->buf_size_set = true;
5153 	}
5154 
5155 	*buf_size = idle_chk->buf_size;
5156 
5157 	return DBG_STATUS_OK;
5158 }
5159 
5160 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5161 				      struct qed_ptt *p_ptt,
5162 				      u32 *dump_buf,
5163 				      u32 buf_size_in_dwords,
5164 				      u32 *num_dumped_dwords)
5165 {
5166 	u32 needed_buf_size_in_dwords;
5167 	enum dbg_status status;
5168 
5169 	*num_dumped_dwords = 0;
5170 
5171 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5172 						    p_ptt,
5173 						    &needed_buf_size_in_dwords);
5174 	if (status != DBG_STATUS_OK)
5175 		return status;
5176 
5177 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5178 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5179 
5180 	/* Update reset state */
5181 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5182 
5183 	/* Idle Check Dump */
5184 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5185 
5186 	/* Revert GRC params to their default */
5187 	qed_dbg_grc_set_params_default(p_hwfn);
5188 
5189 	return DBG_STATUS_OK;
5190 }
5191 
5192 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5193 						    struct qed_ptt *p_ptt,
5194 						    u32 *buf_size)
5195 {
5196 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5197 
5198 	*buf_size = 0;
5199 
5200 	if (status != DBG_STATUS_OK)
5201 		return status;
5202 
5203 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5204 }
5205 
5206 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5207 				       struct qed_ptt *p_ptt,
5208 				       u32 *dump_buf,
5209 				       u32 buf_size_in_dwords,
5210 				       u32 *num_dumped_dwords)
5211 {
5212 	u32 needed_buf_size_in_dwords;
5213 	enum dbg_status status;
5214 
5215 	status =
5216 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5217 						    p_ptt,
5218 						    &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK &&
	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5221 		return status;
5222 
5223 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5224 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5225 
5226 	/* Update reset state */
5227 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5228 
5229 	/* Perform dump */
5230 	status = qed_mcp_trace_dump(p_hwfn,
5231 				    p_ptt, dump_buf, true, num_dumped_dwords);
5232 
5233 	/* Revert GRC params to their default */
5234 	qed_dbg_grc_set_params_default(p_hwfn);
5235 
5236 	return status;
5237 }
5238 
5239 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5240 						   struct qed_ptt *p_ptt,
5241 						   u32 *buf_size)
5242 {
5243 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5244 
5245 	*buf_size = 0;
5246 
5247 	if (status != DBG_STATUS_OK)
5248 		return status;
5249 
5250 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5251 }
5252 
5253 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5254 				      struct qed_ptt *p_ptt,
5255 				      u32 *dump_buf,
5256 				      u32 buf_size_in_dwords,
5257 				      u32 *num_dumped_dwords)
5258 {
5259 	u32 needed_buf_size_in_dwords;
5260 	enum dbg_status status;
5261 
5262 	*num_dumped_dwords = 0;
5263 
5264 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5265 						    p_ptt,
5266 						    &needed_buf_size_in_dwords);
5267 	if (status != DBG_STATUS_OK)
5268 		return status;
5269 
5270 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5271 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5272 
5273 	/* Update reset state */
5274 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5275 
5276 	status = qed_reg_fifo_dump(p_hwfn,
5277 				   p_ptt, dump_buf, true, num_dumped_dwords);
5278 
5279 	/* Revert GRC params to their default */
5280 	qed_dbg_grc_set_params_default(p_hwfn);
5281 
5282 	return status;
5283 }
5284 
5285 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5286 						   struct qed_ptt *p_ptt,
5287 						   u32 *buf_size)
5288 {
5289 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5290 
5291 	*buf_size = 0;
5292 
5293 	if (status != DBG_STATUS_OK)
5294 		return status;
5295 
5296 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5297 }
5298 
5299 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5300 				      struct qed_ptt *p_ptt,
5301 				      u32 *dump_buf,
5302 				      u32 buf_size_in_dwords,
5303 				      u32 *num_dumped_dwords)
5304 {
5305 	u32 needed_buf_size_in_dwords;
5306 	enum dbg_status status;
5307 
5308 	*num_dumped_dwords = 0;
5309 
5310 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5311 						    p_ptt,
5312 						    &needed_buf_size_in_dwords);
5313 	if (status != DBG_STATUS_OK)
5314 		return status;
5315 
5316 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5317 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5318 
5319 	/* Update reset state */
5320 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5321 
5322 	status = qed_igu_fifo_dump(p_hwfn,
5323 				   p_ptt, dump_buf, true, num_dumped_dwords);
5324 	/* Revert GRC params to their default */
5325 	qed_dbg_grc_set_params_default(p_hwfn);
5326 
5327 	return status;
5328 }
5329 
5330 enum dbg_status
5331 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5332 					      struct qed_ptt *p_ptt,
5333 					      u32 *buf_size)
5334 {
5335 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5336 
5337 	*buf_size = 0;
5338 
5339 	if (status != DBG_STATUS_OK)
5340 		return status;
5341 
5342 	return qed_protection_override_dump(p_hwfn,
5343 					    p_ptt, NULL, false, buf_size);
5344 }
5345 
5346 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5347 						 struct qed_ptt *p_ptt,
5348 						 u32 *dump_buf,
5349 						 u32 buf_size_in_dwords,
5350 						 u32 *num_dumped_dwords)
5351 {
5352 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5353 	enum dbg_status status;
5354 
5355 	*num_dumped_dwords = 0;
5356 
5357 	status =
5358 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5359 							      p_ptt,
5360 							      p_size);
5361 	if (status != DBG_STATUS_OK)
5362 		return status;
5363 
5364 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5365 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5366 
5367 	/* Update reset state */
5368 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5369 
5370 	status = qed_protection_override_dump(p_hwfn,
5371 					      p_ptt,
5372 					      dump_buf,
5373 					      true, num_dumped_dwords);
5374 
5375 	/* Revert GRC params to their default */
5376 	qed_dbg_grc_set_params_default(p_hwfn);
5377 
5378 	return status;
5379 }
5380 
5381 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5382 						     struct qed_ptt *p_ptt,
5383 						     u32 *buf_size)
5384 {
5385 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5386 
5387 	*buf_size = 0;
5388 
5389 	if (status != DBG_STATUS_OK)
5390 		return status;
5391 
5392 	/* Update reset state */
5393 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5394 
5395 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5396 
5397 	return DBG_STATUS_OK;
5398 }
5399 
5400 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5401 					struct qed_ptt *p_ptt,
5402 					u32 *dump_buf,
5403 					u32 buf_size_in_dwords,
5404 					u32 *num_dumped_dwords)
5405 {
5406 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5407 	enum dbg_status status;
5408 
5409 	*num_dumped_dwords = 0;
5410 
5411 	status =
5412 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5413 						     p_ptt,
5414 						     p_size);
5415 	if (status != DBG_STATUS_OK)
5416 		return status;
5417 
5418 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5419 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5420 
5421 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5422 
5423 	/* Revert GRC params to their default */
5424 	qed_dbg_grc_set_params_default(p_hwfn);
5425 
5426 	return DBG_STATUS_OK;
5427 }
5428 
5429 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5430 				  struct qed_ptt *p_ptt,
5431 				  enum block_id block_id,
5432 				  enum dbg_attn_type attn_type,
5433 				  bool clear_status,
5434 				  struct dbg_attn_block_result *results)
5435 {
5436 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5437 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5438 	const struct dbg_attn_reg *attn_reg_arr;
5439 
5440 	if (status != DBG_STATUS_OK)
5441 		return status;
5442 
5443 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5444 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5445 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5446 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5447 
5448 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5449 					       attn_type, &num_attn_regs);
5450 
5451 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5452 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5453 		struct dbg_attn_reg_result *reg_result;
5454 		u32 sts_addr, sts_val;
5455 		u16 modes_buf_offset;
5456 		bool eval_mode;
5457 
5458 		/* Check mode */
5459 		eval_mode = GET_FIELD(reg_data->mode.data,
5460 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5461 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5462 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5463 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5464 			continue;
5465 
5466 		/* Mode match - read attention status register */
5467 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5468 					   reg_data->sts_clr_address :
5469 					   GET_FIELD(reg_data->data,
5470 						     DBG_ATTN_REG_STS_ADDRESS));
5471 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5472 		if (!sts_val)
5473 			continue;
5474 
5475 		/* Non-zero attention status - add to results */
5476 		reg_result = &results->reg_results[num_result_regs];
5477 		SET_FIELD(reg_result->data,
5478 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5479 		SET_FIELD(reg_result->data,
5480 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5481 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5482 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5483 		reg_result->sts_val = sts_val;
5484 		reg_result->mask_val = qed_rd(p_hwfn,
5485 					      p_ptt,
5486 					      DWORDS_TO_BYTES
5487 					      (reg_data->mask_address));
5488 		num_result_regs++;
5489 	}
5490 
5491 	results->block_id = (u8)block_id;
5492 	results->names_offset =
5493 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5494 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5495 	SET_FIELD(results->data,
5496 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5497 
5498 	return DBG_STATUS_OK;
5499 }
5500 
5501 /******************************* Data Types **********************************/
5502 
5503 struct block_info {
5504 	const char *name;
5505 	enum block_id id;
5506 };
5507 
5508 struct mcp_trace_format {
5509 	u32 data;
5510 #define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
5511 #define MCP_TRACE_FORMAT_MODULE_SHIFT	0
5512 #define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
5513 #define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
5514 #define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
5515 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
5516 #define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
5517 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
5518 #define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
5519 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
5520 #define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
5521 #define MCP_TRACE_FORMAT_LEN_SHIFT	24
5522 
5523 	char *format_str;
5524 };
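
/* Illustrative sketch (not part of the driver): the hypothetical helper below
 * only demonstrates how the packed format dword is decoded with the masks and
 * shifts defined above; the actual decoding is done inline in
 * qed_parse_mcp_trace_buf().
 */
static inline u8 qed_example_mcp_trace_format_level(u32 format_data)
{
	return (u8)((format_data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
		    MCP_TRACE_FORMAT_LEVEL_SHIFT);
}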
5525 
/* Meta data structure, generated by a Perl script during the MFW build.
 * Therefore, the structs mcp_trace_meta and mcp_trace_format are duplicated
 * in the Perl script.
 */
5530 struct mcp_trace_meta {
5531 	u32 modules_num;
5532 	char **modules;
5533 	u32 formats_num;
5534 	struct mcp_trace_format *formats;
5535 };
5536 
5537 /* REG fifo element */
5538 struct reg_fifo_element {
5539 	u64 data;
5540 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5541 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5542 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5543 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5544 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5545 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5546 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5547 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5548 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5549 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5550 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5551 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5552 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5553 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5554 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5555 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5556 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5557 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5558 };
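
/* Illustrative sketch (not part of the driver): reg_fifo_element fields are
 * extracted from the 64-bit data word with GET_FIELD(), as the hypothetical
 * helper below shows for the VF index. The actual parsing is done in
 * qed_parse_reg_fifo_dump().
 */
static inline u8 qed_example_reg_fifo_vf(u64 fifo_data)
{
	return (u8)GET_FIELD(fifo_data, REG_FIFO_ELEMENT_VF);
}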
5559 
5560 /* IGU fifo element */
5561 struct igu_fifo_element {
5562 	u32 dword0;
5563 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5564 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5565 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5566 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5567 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5568 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5569 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5570 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5571 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5572 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5573 	u32 dword1;
5574 	u32 dword2;
5575 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5576 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5577 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5578 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5579 	u32 reserved;
5580 };
5581 
5582 struct igu_fifo_wr_data {
5583 	u32 data;
5584 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5585 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5586 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5587 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5588 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5589 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5590 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5591 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5592 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5593 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5594 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5595 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5596 };
5597 
5598 struct igu_fifo_cleanup_wr_data {
5599 	u32 data;
5600 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5601 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5602 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5603 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5604 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5605 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5606 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5607 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5608 };
5609 
5610 /* Protection override element */
5611 struct protection_override_element {
5612 	u64 data;
5613 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5614 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5615 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5616 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5617 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5618 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5619 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5620 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5621 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5622 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5623 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5624 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5625 };
5626 
5627 enum igu_fifo_sources {
5628 	IGU_SRC_PXP0,
5629 	IGU_SRC_PXP1,
5630 	IGU_SRC_PXP2,
5631 	IGU_SRC_PXP3,
5632 	IGU_SRC_PXP4,
5633 	IGU_SRC_PXP5,
5634 	IGU_SRC_PXP6,
5635 	IGU_SRC_PXP7,
5636 	IGU_SRC_CAU,
5637 	IGU_SRC_ATTN,
5638 	IGU_SRC_GRC
5639 };
5640 
5641 enum igu_fifo_addr_types {
5642 	IGU_ADDR_TYPE_MSIX_MEM,
5643 	IGU_ADDR_TYPE_WRITE_PBA,
5644 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5645 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5646 	IGU_ADDR_TYPE_READ_INT,
5647 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5648 	IGU_ADDR_TYPE_RESERVED
5649 };
5650 
5651 struct igu_fifo_addr_data {
5652 	u16 start_addr;
5653 	u16 end_addr;
5654 	char *desc;
5655 	char *vf_desc;
5656 	enum igu_fifo_addr_types type;
5657 };
5658 
5659 /******************************** Constants **********************************/
5660 
5661 #define MAX_MSG_LEN				1024
5662 
5663 #define MCP_TRACE_MAX_MODULE_LEN		8
5664 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5665 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5666 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5667 
5668 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5669 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5670 
5671 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5672 
5673 /***************************** Constant Arrays *******************************/
5674 
5675 struct user_dbg_array {
5676 	const u32 *ptr;
5677 	u32 size_in_dwords;
5678 };
5679 
5680 /* Debug arrays */
5681 static struct user_dbg_array
5682 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5683 
5684 /* Block names array */
5685 static struct block_info s_block_info_arr[] = {
5686 	{"grc", BLOCK_GRC},
5687 	{"miscs", BLOCK_MISCS},
5688 	{"misc", BLOCK_MISC},
5689 	{"dbu", BLOCK_DBU},
5690 	{"pglue_b", BLOCK_PGLUE_B},
5691 	{"cnig", BLOCK_CNIG},
5692 	{"cpmu", BLOCK_CPMU},
5693 	{"ncsi", BLOCK_NCSI},
5694 	{"opte", BLOCK_OPTE},
5695 	{"bmb", BLOCK_BMB},
5696 	{"pcie", BLOCK_PCIE},
5697 	{"mcp", BLOCK_MCP},
5698 	{"mcp2", BLOCK_MCP2},
5699 	{"pswhst", BLOCK_PSWHST},
5700 	{"pswhst2", BLOCK_PSWHST2},
5701 	{"pswrd", BLOCK_PSWRD},
5702 	{"pswrd2", BLOCK_PSWRD2},
5703 	{"pswwr", BLOCK_PSWWR},
5704 	{"pswwr2", BLOCK_PSWWR2},
5705 	{"pswrq", BLOCK_PSWRQ},
5706 	{"pswrq2", BLOCK_PSWRQ2},
5707 	{"pglcs", BLOCK_PGLCS},
5708 	{"ptu", BLOCK_PTU},
5709 	{"dmae", BLOCK_DMAE},
5710 	{"tcm", BLOCK_TCM},
5711 	{"mcm", BLOCK_MCM},
5712 	{"ucm", BLOCK_UCM},
5713 	{"xcm", BLOCK_XCM},
5714 	{"ycm", BLOCK_YCM},
5715 	{"pcm", BLOCK_PCM},
5716 	{"qm", BLOCK_QM},
5717 	{"tm", BLOCK_TM},
5718 	{"dorq", BLOCK_DORQ},
5719 	{"brb", BLOCK_BRB},
5720 	{"src", BLOCK_SRC},
5721 	{"prs", BLOCK_PRS},
5722 	{"tsdm", BLOCK_TSDM},
5723 	{"msdm", BLOCK_MSDM},
5724 	{"usdm", BLOCK_USDM},
5725 	{"xsdm", BLOCK_XSDM},
5726 	{"ysdm", BLOCK_YSDM},
5727 	{"psdm", BLOCK_PSDM},
5728 	{"tsem", BLOCK_TSEM},
5729 	{"msem", BLOCK_MSEM},
5730 	{"usem", BLOCK_USEM},
5731 	{"xsem", BLOCK_XSEM},
5732 	{"ysem", BLOCK_YSEM},
5733 	{"psem", BLOCK_PSEM},
5734 	{"rss", BLOCK_RSS},
5735 	{"tmld", BLOCK_TMLD},
5736 	{"muld", BLOCK_MULD},
5737 	{"yuld", BLOCK_YULD},
5738 	{"xyld", BLOCK_XYLD},
5739 	{"ptld", BLOCK_PTLD},
5740 	{"ypld", BLOCK_YPLD},
5741 	{"prm", BLOCK_PRM},
5742 	{"pbf_pb1", BLOCK_PBF_PB1},
5743 	{"pbf_pb2", BLOCK_PBF_PB2},
5744 	{"rpb", BLOCK_RPB},
5745 	{"btb", BLOCK_BTB},
5746 	{"pbf", BLOCK_PBF},
5747 	{"rdif", BLOCK_RDIF},
5748 	{"tdif", BLOCK_TDIF},
5749 	{"cdu", BLOCK_CDU},
5750 	{"ccfc", BLOCK_CCFC},
5751 	{"tcfc", BLOCK_TCFC},
5752 	{"igu", BLOCK_IGU},
5753 	{"cau", BLOCK_CAU},
5754 	{"rgfs", BLOCK_RGFS},
5755 	{"rgsrc", BLOCK_RGSRC},
5756 	{"tgfs", BLOCK_TGFS},
5757 	{"tgsrc", BLOCK_TGSRC},
5758 	{"umac", BLOCK_UMAC},
5759 	{"xmac", BLOCK_XMAC},
5760 	{"dbg", BLOCK_DBG},
5761 	{"nig", BLOCK_NIG},
5762 	{"wol", BLOCK_WOL},
5763 	{"bmbn", BLOCK_BMBN},
5764 	{"ipc", BLOCK_IPC},
5765 	{"nwm", BLOCK_NWM},
5766 	{"nws", BLOCK_NWS},
5767 	{"ms", BLOCK_MS},
5768 	{"phy_pcie", BLOCK_PHY_PCIE},
5769 	{"led", BLOCK_LED},
5770 	{"avs_wrap", BLOCK_AVS_WRAP},
5771 	{"pxpreqbus", BLOCK_PXPREQBUS},
5772 	{"misc_aeu", BLOCK_MISC_AEU},
5773 	{"bar0_map", BLOCK_BAR0_MAP}
5774 };
5775 
5776 /* Status string array */
5777 static const char * const s_status_str[] = {
5778 	/* DBG_STATUS_OK */
5779 	"Operation completed successfully",
5780 
5781 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5782 	"Debug application version wasn't set",
5783 
5784 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5785 	"Unsupported debug application version",
5786 
5787 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5788 	"The debug block wasn't reset since the last recording",
5789 
5790 	/* DBG_STATUS_INVALID_ARGS */
5791 	"Invalid arguments",
5792 
5793 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5794 	"The debug output was already set",
5795 
5796 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5797 	"Invalid PCI buffer size",
5798 
5799 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5800 	"PCI buffer allocation failed",
5801 
5802 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5803 	"A PCI buffer wasn't allocated",
5804 
5805 	/* DBG_STATUS_TOO_MANY_INPUTS */
	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5807 
5808 	/* DBG_STATUS_INPUT_OVERLAP */
5809 	"Overlapping debug bus inputs",
5810 
5811 	/* DBG_STATUS_HW_ONLY_RECORDING */
5812 	"Cannot record Storm data since the entire recording cycle is used by HW",
5813 
5814 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5815 	"The Storm was already enabled",
5816 
5817 	/* DBG_STATUS_STORM_NOT_ENABLED */
5818 	"The specified Storm wasn't enabled",
5819 
5820 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5821 	"The block was already enabled",
5822 
5823 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5824 	"The specified block wasn't enabled",
5825 
5826 	/* DBG_STATUS_NO_INPUT_ENABLED */
5827 	"No input was enabled for recording",
5828 
5829 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5830 	"Filters and triggers are not allowed when recording in 64b units",
5831 
5832 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5833 	"The filter was already enabled",
5834 
5835 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5836 	"The trigger was already enabled",
5837 
5838 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5839 	"The trigger wasn't enabled",
5840 
5841 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5842 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5843 
5844 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5845 	"Cannot add more than 3 trigger states",
5846 
5847 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5848 	"Cannot add more than 4 constraints per filter or trigger state",
5849 
5850 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5851 	"The recording wasn't started",
5852 
5853 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5854 	"A trigger was configured, but it didn't trigger",
5855 
5856 	/* DBG_STATUS_NO_DATA_RECORDED */
5857 	"No data was recorded",
5858 
5859 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5860 	"Dump buffer is too small",
5861 
5862 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5863 	"Dumped data is not aligned to chunks",
5864 
5865 	/* DBG_STATUS_UNKNOWN_CHIP */
5866 	"Unknown chip",
5867 
5868 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5869 	"Failed allocating virtual memory",
5870 
5871 	/* DBG_STATUS_BLOCK_IN_RESET */
5872 	"The input block is in reset",
5873 
5874 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5875 	"Invalid MCP trace signature found in NVRAM",
5876 
5877 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5878 	"Invalid bundle ID found in NVRAM",
5879 
5880 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5881 	"Failed getting NVRAM image",
5882 
5883 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5884 	"NVRAM image is not dword-aligned",
5885 
5886 	/* DBG_STATUS_NVRAM_READ_FAILED */
5887 	"Failed reading from NVRAM",
5888 
5889 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5890 	"Idle check parsing failed",
5891 
5892 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5893 	"MCP Trace data is corrupt",
5894 
5895 	/* DBG_STATUS_MCP_TRACE_NO_META */
5896 	"Dump doesn't contain meta data - it must be provided in image file",
5897 
5898 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5899 	"Failed to halt MCP",
5900 
5901 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5902 	"Failed to resume MCP after halt",
5903 
5904 	/* DBG_STATUS_RESERVED2 */
5905 	"Reserved debug status - shouldn't be returned",
5906 
5907 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5908 	"Failed to empty SEMI sync FIFO",
5909 
5910 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5911 	"IGU FIFO data is corrupt",
5912 
5913 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5914 	"MCP failed to mask parities",
5915 
5916 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5917 	"FW Asserts parsing failed",
5918 
5919 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5920 	"GRC FIFO data is corrupt",
5921 
5922 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5923 	"Protection Override data is corrupt",
5924 
5925 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5926 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5927 
5928 	/* DBG_STATUS_FILTER_BUG */
5929 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5930 
5931 	/* DBG_STATUS_NON_MATCHING_LINES */
5932 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5933 
5934 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5935 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5936 
5937 	/* DBG_STATUS_DBG_BUS_IN_USE */
5938 	"The debug bus is in use"
5939 };
5940 
5941 /* Idle check severity names array */
5942 static const char * const s_idle_chk_severity_str[] = {
5943 	"Error",
5944 	"Error if no traffic",
5945 	"Warning"
5946 };
5947 
5948 /* MCP Trace level names array */
5949 static const char * const s_mcp_trace_level_str[] = {
5950 	"ERROR",
5951 	"TRACE",
5952 	"DEBUG"
5953 };
5954 
5955 /* Access type names array */
5956 static const char * const s_access_strs[] = {
5957 	"read",
5958 	"write"
5959 };
5960 
5961 /* Privilege type names array */
5962 static const char * const s_privilege_strs[] = {
5963 	"VF",
5964 	"PDA",
5965 	"HV",
5966 	"UA"
5967 };
5968 
5969 /* Protection type names array */
5970 static const char * const s_protection_strs[] = {
5971 	"(default)",
5972 	"(default)",
5973 	"(default)",
5974 	"(default)",
5975 	"override VF",
5976 	"override PDA",
5977 	"override HV",
5978 	"override UA"
5979 };
5980 
5981 /* Master type names array */
5982 static const char * const s_master_strs[] = {
5983 	"???",
5984 	"pxp",
5985 	"mcp",
5986 	"msdm",
5987 	"psdm",
5988 	"ysdm",
5989 	"usdm",
5990 	"tsdm",
5991 	"xsdm",
5992 	"dbu",
5993 	"dmae",
5994 	"???",
5995 	"???",
5996 	"???",
5997 	"???",
5998 	"???"
5999 };
6000 
6001 /* REG FIFO error messages array */
6002 static const char * const s_reg_fifo_error_strs[] = {
6003 	"grc timeout",
6004 	"address doesn't belong to any block",
6005 	"reserved address in block or write to read-only address",
6006 	"privilege/protection mismatch",
6007 	"path isolation error"
6008 };
6009 
6010 /* IGU FIFO sources array */
6011 static const char * const s_igu_fifo_source_strs[] = {
6012 	"TSTORM",
6013 	"MSTORM",
6014 	"USTORM",
6015 	"XSTORM",
6016 	"YSTORM",
6017 	"PSTORM",
6018 	"PCIE",
6019 	"NIG_QM_PBF",
6020 	"CAU",
6021 	"ATTN",
6022 	"GRC",
6023 };
6024 
6025 /* IGU FIFO error messages */
6026 static const char * const s_igu_fifo_error_strs[] = {
6027 	"no error",
6028 	"length error",
6029 	"function disabled",
	"VF sent command to attention address",
6031 	"host sent prod update command",
6032 	"read of during interrupt register while in MIMD mode",
6033 	"access to PXP BAR reserved address",
6034 	"producer update command to attention index",
6035 	"unknown error",
6036 	"SB index not valid",
6037 	"SB relative index and FID not found",
	"FID mismatch",
6039 	"command with error flag asserted (PCI error or CAU discard)",
6040 	"VF sent cleanup and RF cleanup is disabled",
6041 	"cleanup command on type bigger than 4"
6042 };
6043 
6044 /* IGU FIFO address data */
6045 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6046 	{0x0, 0x101, "MSI-X Memory", NULL,
6047 	 IGU_ADDR_TYPE_MSIX_MEM},
6048 	{0x102, 0x1ff, "reserved", NULL,
6049 	 IGU_ADDR_TYPE_RESERVED},
6050 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6051 	 IGU_ADDR_TYPE_WRITE_PBA},
6052 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6053 	 IGU_ADDR_TYPE_WRITE_PBA},
6054 	{0x202, 0x202, "Write PBA[128]", "reserved",
6055 	 IGU_ADDR_TYPE_WRITE_PBA},
6056 	{0x203, 0x3ff, "reserved", NULL,
6057 	 IGU_ADDR_TYPE_RESERVED},
6058 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6059 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6060 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6061 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6062 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6063 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6064 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6065 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6066 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6067 	 IGU_ADDR_TYPE_READ_INT},
6068 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6069 	 IGU_ADDR_TYPE_READ_INT},
6070 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6071 	 IGU_ADDR_TYPE_READ_INT},
6072 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6073 	 IGU_ADDR_TYPE_READ_INT},
6074 	{0x5f7, 0x5ff, "reserved", NULL,
6075 	 IGU_ADDR_TYPE_RESERVED},
6076 	{0x600, 0x7ff, "Producer update", NULL,
6077 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6078 };
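
/* Worked example (for illustration): a command address of 0x402 falls in the
 * 0x400-0x5ef range above, so it is parsed as a "Write interrupt
 * acknowledgment" access, and the status block number is derived as
 * cmd_addr - start_addr = 0x2 (see qed_parse_igu_fifo_element()).
 */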
6079 
6080 /******************************** Variables **********************************/
6081 
6082 /* MCP Trace meta data array - used in case the dump doesn't contain the
6083  * meta data (e.g. due to no NVRAM access).
6084  */
6085 static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 };
6086 
6087 /* Parsed MCP Trace meta data info, based on MCP trace meta array */
6088 static struct mcp_trace_meta s_mcp_trace_meta;
6089 static bool s_mcp_trace_meta_valid;
6090 
6091 /* Temporary buffer, used for print size calculations */
6092 static char s_temp_buf[MAX_MSG_LEN];
6093 
6094 /**************************** Private Functions ******************************/
6095 
6096 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6097 {
6098 	return (a + b) % size;
6099 }
6100 
6101 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6102 {
6103 	return (size + a - b) % size;
6104 }
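
/* Worked example (for illustration): with a 16-byte cyclic buffer,
 * qed_cyclic_add(14, 4, 16) == 2 and qed_cyclic_sub(2, 14, 16) == 4, i.e. a
 * producer at offset 2 is 4 bytes ahead of a consumer at offset 14.
 */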
6105 
/* Reads the specified number of bytes (up to 4) from the specified cyclic
 * buffer and returns them as a dword value. The specified buffer offset is
 * updated.
 */
6110 static u32 qed_read_from_cyclic_buf(void *buf,
6111 				    u32 *offset,
6112 				    u32 buf_size, u8 num_bytes_to_read)
6113 {
6114 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6115 	u32 val = 0;
6116 
6117 	val_ptr = (u8 *)&val;
6118 
	/* Assume running on a LITTLE ENDIAN host while the buffer is in
	 * network order (BIG ENDIAN), so high-order bytes are placed at lower
	 * memory addresses.
	 */
6122 	for (i = 0; i < num_bytes_to_read; i++) {
6123 		val_ptr[i] = bytes_buf[*offset];
6124 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6125 	}
6126 
6127 	return val;
6128 }
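
/* Worked example (for illustration): reading 4 bytes starting at offset
 * buf_size - 2 consumes the bytes at offsets buf_size - 2, buf_size - 1, 0
 * and 1 (the read wraps around), and leaves *offset at 2.
 */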
6129 
6130 /* Reads and returns the next byte from the specified buffer.
6131  * The specified buffer offset is updated.
6132  */
6133 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6134 {
6135 	return ((u8 *)buf)[(*offset)++];
6136 }
6137 
6138 /* Reads and returns the next dword from the specified buffer.
6139  * The specified buffer offset is updated.
6140  */
6141 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6142 {
6143 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6144 
6145 	*offset += 4;
6146 
6147 	return dword_val;
6148 }
6149 
6150 /* Reads the next string from the specified buffer, and copies it to the
6151  * specified pointer. The specified buffer offset is updated.
6152  */
6153 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6154 {
6155 	const char *source_str = &((const char *)buf)[*offset];
6156 
6157 	strncpy(dest, source_str, size);
6158 	dest[size - 1] = '\0';
6159 	*offset += size;
6160 }
6161 
/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
 * If the specified buffer is NULL, a pointer to a temporary buffer is returned.
 */
6165 static char *qed_get_buf_ptr(void *buf, u32 offset)
6166 {
6167 	return buf ? (char *)buf + offset : s_temp_buf;
6168 }
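
/* Note: when a parser is called with a NULL results buffer, every sprintf()
 * call writes into s_temp_buf, so only the accumulated offset - i.e. the
 * required results buffer size - is computed without storing any output.
 */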
6169 
/* Reads a param from the specified buffer. Returns the number of dwords read.
 * If the returned param_str_val is NULL, the param is numeric and its value is
 * returned in param_num_val.
 * Otherwise, the param is a string and its pointer is returned in
 * param_str_val.
 */
6175 static u32 qed_read_param(u32 *dump_buf,
6176 			  const char **param_name,
6177 			  const char **param_str_val, u32 *param_num_val)
6178 {
6179 	char *char_buf = (char *)dump_buf;
6180 	size_t offset = 0;
6181 
6182 	/* Extract param name */
6183 	*param_name = char_buf;
6184 	offset += strlen(*param_name) + 1;
6185 
6186 	/* Check param type */
6187 	if (*(char_buf + offset++)) {
6188 		/* String param */
6189 		*param_str_val = char_buf + offset;
6190 		*param_num_val = 0;
6191 		offset += strlen(*param_str_val) + 1;
6192 		if (offset & 0x3)
6193 			offset += (4 - (offset & 0x3));
6194 	} else {
6195 		/* Numeric param */
6196 		*param_str_val = NULL;
6197 		if (offset & 0x3)
6198 			offset += (4 - (offset & 0x3));
6199 		*param_num_val = *(u32 *)(char_buf + offset);
6200 		offset += 4;
6201 	}
6202 
6203 	return (u32)offset / 4;
6204 }
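
/* Worked example (for illustration): a numeric param named "size" is encoded
 * as the NUL-terminated name (5 bytes), a zero type byte at offset 5, padding
 * up to the next dword boundary (offset 8) and the 32-bit value at offsets
 * 8-11, so qed_read_param() returns 3 dwords.
 */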
6205 
6206 /* Reads a section header from the specified buffer.
6207  * Returns the number of dwords read.
6208  */
6209 static u32 qed_read_section_hdr(u32 *dump_buf,
6210 				const char **section_name,
6211 				u32 *num_section_params)
6212 {
6213 	const char *param_str_val;
6214 
6215 	return qed_read_param(dump_buf,
6216 			      section_name, &param_str_val, num_section_params);
6217 }
6218 
6219 /* Reads section params from the specified buffer and prints them to the results
6220  * buffer. Returns the number of dwords read.
6221  */
6222 static u32 qed_print_section_params(u32 *dump_buf,
6223 				    u32 num_section_params,
6224 				    char *results_buf, u32 *num_chars_printed)
6225 {
6226 	u32 i, dump_offset = 0, results_offset = 0;
6227 
6228 	for (i = 0; i < num_section_params; i++) {
6229 		const char *param_name, *param_str_val;
6230 		u32 param_num_val = 0;
6231 
6232 		dump_offset += qed_read_param(dump_buf + dump_offset,
6233 					      &param_name,
6234 					      &param_str_val, &param_num_val);
6235 
6236 		if (param_str_val)
6237 			results_offset +=
6238 				sprintf(qed_get_buf_ptr(results_buf,
6239 							results_offset),
6240 					"%s: %s\n", param_name, param_str_val);
6241 		else if (strcmp(param_name, "fw-timestamp"))
6242 			results_offset +=
6243 				sprintf(qed_get_buf_ptr(results_buf,
6244 							results_offset),
6245 					"%s: %d\n", param_name, param_num_val);
6246 	}
6247 
6248 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6249 				  "\n");
6250 
6251 	*num_chars_printed = results_offset;
6252 
6253 	return dump_offset;
6254 }
6255 
6256 /* Parses the idle check rules and returns the number of characters printed.
6257  * In case of parsing error, returns 0.
6258  */
6259 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6260 					 u32 *dump_buf_end,
6261 					 u32 num_rules,
6262 					 bool print_fw_idle_chk,
6263 					 char *results_buf,
6264 					 u32 *num_errors, u32 *num_warnings)
6265 {
6266 	/* Offset in results_buf in bytes */
6267 	u32 results_offset = 0;
6268 
6269 	u32 rule_idx;
6270 	u16 i, j;
6271 
6272 	*num_errors = 0;
6273 	*num_warnings = 0;
6274 
6275 	/* Go over dumped results */
6276 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6277 	     rule_idx++) {
6278 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6279 		struct dbg_idle_chk_result_hdr *hdr;
6280 		const char *parsing_str, *lsi_msg;
6281 		u32 parsing_str_offset;
6282 		bool has_fw_msg;
6283 		u8 curr_reg_id;
6284 
6285 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6286 		rule_parsing_data =
6287 			(const struct dbg_idle_chk_rule_parsing_data *)
6288 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6289 			ptr[hdr->rule_id];
6290 		parsing_str_offset =
6291 			GET_FIELD(rule_parsing_data->data,
6292 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6293 		has_fw_msg =
6294 			GET_FIELD(rule_parsing_data->data,
6295 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6296 		parsing_str =
6297 			&((const char *)
6298 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6299 			[parsing_str_offset];
6300 		lsi_msg = parsing_str;
6301 		curr_reg_id = 0;
6302 
6303 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6304 			return 0;
6305 
6306 		/* Skip rule header */
6307 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6308 
6309 		/* Update errors/warnings count */
6310 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6311 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6312 			(*num_errors)++;
6313 		else
6314 			(*num_warnings)++;
6315 
6316 		/* Print rule severity */
6317 		results_offset +=
6318 		    sprintf(qed_get_buf_ptr(results_buf,
6319 					    results_offset), "%s: ",
6320 			    s_idle_chk_severity_str[hdr->severity]);
6321 
6322 		/* Print rule message */
6323 		if (has_fw_msg)
6324 			parsing_str += strlen(parsing_str) + 1;
6325 		results_offset +=
6326 		    sprintf(qed_get_buf_ptr(results_buf,
6327 					    results_offset), "%s.",
6328 			    has_fw_msg &&
6329 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6330 		parsing_str += strlen(parsing_str) + 1;
6331 
6332 		/* Print register values */
6333 		results_offset +=
6334 		    sprintf(qed_get_buf_ptr(results_buf,
6335 					    results_offset), " Registers:");
6336 		for (i = 0;
6337 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6338 		     i++) {
6339 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6340 			bool is_mem;
6341 			u8 reg_id;
6342 
6343 			reg_hdr =
6344 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6345 			is_mem = GET_FIELD(reg_hdr->data,
6346 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6347 			reg_id = GET_FIELD(reg_hdr->data,
6348 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6349 
6350 			/* Skip reg header */
6351 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6352 
6353 			/* Skip register names until the required reg_id is
6354 			 * reached.
6355 			 */
6356 			for (; reg_id > curr_reg_id;
6357 			     curr_reg_id++,
6358 			     parsing_str += strlen(parsing_str) + 1);
6359 
6360 			results_offset +=
6361 			    sprintf(qed_get_buf_ptr(results_buf,
6362 						    results_offset), " %s",
6363 				    parsing_str);
6364 			if (i < hdr->num_dumped_cond_regs && is_mem)
6365 				results_offset +=
6366 				    sprintf(qed_get_buf_ptr(results_buf,
6367 							    results_offset),
6368 					    "[%d]", hdr->mem_entry_id +
6369 					    reg_hdr->start_entry);
6370 			results_offset +=
6371 			    sprintf(qed_get_buf_ptr(results_buf,
6372 						    results_offset), "=");
6373 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6374 				results_offset +=
6375 				    sprintf(qed_get_buf_ptr(results_buf,
6376 							    results_offset),
6377 					    "0x%x", *dump_buf);
6378 				if (j < reg_hdr->size - 1)
6379 					results_offset +=
6380 					    sprintf(qed_get_buf_ptr
6381 						    (results_buf,
6382 						     results_offset), ",");
6383 			}
6384 		}
6385 
6386 		results_offset +=
6387 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6388 	}
6389 
6390 	/* Check if end of dump buffer was exceeded */
6391 	if (dump_buf > dump_buf_end)
6392 		return 0;
6393 
6394 	return results_offset;
6395 }
6396 
6397 /* Parses an idle check dump buffer.
 * If results_buf is not NULL, the idle check results are printed to it.
6399  * In any case, the required results buffer size is assigned to
6400  * parsed_results_bytes.
6401  * The parsing status is returned.
6402  */
6403 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6404 					       u32 num_dumped_dwords,
6405 					       char *results_buf,
6406 					       u32 *parsed_results_bytes,
6407 					       u32 *num_errors,
6408 					       u32 *num_warnings)
6409 {
6410 	const char *section_name, *param_name, *param_str_val;
6411 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6412 	u32 num_section_params = 0, num_rules;
6413 
6414 	/* Offset in results_buf in bytes */
6415 	u32 results_offset = 0;
6416 
6417 	*parsed_results_bytes = 0;
6418 	*num_errors = 0;
6419 	*num_warnings = 0;
6420 
6421 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6422 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6423 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6424 
6425 	/* Read global_params section */
6426 	dump_buf += qed_read_section_hdr(dump_buf,
6427 					 &section_name, &num_section_params);
6428 	if (strcmp(section_name, "global_params"))
6429 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6430 
6431 	/* Print global params */
6432 	dump_buf += qed_print_section_params(dump_buf,
6433 					     num_section_params,
6434 					     results_buf, &results_offset);
6435 
6436 	/* Read idle_chk section */
6437 	dump_buf += qed_read_section_hdr(dump_buf,
6438 					 &section_name, &num_section_params);
6439 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6440 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6441 	dump_buf += qed_read_param(dump_buf,
6442 				   &param_name, &param_str_val, &num_rules);
6443 	if (strcmp(param_name, "num_rules"))
6444 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6445 
6446 	if (num_rules) {
6447 		u32 rules_print_size;
6448 
6449 		/* Print FW output */
6450 		results_offset +=
6451 		    sprintf(qed_get_buf_ptr(results_buf,
6452 					    results_offset),
6453 			    "FW_IDLE_CHECK:\n");
6454 		rules_print_size =
6455 			qed_parse_idle_chk_dump_rules(dump_buf,
6456 						      dump_buf_end,
6457 						      num_rules,
6458 						      true,
6459 						      results_buf ?
6460 						      results_buf +
6461 						      results_offset :
6462 						      NULL,
6463 						      num_errors,
6464 						      num_warnings);
6465 		results_offset += rules_print_size;
6466 		if (!rules_print_size)
6467 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6468 
6469 		/* Print LSI output */
6470 		results_offset +=
6471 		    sprintf(qed_get_buf_ptr(results_buf,
6472 					    results_offset),
6473 			    "\nLSI_IDLE_CHECK:\n");
6474 		rules_print_size =
6475 			qed_parse_idle_chk_dump_rules(dump_buf,
6476 						      dump_buf_end,
6477 						      num_rules,
6478 						      false,
6479 						      results_buf ?
6480 						      results_buf +
6481 						      results_offset :
6482 						      NULL,
6483 						      num_errors,
6484 						      num_warnings);
6485 		results_offset += rules_print_size;
6486 		if (!rules_print_size)
6487 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6488 	}
6489 
6490 	/* Print errors/warnings count */
6491 	if (*num_errors)
6492 		results_offset +=
6493 		    sprintf(qed_get_buf_ptr(results_buf,
6494 					    results_offset),
6495 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6496 			    *num_errors, *num_warnings);
6497 	else if (*num_warnings)
6498 		results_offset +=
6499 		    sprintf(qed_get_buf_ptr(results_buf,
6500 					    results_offset),
6501 			    "\nIdle Check completed successfully (with %d warnings)\n",
6502 			    *num_warnings);
6503 	else
6504 		results_offset +=
6505 		    sprintf(qed_get_buf_ptr(results_buf,
6506 					    results_offset),
6507 			    "\nIdle Check completed successfully\n");
6508 
6509 	/* Add 1 for string NULL termination */
6510 	*parsed_results_bytes = results_offset + 1;
6511 
6512 	return DBG_STATUS_OK;
6513 }
6514 
6515 /* Frees the specified MCP Trace meta data */
6516 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6517 				    struct mcp_trace_meta *meta)
6518 {
6519 	u32 i;
6520 
6521 	s_mcp_trace_meta_valid = false;
6522 
6523 	/* Release modules */
6524 	if (meta->modules) {
6525 		for (i = 0; i < meta->modules_num; i++)
6526 			kfree(meta->modules[i]);
6527 		kfree(meta->modules);
6528 	}
6529 
6530 	/* Release formats */
6531 	if (meta->formats) {
6532 		for (i = 0; i < meta->formats_num; i++)
6533 			kfree(meta->formats[i].format_str);
6534 		kfree(meta->formats);
6535 	}
6536 }
6537 
6538 /* Allocates and fills MCP Trace meta data based on the specified meta data
6539  * dump buffer.
6540  * Returns debug status code.
6541  */
6542 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6543 						const u32 *meta_buf,
6544 						struct mcp_trace_meta *meta)
6545 {
6546 	u8 *meta_buf_bytes = (u8 *)meta_buf;
6547 	u32 offset = 0, signature, i;
6548 
6549 	/* Free the previous meta before loading a new one. */
6550 	if (s_mcp_trace_meta_valid)
6551 		qed_mcp_trace_free_meta(p_hwfn, meta);
6552 
6553 	memset(meta, 0, sizeof(*meta));
6554 
6555 	/* Read first signature */
6556 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6557 	if (signature != NVM_MAGIC_VALUE)
6558 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6559 
6560 	/* Read no. of modules and allocate memory for their pointers */
6561 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6562 	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6563 	if (!meta->modules)
6564 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6565 
6566 	/* Allocate and read all module strings */
6567 	for (i = 0; i < meta->modules_num; i++) {
6568 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6569 
6570 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6571 		if (!(*(meta->modules + i))) {
			/* Update number of modules to be released (modules
			 * 0..i-1 were successfully allocated).
			 */
			meta->modules_num = i;
6574 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6575 		}
6576 
6577 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6578 				      *(meta->modules + i));
6579 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6580 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6581 	}
6582 
6583 	/* Read second signature */
6584 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6585 	if (signature != NVM_MAGIC_VALUE)
6586 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6587 
6588 	/* Read number of formats and allocate memory for all formats */
6589 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6590 	meta->formats = kzalloc(meta->formats_num *
6591 				sizeof(struct mcp_trace_format),
6592 				GFP_KERNEL);
6593 	if (!meta->formats)
6594 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6595 
6596 	/* Allocate and read all strings */
6597 	for (i = 0; i < meta->formats_num; i++) {
6598 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6599 		u8 format_len;
6600 
6601 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6602 							   &offset);
6603 		format_len =
6604 		    (format_ptr->data &
6605 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6606 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6607 		if (!format_ptr->format_str) {
			/* Update number of formats to be released (formats
			 * 0..i-1 were successfully allocated).
			 */
			meta->formats_num = i;
6610 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6611 		}
6612 
6613 		qed_read_str_from_buf(meta_buf_bytes,
6614 				      &offset,
6615 				      format_len, format_ptr->format_str);
6616 	}
6617 
6618 	s_mcp_trace_meta_valid = true;
6619 	return DBG_STATUS_OK;
6620 }
6621 
/* Parses an MCP trace buffer. If parsed_buf is not NULL, the MCP Trace results
6623  * are printed to it. The parsing status is returned.
6624  * Arguments:
6625  * trace_buf - MCP trace cyclic buffer
6626  * trace_buf_size - MCP trace cyclic buffer size in bytes
6627  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6628  *               buffer.
6629  * data_size - size in bytes of data to parse.
6630  * parsed_buf - destination buffer for parsed data.
6631  * parsed_bytes - size of parsed data in bytes.
6632  */
6633 static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
6634 					       u32 trace_buf_size,
6635 					       u32 data_offset,
6636 					       u32 data_size,
6637 					       char *parsed_buf,
6638 					       u32 *parsed_bytes)
6639 {
6640 	u32 param_mask, param_shift;
6641 	enum dbg_status status;
6642 
6643 	*parsed_bytes = 0;
6644 
6645 	if (!s_mcp_trace_meta_valid)
6646 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6647 
6648 	status = DBG_STATUS_OK;
6649 
6650 	while (data_size) {
6651 		struct mcp_trace_format *format_ptr;
6652 		u8 format_level, format_module;
6653 		u32 params[3] = { 0, 0, 0 };
6654 		u32 header, format_idx, i;
6655 
6656 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6657 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6658 
6659 		header = qed_read_from_cyclic_buf(trace_buf,
6660 						  &data_offset,
6661 						  trace_buf_size,
6662 						  MFW_TRACE_ENTRY_SIZE);
6663 		data_size -= MFW_TRACE_ENTRY_SIZE;
6664 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6665 
6666 		/* Skip message if its index doesn't exist in the meta data */
		if (format_idx >= s_mcp_trace_meta.formats_num) {
6668 			u8 format_size =
6669 				(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6670 				     MFW_TRACE_PRM_SIZE_SHIFT);
6671 
6672 			if (data_size < format_size)
6673 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6674 
6675 			data_offset = qed_cyclic_add(data_offset,
6676 						     format_size,
6677 						     trace_buf_size);
6678 			data_size -= format_size;
6679 			continue;
6680 		}
6681 
6682 		format_ptr = &s_mcp_trace_meta.formats[format_idx];
6683 
6684 		for (i = 0,
6685 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6686 		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6687 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6688 		     i++,
6689 		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6690 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6691 			/* Extract param size (0..3) */
6692 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6693 					     param_shift);
6694 
6695 			/* If the param size is zero, there are no other
6696 			 * parameters.
6697 			 */
6698 			if (!param_size)
6699 				break;
6700 
6701 			/* Size is encoded using 2 bits, where 3 is used to
6702 			 * encode 4.
6703 			 */
6704 			if (param_size == 3)
6705 				param_size = 4;
6706 
6707 			if (data_size < param_size)
6708 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6709 
6710 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6711 							     &data_offset,
6712 							     trace_buf_size,
6713 							     param_size);
6714 			data_size -= param_size;
6715 		}
6716 
6717 		format_level = (u8)((format_ptr->data &
6718 				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
6719 				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
6720 		format_module = (u8)((format_ptr->data &
6721 				      MCP_TRACE_FORMAT_MODULE_MASK) >>
6722 				     MCP_TRACE_FORMAT_MODULE_SHIFT);
6723 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6724 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6725 
6726 		/* Print current message to results buffer */
6727 		*parsed_bytes +=
6728 			sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6729 				"%s %-8s: ",
6730 				s_mcp_trace_level_str[format_level],
6731 				s_mcp_trace_meta.modules[format_module]);
6732 		*parsed_bytes +=
6733 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes),
6734 			    format_ptr->format_str,
6735 			    params[0], params[1], params[2]);
6736 	}
6737 
	/* Account for the string NULL terminator */
6739 	(*parsed_bytes)++;
6740 
6741 	return status;
6742 }
6743 
6744 /* Parses an MCP Trace dump buffer.
 * If parsed_buf is not NULL, the MCP Trace results are printed to it.
6746  * In any case, the required results buffer size is assigned to
6747  * parsed_bytes.
6748  * The parsing status is returned.
6749  */
6750 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6751 						u32 *dump_buf,
6752 						char *parsed_buf,
6753 						u32 *parsed_bytes)
6754 {
6755 	const char *section_name, *param_name, *param_str_val;
6756 	u32 data_size, trace_data_dwords, trace_meta_dwords;
6757 	u32 offset, results_offset, parsed_buf_bytes;
6758 	u32 param_num_val, num_section_params;
6759 	struct mcp_trace *trace;
6760 	enum dbg_status status;
6761 	const u32 *meta_buf;
6762 	u8 *trace_buf;
6763 
6764 	*parsed_bytes = 0;
6765 
6766 	/* Read global_params section */
6767 	dump_buf += qed_read_section_hdr(dump_buf,
6768 					 &section_name, &num_section_params);
6769 	if (strcmp(section_name, "global_params"))
6770 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6771 
6772 	/* Print global params */
6773 	dump_buf += qed_print_section_params(dump_buf,
6774 					     num_section_params,
6775 					     parsed_buf, &results_offset);
6776 
6777 	/* Read trace_data section */
6778 	dump_buf += qed_read_section_hdr(dump_buf,
6779 					 &section_name, &num_section_params);
6780 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6781 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6782 	dump_buf += qed_read_param(dump_buf,
6783 				   &param_name, &param_str_val, &param_num_val);
6784 	if (strcmp(param_name, "size"))
6785 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6786 	trace_data_dwords = param_num_val;
6787 
6788 	/* Prepare trace info */
6789 	trace = (struct mcp_trace *)dump_buf;
6790 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6791 	offset = trace->trace_oldest;
6792 	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6793 	dump_buf += trace_data_dwords;
6794 
6795 	/* Read meta_data section */
6796 	dump_buf += qed_read_section_hdr(dump_buf,
6797 					 &section_name, &num_section_params);
6798 	if (strcmp(section_name, "mcp_trace_meta"))
6799 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6800 	dump_buf += qed_read_param(dump_buf,
6801 				   &param_name, &param_str_val, &param_num_val);
6802 	if (strcmp(param_name, "size"))
6803 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6804 	trace_meta_dwords = param_num_val;
6805 
6806 	/* Choose meta data buffer */
6807 	if (!trace_meta_dwords) {
6808 		/* Dump doesn't include meta data */
6809 		if (!s_mcp_trace_meta_arr.ptr)
6810 			return DBG_STATUS_MCP_TRACE_NO_META;
6811 		meta_buf = s_mcp_trace_meta_arr.ptr;
6812 	} else {
6813 		/* Dump includes meta data */
6814 		meta_buf = dump_buf;
6815 	}
6816 
6817 	/* Allocate meta data memory */
6818 	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta);
6819 	if (status != DBG_STATUS_OK)
6820 		return status;
6821 
6822 	status = qed_parse_mcp_trace_buf(trace_buf,
6823 					 trace->size,
6824 					 offset,
6825 					 data_size,
6826 					 parsed_buf ?
6827 					 parsed_buf + results_offset :
6828 					 NULL,
6829 					 &parsed_buf_bytes);
6830 	if (status != DBG_STATUS_OK)
6831 		return status;
6832 
6833 	*parsed_bytes = results_offset + parsed_buf_bytes;
6834 
6835 	return DBG_STATUS_OK;
6836 }
6837 
6838 /* Parses a Reg FIFO dump buffer.
 * If results_buf is not NULL, the Reg FIFO results are printed to it.
6840  * In any case, the required results buffer size is assigned to
6841  * parsed_results_bytes.
6842  * The parsing status is returned.
6843  */
6844 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6845 					       char *results_buf,
6846 					       u32 *parsed_results_bytes)
6847 {
6848 	const char *section_name, *param_name, *param_str_val;
6849 	u32 param_num_val, num_section_params, num_elements;
6850 	struct reg_fifo_element *elements;
6851 	u8 i, j, err_val, vf_val;
6852 	u32 results_offset = 0;
6853 	char vf_str[4];
6854 
6855 	/* Read global_params section */
6856 	dump_buf += qed_read_section_hdr(dump_buf,
6857 					 &section_name, &num_section_params);
6858 	if (strcmp(section_name, "global_params"))
6859 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6860 
6861 	/* Print global params */
6862 	dump_buf += qed_print_section_params(dump_buf,
6863 					     num_section_params,
6864 					     results_buf, &results_offset);
6865 
6866 	/* Read reg_fifo_data section */
6867 	dump_buf += qed_read_section_hdr(dump_buf,
6868 					 &section_name, &num_section_params);
6869 	if (strcmp(section_name, "reg_fifo_data"))
6870 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6871 	dump_buf += qed_read_param(dump_buf,
6872 				   &param_name, &param_str_val, &param_num_val);
6873 	if (strcmp(param_name, "size"))
6874 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6875 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6876 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6877 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6878 	elements = (struct reg_fifo_element *)dump_buf;
6879 
6880 	/* Decode elements */
6881 	for (i = 0; i < num_elements; i++) {
6882 		bool err_printed = false;
6883 
6884 		/* Discover if element belongs to a VF or a PF */
6885 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6886 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6887 			sprintf(vf_str, "%s", "N/A");
6888 		else
6889 			sprintf(vf_str, "%d", vf_val);
6890 
6891 		/* Add parsed element to parsed buffer */
6892 		results_offset +=
6893 		    sprintf(qed_get_buf_ptr(results_buf,
6894 					    results_offset),
6895 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6896 			    elements[i].data,
6897 			    (u32)GET_FIELD(elements[i].data,
6898 					   REG_FIFO_ELEMENT_ADDRESS) *
6899 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6900 			    s_access_strs[GET_FIELD(elements[i].data,
6901 						    REG_FIFO_ELEMENT_ACCESS)],
6902 			    (u32)GET_FIELD(elements[i].data,
6903 					   REG_FIFO_ELEMENT_PF),
6904 			    vf_str,
6905 			    (u32)GET_FIELD(elements[i].data,
6906 					   REG_FIFO_ELEMENT_PORT),
6907 			    s_privilege_strs[GET_FIELD(elements[i].data,
6908 						REG_FIFO_ELEMENT_PRIVILEGE)],
6909 			    s_protection_strs[GET_FIELD(elements[i].data,
6910 						REG_FIFO_ELEMENT_PROTECTION)],
6911 			    s_master_strs[GET_FIELD(elements[i].data,
6912 						REG_FIFO_ELEMENT_MASTER)]);
6913 
6914 		/* Print errors */
6915 		for (j = 0,
6916 		     err_val = GET_FIELD(elements[i].data,
6917 					 REG_FIFO_ELEMENT_ERROR);
6918 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6919 		     j++, err_val >>= 1) {
6920 			if (err_val & 0x1) {
6921 				if (err_printed)
6922 					results_offset +=
6923 					    sprintf(qed_get_buf_ptr
6924 						    (results_buf,
6925 						     results_offset), ", ");
6926 				results_offset +=
6927 				    sprintf(qed_get_buf_ptr
6928 					    (results_buf, results_offset), "%s",
6929 					    s_reg_fifo_error_strs[j]);
6930 				err_printed = true;
6931 			}
6932 		}
6933 
6934 		results_offset +=
6935 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6936 	}
6937 
6938 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6939 						  results_offset),
6940 				  "fifo contained %d elements", num_elements);
6941 
6942 	/* Add 1 for string NULL termination */
6943 	*parsed_results_bytes = results_offset + 1;
6944 
6945 	return DBG_STATUS_OK;
6946 }
6947 
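/* Parses a single IGU FIFO element and prints it to the results buffer at
 * *results_offset. The parsing status is returned.
 */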
static enum dbg_status
qed_parse_igu_fifo_element(struct igu_fifo_element *element,
			   char *results_buf, u32 *results_offset)
6952 {
6953 	const struct igu_fifo_addr_data *found_addr = NULL;
6954 	u8 source, err_type, i, is_cleanup;
6955 	char parsed_addr_data[32];
6956 	char parsed_wr_data[256];
6957 	u32 wr_data, prod_cons;
6958 	bool is_wr_cmd, is_pf;
6959 	u16 cmd_addr;
6960 	u64 dword12;
6961 
6962 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6963 	 * FIFO element.
6964 	 */
6965 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
6966 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6967 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6968 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6969 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6970 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6971 
6972 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6973 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6974 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6975 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6976 
6977 	/* Find address data */
6978 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6979 		const struct igu_fifo_addr_data *curr_addr =
6980 			&s_igu_fifo_addr_data[i];
6981 
6982 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6983 		    curr_addr->end_addr)
6984 			found_addr = curr_addr;
6985 	}
6986 
6987 	if (!found_addr)
6988 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6989 
6990 	/* Prepare parsed address data */
6991 	switch (found_addr->type) {
6992 	case IGU_ADDR_TYPE_MSIX_MEM:
6993 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6994 		break;
6995 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
6996 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6997 		sprintf(parsed_addr_data,
6998 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
6999 		break;
7000 	default:
7001 		parsed_addr_data[0] = '\0';
7002 	}
7003 
7004 	if (!is_wr_cmd) {
7005 		parsed_wr_data[0] = '\0';
7006 		goto out;
7007 	}
7008 
7009 	/* Prepare parsed write data */
7010 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7011 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7012 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7013 
7014 	if (source == IGU_SRC_ATTN) {
7015 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7016 	} else {
7017 		if (is_cleanup) {
7018 			u8 cleanup_val, cleanup_type;
7019 
7020 			cleanup_val =
7021 				GET_FIELD(wr_data,
7022 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7023 			cleanup_type =
7024 			    GET_FIELD(wr_data,
7025 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7026 
7027 			sprintf(parsed_wr_data,
7028 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7029 				cleanup_val ? "set" : "clear",
7030 				cleanup_type);
7031 		} else {
7032 			u8 update_flag, en_dis_int_for_sb, segment;
7033 			u8 timer_mask;
7034 
7035 			update_flag = GET_FIELD(wr_data,
7036 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7037 			en_dis_int_for_sb =
7038 				GET_FIELD(wr_data,
7039 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7040 			segment = GET_FIELD(wr_data,
7041 					    IGU_FIFO_WR_DATA_SEGMENT);
7042 			timer_mask = GET_FIELD(wr_data,
7043 					       IGU_FIFO_WR_DATA_TIMER_MASK);
7044 
7045 			sprintf(parsed_wr_data,
7046 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7047 				prod_cons,
7048 				update_flag ? "update" : "nop",
7049 				en_dis_int_for_sb ?
7050 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7051 				"enable",
7052 				segment ? "attn" : "regular",
7053 				timer_mask);
7054 		}
7055 	}
7056 out:
7057 	/* Add parsed element to parsed buffer */
7058 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7059 						   *results_offset),
7060 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7061 				   element->dword2, element->dword1,
7062 				   element->dword0,
7063 				   is_pf ? "pf" : "vf",
7064 				   GET_FIELD(element->dword0,
7065 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7066 				   s_igu_fifo_source_strs[source],
7067 				   is_wr_cmd ? "wr" : "rd",
7068 				   cmd_addr,
7069 				   (!is_pf && found_addr->vf_desc)
7070 				   ? found_addr->vf_desc
7071 				   : found_addr->desc,
7072 				   parsed_addr_data,
7073 				   parsed_wr_data,
7074 				   s_igu_fifo_error_strs[err_type]);
7075 
7076 	return DBG_STATUS_OK;
7077 }
7078 
7079 /* Parses an IGU FIFO dump buffer.
 * If results_buf is not NULL, the IGU FIFO results are printed to it.
7081  * In any case, the required results buffer size is assigned to
7082  * parsed_results_bytes.
7083  * The parsing status is returned.
7084  */
7085 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7086 					       char *results_buf,
7087 					       u32 *parsed_results_bytes)
7088 {
7089 	const char *section_name, *param_name, *param_str_val;
7090 	u32 param_num_val, num_section_params, num_elements;
7091 	struct igu_fifo_element *elements;
7092 	enum dbg_status status;
7093 	u32 results_offset = 0;
7094 	u8 i;
7095 
7096 	/* Read global_params section */
7097 	dump_buf += qed_read_section_hdr(dump_buf,
7098 					 &section_name, &num_section_params);
7099 	if (strcmp(section_name, "global_params"))
7100 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7101 
7102 	/* Print global params */
7103 	dump_buf += qed_print_section_params(dump_buf,
7104 					     num_section_params,
7105 					     results_buf, &results_offset);
7106 
7107 	/* Read igu_fifo_data section */
7108 	dump_buf += qed_read_section_hdr(dump_buf,
7109 					 &section_name, &num_section_params);
7110 	if (strcmp(section_name, "igu_fifo_data"))
7111 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7112 	dump_buf += qed_read_param(dump_buf,
7113 				   &param_name, &param_str_val, &param_num_val);
7114 	if (strcmp(param_name, "size"))
7115 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7116 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7117 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7118 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7119 	elements = (struct igu_fifo_element *)dump_buf;
7120 
7121 	/* Decode elements */
7122 	for (i = 0; i < num_elements; i++) {
7123 		status = qed_parse_igu_fifo_element(&elements[i],
7124 						    results_buf,
7125 						    &results_offset);
7126 		if (status != DBG_STATUS_OK)
7127 			return status;
7128 	}
7129 
7130 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7131 						  results_offset),
7132 				  "fifo contained %d elements", num_elements);
7133 
7134 	/* Add 1 for string NULL termination */
7135 	*parsed_results_bytes = results_offset + 1;
7136 
7137 	return DBG_STATUS_OK;
7138 }
7139 
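/* Parses a protection override dump buffer.
 * If results_buf is not NULL, the protection override results are printed to
 * it. In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */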
7140 static enum dbg_status
7141 qed_parse_protection_override_dump(u32 *dump_buf,
7142 				   char *results_buf,
7143 				   u32 *parsed_results_bytes)
7144 {
7145 	const char *section_name, *param_name, *param_str_val;
7146 	u32 param_num_val, num_section_params, num_elements;
7147 	struct protection_override_element *elements;
7148 	u32 results_offset = 0;
7149 	u8 i;
7150 
7151 	/* Read global_params section */
7152 	dump_buf += qed_read_section_hdr(dump_buf,
7153 					 &section_name, &num_section_params);
7154 	if (strcmp(section_name, "global_params"))
7155 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7156 
7157 	/* Print global params */
7158 	dump_buf += qed_print_section_params(dump_buf,
7159 					     num_section_params,
7160 					     results_buf, &results_offset);
7161 
7162 	/* Read protection_override_data section */
7163 	dump_buf += qed_read_section_hdr(dump_buf,
7164 					 &section_name, &num_section_params);
7165 	if (strcmp(section_name, "protection_override_data"))
7166 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7167 	dump_buf += qed_read_param(dump_buf,
7168 				   &param_name, &param_str_val, &param_num_val);
7169 	if (strcmp(param_name, "size"))
7170 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7171 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7172 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7173 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7174 	elements = (struct protection_override_element *)dump_buf;
7175 
7176 	/* Decode elements */
7177 	for (i = 0; i < num_elements; i++) {
7178 		u32 address = GET_FIELD(elements[i].data,
7179 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7180 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7181 
7182 		results_offset +=
7183 		    sprintf(qed_get_buf_ptr(results_buf,
7184 					    results_offset),
7185 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7186 			    i, address,
7187 			    (u32)GET_FIELD(elements[i].data,
7188 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7189 			    (u32)GET_FIELD(elements[i].data,
7190 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7191 			    (u32)GET_FIELD(elements[i].data,
7192 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7193 			    s_protection_strs[GET_FIELD(elements[i].data,
7194 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7195 			    s_protection_strs[GET_FIELD(elements[i].data,
7196 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7197 	}
7198 
7199 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7200 						  results_offset),
7201 				  "protection override contained %d elements",
7202 				  num_elements);
7203 
7204 	/* Add 1 for string NULL termination */
7205 	*parsed_results_bytes = results_offset + 1;
7206 
7207 	return DBG_STATUS_OK;
7208 }
7209 
7210 /* Parses a FW Asserts dump buffer.
 * If results_buf is not NULL, the FW Asserts results are printed to it.
7212  * In any case, the required results buffer size is assigned to
7213  * parsed_results_bytes.
7214  * The parsing status is returned.
7215  */
7216 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7217 						 char *results_buf,
7218 						 u32 *parsed_results_bytes)
7219 {
7220 	u32 num_section_params, param_num_val, i, results_offset = 0;
7221 	const char *param_name, *param_str_val, *section_name;
7222 	bool last_section_found = false;
7223 
7224 	*parsed_results_bytes = 0;
7225 
7226 	/* Read global_params section */
7227 	dump_buf += qed_read_section_hdr(dump_buf,
7228 					 &section_name, &num_section_params);
7229 	if (strcmp(section_name, "global_params"))
7230 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7231 
7232 	/* Print global params */
7233 	dump_buf += qed_print_section_params(dump_buf,
7234 					     num_section_params,
7235 					     results_buf, &results_offset);
7236 
7237 	while (!last_section_found) {
7238 		dump_buf += qed_read_section_hdr(dump_buf,
7239 						 &section_name,
7240 						 &num_section_params);
7241 		if (!strcmp(section_name, "fw_asserts")) {
7242 			/* Extract params */
7243 			const char *storm_letter = NULL;
7244 			u32 storm_dump_size = 0;
7245 
7246 			for (i = 0; i < num_section_params; i++) {
7247 				dump_buf += qed_read_param(dump_buf,
7248 							   &param_name,
7249 							   &param_str_val,
7250 							   &param_num_val);
7251 				if (!strcmp(param_name, "storm"))
7252 					storm_letter = param_str_val;
7253 				else if (!strcmp(param_name, "size"))
7254 					storm_dump_size = param_num_val;
7255 				else
7256 					return
7257 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7258 			}
7259 
7260 			if (!storm_letter || !storm_dump_size)
7261 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7262 
7263 			/* Print data */
7264 			results_offset +=
7265 			    sprintf(qed_get_buf_ptr(results_buf,
7266 						    results_offset),
7267 				    "\n%sSTORM_ASSERT: size=%d\n",
7268 				    storm_letter, storm_dump_size);
7269 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7270 				results_offset +=
7271 				    sprintf(qed_get_buf_ptr(results_buf,
7272 							    results_offset),
7273 					    "%08x\n", *dump_buf);
7274 		} else if (!strcmp(section_name, "last")) {
7275 			last_section_found = true;
7276 		} else {
7277 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7278 		}
7279 	}
7280 
7281 	/* Add 1 for string NULL termination */
7282 	*parsed_results_bytes = results_offset + 1;
7283 
7284 	return DBG_STATUS_OK;
7285 }
7286 
7287 /***************************** Public Functions *******************************/
7288 
7289 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7290 {
7291 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7292 	u8 buf_id;
7293 
7294 	/* Convert binary data to debug arrays */
7295 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7296 		s_user_dbg_arrays[buf_id].ptr =
7297 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7298 		s_user_dbg_arrays[buf_id].size_in_dwords =
7299 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7300 	}
7301 
7302 	return DBG_STATUS_OK;
7303 }
7304 
7305 const char *qed_dbg_get_status_str(enum dbg_status status)
7306 {
	return (status < MAX_DBG_STATUS) ?
	       s_status_str[status] : "Invalid debug status";
7309 }
7310 
7311 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7312 						  u32 *dump_buf,
7313 						  u32 num_dumped_dwords,
7314 						  u32 *results_buf_size)
7315 {
7316 	u32 num_errors, num_warnings;
7317 
7318 	return qed_parse_idle_chk_dump(dump_buf,
7319 				       num_dumped_dwords,
7320 				       NULL,
7321 				       results_buf_size,
7322 				       &num_errors, &num_warnings);
7323 }
7324 
7325 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7326 					   u32 *dump_buf,
7327 					   u32 num_dumped_dwords,
7328 					   char *results_buf,
7329 					   u32 *num_errors,
7330 					   u32 *num_warnings)
7331 {
7332 	u32 parsed_buf_size;
7333 
7334 	return qed_parse_idle_chk_dump(dump_buf,
7335 				       num_dumped_dwords,
7336 				       results_buf,
7337 				       &parsed_buf_size,
7338 				       num_errors, num_warnings);
7339 }
7340 
7341 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7342 {
7343 	s_mcp_trace_meta_arr.ptr = data;
7344 	s_mcp_trace_meta_arr.size_in_dwords = size;
7345 }
7346 
7347 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7348 						   u32 *dump_buf,
7349 						   u32 num_dumped_dwords,
7350 						   u32 *results_buf_size)
7351 {
7352 	return qed_parse_mcp_trace_dump(p_hwfn,
7353 					dump_buf, NULL, results_buf_size);
7354 }
7355 
7356 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7357 					    u32 *dump_buf,
7358 					    u32 num_dumped_dwords,
7359 					    char *results_buf)
7360 {
7361 	u32 parsed_buf_size;
7362 
7363 	return qed_parse_mcp_trace_dump(p_hwfn,
7364 					dump_buf,
7365 					results_buf, &parsed_buf_size);
7366 }
7367 
7368 enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf,
7369 					 u32 num_dumped_bytes,
7370 					 char *results_buf)
7371 {
7372 	u32 parsed_bytes;
7373 
7374 	return qed_parse_mcp_trace_buf(dump_buf,
7375 				       num_dumped_bytes,
7376 				       0,
7377 				       num_dumped_bytes,
7378 				       results_buf, &parsed_bytes);
7379 }
7380 
7381 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7382 						  u32 *dump_buf,
7383 						  u32 num_dumped_dwords,
7384 						  u32 *results_buf_size)
7385 {
7386 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7387 }
7388 
7389 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7390 					   u32 *dump_buf,
7391 					   u32 num_dumped_dwords,
7392 					   char *results_buf)
7393 {
7394 	u32 parsed_buf_size;
7395 
7396 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7397 }
7398 
7399 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7400 						  u32 *dump_buf,
7401 						  u32 num_dumped_dwords,
7402 						  u32 *results_buf_size)
7403 {
7404 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7405 }
7406 
7407 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7408 					   u32 *dump_buf,
7409 					   u32 num_dumped_dwords,
7410 					   char *results_buf)
7411 {
7412 	u32 parsed_buf_size;
7413 
7414 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7415 }
7416 
7417 enum dbg_status
7418 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7419 					     u32 *dump_buf,
7420 					     u32 num_dumped_dwords,
7421 					     u32 *results_buf_size)
7422 {
7423 	return qed_parse_protection_override_dump(dump_buf,
7424 						  NULL, results_buf_size);
7425 }
7426 
7427 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7428 						      u32 *dump_buf,
7429 						      u32 num_dumped_dwords,
7430 						      char *results_buf)
7431 {
7432 	u32 parsed_buf_size;
7433 
7434 	return qed_parse_protection_override_dump(dump_buf,
7435 						  results_buf,
7436 						  &parsed_buf_size);
7437 }
7438 
7439 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7440 						    u32 *dump_buf,
7441 						    u32 num_dumped_dwords,
7442 						    u32 *results_buf_size)
7443 {
7444 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7445 }
7446 
7447 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7448 					     u32 *dump_buf,
7449 					     u32 num_dumped_dwords,
7450 					     char *results_buf)
7451 {
7452 	u32 parsed_buf_size;
7453 
7454 	return qed_parse_fw_asserts_dump(dump_buf,
7455 					 results_buf, &parsed_buf_size);
7456 }
7457 
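/* Parses the attention results of a single block and reports each asserted
 * status bit via DP_NOTICE, noting whether it is masked.
 */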
7458 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7459 				   struct dbg_attn_block_result *results)
7460 {
7461 	struct user_dbg_array *block_attn, *pstrings;
7462 	const u32 *block_attn_name_offsets;
7463 	enum dbg_attn_type attn_type;
7464 	const char *block_name;
7465 	u8 num_regs, i, j;
7466 
7467 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7468 	attn_type = (enum dbg_attn_type)
7469 		    GET_FIELD(results->data,
7470 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7471 	block_name = s_block_info_arr[results->block_id].name;
7472 
7473 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7474 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7475 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7476 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7477 
7478 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7479 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7480 
7481 	/* Go over registers with a non-zero attention status */
7482 	for (i = 0; i < num_regs; i++) {
7483 		struct dbg_attn_bit_mapping *bit_mapping;
7484 		struct dbg_attn_reg_result *reg_result;
7485 		u8 num_reg_attn, bit_idx = 0;
7486 
7487 		reg_result = &results->reg_results[i];
7488 		num_reg_attn = GET_FIELD(reg_result->data,
7489 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7490 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7491 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7492 				block_attn->ptr)[reg_result->block_attn_offset];
7493 
7494 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7495 
7496 		/* Go over attention status bits */
7497 		for (j = 0; j < num_reg_attn; j++) {
7498 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7499 						     DBG_ATTN_BIT_MAPPING_VAL);
7500 			const char *attn_name, *attn_type_str, *masked_str;
7501 			u32 attn_name_offset, sts_addr;
7502 
7503 			/* Check if bit mask should be advanced (due to unused
7504 			 * bits).
7505 			 */
7506 			if (GET_FIELD(bit_mapping[j].data,
7507 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7508 				bit_idx += (u8)attn_idx_val;
7509 				continue;
7510 			}
7511 
7512 			/* Check current bit index */
7513 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7514 				bit_idx++;
7515 				continue;
7516 			}
7517 
7518 			/* Find attention name */
7519 			attn_name_offset =
7520 				block_attn_name_offsets[attn_idx_val];
7521 			attn_name = &((const char *)
7522 				      pstrings->ptr)[attn_name_offset];
7523 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7524 					"Interrupt" : "Parity";
7525 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7526 				     " [masked]" : "";
7527 			sts_addr = GET_FIELD(reg_result->data,
7528 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7529 			DP_NOTICE(p_hwfn,
7530 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7531 				  block_name, attn_type_str, attn_name,
7532 				  sts_addr, bit_idx, masked_str);
7533 
7534 			bit_idx++;
7535 		}
7536 	}
7537 
7538 	return DBG_STATUS_OK;
7539 }
7540 
/* Wrapper for unifying the idle_chk and mcp_trace APIs */
7542 static enum dbg_status
7543 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7544 				   u32 *dump_buf,
7545 				   u32 num_dumped_dwords,
7546 				   char *results_buf)
7547 {
	u32 num_errors, num_warnings;

	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					  results_buf, &num_errors,
					  &num_warnings);
7553 }
7554 
/* Feature metadata lookup table */
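/* The entries below are indexed by enum qed_dbg_features, so their order
 * must be kept in sync with that enum.
 */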
7556 static struct {
7557 	char *name;
7558 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7559 				    struct qed_ptt *p_ptt, u32 *size);
7560 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7561 					struct qed_ptt *p_ptt, u32 *dump_buf,
7562 					u32 buf_size, u32 *dumped_dwords);
7563 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7564 					 u32 *dump_buf, u32 num_dumped_dwords,
7565 					 char *results_buf);
7566 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7567 					    u32 *dump_buf,
7568 					    u32 num_dumped_dwords,
7569 					    u32 *results_buf_size);
} qed_features_lookup[] = {
	{"grc", qed_dbg_grc_get_dump_buf_size,
	 qed_dbg_grc_dump, NULL, NULL},
	{"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
	 qed_dbg_idle_chk_dump,
	 qed_print_idle_chk_results_wrapper,
	 qed_get_idle_chk_results_buf_size},
	{"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
	 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
	 qed_get_mcp_trace_results_buf_size},
	{"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
	 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
	 qed_get_reg_fifo_results_buf_size},
	{"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
	 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
	 qed_get_igu_fifo_results_buf_size},
	{"protection_override",
	 qed_dbg_protection_override_get_dump_buf_size,
	 qed_dbg_protection_override_dump,
	 qed_print_protection_override_results,
	 qed_get_protection_override_results_buf_size},
	{"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
	 qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results,
	 qed_get_fw_asserts_results_buf_size},
};
7601 
7602 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7603 {
7604 	u32 i, precision = 80;
7605 
7606 	if (!p_text_buf)
7607 		return;
7608 
7609 	pr_notice("\n%.*s", precision, p_text_buf);
7610 	for (i = precision; i < text_size; i += precision)
7611 		pr_cont("%.*s", precision, p_text_buf + i);
7612 	pr_cont("\n");
7613 }
7614 
7615 #define QED_RESULTS_BUF_MIN_SIZE 16
7616 /* Generic function for decoding debug feature info */
7617 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7618 				      enum qed_dbg_features feature_idx)
7619 {
7620 	struct qed_dbg_feature *feature =
7621 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7622 	u32 text_size_bytes, null_char_pos, i;
7623 	enum dbg_status rc;
7624 	char *text_buf;
7625 
7626 	/* Check if feature supports formatting capability */
7627 	if (!qed_features_lookup[feature_idx].results_buf_size)
7628 		return DBG_STATUS_OK;
7629 
7630 	/* Obtain size of formatted output */
7631 	rc = qed_features_lookup[feature_idx].
7632 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7633 				 feature->dumped_dwords, &text_size_bytes);
7634 	if (rc != DBG_STATUS_OK)
7635 		return rc;
7636 
7637 	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
7638 	null_char_pos = text_size_bytes - 1;
7639 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7640 
7641 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7642 		DP_NOTICE(p_hwfn->cdev,
7643 			  "formatted size of feature was too small %d. Aborting\n",
7644 			  text_size_bytes);
7645 		return DBG_STATUS_INVALID_ARGS;
7646 	}
7647 
7648 	/* Allocate temp text buf */
7649 	text_buf = vzalloc(text_size_bytes);
7650 	if (!text_buf)
7651 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7652 
7653 	/* Decode feature opcodes to string on temp buf */
7654 	rc = qed_features_lookup[feature_idx].
7655 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7656 			      feature->dumped_dwords, text_buf);
7657 	if (rc != DBG_STATUS_OK) {
7658 		vfree(text_buf);
7659 		return rc;
7660 	}
7661 
7662 	/* Replace the original null character with a '\n' character.
7663 	 * The bytes that were added as a result of the dword alignment are also
7664 	 * padded with '\n' characters.
7665 	 */
7666 	for (i = null_char_pos; i < text_size_bytes; i++)
7667 		text_buf[i] = '\n';
7668 
7669 	/* Dump printable feature to log */
7670 	if (p_hwfn->cdev->dbg_params.print_data)
7671 		qed_dbg_print_feature(text_buf, text_size_bytes);
7672 
	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer.
	 */
7676 	vfree(feature->dump_buf);
7677 	feature->dump_buf = text_buf;
7678 	feature->buf_size = text_size_bytes;
7679 	feature->dumped_dwords = text_size_bytes / 4;
7680 	return rc;
7681 }
7682 
7683 /* Generic function for performing the dump of a debug feature. */
7684 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7685 				    struct qed_ptt *p_ptt,
7686 				    enum qed_dbg_features feature_idx)
7687 {
7688 	struct qed_dbg_feature *feature =
7689 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7690 	u32 buf_size_dwords;
7691 	enum dbg_status rc;
7692 
7693 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7694 		  qed_features_lookup[feature_idx].name);
7695 
	/* The dump_buf may already be allocated if a dump was requested but
	 * the file was never read; in that case it must be freed. We can't
	 * reuse the buffer as-is, since its size may have changed.
	 */
7700 	if (feature->dump_buf) {
7701 		vfree(feature->dump_buf);
7702 		feature->dump_buf = NULL;
7703 	}
7704 
7705 	/* Get buffer size from hsi, allocate accordingly, and perform the
7706 	 * dump.
7707 	 */
7708 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7709 						       &buf_size_dwords);
7710 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7711 		return rc;
7712 	feature->buf_size = buf_size_dwords * sizeof(u32);
7713 	feature->dump_buf = vmalloc(feature->buf_size);
7714 	if (!feature->dump_buf)
7715 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7716 
7717 	rc = qed_features_lookup[feature_idx].
7718 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7719 			     feature->buf_size / sizeof(u32),
7720 			     &feature->dumped_dwords);
7721 
	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
	 * error. In this case the buffer holds valid binary data, but we
	 * won't be able to parse it (since parsing relies on data in NVRAM
	 * which is only accessible when the MFW is responsive). Skip the
	 * formatting but return success so that the binary data is provided.
	 */
7728 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7729 		return DBG_STATUS_OK;
7730 
7731 	if (rc != DBG_STATUS_OK)
7732 		return rc;
7733 
7734 	/* Format output */
7735 	rc = format_feature(p_hwfn, feature_idx);
7736 	return rc;
7737 }
7738 
7739 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7740 {
7741 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7742 }
7743 
7744 int qed_dbg_grc_size(struct qed_dev *cdev)
7745 {
7746 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7747 }
7748 
7749 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7750 {
7751 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7752 			       num_dumped_bytes);
7753 }
7754 
7755 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7756 {
7757 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7758 }
7759 
7760 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7761 {
7762 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7763 			       num_dumped_bytes);
7764 }
7765 
7766 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7767 {
7768 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7769 }
7770 
7771 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7772 {
7773 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7774 			       num_dumped_bytes);
7775 }
7776 
7777 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7778 {
7779 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7780 }
7781 
7782 int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7783 			     enum qed_nvm_images image_id, u32 *length)
7784 {
7785 	struct qed_nvm_image_att image_att;
7786 	int rc;
7787 
7788 	*length = 0;
7789 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7790 	if (rc)
7791 		return rc;
7792 
7793 	*length = image_att.length;
7794 
7795 	return rc;
7796 }
7797 
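/* Dumps the requested NVM image into 'buffer'. All images except
 * QED_NVM_IMAGE_NVM_META are byte-swapped to big-endian dwords.
 */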
7798 int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7799 		      u32 *num_dumped_bytes, enum qed_nvm_images image_id)
7800 {
7801 	struct qed_hwfn *p_hwfn =
7802 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7803 	u32 len_rounded, i;
7804 	__be32 val;
7805 	int rc;
7806 
7807 	*num_dumped_bytes = 0;
7808 	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7809 	if (rc)
7810 		return rc;
7811 
7812 	DP_NOTICE(p_hwfn->cdev,
7813 		  "Collecting a debug feature [\"nvram image %d\"]\n",
7814 		  image_id);
7815 
7816 	len_rounded = roundup(len_rounded, sizeof(u32));
7817 	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7818 	if (rc)
7819 		return rc;
7820 
7821 	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7822 	if (image_id != QED_NVM_IMAGE_NVM_META)
7823 		for (i = 0; i < len_rounded; i += 4) {
7824 			val = cpu_to_be32(*(u32 *)(buffer + i));
7825 			*(u32 *)(buffer + i) = val;
7826 		}
7827 
7828 	*num_dumped_bytes = len_rounded;
7829 
7830 	return rc;
7831 }
7832 
7833 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7834 				u32 *num_dumped_bytes)
7835 {
7836 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7837 			       num_dumped_bytes);
7838 }
7839 
7840 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7841 {
7842 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7843 }
7844 
7845 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7846 		       u32 *num_dumped_bytes)
7847 {
7848 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7849 			       num_dumped_bytes);
7850 }
7851 
7852 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7853 {
7854 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7855 }
7856 
7857 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7858 		      u32 *num_dumped_bytes)
7859 {
7860 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7861 			       num_dumped_bytes);
7862 }
7863 
7864 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7865 {
7866 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7867 }
7868 
/* Defines the number of bytes allocated for recording the length of a
 * debugfs feature buffer.
 */
7872 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7873 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7874 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7875 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7876 enum debug_print_features {
7877 	OLD_MODE = 0,
7878 	IDLE_CHK = 1,
7879 	GRC_DUMP = 2,
7880 	MCP_TRACE = 3,
7881 	REG_FIFO = 4,
7882 	PROTECTION_OVERRIDE = 5,
7883 	IGU_FIFO = 6,
7884 	PHY = 7,
7885 	FW_ASSERTS = 8,
7886 	NVM_CFG1 = 9,
7887 	DEFAULT_CFG = 10,
7888 	NVM_META = 11,
7889 };
7890 
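/* The regdump header is a single dword that precedes each feature in the
 * "all data" buffer:
 *   bits  0-23: feature size in bytes
 *   bits 24-29: feature (enum debug_print_features)
 *   bit     30: omit-engine flag (set when there is a single HW function)
 *   bit     31: engine number
 */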
7891 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7892 				   int engine, u32 feature_size, u8 omit_engine)
7893 {
	/* Insert the engine, feature and omit-engine flag inside the header
	 * and combine it with the feature size.
	 */
7897 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7898 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7899 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7900 }
7901 
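/* Dumps all per-engine debug features and the engine-common data (MCP trace
 * and NVM images) into a single buffer, preceding each part with a regdump
 * header.
 *
 * A minimal caller sketch, assuming the caller sizes and allocates the
 * buffer itself (illustrative only; no such caller exists in this file):
 *
 *	int size = qed_dbg_all_data_size(cdev);
 *	void *buf = size > 0 ? vzalloc(size) : NULL;
 *
 *	if (buf && !qed_dbg_all_data(cdev, buf))
 *		... parse the regdump headers in buf ...
 *	vfree(buf);
 */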
7902 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7903 {
7904 	u8 cur_engine, omit_engine = 0, org_engine;
7905 	u32 offset = 0, feature_size;
7906 	int rc;
7907 
7908 	if (cdev->num_hwfns == 1)
7909 		omit_engine = 1;
7910 
7911 	org_engine = qed_get_debug_engine(cdev);
7912 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chk and GRC dump for each HW function */
7914 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7915 			   "obtaining idle_chk and grcdump for current engine\n");
7916 		qed_set_debug_engine(cdev, cur_engine);
7917 
7918 		/* First idle_chk */
7919 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7920 				      REGDUMP_HEADER_SIZE, &feature_size);
7921 		if (!rc) {
7922 			*(u32 *)((u8 *)buffer + offset) =
7923 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7924 						    feature_size, omit_engine);
7925 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7926 		} else {
7927 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7928 		}
7929 
7930 		/* Second idle_chk */
7931 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7932 				      REGDUMP_HEADER_SIZE, &feature_size);
7933 		if (!rc) {
7934 			*(u32 *)((u8 *)buffer + offset) =
7935 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7936 						    feature_size, omit_engine);
7937 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7938 		} else {
7939 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7940 		}
7941 
7942 		/* reg_fifo dump */
7943 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7944 				      REGDUMP_HEADER_SIZE, &feature_size);
7945 		if (!rc) {
7946 			*(u32 *)((u8 *)buffer + offset) =
7947 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
7948 						    feature_size, omit_engine);
7949 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7950 		} else {
7951 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7952 		}
7953 
7954 		/* igu_fifo dump */
7955 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7956 				      REGDUMP_HEADER_SIZE, &feature_size);
7957 		if (!rc) {
7958 			*(u32 *)((u8 *)buffer + offset) =
7959 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
7960 						    feature_size, omit_engine);
7961 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7962 		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
7964 		}
7965 
7966 		/* protection_override dump */
7967 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7968 						 REGDUMP_HEADER_SIZE,
7969 						 &feature_size);
7970 		if (!rc) {
7971 			*(u32 *)((u8 *)buffer + offset) =
7972 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
7973 						    cur_engine,
7974 						    feature_size, omit_engine);
7975 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7976 		} else {
7977 			DP_ERR(cdev,
7978 			       "qed_dbg_protection_override failed. rc = %d\n",
7979 			       rc);
7980 		}
7981 
7982 		/* fw_asserts dump */
7983 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7984 					REGDUMP_HEADER_SIZE, &feature_size);
7985 		if (!rc) {
7986 			*(u32 *)((u8 *)buffer + offset) =
7987 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7988 						    feature_size, omit_engine);
7989 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7990 		} else {
7991 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7992 			       rc);
7993 		}
7994 
		/* GRC dump - must be last because when the MCP is stuck it
		 * will clutter idle_chk, reg_fifo, ...
		 */
7998 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7999 				 REGDUMP_HEADER_SIZE, &feature_size);
8000 		if (!rc) {
8001 			*(u32 *)((u8 *)buffer + offset) =
8002 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
8003 						    feature_size, omit_engine);
8004 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8005 		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
8007 		}
8008 	}
8009 
8010 	qed_set_debug_engine(cdev, org_engine);
8011 	/* mcp_trace */
8012 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8013 			       REGDUMP_HEADER_SIZE, &feature_size);
8014 	if (!rc) {
8015 		*(u32 *)((u8 *)buffer + offset) =
8016 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
8017 					    feature_size, omit_engine);
8018 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8019 	} else {
8020 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8021 	}
8022 
8023 	/* nvm cfg1 */
8024 	rc = qed_dbg_nvm_image(cdev,
8025 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8026 			       &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8027 	if (!rc) {
8028 		*(u32 *)((u8 *)buffer + offset) =
8029 		    qed_calc_regdump_header(NVM_CFG1, cur_engine,
8030 					    feature_size, omit_engine);
8031 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8032 	} else if (rc != -ENOENT) {
8033 		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8035 		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8036 	}
8037 
8038 	/* nvm default */
8039 	rc = qed_dbg_nvm_image(cdev,
8040 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8041 			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8042 	if (!rc) {
8043 		*(u32 *)((u8 *)buffer + offset) =
8044 		    qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8045 					    feature_size, omit_engine);
8046 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8047 	} else if (rc != -ENOENT) {
8048 		DP_ERR(cdev,
8049 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8050 		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8051 		       rc);
8052 	}
8053 
8054 	/* nvm meta */
8055 	rc = qed_dbg_nvm_image(cdev,
8056 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8057 			       &feature_size, QED_NVM_IMAGE_NVM_META);
8058 	if (!rc) {
8059 		*(u32 *)((u8 *)buffer + offset) =
8060 		    qed_calc_regdump_header(NVM_META, cur_engine,
8061 					    feature_size, omit_engine);
8062 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8063 	} else if (rc != -ENOENT) {
8064 		DP_ERR(cdev,
8065 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8066 		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8067 	}
8068 
8069 	return 0;
8070 }
8071 
8072 int qed_dbg_all_data_size(struct qed_dev *cdev)
8073 {
8074 	struct qed_hwfn *p_hwfn =
8075 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8076 	u32 regs_len = 0, image_len = 0;
8077 	u8 cur_engine, org_engine;
8078 
8079 	org_engine = qed_get_debug_engine(cdev);
8080 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8081 		/* Engine specific */
8082 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8083 			   "calculating idle_chk and grcdump register length for current engine\n");
8084 		qed_set_debug_engine(cdev, cur_engine);
8085 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8086 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8087 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8088 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8089 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8090 			    REGDUMP_HEADER_SIZE +
8091 			    qed_dbg_protection_override_size(cdev) +
8092 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8093 	}
8094 
8095 	qed_set_debug_engine(cdev, org_engine);
8096 
8097 	/* Engine common */
8098 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8099 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8100 	if (image_len)
8101 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8102 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8103 	if (image_len)
8104 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8105 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8106 	if (image_len)
8107 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8108 
8109 	return regs_len;
8110 }
8111 
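/* Dumps the given debug feature, formats it when supported, and copies the
 * result into the caller-supplied buffer, reporting its size in bytes.
 */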
8112 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8113 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8114 {
8115 	struct qed_hwfn *p_hwfn =
8116 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8117 	struct qed_dbg_feature *qed_feature =
8118 		&cdev->dbg_params.features[feature];
8119 	enum dbg_status dbg_rc;
8120 	struct qed_ptt *p_ptt;
8121 	int rc = 0;
8122 
8123 	/* Acquire ptt */
8124 	p_ptt = qed_ptt_acquire(p_hwfn);
8125 	if (!p_ptt)
8126 		return -EINVAL;
8127 
8128 	/* Get dump */
8129 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8130 	if (dbg_rc != DBG_STATUS_OK) {
8131 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8132 			   qed_dbg_get_status_str(dbg_rc));
8133 		*num_dumped_bytes = 0;
8134 		rc = -EINVAL;
8135 		goto out;
8136 	}
8137 
8138 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8139 		   "copying debugfs feature to external buffer\n");
8140 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = qed_feature->dumped_dwords * sizeof(u32);
8143 
8144 out:
8145 	qed_ptt_release(p_hwfn, p_ptt);
8146 	return rc;
8147 }
8148 
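/* Returns the required dump buffer size in bytes for the given debug feature
 * (0 on failure), or -EINVAL if no PTT entry could be acquired.
 */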
8149 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8150 {
8151 	struct qed_hwfn *p_hwfn =
8152 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8153 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8154 	struct qed_dbg_feature *qed_feature =
8155 		&cdev->dbg_params.features[feature];
8156 	u32 buf_size_dwords;
8157 	enum dbg_status rc;
8158 
8159 	if (!p_ptt)
8160 		return -EINVAL;
8161 
8162 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8163 						   &buf_size_dwords);
8164 	if (rc != DBG_STATUS_OK)
8165 		buf_size_dwords = 0;
8166 
8167 	qed_ptt_release(p_hwfn, p_ptt);
8168 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8169 	return qed_feature->buf_size;
8170 }
8171 
8172 u8 qed_get_debug_engine(struct qed_dev *cdev)
8173 {
8174 	return cdev->dbg_params.engine_for_debug;
8175 }
8176 
8177 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8178 {
8179 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8180 		   engine_number);
8181 	cdev->dbg_params.engine_for_debug = engine_number;
8182 }
8183 
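/* Points the debug arrays (both core and user) at the debug values section
 * of the firmware file.
 */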
8184 void qed_dbg_pf_init(struct qed_dev *cdev)
8185 {
8186 	const u8 *dbg_values;
8187 
8188 	/* Debug values are after init values.
8189 	 * The offset is the first dword of the file.
8190 	 */
8191 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8192 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8193 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8194 }
8195 
8196 void qed_dbg_pf_exit(struct qed_dev *cdev)
8197 {
8198 	struct qed_dbg_feature *feature = NULL;
8199 	enum qed_dbg_features feature_idx;
8200 
8201 	/* Debug features' buffers may be allocated if debug feature was used
8202 	 * but dump wasn't called.
8203 	 */
8204 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8205 		feature = &cdev->dbg_params.features[feature_idx];
8206 		if (feature->dump_buf) {
8207 			vfree(feature->dump_buf);
8208 			feature->dump_buf = NULL;
8209 		}
8210 	}
8211 }
8212