1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Chip IDs enum */
19 enum chip_ids {
20 	CHIP_BB_B0,
21 	CHIP_K2,
22 	MAX_CHIP_IDS
23 };
24 
25 /* Memory groups enum */
26 enum mem_groups {
27 	MEM_GROUP_PXP_MEM,
28 	MEM_GROUP_DMAE_MEM,
29 	MEM_GROUP_CM_MEM,
30 	MEM_GROUP_QM_MEM,
31 	MEM_GROUP_TM_MEM,
32 	MEM_GROUP_BRB_RAM,
33 	MEM_GROUP_BRB_MEM,
34 	MEM_GROUP_PRS_MEM,
35 	MEM_GROUP_SDM_MEM,
36 	MEM_GROUP_PBUF,
37 	MEM_GROUP_IOR,
38 	MEM_GROUP_RAM,
39 	MEM_GROUP_BTB_RAM,
40 	MEM_GROUP_RDIF_CTX,
41 	MEM_GROUP_TDIF_CTX,
42 	MEM_GROUP_CFC_MEM,
43 	MEM_GROUP_CONN_CFC_MEM,
44 	MEM_GROUP_TASK_CFC_MEM,
45 	MEM_GROUP_CAU_PI,
46 	MEM_GROUP_CAU_MEM,
47 	MEM_GROUP_PXP_ILT,
48 	MEM_GROUP_MULD_MEM,
49 	MEM_GROUP_BTB_MEM,
50 	MEM_GROUP_IGU_MEM,
51 	MEM_GROUP_IGU_MSIX,
52 	MEM_GROUP_CAU_SB,
53 	MEM_GROUP_BMB_RAM,
54 	MEM_GROUP_BMB_MEM,
55 	MEM_GROUPS_NUM
56 };
57 
58 /* Memory groups names */
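/* Must be kept in sync with the mem_groups enum above */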
59 static const char * const s_mem_group_names[] = {
60 	"PXP_MEM",
61 	"DMAE_MEM",
62 	"CM_MEM",
63 	"QM_MEM",
64 	"TM_MEM",
65 	"BRB_RAM",
66 	"BRB_MEM",
67 	"PRS_MEM",
68 	"SDM_MEM",
69 	"PBUF",
70 	"IOR",
71 	"RAM",
72 	"BTB_RAM",
73 	"RDIF_CTX",
74 	"TDIF_CTX",
75 	"CFC_MEM",
76 	"CONN_CFC_MEM",
77 	"TASK_CFC_MEM",
78 	"CAU_PI",
79 	"CAU_MEM",
80 	"PXP_ILT",
81 	"MULD_MEM",
82 	"BTB_MEM",
83 	"IGU_MEM",
84 	"IGU_MSIX",
85 	"CAU_SB",
86 	"BMB_RAM",
87 	"BMB_MEM",
88 };
89 
90 /* Idle check conditions */
91 static u32 cond4(const u32 *r, const u32 *imm)
92 {
93 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
94 }
95 
96 static u32 cond6(const u32 *r, const u32 *imm)
97 {
98 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
99 }
100 
101 static u32 cond5(const u32 *r, const u32 *imm)
102 {
103 	return (r[0] & imm[0]) != imm[1];
104 }
105 
106 static u32 cond8(const u32 *r, const u32 *imm)
107 {
108 	return ((r[0] & imm[0]) >> imm[1]) !=
109 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
110 }
111 
112 static u32 cond9(const u32 *r, const u32 *imm)
113 {
114 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
115 }
116 
117 static u32 cond1(const u32 *r, const u32 *imm)
118 {
119 	return (r[0] & ~imm[0]) != imm[1];
120 }
121 
122 static u32 cond0(const u32 *r, const u32 *imm)
123 {
124 	return r[0] != imm[0];
125 }
126 
127 static u32 cond10(const u32 *r, const u32 *imm)
128 {
129 	return r[0] != r[1] && r[2] == imm[0];
130 }
131 
132 static u32 cond11(const u32 *r, const u32 *imm)
133 {
134 	return r[0] != r[1] && r[2] > imm[0];
135 }
136 
137 static u32 cond3(const u32 *r, const u32 *imm)
138 {
139 	return r[0] != r[1];
140 }
141 
142 static u32 cond12(const u32 *r, const u32 *imm)
143 {
144 	return r[0] & imm[0];
145 }
146 
147 static u32 cond7(const u32 *r, const u32 *imm)
148 {
149 	return r[0] < (r[1] - imm[0]);
150 }
151 
152 static u32 cond2(const u32 *r, const u32 *imm)
153 {
154 	return r[0] > imm[0];
155 }
156 
157 /* Array of Idle Check conditions */
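/* The array is indexed by the condition ID carried in each idle-check rule */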
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
159 	cond0,
160 	cond1,
161 	cond2,
162 	cond3,
163 	cond4,
164 	cond5,
165 	cond6,
166 	cond7,
167 	cond8,
168 	cond9,
169 	cond10,
170 	cond11,
171 	cond12,
172 };
173 
174 /******************************* Data Types **********************************/
175 
176 enum platform_ids {
177 	PLATFORM_ASIC,
178 	PLATFORM_RESERVED,
179 	PLATFORM_RESERVED2,
180 	PLATFORM_RESERVED3,
181 	MAX_PLATFORM_IDS
182 };
183 
184 struct dbg_array {
185 	const u32 *ptr;
186 	u32 size_in_dwords;
187 };
188 
189 struct chip_platform_defs {
190 	u8 num_ports;
191 	u8 num_pfs;
192 	u8 num_vfs;
193 };
194 
195 /* Chip constant definitions */
196 struct chip_defs {
197 	const char *name;
198 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
199 };
200 
201 /* Platform constant definitions */
202 struct platform_defs {
203 	const char *name;
204 	u32 delay_factor;
205 };
206 
207 /* Storm constant definitions */
208 struct storm_defs {
209 	char letter;
210 	enum block_id block_id;
211 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
212 	bool has_vfc;
213 	u32 sem_fast_mem_addr;
214 	u32 sem_frame_mode_addr;
215 	u32 sem_slow_enable_addr;
216 	u32 sem_slow_mode_addr;
217 	u32 sem_slow_mode1_conf_addr;
218 	u32 sem_sync_dbg_empty_addr;
219 	u32 sem_slow_dbg_empty_addr;
220 	u32 cm_ctx_wr_addr;
221 	u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
222 	u32 cm_conn_ag_ctx_rd_addr;
223 	u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
224 	u32 cm_conn_st_ctx_rd_addr;
225 	u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
226 	u32 cm_task_ag_ctx_rd_addr;
227 	u32 cm_task_st_ctx_lid_size; /* In quad-regs */
228 	u32 cm_task_st_ctx_rd_addr;
229 };
230 
231 /* Block constant definitions */
232 struct block_defs {
233 	const char *name;
234 	bool has_dbg_bus[MAX_CHIP_IDS];
235 	bool associated_to_storm;
236 	u32 storm_id; /* Valid only if associated_to_storm is true */
237 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
238 	u32 dbg_select_addr;
239 	u32 dbg_cycle_enable_addr;
240 	u32 dbg_shift_addr;
241 	u32 dbg_force_valid_addr;
242 	u32 dbg_force_frame_addr;
243 	bool has_reset_bit;
244 	bool unreset; /* If true, the block is taken out of reset before dump */
245 	enum dbg_reset_regs reset_reg;
246 	u8 reset_bit_offset; /* Bit offset in reset register */
247 };
248 
249 /* Reset register definitions */
250 struct reset_reg_defs {
251 	u32 addr;
252 	u32 unreset_val;
253 	bool exists[MAX_CHIP_IDS];
254 };
255 
256 struct grc_param_defs {
257 	u32 default_val[MAX_CHIP_IDS];
258 	u32 min;
259 	u32 max;
260 	bool is_preset;
261 	u32 exclude_all_preset_val;
262 	u32 crash_preset_val;
263 };
264 
265 struct rss_mem_defs {
266 	const char *mem_name;
267 	const char *type_name;
268 	u32 addr; /* In 128b units */
269 	u32 num_entries[MAX_CHIP_IDS];
270 	u32 entry_width[MAX_CHIP_IDS]; /* In bits */
271 };
272 
273 struct vfc_ram_defs {
274 	const char *mem_name;
275 	const char *type_name;
276 	u32 base_row;
277 	u32 num_rows;
278 };
279 
280 struct big_ram_defs {
281 	const char *instance_name;
282 	enum mem_groups mem_group_id;
283 	enum mem_groups ram_mem_group_id;
284 	enum dbg_grc_params grc_param;
285 	u32 addr_reg_addr;
286 	u32 data_reg_addr;
287 	u32 num_of_blocks[MAX_CHIP_IDS];
288 };
289 
290 struct phy_defs {
291 	const char *phy_name;
292 	u32 base_addr;
293 	u32 tbus_addr_lo_addr;
294 	u32 tbus_addr_hi_addr;
295 	u32 tbus_data_lo_addr;
296 	u32 tbus_data_hi_addr;
297 };
298 
299 /******************************** Constants **********************************/
300 
301 #define MAX_LCIDS			320
302 #define MAX_LTIDS			320
303 #define NUM_IOR_SETS			2
304 #define IORS_PER_SET			176
305 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
306 #define BYTES_IN_DWORD			sizeof(u32)
307 
308 /* In the macros below, size and offset are specified in bits */
309 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
310 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
311 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
312 #define FIELD_DWORD_OFFSET(type, field) \
313 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
314 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
315 #define FIELD_BIT_MASK(type, field) \
316 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
317 	 FIELD_DWORD_SHIFT(type, field))
318 #define SET_VAR_FIELD(var, type, field, val) \
319 	do { \
320 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
321 		(~FIELD_BIT_MASK(type, field));	\
322 		var[FIELD_DWORD_OFFSET(type, field)] |= \
323 		(val) << FIELD_DWORD_SHIFT(type, field); \
324 	} while (0)
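/* Illustrative use (based on the VFC definitions below): SET_VAR_FIELD(cmd,
 * VFC_CAM_CMD, ROW, row) clears the 9-bit ROW field at bit offset 48 of the
 * command held in the dword array cmd[] and writes "row" into it.
 */
/* The two macros below use a loop counter "i" that must be declared in the
 * enclosing scope.
 */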
325 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
326 	do { \
327 		for (i = 0; i < (arr_size); i++) \
328 			qed_wr(dev, ptt, addr,	(arr)[i]); \
329 	} while (0)
330 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
331 	do { \
332 		for (i = 0; i < (arr_size); i++) \
333 			(arr)[i] = qed_rd(dev, ptt, addr); \
334 	} while (0)
335 
336 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
337 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
338 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
339 #define RAM_LINES_TO_BYTES(lines) \
340 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
341 #define REG_DUMP_LEN_SHIFT		24
342 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
343 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
344 #define IDLE_CHK_RULE_SIZE_DWORDS \
345 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
346 #define IDLE_CHK_RESULT_HDR_DWORDS \
347 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
348 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
349 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
350 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
351 
352 /* The sizes and offsets below are specified in bits */
353 #define VFC_CAM_CMD_STRUCT_SIZE		64
354 #define VFC_CAM_CMD_ROW_OFFSET		48
355 #define VFC_CAM_CMD_ROW_SIZE		9
356 #define VFC_CAM_ADDR_STRUCT_SIZE	16
357 #define VFC_CAM_ADDR_OP_OFFSET		0
358 #define VFC_CAM_ADDR_OP_SIZE		4
359 #define VFC_CAM_RESP_STRUCT_SIZE	256
360 #define VFC_RAM_ADDR_STRUCT_SIZE	16
361 #define VFC_RAM_ADDR_OP_OFFSET		0
362 #define VFC_RAM_ADDR_OP_SIZE		2
363 #define VFC_RAM_ADDR_ROW_OFFSET		2
364 #define VFC_RAM_ADDR_ROW_SIZE		10
365 #define VFC_RAM_RESP_STRUCT_SIZE	256
366 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
367 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
368 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
369 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
370 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
371 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
372 #define NUM_VFC_RAM_TYPES		4
373 #define VFC_CAM_NUM_ROWS		512
374 #define VFC_OPCODE_CAM_RD		14
375 #define VFC_OPCODE_RAM_RD		0
376 #define NUM_RSS_MEM_TYPES		5
377 #define NUM_BIG_RAM_TYPES		3
378 #define BIG_RAM_BLOCK_SIZE_BYTES	128
379 #define BIG_RAM_BLOCK_SIZE_DWORDS \
380 	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
381 #define NUM_PHY_TBUS_ADDRESSES		2048
382 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
383 #define RESET_REG_UNRESET_OFFSET	4
384 #define STALL_DELAY_MS			500
385 #define STATIC_DEBUG_LINE_DWORDS	9
386 #define NUM_DBG_BUS_LINES		256
387 #define NUM_COMMON_GLOBAL_PARAMS	8
388 #define FW_IMG_MAIN			1
389 #define REG_FIFO_DEPTH_ELEMENTS		32
390 #define REG_FIFO_ELEMENT_DWORDS		2
391 #define REG_FIFO_DEPTH_DWORDS \
392 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
393 #define IGU_FIFO_DEPTH_ELEMENTS		64
394 #define IGU_FIFO_ELEMENT_DWORDS		4
395 #define IGU_FIFO_DEPTH_DWORDS \
396 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
397 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
398 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
399 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
400 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
401 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
402 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
403 	(MCP_REG_SCRATCH + \
404 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
405 #define MCP_TRACE_META_IMAGE_SIGNATURE  0x669955aa
406 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
407 #define EMPTY_FW_IMAGE_STR		"???????????????"
408 
409 /***************************** Constant Arrays *******************************/
410 
411 /* Debug arrays */
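/* Populated at runtime with pointers into the binary debug data */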
412 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
413 
414 /* Chip constant definitions array */
415 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
416 	{ "bb_b0",
417 	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
418 	    {0, 0, 0}, {0, 0, 0} } },
419 	{ "k2",
420 	  { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
421 	    {0, 0, 0}, {0, 0, 0} } }
422 };
423 
424 /* Storm constant definitions array */
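/* Indexed by storm ID (see the DBG_*STORM_ID values used below) */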
425 static struct storm_defs s_storm_defs[] = {
426 	/* Tstorm */
427 	{'T', BLOCK_TSEM,
428 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
429 	 TSEM_REG_FAST_MEMORY,
430 	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
431 	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
432 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
433 	 TCM_REG_CTX_RBC_ACCS,
434 	 4, TCM_REG_AGG_CON_CTX,
435 	 16, TCM_REG_SM_CON_CTX,
436 	 2, TCM_REG_AGG_TASK_CTX,
437 	 4, TCM_REG_SM_TASK_CTX},
438 	/* Mstorm */
439 	{'M', BLOCK_MSEM,
440 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
441 	 MSEM_REG_FAST_MEMORY,
442 	 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
443 	 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
444 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
445 	 MCM_REG_CTX_RBC_ACCS,
446 	 1, MCM_REG_AGG_CON_CTX,
447 	 10, MCM_REG_SM_CON_CTX,
448 	 2, MCM_REG_AGG_TASK_CTX,
449 	 7, MCM_REG_SM_TASK_CTX},
450 	/* Ustorm */
451 	{'U', BLOCK_USEM,
452 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
453 	 USEM_REG_FAST_MEMORY,
454 	 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
455 	 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
456 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
457 	 UCM_REG_CTX_RBC_ACCS,
458 	 2, UCM_REG_AGG_CON_CTX,
459 	 13, UCM_REG_SM_CON_CTX,
460 	 3, UCM_REG_AGG_TASK_CTX,
461 	 3, UCM_REG_SM_TASK_CTX},
462 	/* Xstorm */
463 	{'X', BLOCK_XSEM,
464 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
465 	 XSEM_REG_FAST_MEMORY,
466 	 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
467 	 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
468 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
469 	 XCM_REG_CTX_RBC_ACCS,
470 	 9, XCM_REG_AGG_CON_CTX,
471 	 15, XCM_REG_SM_CON_CTX,
472 	 0, 0,
473 	 0, 0},
474 	/* Ystorm */
475 	{'Y', BLOCK_YSEM,
476 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
477 	 YSEM_REG_FAST_MEMORY,
478 	 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
479 	 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
480 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
481 	 YCM_REG_CTX_RBC_ACCS,
482 	 2, YCM_REG_AGG_CON_CTX,
483 	 3, YCM_REG_SM_CON_CTX,
484 	 2, YCM_REG_AGG_TASK_CTX,
485 	 12, YCM_REG_SM_TASK_CTX},
486 	/* Pstorm */
487 	{'P', BLOCK_PSEM,
488 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
489 	 PSEM_REG_FAST_MEMORY,
490 	 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
491 	 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
492 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
493 	 PCM_REG_CTX_RBC_ACCS,
494 	 0, 0,
495 	 10, PCM_REG_SM_CON_CTX,
496 	 0, 0,
497 	 0, 0}
498 };
499 
500 /* Block definitions array */
501 static struct block_defs block_grc_defs = {
502 	"grc",
503 	{true, true}, false, 0,
504 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
505 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
506 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
507 	GRC_REG_DBG_FORCE_FRAME,
508 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
509 };
510 
511 static struct block_defs block_miscs_defs = {
512 	"miscs", {false, false}, false, 0,
513 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
514 	0, 0, 0, 0, 0,
515 	false, false, MAX_DBG_RESET_REGS, 0
516 };
517 
518 static struct block_defs block_misc_defs = {
519 	"misc", {false, false}, false, 0,
520 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
521 	0, 0, 0, 0, 0,
522 	false, false, MAX_DBG_RESET_REGS, 0
523 };
524 
525 static struct block_defs block_dbu_defs = {
526 	"dbu", {false, false}, false, 0,
527 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
528 	0, 0, 0, 0, 0,
529 	false, false, MAX_DBG_RESET_REGS, 0
530 };
531 
532 static struct block_defs block_pglue_b_defs = {
533 	"pglue_b",
534 	{true, true}, false, 0,
535 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
536 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
537 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
538 	PGLUE_B_REG_DBG_FORCE_FRAME,
539 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
540 };
541 
542 static struct block_defs block_cnig_defs = {
543 	"cnig",
544 	{false, true}, false, 0,
545 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
546 	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
547 	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
548 	CNIG_REG_DBG_FORCE_FRAME_K2,
549 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
550 };
551 
552 static struct block_defs block_cpmu_defs = {
553 	"cpmu", {false, false}, false, 0,
554 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
555 	0, 0, 0, 0, 0,
556 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
557 };
558 
559 static struct block_defs block_ncsi_defs = {
560 	"ncsi",
561 	{true, true}, false, 0,
562 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
563 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
564 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
565 	NCSI_REG_DBG_FORCE_FRAME,
566 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
567 };
568 
569 static struct block_defs block_opte_defs = {
570 	"opte", {false, false}, false, 0,
571 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
572 	0, 0, 0, 0, 0,
573 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
574 };
575 
576 static struct block_defs block_bmb_defs = {
577 	"bmb",
578 	{true, true}, false, 0,
579 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
580 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
581 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
582 	BMB_REG_DBG_FORCE_FRAME,
583 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
584 };
585 
586 static struct block_defs block_pcie_defs = {
587 	"pcie",
588 	{false, true}, false, 0,
589 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
590 	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
591 	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
592 	PCIE_REG_DBG_COMMON_FORCE_FRAME,
593 	false, false, MAX_DBG_RESET_REGS, 0
594 };
595 
596 static struct block_defs block_mcp_defs = {
597 	"mcp", {false, false}, false, 0,
598 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
599 	0, 0, 0, 0, 0,
600 	false, false, MAX_DBG_RESET_REGS, 0
601 };
602 
603 static struct block_defs block_mcp2_defs = {
604 	"mcp2",
605 	{true, true}, false, 0,
606 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
607 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
608 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
609 	MCP2_REG_DBG_FORCE_FRAME,
610 	false, false, MAX_DBG_RESET_REGS, 0
611 };
612 
613 static struct block_defs block_pswhst_defs = {
614 	"pswhst",
615 	{true, true}, false, 0,
616 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
617 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
618 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
619 	PSWHST_REG_DBG_FORCE_FRAME,
620 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
621 };
622 
623 static struct block_defs block_pswhst2_defs = {
624 	"pswhst2",
625 	{true, true}, false, 0,
626 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
627 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
628 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
629 	PSWHST2_REG_DBG_FORCE_FRAME,
630 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
631 };
632 
633 static struct block_defs block_pswrd_defs = {
634 	"pswrd",
635 	{true, true}, false, 0,
636 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
637 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
638 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
639 	PSWRD_REG_DBG_FORCE_FRAME,
640 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
641 };
642 
643 static struct block_defs block_pswrd2_defs = {
644 	"pswrd2",
645 	{true, true}, false, 0,
646 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
647 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
648 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
649 	PSWRD2_REG_DBG_FORCE_FRAME,
650 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
651 };
652 
653 static struct block_defs block_pswwr_defs = {
654 	"pswwr",
655 	{true, true}, false, 0,
656 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
657 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
658 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
659 	PSWWR_REG_DBG_FORCE_FRAME,
660 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
661 };
662 
663 static struct block_defs block_pswwr2_defs = {
664 	"pswwr2", {false, false}, false, 0,
665 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
666 	0, 0, 0, 0, 0,
667 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
668 };
669 
670 static struct block_defs block_pswrq_defs = {
671 	"pswrq",
672 	{true, true}, false, 0,
673 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
674 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
675 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
676 	PSWRQ_REG_DBG_FORCE_FRAME,
677 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
678 };
679 
680 static struct block_defs block_pswrq2_defs = {
681 	"pswrq2",
682 	{true, true}, false, 0,
683 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
684 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
685 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
686 	PSWRQ2_REG_DBG_FORCE_FRAME,
687 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
688 };
689 
690 static struct block_defs block_pglcs_defs = {
691 	"pglcs",
692 	{false, true}, false, 0,
693 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
694 	PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
695 	PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
696 	PGLCS_REG_DBG_FORCE_FRAME,
697 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
698 };
699 
700 static struct block_defs block_ptu_defs = {
701 	"ptu",
702 	{true, true}, false, 0,
703 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
704 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
705 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
706 	PTU_REG_DBG_FORCE_FRAME,
707 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
708 };
709 
710 static struct block_defs block_dmae_defs = {
711 	"dmae",
712 	{true, true}, false, 0,
713 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
714 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
715 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
716 	DMAE_REG_DBG_FORCE_FRAME,
717 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
718 };
719 
720 static struct block_defs block_tcm_defs = {
721 	"tcm",
722 	{true, true}, true, DBG_TSTORM_ID,
723 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
724 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
725 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
726 	TCM_REG_DBG_FORCE_FRAME,
727 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
728 };
729 
730 static struct block_defs block_mcm_defs = {
731 	"mcm",
732 	{true, true}, true, DBG_MSTORM_ID,
733 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
734 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
735 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
736 	MCM_REG_DBG_FORCE_FRAME,
737 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
738 };
739 
740 static struct block_defs block_ucm_defs = {
741 	"ucm",
742 	{true, true}, true, DBG_USTORM_ID,
743 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
744 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
745 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
746 	UCM_REG_DBG_FORCE_FRAME,
747 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
748 };
749 
750 static struct block_defs block_xcm_defs = {
751 	"xcm",
752 	{true, true}, true, DBG_XSTORM_ID,
753 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
754 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
755 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
756 	XCM_REG_DBG_FORCE_FRAME,
757 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
758 };
759 
760 static struct block_defs block_ycm_defs = {
761 	"ycm",
762 	{true, true}, true, DBG_YSTORM_ID,
763 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
764 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
765 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
766 	YCM_REG_DBG_FORCE_FRAME,
767 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
768 };
769 
770 static struct block_defs block_pcm_defs = {
771 	"pcm",
772 	{true, true}, true, DBG_PSTORM_ID,
773 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
774 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
775 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
776 	PCM_REG_DBG_FORCE_FRAME,
777 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
778 };
779 
780 static struct block_defs block_qm_defs = {
781 	"qm",
782 	{true, true}, false, 0,
783 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
784 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
785 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
786 	QM_REG_DBG_FORCE_FRAME,
787 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
788 };
789 
790 static struct block_defs block_tm_defs = {
791 	"tm",
792 	{true, true}, false, 0,
793 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
794 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
795 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
796 	TM_REG_DBG_FORCE_FRAME,
797 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
798 };
799 
800 static struct block_defs block_dorq_defs = {
801 	"dorq",
802 	{true, true}, false, 0,
803 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
804 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
805 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
806 	DORQ_REG_DBG_FORCE_FRAME,
807 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
808 };
809 
810 static struct block_defs block_brb_defs = {
811 	"brb",
812 	{true, true}, false, 0,
813 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
814 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
815 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
816 	BRB_REG_DBG_FORCE_FRAME,
817 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
818 };
819 
820 static struct block_defs block_src_defs = {
821 	"src",
822 	{true, true}, false, 0,
823 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
824 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
825 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
826 	SRC_REG_DBG_FORCE_FRAME,
827 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
828 };
829 
830 static struct block_defs block_prs_defs = {
831 	"prs",
832 	{true, true}, false, 0,
833 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
834 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
835 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
836 	PRS_REG_DBG_FORCE_FRAME,
837 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
838 };
839 
840 static struct block_defs block_tsdm_defs = {
841 	"tsdm",
842 	{true, true}, true, DBG_TSTORM_ID,
843 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
844 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
845 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
846 	TSDM_REG_DBG_FORCE_FRAME,
847 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
848 };
849 
850 static struct block_defs block_msdm_defs = {
851 	"msdm",
852 	{true, true}, true, DBG_MSTORM_ID,
853 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
854 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
855 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
856 	MSDM_REG_DBG_FORCE_FRAME,
857 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
858 };
859 
860 static struct block_defs block_usdm_defs = {
861 	"usdm",
862 	{true, true}, true, DBG_USTORM_ID,
863 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
864 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
865 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
866 	USDM_REG_DBG_FORCE_FRAME,
867 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
868 };
869 
870 static struct block_defs block_xsdm_defs = {
871 	"xsdm",
872 	{true, true}, true, DBG_XSTORM_ID,
873 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
874 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
875 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
876 	XSDM_REG_DBG_FORCE_FRAME,
877 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
878 };
879 
880 static struct block_defs block_ysdm_defs = {
881 	"ysdm",
882 	{true, true}, true, DBG_YSTORM_ID,
883 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
884 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
885 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
886 	YSDM_REG_DBG_FORCE_FRAME,
887 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
888 };
889 
890 static struct block_defs block_psdm_defs = {
891 	"psdm",
892 	{true, true}, true, DBG_PSTORM_ID,
893 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
894 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
895 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
896 	PSDM_REG_DBG_FORCE_FRAME,
897 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
898 };
899 
900 static struct block_defs block_tsem_defs = {
901 	"tsem",
902 	{true, true}, true, DBG_TSTORM_ID,
903 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
904 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
905 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
906 	TSEM_REG_DBG_FORCE_FRAME,
907 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
908 };
909 
910 static struct block_defs block_msem_defs = {
911 	"msem",
912 	{true, true}, true, DBG_MSTORM_ID,
913 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
914 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
915 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
916 	MSEM_REG_DBG_FORCE_FRAME,
917 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
918 };
919 
920 static struct block_defs block_usem_defs = {
921 	"usem",
922 	{true, true}, true, DBG_USTORM_ID,
923 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
924 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
925 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
926 	USEM_REG_DBG_FORCE_FRAME,
927 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
928 };
929 
930 static struct block_defs block_xsem_defs = {
931 	"xsem",
932 	{true, true}, true, DBG_XSTORM_ID,
933 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
934 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
935 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
936 	XSEM_REG_DBG_FORCE_FRAME,
937 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
938 };
939 
940 static struct block_defs block_ysem_defs = {
941 	"ysem",
942 	{true, true}, true, DBG_YSTORM_ID,
943 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
944 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
945 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
946 	YSEM_REG_DBG_FORCE_FRAME,
947 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
948 };
949 
950 static struct block_defs block_psem_defs = {
951 	"psem",
952 	{true, true}, true, DBG_PSTORM_ID,
953 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
954 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
955 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
956 	PSEM_REG_DBG_FORCE_FRAME,
957 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
958 };
959 
960 static struct block_defs block_rss_defs = {
961 	"rss",
962 	{true, true}, false, 0,
963 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
964 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
965 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
966 	RSS_REG_DBG_FORCE_FRAME,
967 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
968 };
969 
970 static struct block_defs block_tmld_defs = {
971 	"tmld",
972 	{true, true}, false, 0,
973 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
974 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
975 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
976 	TMLD_REG_DBG_FORCE_FRAME,
977 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
978 };
979 
980 static struct block_defs block_muld_defs = {
981 	"muld",
982 	{true, true}, false, 0,
983 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
984 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
985 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
986 	MULD_REG_DBG_FORCE_FRAME,
987 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
988 };
989 
990 static struct block_defs block_yuld_defs = {
991 	"yuld",
992 	{true, true}, false, 0,
993 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
994 	YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
995 	YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
996 	YULD_REG_DBG_FORCE_FRAME,
997 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
998 };
999 
1000 static struct block_defs block_xyld_defs = {
1001 	"xyld",
1002 	{true, true}, false, 0,
1003 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1004 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1005 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1006 	XYLD_REG_DBG_FORCE_FRAME,
1007 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1008 };
1009 
1010 static struct block_defs block_prm_defs = {
1011 	"prm",
1012 	{true, true}, false, 0,
1013 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1014 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1015 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1016 	PRM_REG_DBG_FORCE_FRAME,
1017 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1018 };
1019 
1020 static struct block_defs block_pbf_pb1_defs = {
1021 	"pbf_pb1",
1022 	{true, true}, false, 0,
1023 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1024 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1025 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1026 	PBF_PB1_REG_DBG_FORCE_FRAME,
1027 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1028 	11
1029 };
1030 
1031 static struct block_defs block_pbf_pb2_defs = {
1032 	"pbf_pb2",
1033 	{true, true}, false, 0,
1034 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1035 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1036 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1037 	PBF_PB2_REG_DBG_FORCE_FRAME,
1038 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1039 	12
1040 };
1041 
1042 static struct block_defs block_rpb_defs = {
1043 	"rpb",
1044 	{true, true}, false, 0,
1045 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1046 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1047 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1048 	RPB_REG_DBG_FORCE_FRAME,
1049 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1050 };
1051 
1052 static struct block_defs block_btb_defs = {
1053 	"btb",
1054 	{true, true}, false, 0,
1055 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
1056 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1057 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1058 	BTB_REG_DBG_FORCE_FRAME,
1059 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1060 };
1061 
1062 static struct block_defs block_pbf_defs = {
1063 	"pbf",
1064 	{true, true}, false, 0,
1065 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1066 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1067 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1068 	PBF_REG_DBG_FORCE_FRAME,
1069 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1070 };
1071 
1072 static struct block_defs block_rdif_defs = {
1073 	"rdif",
1074 	{true, true}, false, 0,
1075 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1076 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1077 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1078 	RDIF_REG_DBG_FORCE_FRAME,
1079 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1080 };
1081 
1082 static struct block_defs block_tdif_defs = {
1083 	"tdif",
1084 	{true, true}, false, 0,
1085 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1086 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1087 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1088 	TDIF_REG_DBG_FORCE_FRAME,
1089 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1090 };
1091 
1092 static struct block_defs block_cdu_defs = {
1093 	"cdu",
1094 	{true, true}, false, 0,
1095 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1096 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1097 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1098 	CDU_REG_DBG_FORCE_FRAME,
1099 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1100 };
1101 
1102 static struct block_defs block_ccfc_defs = {
1103 	"ccfc",
1104 	{true, true}, false, 0,
1105 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1106 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1107 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1108 	CCFC_REG_DBG_FORCE_FRAME,
1109 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1110 };
1111 
1112 static struct block_defs block_tcfc_defs = {
1113 	"tcfc",
1114 	{true, true}, false, 0,
1115 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1116 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1117 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1118 	TCFC_REG_DBG_FORCE_FRAME,
1119 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1120 };
1121 
1122 static struct block_defs block_igu_defs = {
1123 	"igu",
1124 	{true, true}, false, 0,
1125 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1126 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1127 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1128 	IGU_REG_DBG_FORCE_FRAME,
1129 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1130 };
1131 
1132 static struct block_defs block_cau_defs = {
1133 	"cau",
1134 	{true, true}, false, 0,
1135 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1136 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1137 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1138 	CAU_REG_DBG_FORCE_FRAME,
1139 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1140 };
1141 
1142 static struct block_defs block_umac_defs = {
1143 	"umac",
1144 	{false, true}, false, 0,
1145 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1146 	UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
1147 	UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
1148 	UMAC_REG_DBG_FORCE_FRAME,
1149 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1150 };
1151 
1152 static struct block_defs block_xmac_defs = {
1153 	"xmac", {false, false}, false, 0,
1154 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1155 	0, 0, 0, 0, 0,
1156 	false, false, MAX_DBG_RESET_REGS, 0
1157 };
1158 
1159 static struct block_defs block_dbg_defs = {
1160 	"dbg", {false, false}, false, 0,
1161 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1162 	0, 0, 0, 0, 0,
1163 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1164 };
1165 
1166 static struct block_defs block_nig_defs = {
1167 	"nig",
1168 	{true, true}, false, 0,
1169 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1170 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1171 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1172 	NIG_REG_DBG_FORCE_FRAME,
1173 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1174 };
1175 
1176 static struct block_defs block_wol_defs = {
1177 	"wol",
1178 	{false, true}, false, 0,
1179 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1180 	WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
1181 	WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
1182 	WOL_REG_DBG_FORCE_FRAME,
1183 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1184 };
1185 
1186 static struct block_defs block_bmbn_defs = {
1187 	"bmbn",
1188 	{false, true}, false, 0,
1189 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
1190 	BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
1191 	BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
1192 	BMBN_REG_DBG_FORCE_FRAME,
1193 	false, false, MAX_DBG_RESET_REGS, 0
1194 };
1195 
1196 static struct block_defs block_ipc_defs = {
1197 	"ipc", {false, false}, false, 0,
1198 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1199 	0, 0, 0, 0, 0,
1200 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1201 };
1202 
1203 static struct block_defs block_nwm_defs = {
1204 	"nwm",
1205 	{false, true}, false, 0,
1206 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1207 	NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
1208 	NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
1209 	NWM_REG_DBG_FORCE_FRAME,
1210 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1211 };
1212 
1213 static struct block_defs block_nws_defs = {
1214 	"nws",
1215 	{false, true}, false, 0,
1216 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1217 	NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
1218 	NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
1219 	NWS_REG_DBG_FORCE_FRAME,
1220 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1221 };
1222 
1223 static struct block_defs block_ms_defs = {
1224 	"ms",
1225 	{false, true}, false, 0,
1226 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1227 	MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
1228 	MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
1229 	MS_REG_DBG_FORCE_FRAME,
1230 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1231 };
1232 
1233 static struct block_defs block_phy_pcie_defs = {
1234 	"phy_pcie",
1235 	{false, true}, false, 0,
1236 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1237 	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
1238 	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
1239 	PCIE_REG_DBG_COMMON_FORCE_FRAME,
1240 	false, false, MAX_DBG_RESET_REGS, 0
1241 };
1242 
1243 static struct block_defs block_led_defs = {
1244 	"led", {false, false}, false, 0,
1245 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1246 	0, 0, 0, 0, 0,
1247 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1248 };
1249 
1250 static struct block_defs block_avs_wrap_defs = {
1251 	"avs_wrap", {false, false}, false, 0,
1252 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1253 	0, 0, 0, 0, 0,
1254 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1255 };
1256 
1257 static struct block_defs block_rgfs_defs = {
1258 	"rgfs", {false, false}, false, 0,
1259 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1260 	0, 0, 0, 0, 0,
1261 	false, false, MAX_DBG_RESET_REGS, 0
1262 };
1263 
1264 static struct block_defs block_tgfs_defs = {
1265 	"tgfs", {false, false}, false, 0,
1266 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1267 	0, 0, 0, 0, 0,
1268 	false, false, MAX_DBG_RESET_REGS, 0
1269 };
1270 
1271 static struct block_defs block_ptld_defs = {
1272 	"ptld", {false, false}, false, 0,
1273 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1274 	0, 0, 0, 0, 0,
1275 	false, false, MAX_DBG_RESET_REGS, 0
1276 };
1277 
1278 static struct block_defs block_ypld_defs = {
1279 	"ypld", {false, false}, false, 0,
1280 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1281 	0, 0, 0, 0, 0,
1282 	false, false, MAX_DBG_RESET_REGS, 0
1283 };
1284 
1285 static struct block_defs block_misc_aeu_defs = {
1286 	"misc_aeu", {false, false}, false, 0,
1287 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1288 	0, 0, 0, 0, 0,
1289 	false, false, MAX_DBG_RESET_REGS, 0
1290 };
1291 
1292 static struct block_defs block_bar0_map_defs = {
1293 	"bar0_map", {false, false}, false, 0,
1294 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1295 	0, 0, 0, 0, 0,
1296 	false, false, MAX_DBG_RESET_REGS, 0
1297 };
1298 
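/* Pointers to the block definitions above, indexed by block ID */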
1299 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1300 	&block_grc_defs,
1301 	&block_miscs_defs,
1302 	&block_misc_defs,
1303 	&block_dbu_defs,
1304 	&block_pglue_b_defs,
1305 	&block_cnig_defs,
1306 	&block_cpmu_defs,
1307 	&block_ncsi_defs,
1308 	&block_opte_defs,
1309 	&block_bmb_defs,
1310 	&block_pcie_defs,
1311 	&block_mcp_defs,
1312 	&block_mcp2_defs,
1313 	&block_pswhst_defs,
1314 	&block_pswhst2_defs,
1315 	&block_pswrd_defs,
1316 	&block_pswrd2_defs,
1317 	&block_pswwr_defs,
1318 	&block_pswwr2_defs,
1319 	&block_pswrq_defs,
1320 	&block_pswrq2_defs,
1321 	&block_pglcs_defs,
1322 	&block_dmae_defs,
1323 	&block_ptu_defs,
1324 	&block_tcm_defs,
1325 	&block_mcm_defs,
1326 	&block_ucm_defs,
1327 	&block_xcm_defs,
1328 	&block_ycm_defs,
1329 	&block_pcm_defs,
1330 	&block_qm_defs,
1331 	&block_tm_defs,
1332 	&block_dorq_defs,
1333 	&block_brb_defs,
1334 	&block_src_defs,
1335 	&block_prs_defs,
1336 	&block_tsdm_defs,
1337 	&block_msdm_defs,
1338 	&block_usdm_defs,
1339 	&block_xsdm_defs,
1340 	&block_ysdm_defs,
1341 	&block_psdm_defs,
1342 	&block_tsem_defs,
1343 	&block_msem_defs,
1344 	&block_usem_defs,
1345 	&block_xsem_defs,
1346 	&block_ysem_defs,
1347 	&block_psem_defs,
1348 	&block_rss_defs,
1349 	&block_tmld_defs,
1350 	&block_muld_defs,
1351 	&block_yuld_defs,
1352 	&block_xyld_defs,
1353 	&block_prm_defs,
1354 	&block_pbf_pb1_defs,
1355 	&block_pbf_pb2_defs,
1356 	&block_rpb_defs,
1357 	&block_btb_defs,
1358 	&block_pbf_defs,
1359 	&block_rdif_defs,
1360 	&block_tdif_defs,
1361 	&block_cdu_defs,
1362 	&block_ccfc_defs,
1363 	&block_tcfc_defs,
1364 	&block_igu_defs,
1365 	&block_cau_defs,
1366 	&block_umac_defs,
1367 	&block_xmac_defs,
1368 	&block_dbg_defs,
1369 	&block_nig_defs,
1370 	&block_wol_defs,
1371 	&block_bmbn_defs,
1372 	&block_ipc_defs,
1373 	&block_nwm_defs,
1374 	&block_nws_defs,
1375 	&block_ms_defs,
1376 	&block_phy_pcie_defs,
1377 	&block_led_defs,
1378 	&block_avs_wrap_defs,
1379 	&block_rgfs_defs,
1380 	&block_tgfs_defs,
1381 	&block_ptld_defs,
1382 	&block_ypld_defs,
1383 	&block_misc_aeu_defs,
1384 	&block_bar0_map_defs,
1385 };
1386 
1387 static struct platform_defs s_platform_defs[] = {
1388 	{"asic", 1},
1389 	{"reserved", 0},
1390 	{"reserved2", 0},
1391 	{"reserved3", 0}
1392 };
1393 
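/* GRC parameter definitions. Each entry follows struct grc_param_defs:
 * {default_val per chip}, min, max, is_preset, exclude_all_preset_val,
 * crash_preset_val.
 */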
1394 static struct grc_param_defs s_grc_param_defs[] = {
1395 	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
1396 	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
1397 	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
1398 	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
1399 	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
1400 	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
1401 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
1402 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
1403 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
1404 	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
1405 	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
1406 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1407 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
1408 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
1409 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
1410 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
1411 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
1412 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
1413 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
1414 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
1415 	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
1416 	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
1417 	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
1418 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
1419 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
1420 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
1421 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
1422 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
1423 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
1424 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
1425 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
1426 	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
1427 	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1428 	 MAX_LCIDS},			/* DBG_GRC_PARAM_NUM_LCIDS */
1429 	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1430 	 MAX_LTIDS},			/* DBG_GRC_PARAM_NUM_LTIDS */
1431 	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1432 	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
1433 	{{0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
1434 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
1435 	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PHY */
1436 	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_NO_MCP */
1437 	{{0, 0}, 0, 1, false, 0, 0}	/* DBG_GRC_PARAM_NO_FW_VER */
1438 };
1439 
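/* RSS memory definitions. Each entry follows struct rss_mem_defs:
 * memory name, type name, address (in 128b units), num_entries per chip,
 * entry_width per chip (in bits).
 */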
1440 static struct rss_mem_defs s_rss_mem_defs[] = {
1441 	{ "rss_mem_cid", "rss_cid", 0,
1442 	  {256, 320},
1443 	  {32, 32} },
1444 	{ "rss_mem_key_msb", "rss_key", 1024,
1445 	  {128, 208},
1446 	  {256, 256} },
1447 	{ "rss_mem_key_lsb", "rss_key", 2048,
1448 	  {128, 208},
1449 	  {64, 64} },
1450 	{ "rss_mem_info", "rss_info", 3072,
1451 	  {128, 208},
1452 	  {16, 16} },
1453 	{ "rss_mem_ind", "rss_ind", 4096,
1454 	  {(128 * 128), (128 * 208)},
1455 	  {16, 16} }
1456 };
1457 
1458 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1459 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1460 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1461 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1462 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1463 };
1464 
1465 static struct big_ram_defs s_big_ram_defs[] = {
1466 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1467 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1468 	  {4800, 5632} },
1469 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1470 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1471 	  {2880, 3680} },
1472 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1473 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1474 	  {1152, 1152} }
1475 };
1476 
1477 static struct reset_reg_defs s_reset_regs_defs[] = {
1478 	{ MISCS_REG_RESET_PL_UA, 0x0,
1479 	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
1480 	{ MISCS_REG_RESET_PL_HV, 0x0,
1481 	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
1482 	{ MISCS_REG_RESET_PL_HV_2, 0x0,
1483 	  {false, true} },	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1484 	{ MISC_REG_RESET_PL_UA, 0x0,
1485 	  {true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
1486 	{ MISC_REG_RESET_PL_HV, 0x0,
1487 	  {true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
1488 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
1489 	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1490 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
1491 	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1492 	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
1493 	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1494 };
1495 
1496 static struct phy_defs s_phy_defs[] = {
1497 	{"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
1498 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
1499 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
1500 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
1501 	{"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
1502 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
1503 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
1504 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
1505 	{"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
1506 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
1507 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
1508 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
1509 	{"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
1510 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
1511 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
1512 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
1513 };
1514 
1515 /**************************** Private Functions ******************************/
1516 
1517 /* Reads and returns a single dword from the specified unaligned buffer */
1518 static u32 qed_read_unaligned_dword(u8 *buf)
1519 {
1520 	u32 dword;
1521 
1522 	memcpy((u8 *)&dword, buf, sizeof(dword));
1523 	return dword;
1524 }
1525 
1526 /* Returns the value of the specified GRC param */
1527 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1528 			     enum dbg_grc_params grc_param)
1529 {
1530 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1531 
1532 	return dev_data->grc.param_val[grc_param];
1533 }
1534 
1535 /* Initializes the GRC parameters */
1536 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1537 {
1538 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1539 
1540 	if (!dev_data->grc.params_initialized) {
1541 		qed_dbg_grc_set_params_default(p_hwfn);
1542 		dev_data->grc.params_initialized = 1;
1543 	}
1544 }
1545 
1546 /* Initializes debug data for the specified device */
1547 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1548 					struct qed_ptt *p_ptt)
1549 {
1550 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1551 
1552 	if (dev_data->initialized)
1553 		return DBG_STATUS_OK;
1554 
1555 	if (QED_IS_K2(p_hwfn->cdev)) {
1556 		dev_data->chip_id = CHIP_K2;
1557 		dev_data->mode_enable[MODE_K2] = 1;
1558 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1559 		dev_data->chip_id = CHIP_BB_B0;
1560 		dev_data->mode_enable[MODE_BB] = 1;
1561 	} else {
1562 		return DBG_STATUS_UNKNOWN_CHIP;
1563 	}
1564 
1565 	dev_data->platform_id = PLATFORM_ASIC;
1566 	dev_data->mode_enable[MODE_ASIC] = 1;
1567 
1568 	/* Initializes the GRC parameters */
1569 	qed_dbg_grc_init_params(p_hwfn);
1570 
1571 	dev_data->initialized = true;
1572 	return DBG_STATUS_OK;
1573 }
1574 
1575 /* Reads the FW info structure for the specified Storm from the chip,
1576  * and writes it to the specified fw_info pointer.
1577  */
1578 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1579 			     struct qed_ptt *p_ptt,
1580 			     u8 storm_id, struct fw_info *fw_info)
1581 {
	/* First, read the address that points to the fw_info location.
1583 	 * The address is located in the last line of the Storm RAM.
1584 	 */
1585 	u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1586 		   SEM_FAST_REG_INT_RAM +
1587 		   DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1588 		   sizeof(struct fw_info_location);
1589 	struct fw_info_location fw_info_location;
1590 	u32 *dest = (u32 *)&fw_info_location;
1591 	u32 i;
1592 
1593 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1594 	memset(fw_info, 0, sizeof(*fw_info));
1595 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1596 	     i++, addr += BYTES_IN_DWORD)
1597 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
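	/* Read the FW info only if fw_info_location reports a valid size */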
1598 	if (fw_info_location.size > 0 && fw_info_location.size <=
1599 	    sizeof(*fw_info)) {
1600 		/* Read FW version info from Storm RAM */
1601 		addr = fw_info_location.grc_addr;
1602 		dest = (u32 *)fw_info;
1603 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1604 		     i++, addr += BYTES_IN_DWORD)
1605 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1606 	}
1607 }
1608 
1609 /* Dumps the specified string to the specified buffer. Returns the dumped size
 * in bytes (actual length + 1 for the terminating null character).
1611  */
1612 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1613 {
1614 	if (dump)
1615 		strcpy(dump_buf, str);
1616 	return (u32)strlen(str) + 1;
1617 }
1618 
1619 /* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
1620  * in bytes.
1621  */
1622 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1623 {
1624 	u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
1625 
1626 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1627 
1628 	if (dump && align_size)
1629 		memset(dump_buf, 0, align_size);
1630 	return align_size;
1631 }
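
/* Dump params are encoded as a null-terminated name string, a one-byte type
 * indicator (1 = string value, 0 = numeric value), and then the value: a
 * null-terminated string, or a single dword at the next dword-aligned offset.
 * The result is zero-padded to a dword boundary.
 */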
1632 
1633 /* Writes the specified string param to the specified buffer.
1634  * Returns the dumped size in dwords.
1635  */
1636 static u32 qed_dump_str_param(u32 *dump_buf,
1637 			      bool dump,
1638 			      const char *param_name, const char *param_val)
1639 {
1640 	char *char_buf = (char *)dump_buf;
1641 	u32 offset = 0;
1642 
1643 	/* Dump param name */
1644 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1645 
1646 	/* Indicate a string param value */
1647 	if (dump)
1648 		*(char_buf + offset) = 1;
1649 	offset++;
1650 
1651 	/* Dump param value */
1652 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1653 
1654 	/* Align buffer to next dword */
1655 	offset += qed_dump_align(char_buf + offset, dump, offset);
1656 	return BYTES_TO_DWORDS(offset);
1657 }
1658 
1659 /* Writes the specified numeric param to the specified buffer.
1660  * Returns the dumped size in dwords.
1661  */
1662 static u32 qed_dump_num_param(u32 *dump_buf,
1663 			      bool dump, const char *param_name, u32 param_val)
1664 {
1665 	char *char_buf = (char *)dump_buf;
1666 	u32 offset = 0;
1667 
1668 	/* Dump param name */
1669 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1670 
1671 	/* Indicate a numeric param value */
1672 	if (dump)
1673 		*(char_buf + offset) = 0;
1674 	offset++;
1675 
1676 	/* Align buffer to next dword */
1677 	offset += qed_dump_align(char_buf + offset, dump, offset);
1678 
1679 	/* Dump param value (and change offset from bytes to dwords) */
1680 	offset = BYTES_TO_DWORDS(offset);
1681 	if (dump)
1682 		*(dump_buf + offset) = param_val;
1683 	offset++;
1684 	return offset;
1685 }
1686 
1687 /* Reads the FW version and writes it as a param to the specified buffer.
1688  * Returns the dumped size in dwords.
1689  */
1690 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1691 				 struct qed_ptt *p_ptt,
1692 				 u32 *dump_buf, bool dump)
1693 {
1694 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1695 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1696 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1697 	struct fw_info fw_info = { {0}, {0} };
1698 	int printed_chars;
1699 	u32 offset = 0;
1700 
1701 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1702 		/* Read FW image/version from PRAM in a non-reset SEMI */
1703 		bool found = false;
1704 		u8 storm_id;
1705 
1706 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1707 		     storm_id++) {
1708 			/* Read FW version/image  */
1709 			if (!dev_data->block_in_reset
1710 			    [s_storm_defs[storm_id].block_id]) {
1711 				/* read FW info for the current Storm */
1712 				qed_read_fw_info(p_hwfn,
1713 						 p_ptt, storm_id, &fw_info);
1714 
1715 				/* Create FW version/image strings */
1716 				printed_chars =
1717 				    snprintf(fw_ver_str,
1718 					     sizeof(fw_ver_str),
1719 					     "%d_%d_%d_%d",
1720 					     fw_info.ver.num.major,
1721 					     fw_info.ver.num.minor,
1722 					     fw_info.ver.num.rev,
1723 					     fw_info.ver.num.eng);
1724 				if (printed_chars < 0 || printed_chars >=
1725 				    sizeof(fw_ver_str))
1726 					DP_NOTICE(p_hwfn,
1727 						  "Unexpected debug error: invalid FW version string\n");
1728 				switch (fw_info.ver.image_id) {
1729 				case FW_IMG_MAIN:
1730 					strcpy(fw_img_str, "main");
1731 					break;
1732 				default:
1733 					strcpy(fw_img_str, "unknown");
1734 					break;
1735 				}
1736 
1737 				found = true;
1738 			}
1739 		}
1740 	}
1741 
1742 	/* Dump FW version, image and timestamp */
1743 	offset += qed_dump_str_param(dump_buf + offset,
1744 				     dump, "fw-version", fw_ver_str);
1745 	offset += qed_dump_str_param(dump_buf + offset,
1746 				     dump, "fw-image", fw_img_str);
1747 	offset += qed_dump_num_param(dump_buf + offset,
1748 				     dump,
1749 				     "fw-timestamp", fw_info.ver.timestamp);
1750 	return offset;
1751 }
1752 
1753 /* Reads the MFW version and writes it as a param to the specified buffer.
1754  * Returns the dumped size in dwords.
1755  */
1756 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1757 				  struct qed_ptt *p_ptt,
1758 				  u32 *dump_buf, bool dump)
1759 {
1760 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1761 
1762 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1763 		u32 global_section_offsize, global_section_addr, mfw_ver;
1764 		u32 public_data_addr, global_section_offsize_addr;
1765 		int printed_chars;
1766 
1767 		/* Find MCP public data GRC address.
1768 		 * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
1769 		 */
1770 		public_data_addr = qed_rd(p_hwfn, p_ptt,
1771 					  MISC_REG_SHARED_MEM_ADDR) |
1772 					  MCP_REG_SCRATCH;
1773 
1774 		/* Find MCP public global section offset */
1775 		global_section_offsize_addr = public_data_addr +
1776 					      offsetof(struct mcp_public_data,
1777 						       sections) +
1778 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
1779 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
1780 						global_section_offsize_addr);
1781 		global_section_addr = MCP_REG_SCRATCH +
1782 				      (global_section_offsize &
1783 				       OFFSIZE_OFFSET_MASK) * 4;
1784 
1785 		/* Read MFW version from MCP public global section */
1786 		mfw_ver = qed_rd(p_hwfn, p_ptt,
1787 				 global_section_addr +
1788 				 offsetof(struct public_global, mfw_ver));
1789 
1790 		/* Dump MFW version param */
1791 		printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
1792 					 "%d_%d_%d_%d",
1793 					 (u8) (mfw_ver >> 24),
1794 					 (u8) (mfw_ver >> 16),
1795 					 (u8) (mfw_ver >> 8),
1796 					 (u8) mfw_ver);
1797 		if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
1798 			DP_NOTICE(p_hwfn,
1799 				  "Unexpected debug error: invalid MFW version string\n");
1800 	}
1801 
1802 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1803 }
1804 
1805 /* Writes a section header to the specified buffer.
1806  * Returns the dumped size in dwords.
1807  */
1808 static u32 qed_dump_section_hdr(u32 *dump_buf,
1809 				bool dump, const char *name, u32 num_params)
1810 {
1811 	return qed_dump_num_param(dump_buf, dump, name, num_params);
1812 }
1813 
1814 /* Writes the common global params to the specified buffer.
1815  * Returns the dumped size in dwords.
1816  */
1817 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1818 					 struct qed_ptt *p_ptt,
1819 					 u32 *dump_buf,
1820 					 bool dump,
1821 					 u8 num_specific_global_params)
1822 {
1823 	u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
1824 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1825 	u32 offset = 0;
1826 
1827 	/* Find platform string and dump global params section header */
1828 	offset += qed_dump_section_hdr(dump_buf + offset,
1829 				       dump, "global_params", num_params);
1830 
1831 	/* Store params */
1832 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1833 	offset += qed_dump_mfw_ver_param(p_hwfn,
1834 					 p_ptt, dump_buf + offset, dump);
1835 	offset += qed_dump_num_param(dump_buf + offset,
1836 				     dump, "tools-version", TOOLS_VERSION);
1837 	offset += qed_dump_str_param(dump_buf + offset,
1838 				     dump,
1839 				     "chip",
1840 				     s_chip_defs[dev_data->chip_id].name);
1841 	offset += qed_dump_str_param(dump_buf + offset,
1842 				     dump,
1843 				     "platform",
1844 				     s_platform_defs[dev_data->platform_id].
1845 				     name);
1846 	offset +=
1847 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
1848 			       p_hwfn->abs_pf_id);
1849 	return offset;
1850 }
1851 
1852 /* Writes the last section to the specified buffer at the given offset.
1853  * Returns the dumped size in dwords.
1854  */
1855 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1856 {
1857 	u32 start_offset = offset, crc = ~0;
1858 
1859 	/* Dump CRC section header */
1860 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1861 
	/* Calculate CRC32 of everything dumped so far (including the "last"
	 * section header) and store it in the following dword.
	 */
1864 	if (dump)
1865 		*(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
1866 					      DWORDS_TO_BYTES(offset));
1867 	offset++;
1868 	return offset - start_offset;
1869 }
1870 
/* Update blocks reset state */
1872 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1873 					  struct qed_ptt *p_ptt)
1874 {
1875 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1876 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
1877 	u32 i;
1878 
1879 	/* Read reset registers */
1880 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
1881 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
1882 			reg_val[i] = qed_rd(p_hwfn,
1883 					    p_ptt, s_reset_regs_defs[i].addr);
1884 
	/* Check if blocks are in reset: a block is in reset when it has a
	 * reset bit and that bit is currently cleared.
	 */
1886 	for (i = 0; i < MAX_BLOCK_ID; i++)
1887 		dev_data->block_in_reset[i] =
1888 		    s_block_defs[i]->has_reset_bit &&
1889 		    !(reg_val[s_block_defs[i]->reset_reg] &
1890 		      BIT(s_block_defs[i]->reset_bit_offset));
1891 }
1892 
1893 /* Enable / disable the Debug block */
1894 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1895 				     struct qed_ptt *p_ptt, bool enable)
1896 {
1897 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1898 }
1899 
1900 /* Resets the Debug block */
1901 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1902 				    struct qed_ptt *p_ptt)
1903 {
1904 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1905 
1906 	dbg_reset_reg_addr =
1907 		s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
1908 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
1909 	new_reset_reg_val = old_reset_reg_val &
1910 			    ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
1911 
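	/* Assert the DBG block reset by clearing its reset bit, then restore
	 * the original register value to take it out of reset again.
	 */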
1912 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
1913 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
1914 }
1915 
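/* Sets the Debug Bus framing mode */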
1916 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
1917 				     struct qed_ptt *p_ptt,
1918 				     enum dbg_bus_frame_modes mode)
1919 {
1920 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
1921 }
1922 
1923 /* Enable / disable Debug Bus clients according to the specified mask.
1924  * (1 = enable, 0 = disable)
1925  */
1926 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1927 				   struct qed_ptt *p_ptt, u32 client_mask)
1928 {
1929 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
1930 }
1931 
1932 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1933 {
1934 	const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1935 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1936 	u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
1937 	bool arg1, arg2;
1938 
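	/* Each tree node is either an operator (NOT/OR/AND), evaluated
	 * recursively over its operands, or a leaf holding a mode index
	 * (offset by MAX_INIT_MODE_OPS).
	 */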
1939 	switch (tree_val) {
1940 	case INIT_MODE_OP_NOT:
1941 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
1942 	case INIT_MODE_OP_OR:
1943 	case INIT_MODE_OP_AND:
1944 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
1945 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
1946 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1947 							arg2) : (arg1 && arg2);
1948 	default:
1949 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1950 	}
1951 }
1952 
1953 /* Returns true if the specified entity (indicated by GRC param) should be
1954  * included in the dump, false otherwise.
1955  */
1956 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1957 				enum dbg_grc_params grc_param)
1958 {
1959 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
1960 }
1961 
/* Returns true if the specified Storm should be included in the dump, false
1963  * otherwise.
1964  */
1965 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1966 				      enum dbg_storms storm)
1967 {
1968 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1969 }
1970 
1971 /* Returns true if the specified memory should be included in the dump, false
1972  * otherwise.
1973  */
1974 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1975 				    enum block_id block_id, u8 mem_group_id)
1976 {
1977 	u8 i;
1978 
1979 	/* Check Storm match */
1980 	if (s_block_defs[block_id]->associated_to_storm &&
1981 	    !qed_grc_is_storm_included(p_hwfn,
1982 			(enum dbg_storms)s_block_defs[block_id]->storm_id))
1983 		return false;
1984 
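	/* Check Big RAM match */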
1985 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
1986 		if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
1987 		    mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
1988 			return qed_grc_is_included(p_hwfn,
1989 						   s_big_ram_defs[i].grc_param);
1990 	if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
1991 	    MEM_GROUP_PXP_MEM)
1992 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1993 	if (mem_group_id == MEM_GROUP_RAM)
1994 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1995 	if (mem_group_id == MEM_GROUP_PBUF)
1996 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1997 	if (mem_group_id == MEM_GROUP_CAU_MEM ||
1998 	    mem_group_id == MEM_GROUP_CAU_SB ||
1999 	    mem_group_id == MEM_GROUP_CAU_PI)
2000 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2001 	if (mem_group_id == MEM_GROUP_QM_MEM)
2002 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2003 	if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
2004 	    mem_group_id == MEM_GROUP_TASK_CFC_MEM)
2005 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
2006 	if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
2007 	    MEM_GROUP_IGU_MSIX)
2008 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2009 	if (mem_group_id == MEM_GROUP_MULD_MEM)
2010 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2011 	if (mem_group_id == MEM_GROUP_PRS_MEM)
2012 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2013 	if (mem_group_id == MEM_GROUP_DMAE_MEM)
2014 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2015 	if (mem_group_id == MEM_GROUP_TM_MEM)
2016 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2017 	if (mem_group_id == MEM_GROUP_SDM_MEM)
2018 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2019 	if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
2020 	    MEM_GROUP_RDIF_CTX)
2021 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2022 	if (mem_group_id == MEM_GROUP_CM_MEM)
2023 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2024 	if (mem_group_id == MEM_GROUP_IOR)
2025 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2026 
2027 	return true;
2028 }
2029 
/* Stalls or unstalls all Storms, depending on the "stall" argument */
2031 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2032 				 struct qed_ptt *p_ptt, bool stall)
2033 {
2034 	u8 reg_val = stall ? 1 : 0;
2035 	u8 storm_id;
2036 
2037 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2038 		if (qed_grc_is_storm_included(p_hwfn,
2039 					      (enum dbg_storms)storm_id)) {
2040 			u32 reg_addr =
2041 			    s_storm_defs[storm_id].sem_fast_mem_addr +
2042 			    SEM_FAST_REG_STALL_0;
2043 
2044 			qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
2045 		}
2046 	}
2047 
2048 	msleep(STALL_DELAY_MS);
2049 }
2050 
2051 /* Takes all blocks out of reset */
2052 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2053 				   struct qed_ptt *p_ptt)
2054 {
2055 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2056 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2057 	u32 i;
2058 
2059 	/* Fill reset regs values */
2060 	for (i = 0; i < MAX_BLOCK_ID; i++)
2061 		if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
2062 			reg_val[s_block_defs[i]->reset_reg] |=
2063 			    BIT(s_block_defs[i]->reset_bit_offset);
2064 
2065 	/* Write reset registers */
2066 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2067 		if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
2068 			reg_val[i] |= s_reset_regs_defs[i].unreset_val;
2069 			if (reg_val[i])
2070 				qed_wr(p_hwfn,
2071 				       p_ptt,
2072 				       s_reset_regs_defs[i].addr +
2073 				       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2074 		}
2075 	}
2076 }
2077 
2078 /* Returns the attention block data of the specified block */
2079 static const struct dbg_attn_block_type_data *
2080 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2081 {
2082 	const struct dbg_attn_block *base_attn_block_arr =
2083 		(const struct dbg_attn_block *)
2084 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2085 
2086 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2087 }
2088 
2089 /* Returns the attention registers of the specified block */
2090 static const struct dbg_attn_reg *
2091 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2092 			u8 *num_attn_regs)
2093 {
2094 	const struct dbg_attn_block_type_data *block_type_data =
2095 		qed_get_block_attn_data(block_id, attn_type);
2096 
2097 	*num_attn_regs = block_type_data->num_regs;
2098 	return &((const struct dbg_attn_reg *)
2099 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2100 							  regs_offset];
2101 }
2102 
2103 /* For each block, clear the status of all parities */
2104 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2105 				   struct qed_ptt *p_ptt)
2106 {
2107 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2108 	u8 reg_idx, num_attn_regs;
2109 	u32 block_id;
2110 
2111 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2112 		const struct dbg_attn_reg *attn_reg_arr;
2113 
2114 		if (dev_data->block_in_reset[block_id])
2115 			continue;
2116 
2117 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2118 						       ATTN_TYPE_PARITY,
2119 						       &num_attn_regs);
2120 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2121 			const struct dbg_attn_reg *reg_data =
2122 				&attn_reg_arr[reg_idx];
2123 
2124 			/* Check mode */
2125 			bool eval_mode = GET_FIELD(reg_data->mode.data,
2126 						   DBG_MODE_HDR_EVAL_MODE) > 0;
2127 			u16 modes_buf_offset =
2128 				GET_FIELD(reg_data->mode.data,
2129 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2130 
2131 			if (!eval_mode ||
2132 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2133 				/* Mode match - read parity status read-clear
2134 				 * register.
2135 				 */
2136 				qed_rd(p_hwfn, p_ptt,
2137 				       DWORDS_TO_BYTES(reg_data->
2138 						       sts_clr_address));
2139 		}
2140 	}
2141 }
2142 
2143 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2144  * The following parameters are dumped:
2145  * - 'count' = num_dumped_entries
2146  * - 'split' = split_type
2147  * - 'id' = split_id (dumped only if split_id >= 0)
2148  * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
2149  *	param_val != NULL)
2150  */
2151 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2152 				 bool dump,
2153 				 u32 num_reg_entries,
2154 				 const char *split_type,
2155 				 int split_id,
2156 				 const char *param_name, const char *param_val)
2157 {
2158 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2159 	u32 offset = 0;
2160 
2161 	offset += qed_dump_section_hdr(dump_buf + offset,
2162 				       dump, "grc_regs", num_params);
2163 	offset += qed_dump_num_param(dump_buf + offset,
2164 				     dump, "count", num_reg_entries);
2165 	offset += qed_dump_str_param(dump_buf + offset,
2166 				     dump, "split", split_type);
2167 	if (split_id >= 0)
2168 		offset += qed_dump_num_param(dump_buf + offset,
2169 					     dump, "id", split_id);
2170 	if (param_name && param_val)
2171 		offset += qed_dump_str_param(dump_buf + offset,
2172 					     dump, param_name, param_val);
2173 	return offset;
2174 }
2175 
2176 /* Dumps the GRC registers in the specified address range.
2177  * Returns the dumped size in dwords.
2178  */
2179 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2180 				   struct qed_ptt *p_ptt, u32 *dump_buf,
2181 				   bool dump, u32 addr, u32 len)
2182 {
2183 	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2184 
2185 	if (dump)
2186 		for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2187 			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2188 	else
2189 		offset += len;
2190 	return offset;
2191 }
2192 
2193 /* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
2194 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
2195 				      u32 len)
2196 {
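	/* The header dword holds the dword-aligned GRC address in its low bits
	 * and the entry length (in dwords) above REG_DUMP_LEN_SHIFT.
	 */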
2197 	if (dump)
2198 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2199 	return 1;
2200 }
2201 
2202 /* Dumps GRC registers sequence. Returns the dumped size in dwords. */
2203 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2204 				  struct qed_ptt *p_ptt, u32 *dump_buf,
2205 				  bool dump, u32 addr, u32 len)
2206 {
2207 	u32 offset = 0;
2208 
2209 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2210 	offset += qed_grc_dump_addr_range(p_hwfn,
2211 					  p_ptt,
2212 					  dump_buf + offset, dump, addr, len);
2213 	return offset;
2214 }
2215 
2216 /* Dumps GRC registers sequence with skip cycle.
2217  * Returns the dumped size in dwords.
2218  */
2219 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2220 				       struct qed_ptt *p_ptt, u32 *dump_buf,
2221 				       bool dump, u32 addr, u32 total_len,
2222 				       u32 read_len, u32 skip_len)
2223 {
2224 	u32 offset = 0, reg_offset = 0;
2225 
2226 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2227 	if (dump) {
2228 		while (reg_offset < total_len) {
2229 			u32 curr_len = min_t(u32,
2230 					     read_len,
2231 					     total_len - reg_offset);
2232 			offset += qed_grc_dump_addr_range(p_hwfn,
2233 							  p_ptt,
2234 							  dump_buf + offset,
2235 							  dump, addr, curr_len);
2236 			reg_offset += curr_len;
2237 			addr += curr_len;
2238 			if (reg_offset < total_len) {
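				/* Fill the skipped registers with zeros */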
				curr_len = min_t(u32,
						 skip_len,
						 total_len - reg_offset);
2242 				memset(dump_buf + offset, 0,
2243 				       DWORDS_TO_BYTES(curr_len));
2244 				offset += curr_len;
2245 				reg_offset += curr_len;
2246 				addr += curr_len;
2247 			}
2248 		}
2249 	} else {
2250 		offset += total_len;
2251 	}
2252 
2253 	return offset;
2254 }
2255 
2256 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2257 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2258 				     struct qed_ptt *p_ptt,
2259 				     struct dbg_array input_regs_arr,
2260 				     u32 *dump_buf,
2261 				     bool dump,
2262 				     bool block_enable[MAX_BLOCK_ID],
2263 				     u32 *num_dumped_reg_entries)
2264 {
2265 	u32 i, offset = 0, input_offset = 0;
2266 	bool mode_match = true;
2267 
2268 	*num_dumped_reg_entries = 0;
2269 	while (input_offset < input_regs_arr.size_in_dwords) {
2270 		const struct dbg_dump_cond_hdr *cond_hdr =
2271 		    (const struct dbg_dump_cond_hdr *)
2272 		    &input_regs_arr.ptr[input_offset++];
2273 		bool eval_mode = GET_FIELD(cond_hdr->mode.data,
2274 					   DBG_MODE_HDR_EVAL_MODE) > 0;
2275 
2276 		/* Check mode/block */
2277 		if (eval_mode) {
2278 			u16 modes_buf_offset =
2279 				GET_FIELD(cond_hdr->mode.data,
2280 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2281 			mode_match = qed_is_mode_match(p_hwfn,
2282 						       &modes_buf_offset);
2283 		}
2284 
2285 		if (mode_match && block_enable[cond_hdr->block_id]) {
2286 			for (i = 0; i < cond_hdr->data_size;
2287 			     i++, input_offset++) {
2288 				const struct dbg_dump_reg *reg =
2289 				    (const struct dbg_dump_reg *)
2290 				    &input_regs_arr.ptr[input_offset];
2291 				u32 addr, len;
2292 
2293 				addr = GET_FIELD(reg->data,
2294 						 DBG_DUMP_REG_ADDRESS);
2295 				len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2296 				offset +=
2297 				    qed_grc_dump_reg_entry(p_hwfn, p_ptt,
2298 							   dump_buf + offset,
2299 							   dump,
2300 							   addr,
2301 							   len);
2302 				(*num_dumped_reg_entries)++;
2303 			}
2304 		} else {
2305 			input_offset += cond_hdr->data_size;
2306 		}
2307 	}
2308 
2309 	return offset;
2310 }
2311 
2312 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2313 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2314 				   struct qed_ptt *p_ptt,
2315 				   struct dbg_array input_regs_arr,
2316 				   u32 *dump_buf,
2317 				   bool dump,
2318 				   bool block_enable[MAX_BLOCK_ID],
2319 				   const char *split_type_name,
2320 				   u32 split_id,
2321 				   const char *param_name,
2322 				   const char *param_val)
2323 {
2324 	u32 num_dumped_reg_entries, offset;
2325 
2326 	/* Calculate register dump header size (and skip it for now) */
2327 	offset = qed_grc_dump_regs_hdr(dump_buf,
2328 				       false,
2329 				       0,
2330 				       split_type_name,
2331 				       split_id, param_name, param_val);
2332 
2333 	/* Dump registers */
2334 	offset += qed_grc_dump_regs_entries(p_hwfn,
2335 					    p_ptt,
2336 					    input_regs_arr,
2337 					    dump_buf + offset,
2338 					    dump,
2339 					    block_enable,
2340 					    &num_dumped_reg_entries);
2341 
2342 	/* Write register dump header */
2343 	if (dump && num_dumped_reg_entries > 0)
2344 		qed_grc_dump_regs_hdr(dump_buf,
2345 				      dump,
2346 				      num_dumped_reg_entries,
2347 				      split_type_name,
2348 				      split_id, param_name, param_val);
2349 
2350 	return num_dumped_reg_entries > 0 ? offset : 0;
2351 }
2352 
2353 /* Dumps registers according to the input registers array.
2354  * Returns the dumped size in dwords.
2355  */
2356 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2357 				  struct qed_ptt *p_ptt,
2358 				  u32 *dump_buf,
2359 				  bool dump,
2360 				  bool block_enable[MAX_BLOCK_ID],
2361 				  const char *param_name, const char *param_val)
2362 {
2363 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2364 	struct chip_platform_defs *p_platform_defs;
2365 	u32 offset = 0, input_offset = 0;
2366 	struct chip_defs *p_chip_defs;
2367 	u8 port_id, pf_id, vf_id;
2368 	u16 fid;
2369 
2370 	p_chip_defs = &s_chip_defs[dev_data->chip_id];
2371 	p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
2372 
2373 	if (dump)
2374 		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
2375 	while (input_offset <
2376 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2377 		const struct dbg_dump_split_hdr *split_hdr =
2378 			(const struct dbg_dump_split_hdr *)
2379 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2380 		u8 split_type_id = GET_FIELD(split_hdr->hdr,
2381 					     DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2382 		u32 split_data_size = GET_FIELD(split_hdr->hdr,
2383 						DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2384 		struct dbg_array curr_input_regs_arr = {
2385 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
2386 			split_data_size};
2387 
2388 		switch (split_type_id) {
2389 		case SPLIT_TYPE_NONE:
2390 			offset += qed_grc_dump_split_data(p_hwfn,
2391 							  p_ptt,
2392 							  curr_input_regs_arr,
2393 							  dump_buf + offset,
2394 							  dump,
2395 							  block_enable,
2396 							  "eng",
2397 							  (u32)(-1),
2398 							  param_name,
2399 							  param_val);
2400 			break;
2401 		case SPLIT_TYPE_PORT:
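			/* Pretend to each port and dump its registers */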
2402 			for (port_id = 0; port_id < p_platform_defs->num_ports;
2403 			     port_id++) {
2404 				if (dump)
2405 					qed_port_pretend(p_hwfn, p_ptt,
2406 							 port_id);
2407 				offset +=
2408 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2409 							    curr_input_regs_arr,
2410 							    dump_buf + offset,
2411 							    dump, block_enable,
2412 							    "port", port_id,
2413 							    param_name,
2414 							    param_val);
2415 			}
2416 			break;
2417 		case SPLIT_TYPE_PF:
2418 		case SPLIT_TYPE_PORT_PF:
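			/* Pretend to each PF and dump its registers */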
2419 			for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
2420 			     pf_id++) {
2421 				u8 pfid_shift =
2422 					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2423 
2424 				if (dump) {
2425 					fid = pf_id << pfid_shift;
2426 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2427 				}
2428 
2429 				offset +=
2430 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2431 							    curr_input_regs_arr,
2432 							    dump_buf + offset,
2433 							    dump, block_enable,
2434 							    "pf", pf_id,
2435 							    param_name,
2436 							    param_val);
2437 			}
2438 			break;
2439 		case SPLIT_TYPE_VF:
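			/* Pretend to each VF and dump its registers */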
2440 			for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
2441 			     vf_id++) {
2442 				u8 vfvalid_shift =
2443 					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2444 				u8 vfid_shift =
2445 					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2446 
2447 				if (dump) {
2448 					fid = BIT(vfvalid_shift) |
2449 					      (vf_id << vfid_shift);
2450 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2451 				}
2452 
2453 				offset +=
2454 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2455 							    curr_input_regs_arr,
2456 							    dump_buf + offset,
2457 							    dump, block_enable,
2458 							    "vf", vf_id,
2459 							    param_name,
2460 							    param_val);
2461 			}
2462 			break;
2463 		default:
2464 			break;
2465 		}
2466 
2467 		input_offset += split_data_size;
2468 	}
2469 
2470 	/* Pretend to original PF */
2471 	if (dump) {
2472 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2473 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2474 	}
2475 
2476 	return offset;
2477 }
2478 
2479 /* Dump reset registers. Returns the dumped size in dwords. */
2480 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2481 				   struct qed_ptt *p_ptt,
2482 				   u32 *dump_buf, bool dump)
2483 {
2484 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2485 	u32 i, offset = 0, num_regs = 0;
2486 
2487 	/* Calculate header size */
2488 	offset += qed_grc_dump_regs_hdr(dump_buf,
2489 					false, 0, "eng", -1, NULL, NULL);
2490 
2491 	/* Write reset registers */
2492 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2493 		if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
2494 			u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
2495 
2496 			offset += qed_grc_dump_reg_entry(p_hwfn,
2497 							 p_ptt,
2498 							 dump_buf + offset,
2499 							 dump,
2500 							 addr,
2501 							 1);
2502 			num_regs++;
2503 		}
2504 	}
2505 
2506 	/* Write header */
2507 	if (dump)
2508 		qed_grc_dump_regs_hdr(dump_buf,
2509 				      true, num_regs, "eng", -1, NULL, NULL);
2510 	return offset;
2511 }
2512 
/* Dumps registers that are modified during the GRC dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 */
2516 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2517 				      struct qed_ptt *p_ptt,
2518 				      u32 *dump_buf, bool dump)
2519 {
2520 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2521 	u32 offset = 0, num_reg_entries = 0, block_id;
2522 	u8 storm_id, reg_idx, num_attn_regs;
2523 
2524 	/* Calculate header size */
2525 	offset += qed_grc_dump_regs_hdr(dump_buf,
2526 					false, 0, "eng", -1, NULL, NULL);
2527 
2528 	/* Write parity registers */
2529 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2530 		const struct dbg_attn_reg *attn_reg_arr;
2531 
2532 		if (dev_data->block_in_reset[block_id] && dump)
2533 			continue;
2534 
2535 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2536 						       ATTN_TYPE_PARITY,
2537 						       &num_attn_regs);
2538 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2539 			const struct dbg_attn_reg *reg_data =
2540 				&attn_reg_arr[reg_idx];
2541 			u16 modes_buf_offset;
2542 			bool eval_mode;
2543 			u32 addr;
2544 
2545 			/* Check mode */
2546 			eval_mode = GET_FIELD(reg_data->mode.data,
2547 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2548 			modes_buf_offset =
2549 				GET_FIELD(reg_data->mode.data,
2550 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2551 			if (!eval_mode ||
2552 			    qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
2553 				/* Mode match - read and dump registers */
2554 				addr = reg_data->mask_address;
2555 				offset +=
2556 				    qed_grc_dump_reg_entry(p_hwfn,
2557 							   p_ptt,
2558 							   dump_buf + offset,
2559 							   dump,
2560 							   addr,
2561 							   1);
2562 				addr = GET_FIELD(reg_data->data,
2563 						 DBG_ATTN_REG_STS_ADDRESS);
2564 				offset +=
2565 				    qed_grc_dump_reg_entry(p_hwfn,
2566 							   p_ptt,
2567 							   dump_buf + offset,
2568 							   dump,
2569 							   addr,
2570 							   1);
2571 				num_reg_entries += 2;
2572 			}
2573 		}
2574 	}
2575 
2576 	/* Write storm stall status registers */
2577 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2578 		u32 addr;
2579 
2580 		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
2581 		    dump)
2582 			continue;
2583 
2584 		addr =
2585 		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2586 				    SEM_FAST_REG_STALLED);
2587 		offset += qed_grc_dump_reg_entry(p_hwfn,
2588 						 p_ptt,
2589 						 dump_buf + offset,
2590 						 dump,
2591 						 addr,
2592 						 1);
2593 		num_reg_entries++;
2594 	}
2595 
2596 	/* Write header */
2597 	if (dump)
2598 		qed_grc_dump_regs_hdr(dump_buf,
2599 				      true,
2600 				      num_reg_entries, "eng", -1, NULL, NULL);
2601 	return offset;
2602 }
2603 
2604 /* Dumps registers that can't be represented in the debug arrays */
2605 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2606 				     struct qed_ptt *p_ptt,
2607 				     u32 *dump_buf, bool dump)
2608 {
2609 	u32 offset = 0, addr;
2610 
2611 	offset += qed_grc_dump_regs_hdr(dump_buf,
2612 					dump, 2, "eng", -1, NULL, NULL);
2613 
	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is skipped) */
2617 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2618 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2619 					      p_ptt,
2620 					      dump_buf + offset,
2621 					      dump,
2622 					      addr,
2623 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2624 					      7,
2625 					      1);
2626 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2627 	offset +=
2628 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2629 					p_ptt,
2630 					dump_buf + offset,
2631 					dump,
2632 					addr,
2633 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2634 					7,
2635 					1);
2636 
2637 	return offset;
2638 }
2639 
2640 /* Dumps a GRC memory header (section and params).
2641  * The following parameters are dumped:
2642  * name - name is dumped only if it's not NULL.
2643  * addr - addr is dumped only if name is NULL.
2644  * len - len is always dumped.
2645  * width - bit_width is dumped if it's not zero.
2646  * packed - packed=1 is dumped if it's not false.
2647  * mem_group - mem_group is always dumped.
2648  * is_storm - true only if the memory is related to a Storm.
2649  * storm_letter - storm letter (valid only if is_storm is true).
2650  * Returns the dumped size in dwords.
2651  */
2652 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2653 				u32 *dump_buf,
2654 				bool dump,
2655 				const char *name,
2656 				u32 addr,
2657 				u32 len,
2658 				u32 bit_width,
2659 				bool packed,
2660 				const char *mem_group,
2661 				bool is_storm, char storm_letter)
2662 {
2663 	u8 num_params = 3;
2664 	u32 offset = 0;
2665 	char buf[64];
2666 
2667 	if (!len)
2668 		DP_NOTICE(p_hwfn,
2669 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2670 	if (bit_width)
2671 		num_params++;
2672 	if (packed)
2673 		num_params++;
2674 
2675 	/* Dump section header */
2676 	offset += qed_dump_section_hdr(dump_buf + offset,
2677 				       dump, "grc_mem", num_params);
2678 	if (name) {
2679 		/* Dump name */
2680 		if (is_storm) {
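			/* Prefix the name with "<storm letter>STORM_" by
			 * overwriting the '?' placeholder with the Storm
			 * letter.
			 */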
2681 			strcpy(buf, "?STORM_");
2682 			buf[0] = storm_letter;
2683 			strcpy(buf + strlen(buf), name);
2684 		} else {
2685 			strcpy(buf, name);
2686 		}
2687 
2688 		offset += qed_dump_str_param(dump_buf + offset,
2689 					     dump, "name", buf);
2690 		if (dump)
2691 			DP_VERBOSE(p_hwfn,
2692 				   QED_MSG_DEBUG,
2693 				   "Dumping %d registers from %s...\n",
2694 				   len, buf);
2695 	} else {
2696 		/* Dump address */
2697 		offset += qed_dump_num_param(dump_buf + offset,
2698 					     dump, "addr",
2699 					     DWORDS_TO_BYTES(addr));
2700 		if (dump && len > 64)
2701 			DP_VERBOSE(p_hwfn,
2702 				   QED_MSG_DEBUG,
2703 				   "Dumping %d registers from address 0x%x...\n",
2704 				   len, (u32)DWORDS_TO_BYTES(addr));
2705 	}
2706 
2707 	/* Dump len */
2708 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2709 
2710 	/* Dump bit width */
2711 	if (bit_width)
2712 		offset += qed_dump_num_param(dump_buf + offset,
2713 					     dump, "width", bit_width);
2714 
2715 	/* Dump packed */
2716 	if (packed)
2717 		offset += qed_dump_num_param(dump_buf + offset,
2718 					     dump, "packed", 1);
2719 
2720 	/* Dump reg type */
2721 	if (is_storm) {
2722 		strcpy(buf, "?STORM_");
2723 		buf[0] = storm_letter;
2724 		strcpy(buf + strlen(buf), mem_group);
2725 	} else {
2726 		strcpy(buf, mem_group);
2727 	}
2728 
2729 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2730 	return offset;
2731 }
2732 
2733 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2734  * Returns the dumped size in dwords.
2735  */
2736 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2737 			    struct qed_ptt *p_ptt,
2738 			    u32 *dump_buf,
2739 			    bool dump,
2740 			    const char *name,
2741 			    u32 addr,
2742 			    u32 len,
2743 			    u32 bit_width,
2744 			    bool packed,
2745 			    const char *mem_group,
2746 			    bool is_storm, char storm_letter)
2747 {
2748 	u32 offset = 0;
2749 
2750 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2751 				       dump_buf + offset,
2752 				       dump,
2753 				       name,
2754 				       addr,
2755 				       len,
2756 				       bit_width,
2757 				       packed,
2758 				       mem_group, is_storm, storm_letter);
2759 	offset += qed_grc_dump_addr_range(p_hwfn,
2760 					  p_ptt,
2761 					  dump_buf + offset, dump, addr, len);
2762 	return offset;
2763 }
2764 
2765 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
2766 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2767 				    struct qed_ptt *p_ptt,
2768 				    struct dbg_array input_mems_arr,
2769 				    u32 *dump_buf, bool dump)
2770 {
2771 	u32 i, offset = 0, input_offset = 0;
2772 	bool mode_match = true;
2773 
2774 	while (input_offset < input_mems_arr.size_in_dwords) {
2775 		const struct dbg_dump_cond_hdr *cond_hdr;
2776 		u32 num_entries;
2777 		bool eval_mode;
2778 
2779 		cond_hdr = (const struct dbg_dump_cond_hdr *)
2780 			   &input_mems_arr.ptr[input_offset++];
2781 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2782 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2783 
2784 		/* Check required mode */
2785 		if (eval_mode) {
2786 			u16 modes_buf_offset =
2787 				GET_FIELD(cond_hdr->mode.data,
2788 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2789 
2790 			mode_match = qed_is_mode_match(p_hwfn,
2791 						       &modes_buf_offset);
2792 		}
2793 
2794 		if (!mode_match) {
2795 			input_offset += cond_hdr->data_size;
2796 			continue;
2797 		}
2798 
2799 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2800 		for (i = 0; i < num_entries;
2801 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2802 			const struct dbg_dump_mem *mem =
2803 				(const struct dbg_dump_mem *)
2804 				&input_mems_arr.ptr[input_offset];
2805 			u8 mem_group_id;
2806 
2807 			mem_group_id = GET_FIELD(mem->dword0,
2808 						 DBG_DUMP_MEM_MEM_GROUP_ID);
2809 			if (mem_group_id >= MEM_GROUPS_NUM) {
2810 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2811 				return 0;
2812 			}
2813 
2814 			if (qed_grc_is_mem_included(p_hwfn,
2815 					(enum block_id)cond_hdr->block_id,
2816 					mem_group_id)) {
2817 				u32 mem_addr = GET_FIELD(mem->dword0,
2818 							 DBG_DUMP_MEM_ADDRESS);
2819 				u32 mem_len = GET_FIELD(mem->dword1,
2820 							DBG_DUMP_MEM_LENGTH);
2821 				enum dbg_grc_params grc_param;
2822 				char storm_letter = 'a';
2823 				bool is_storm = false;
2824 
2825 				/* Update memory length for CCFC/TCFC memories
2826 				 * according to number of LCIDs/LTIDs.
2827 				 */
2828 				if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
2829 					if (mem_len % MAX_LCIDS != 0) {
2830 						DP_NOTICE(p_hwfn,
2831 							  "Invalid CCFC connection memory size\n");
2832 						return 0;
2833 					}
2834 
2835 					grc_param = DBG_GRC_PARAM_NUM_LCIDS;
2836 					mem_len = qed_grc_get_param(p_hwfn,
2837 								    grc_param) *
2838 						  (mem_len / MAX_LCIDS);
2839 				} else if (mem_group_id ==
2840 					   MEM_GROUP_TASK_CFC_MEM) {
2841 					if (mem_len % MAX_LTIDS != 0) {
2842 						DP_NOTICE(p_hwfn,
2843 							  "Invalid TCFC task memory size\n");
2844 						return 0;
2845 					}
2846 
2847 					grc_param = DBG_GRC_PARAM_NUM_LTIDS;
2848 					mem_len = qed_grc_get_param(p_hwfn,
2849 								    grc_param) *
2850 						  (mem_len / MAX_LTIDS);
2851 				}
2852 
2853 				/* If memory is associated with Storm, update
2854 				 * Storm details.
2855 				 */
2856 				if (s_block_defs[cond_hdr->block_id]->
2857 							associated_to_storm) {
2858 					is_storm = true;
2859 					storm_letter =
2860 						s_storm_defs[s_block_defs[
2861 						cond_hdr->block_id]->
2862 						storm_id].letter;
2863 				}
2864 
2865 				/* Dump memory */
2866 				offset += qed_grc_dump_mem(p_hwfn, p_ptt,
2867 						dump_buf + offset, dump, NULL,
2868 						mem_addr, mem_len, 0,
2869 						false,
2870 						s_mem_group_names[mem_group_id],
2871 						is_storm, storm_letter);
			}
		}
2874 	}
2875 
2876 	return offset;
2877 }
2878 
2879 /* Dumps GRC memories according to the input array dump_mem.
2880  * Returns the dumped size in dwords.
2881  */
2882 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2883 				 struct qed_ptt *p_ptt,
2884 				 u32 *dump_buf, bool dump)
2885 {
2886 	u32 offset = 0, input_offset = 0;
2887 
2888 	while (input_offset <
2889 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
2890 		const struct dbg_dump_split_hdr *split_hdr =
2891 			(const struct dbg_dump_split_hdr *)
2892 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
2893 		u8 split_type_id = GET_FIELD(split_hdr->hdr,
2894 					     DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2895 		u32 split_data_size = GET_FIELD(split_hdr->hdr,
2896 						DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2897 		struct dbg_array curr_input_mems_arr = {
2898 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
2899 			split_data_size};
2900 
2901 		switch (split_type_id) {
2902 		case SPLIT_TYPE_NONE:
2903 			offset += qed_grc_dump_mem_entries(p_hwfn,
2904 							   p_ptt,
2905 							   curr_input_mems_arr,
2906 							   dump_buf + offset,
2907 							   dump);
2908 			break;
2909 		default:
2910 			DP_NOTICE(p_hwfn,
2911 				  "Dumping split memories is currently not supported\n");
2912 			break;
2913 		}
2914 
2915 		input_offset += split_data_size;
2916 	}
2917 
2918 	return offset;
2919 }
2920 
2921 /* Dumps GRC context data for the specified Storm.
2922  * Returns the dumped size in dwords.
2923  */
2924 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2925 				 struct qed_ptt *p_ptt,
2926 				 u32 *dump_buf,
2927 				 bool dump,
2928 				 const char *name,
2929 				 u32 num_lids,
2930 				 u32 lid_size,
2931 				 u32 rd_reg_addr,
2932 				 u8 storm_id)
2933 {
2934 	u32 i, lid, total_size;
2935 	u32 offset = 0;
2936 
2937 	if (!lid_size)
2938 		return 0;
2939 	lid_size *= BYTES_IN_DWORD;
2940 	total_size = num_lids * lid_size;
2941 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2942 				       dump_buf + offset,
2943 				       dump,
2944 				       name,
2945 				       0,
2946 				       total_size,
2947 				       lid_size * 32,
2948 				       false,
2949 				       name,
2950 				       true, s_storm_defs[storm_id].letter);
2951 
2952 	/* Dump context data */
2953 	if (dump) {
2954 		for (lid = 0; lid < num_lids; lid++) {
2955 			for (i = 0; i < lid_size; i++, offset++) {
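				/* Select the LID via the Storm's CM context
				 * write address, then read one context dword.
				 */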
2956 				qed_wr(p_hwfn,
2957 				       p_ptt,
2958 				       s_storm_defs[storm_id].cm_ctx_wr_addr,
2959 				       BIT(9) | lid);
2960 				*(dump_buf + offset) = qed_rd(p_hwfn,
2961 							      p_ptt,
2962 							      rd_reg_addr);
2963 			}
2964 		}
2965 	} else {
2966 		offset += total_size;
2967 	}
2968 
2969 	return offset;
2970 }
2971 
2972 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2973 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2974 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2975 {
2976 	u32 offset = 0;
2977 	u8 storm_id;
2978 
2979 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2980 		if (!qed_grc_is_storm_included(p_hwfn,
2981 					       (enum dbg_storms)storm_id))
2982 			continue;
2983 
		/* Dump Conn AG context */
2985 		offset +=
2986 			qed_grc_dump_ctx_data(p_hwfn,
2987 					      p_ptt,
2988 					      dump_buf + offset,
2989 					      dump,
2990 					      "CONN_AG_CTX",
2991 					      qed_grc_get_param(p_hwfn,
2992 						    DBG_GRC_PARAM_NUM_LCIDS),
2993 					      s_storm_defs[storm_id].
2994 						    cm_conn_ag_ctx_lid_size,
2995 					      s_storm_defs[storm_id].
2996 						    cm_conn_ag_ctx_rd_addr,
2997 					      storm_id);
2998 
		/* Dump Conn ST context */
3000 		offset +=
3001 			qed_grc_dump_ctx_data(p_hwfn,
3002 					      p_ptt,
3003 					      dump_buf + offset,
3004 					      dump,
3005 					      "CONN_ST_CTX",
3006 					      qed_grc_get_param(p_hwfn,
3007 						    DBG_GRC_PARAM_NUM_LCIDS),
3008 					      s_storm_defs[storm_id].
3009 						    cm_conn_st_ctx_lid_size,
3010 					      s_storm_defs[storm_id].
3011 						    cm_conn_st_ctx_rd_addr,
3012 					      storm_id);
3013 
		/* Dump Task AG context */
3015 		offset +=
3016 			qed_grc_dump_ctx_data(p_hwfn,
3017 					      p_ptt,
3018 					      dump_buf + offset,
3019 					      dump,
3020 					      "TASK_AG_CTX",
3021 					      qed_grc_get_param(p_hwfn,
3022 						    DBG_GRC_PARAM_NUM_LTIDS),
3023 					      s_storm_defs[storm_id].
3024 						    cm_task_ag_ctx_lid_size,
3025 					      s_storm_defs[storm_id].
3026 						    cm_task_ag_ctx_rd_addr,
3027 					      storm_id);
3028 
		/* Dump Task ST context */
3030 		offset +=
3031 			qed_grc_dump_ctx_data(p_hwfn,
3032 					      p_ptt,
3033 					      dump_buf + offset,
3034 					      dump,
3035 					      "TASK_ST_CTX",
3036 					      qed_grc_get_param(p_hwfn,
3037 						    DBG_GRC_PARAM_NUM_LTIDS),
3038 					      s_storm_defs[storm_id].
3039 						    cm_task_st_ctx_lid_size,
3040 					      s_storm_defs[storm_id].
3041 						    cm_task_st_ctx_rd_addr,
3042 					      storm_id);
3043 	}
3044 
3045 	return offset;
3046 }
3047 
3048 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3049 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3050 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3051 {
3052 	char buf[10] = "IOR_SET_?";
3053 	u8 storm_id, set_id;
3054 	u32 offset = 0;
3055 
3056 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3057 		struct storm_defs *storm = &s_storm_defs[storm_id];
3058 
3059 		if (!qed_grc_is_storm_included(p_hwfn,
3060 					       (enum dbg_storms)storm_id))
3061 			continue;
3062 
3063 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
			u32 addr;

			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
					       SEM_FAST_REG_STORM_REG_FILE) +
			       IOR_SET_OFFSET(set_id);
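			/* Replace the '?' in "IOR_SET_?" with the set index */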
3069 			buf[strlen(buf) - 1] = '0' + set_id;
3070 			offset += qed_grc_dump_mem(p_hwfn,
3071 						   p_ptt,
3072 						   dump_buf + offset,
3073 						   dump,
3074 						   buf,
3075 						   addr,
3076 						   IORS_PER_SET,
3077 						   32,
3078 						   false,
3079 						   "ior",
3080 						   true,
3081 						   storm->letter);
3082 		}
3083 	}
3084 
3085 	return offset;
3086 }
3087 
3088 /* Dump VFC CAM. Returns the dumped size in dwords. */
3089 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3090 				struct qed_ptt *p_ptt,
3091 				u32 *dump_buf, bool dump, u8 storm_id)
3092 {
3093 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3094 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3095 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3096 	u32 offset = 0;
3097 	u32 row, i;
3098 
3099 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3100 				       dump_buf + offset,
3101 				       dump,
3102 				       "vfc_cam",
3103 				       0,
3104 				       total_size,
3105 				       256,
3106 				       false,
3107 				       "vfc_cam",
3108 				       true, s_storm_defs[storm_id].letter);
3109 	if (dump) {
3110 		/* Prepare CAM address */
3111 		SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3112 		for (row = 0; row < VFC_CAM_NUM_ROWS;
3113 		     row++, offset += VFC_CAM_RESP_DWORDS) {
3114 			/* Write VFC CAM command */
3115 			SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3116 			ARR_REG_WR(p_hwfn,
3117 				   p_ptt,
3118 				   s_storm_defs[storm_id].sem_fast_mem_addr +
3119 				   SEM_FAST_REG_VFC_DATA_WR,
3120 				   cam_cmd, VFC_CAM_CMD_DWORDS);
3121 
3122 			/* Write VFC CAM address */
3123 			ARR_REG_WR(p_hwfn,
3124 				   p_ptt,
3125 				   s_storm_defs[storm_id].sem_fast_mem_addr +
3126 				   SEM_FAST_REG_VFC_ADDR,
3127 				   cam_addr, VFC_CAM_ADDR_DWORDS);
3128 
3129 			/* Read VFC CAM read response */
3130 			ARR_REG_RD(p_hwfn,
3131 				   p_ptt,
3132 				   s_storm_defs[storm_id].sem_fast_mem_addr +
3133 				   SEM_FAST_REG_VFC_DATA_RD,
3134 				   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3135 		}
3136 	} else {
3137 		offset += total_size;
3138 	}
3139 
3140 	return offset;
3141 }
3142 
3143 /* Dump VFC RAM. Returns the dumped size in dwords. */
3144 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3145 				struct qed_ptt *p_ptt,
3146 				u32 *dump_buf,
3147 				bool dump,
3148 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3149 {
3150 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3151 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3152 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3153 	u32 offset = 0;
3154 	u32 row, i;
3155 
3156 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3157 				       dump_buf + offset,
3158 				       dump,
3159 				       ram_defs->mem_name,
3160 				       0,
3161 				       total_size,
3162 				       256,
3163 				       false,
3164 				       ram_defs->type_name,
3165 				       true, s_storm_defs[storm_id].letter);
3166 
3167 	/* Prepare RAM address */
3168 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3169 
3170 	if (!dump)
3171 		return offset + total_size;
3172 
3173 	for (row = ram_defs->base_row;
3174 	     row < ram_defs->base_row + ram_defs->num_rows;
3175 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3176 		/* Write VFC RAM command */
3177 		ARR_REG_WR(p_hwfn,
3178 			   p_ptt,
3179 			   s_storm_defs[storm_id].sem_fast_mem_addr +
3180 			   SEM_FAST_REG_VFC_DATA_WR,
3181 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3182 
3183 		/* Write VFC RAM address */
3184 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3185 		ARR_REG_WR(p_hwfn,
3186 			   p_ptt,
3187 			   s_storm_defs[storm_id].sem_fast_mem_addr +
3188 			   SEM_FAST_REG_VFC_ADDR,
3189 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3190 
3191 		/* Read VFC RAM read response */
3192 		ARR_REG_RD(p_hwfn,
3193 			   p_ptt,
3194 			   s_storm_defs[storm_id].sem_fast_mem_addr +
3195 			   SEM_FAST_REG_VFC_DATA_RD,
3196 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3197 	}
3198 
3199 	return offset;
3200 }
3201 
3202 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3203 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3204 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3205 {
3206 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3207 	u8 storm_id, i;
3208 	u32 offset = 0;
3209 
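	/* Dump VFC data only for Storms that have a VFC. The PSTORM VFC is
	 * dumped only on the ASIC platform.
	 */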
3210 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3211 		if (qed_grc_is_storm_included(p_hwfn,
3212 					      (enum dbg_storms)storm_id) &&
3213 		    s_storm_defs[storm_id].has_vfc &&
3214 		    (storm_id != DBG_PSTORM_ID ||
3215 		     dev_data->platform_id == PLATFORM_ASIC)) {
3216 			/* Read CAM */
3217 			offset += qed_grc_dump_vfc_cam(p_hwfn,
3218 						       p_ptt,
3219 						       dump_buf + offset,
3220 						       dump, storm_id);
3221 
3222 			/* Read RAM */
3223 			for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3224 				offset += qed_grc_dump_vfc_ram(p_hwfn,
3225 							       p_ptt,
3226 							       dump_buf +
3227 							       offset,
3228 							       dump,
3229 							       storm_id,
3230 							       &s_vfc_ram_defs
3231 							       [i]);
3232 		}
3233 	}
3234 
3235 	return offset;
3236 }
3237 
3238 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3239 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3240 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3241 {
3242 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3243 	u32 offset = 0;
3244 	u8 rss_mem_id;
3245 
3246 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3247 		struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
3248 		u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
3249 		u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
3250 		u32 total_dwords = (num_entries * entry_width) / 32;
3251 		u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
3252 		bool packed = (entry_width == 16);
3253 		u32 rss_addr = rss_defs->addr;
3254 		u32 i, addr;
3255 
3256 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3257 					       dump_buf + offset,
3258 					       dump,
3259 					       rss_defs->mem_name,
3260 					       0,
3261 					       total_dwords,
3262 					       entry_width,
3263 					       packed,
3264 					       rss_defs->type_name, false, 0);
3265 
3266 		if (!dump) {
3267 			offset += total_dwords;
3268 			continue;
3269 		}
3270 
3271 		/* Dump RSS data */
3272 		for (i = 0; i < total_dwords;
3273 		     i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
3274 			addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3275 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  size);
3283 		}
3284 	}
3285 
3286 	return offset;
3287 }
3288 
3289 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3290 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3291 				struct qed_ptt *p_ptt,
3292 				u32 *dump_buf, bool dump, u8 big_ram_id)
3293 {
3294 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3295 	u32 total_blocks, ram_size, offset = 0, i;
3296 	char mem_name[12] = "???_BIG_RAM";
3297 	char type_name[8] = "???_RAM";
3298 	struct big_ram_defs *big_ram;
3299 
3300 	big_ram = &s_big_ram_defs[big_ram_id];
3301 	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3302 	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3303 
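	/* Overwrite the "???" placeholder in the name templates with the Big
	 * RAM instance name. Using the source length as the strncpy bound
	 * keeps the rest of the template and its null terminator intact.
	 */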
3304 	strncpy(type_name, big_ram->instance_name,
3305 		strlen(big_ram->instance_name));
3306 	strncpy(mem_name, big_ram->instance_name,
3307 		strlen(big_ram->instance_name));
3308 
3309 	/* Dump memory header */
3310 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3311 				       dump_buf + offset,
3312 				       dump,
3313 				       mem_name,
3314 				       0,
3315 				       ram_size,
3316 				       BIG_RAM_BLOCK_SIZE_BYTES * 8,
3317 				       false, type_name, false, 0);
3318 
3319 	if (!dump)
3320 		return offset + ram_size;
3321 
	/* Read and dump Big RAM data (two blocks per iteration) */
3323 	for (i = 0; i < total_blocks / 2; i++) {
3324 		u32 addr, len;
3325 
3326 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3327 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3328 		len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
3329 		offset += qed_grc_dump_addr_range(p_hwfn,
3330 						  p_ptt,
3331 						  dump_buf + offset,
3332 						  dump,
3333 						  addr,
3334 						  len);
3335 	}
3336 
3337 	return offset;
3338 }
3339 
3340 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3341 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3342 {
3343 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3344 	u32 offset = 0, addr;
3345 	bool halted = false;
3346 
3347 	/* Halt MCP */
3348 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3349 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3350 		if (!halted)
3351 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3352 	}
3353 
3354 	/* Dump MCP scratchpad */
3355 	offset += qed_grc_dump_mem(p_hwfn,
3356 				   p_ptt,
3357 				   dump_buf + offset,
3358 				   dump,
3359 				   NULL,
3360 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3361 				   MCP_REG_SCRATCH_SIZE,
3362 				   0, false, "MCP", false, 0);
3363 
3364 	/* Dump MCP cpu_reg_file */
3365 	offset += qed_grc_dump_mem(p_hwfn,
3366 				   p_ptt,
3367 				   dump_buf + offset,
3368 				   dump,
3369 				   NULL,
3370 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3371 				   MCP_REG_CPU_REG_FILE_SIZE,
3372 				   0, false, "MCP", false, 0);
3373 
3374 	/* Dump MCP registers */
3375 	block_enable[BLOCK_MCP] = true;
3376 	offset += qed_grc_dump_registers(p_hwfn,
3377 					 p_ptt,
3378 					 dump_buf + offset,
3379 					 dump, block_enable, "block", "MCP");
3380 
3381 	/* Dump required non-MCP registers */
3382 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3383 					dump, 1, "eng", -1, "block", "MCP");
3384 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3385 	offset += qed_grc_dump_reg_entry(p_hwfn,
3386 					 p_ptt,
3387 					 dump_buf + offset,
3388 					 dump,
3389 					 addr,
3390 					 1);
3391 
3392 	/* Release MCP */
3393 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3394 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3395 	return offset;
3396 }
3397 
/* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
3399 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3400 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3401 {
3402 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3403 	char mem_name[32];
3404 	u8 phy_id;
3405 
3406 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3407 		struct phy_defs *phy_defs = &s_phy_defs[phy_id];
3408 		int printed_chars;
3409 
3410 		printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3411 					 phy_defs->phy_name);
3412 		if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
3413 			DP_NOTICE(p_hwfn,
3414 				  "Unexpected debug error: invalid PHY memory name\n");
3415 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3416 					       dump_buf + offset,
3417 					       dump,
3418 					       mem_name,
3419 					       0,
3420 					       PHY_DUMP_SIZE_DWORDS,
3421 					       16, true, mem_name, false, 0);
3422 		if (dump) {
3423 			u32 addr_lo_addr = phy_defs->base_addr +
3424 					   phy_defs->tbus_addr_lo_addr;
3425 			u32 addr_hi_addr = phy_defs->base_addr +
3426 					   phy_defs->tbus_addr_hi_addr;
3427 			u32 data_lo_addr = phy_defs->base_addr +
3428 					   phy_defs->tbus_data_lo_addr;
3429 			u32 data_hi_addr = phy_defs->base_addr +
3430 					   phy_defs->tbus_data_hi_addr;
3431 			u8 *bytes_buf = (u8 *)(dump_buf + offset);
3432 
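			/* Walk the indirect tbus address space: for each high
			 * address byte, sweep all 256 low addresses and record
			 * the low and high data bytes.
			 */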
3433 			for (tbus_hi_offset = 0;
3434 			     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3435 			     tbus_hi_offset++) {
3436 				qed_wr(p_hwfn,
3437 				       p_ptt, addr_hi_addr, tbus_hi_offset);
3438 				for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3439 				     tbus_lo_offset++) {
3440 					qed_wr(p_hwfn,
3441 					       p_ptt,
3442 					       addr_lo_addr, tbus_lo_offset);
3443 					*(bytes_buf++) =
3444 						(u8)qed_rd(p_hwfn, p_ptt,
3445 							   data_lo_addr);
3446 					*(bytes_buf++) =
3447 						(u8)qed_rd(p_hwfn, p_ptt,
3448 							   data_hi_addr);
3449 				}
3450 			}
3451 		}
3452 
3453 		offset += PHY_DUMP_SIZE_DWORDS;
3454 	}
3455 
3456 	return offset;
3457 }
3458 
3459 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3460 				struct qed_ptt *p_ptt,
3461 				enum block_id block_id,
3462 				u8 line_id,
3463 				u8 cycle_en,
3464 				u8 right_shift, u8 force_valid, u8 force_frame)
3465 {
3466 	struct block_defs *p_block_defs = s_block_defs[block_id];
3467 
3468 	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
3469 	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
3470 	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
3471 	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
3472 	qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
3473 }
3474 
3475 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3476 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3477 				     struct qed_ptt *p_ptt,
3478 				     u32 *dump_buf, bool dump)
3479 {
3480 	u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
3481 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3482 	u32 offset = 0, block_id, line_id;
3483 	struct block_defs *p_block_defs;
3484 
3485 	if (dump) {
3486 		DP_VERBOSE(p_hwfn,
3487 			   QED_MSG_DEBUG, "Dumping static debug data...\n");
3488 
		/* Disable debug output for all blocks */
3490 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3491 			p_block_defs = s_block_defs[block_id];
3492 
3493 			if (p_block_defs->has_dbg_bus[dev_data->chip_id])
3494 				qed_wr(p_hwfn, p_ptt,
3495 				       p_block_defs->dbg_cycle_enable_addr, 0);
3496 		}
3497 
3498 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3499 		qed_bus_set_framing_mode(p_hwfn,
3500 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3501 		qed_wr(p_hwfn,
3502 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3503 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3504 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3505 	}
3506 
3507 	/* Dump all static debug lines for each relevant block */
3508 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3509 		p_block_defs = s_block_defs[block_id];
3510 
3511 		if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
3512 			continue;
3513 
3514 		/* Dump static section params */
3515 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3516 					       dump_buf + offset,
3517 					       dump,
3518 					       p_block_defs->name, 0,
3519 					       block_dwords, 32, false,
3520 					       "STATIC", false, 0);
3521 
3522 		if (dump && !dev_data->block_in_reset[block_id]) {
3523 			u8 dbg_client_id =
3524 				p_block_defs->dbg_client_id[dev_data->chip_id];
3525 			u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3526 			u32 len = STATIC_DEBUG_LINE_DWORDS;
3527 
3528 			/* Enable block's client */
3529 			qed_bus_enable_clients(p_hwfn, p_ptt,
3530 					       BIT(dbg_client_id));
3531 
3532 			for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
3533 			     line_id++) {
3534 				/* Configure debug line ID */
3535 				qed_config_dbg_line(p_hwfn,
3536 						    p_ptt,
3537 						    (enum block_id)block_id,
3538 						    (u8)line_id,
3539 						    0xf, 0, 0, 0);
3540 
3541 				/* Read debug line info */
3542 				offset +=
3543 				    qed_grc_dump_addr_range(p_hwfn,
3544 							    p_ptt,
3545 							    dump_buf + offset,
3546 							    dump,
3547 							    addr,
3548 							    len);
3549 			}
3550 
3551 			/* Disable block's client and debug output */
3552 			qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3553 			qed_wr(p_hwfn, p_ptt,
3554 			       p_block_defs->dbg_cycle_enable_addr, 0);
3555 		} else {
3556 			/* All lines are invalid - dump zeros */
3557 			if (dump)
3558 				memset(dump_buf + offset, 0,
3559 				       DWORDS_TO_BYTES(block_dwords));
3560 			offset += block_dwords;
3561 		}
3562 	}
3563 
3564 	if (dump) {
3565 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3566 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3567 	}
3568 
3569 	return offset;
3570 }
3571 
/* Performs GRC Dump to the specified buffer.
 * Returns the dumped size in dwords through num_dumped_dwords.
 */
3575 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3576 				    struct qed_ptt *p_ptt,
3577 				    u32 *dump_buf,
3578 				    bool dump, u32 *num_dumped_dwords)
3579 {
3580 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3581 	bool parities_masked = false;
3582 	u8 i, port_mode = 0;
3583 	u32 offset = 0;
3584 
3585 	*num_dumped_dwords = 0;
3586 
3587 	/* Find port mode */
3588 	if (dump) {
3589 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3590 		case 0:
3591 			port_mode = 1;
3592 			break;
3593 		case 1:
3594 			port_mode = 2;
3595 			break;
3596 		case 2:
3597 			port_mode = 4;
3598 			break;
3599 		}
3600 	}
3601 
3602 	/* Update reset state */
3603 	if (dump)
3604 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3605 
3606 	/* Dump global params */
3607 	offset += qed_dump_common_global_params(p_hwfn,
3608 						p_ptt,
3609 						dump_buf + offset, dump, 4);
3610 	offset += qed_dump_str_param(dump_buf + offset,
3611 				     dump, "dump-type", "grc-dump");
3612 	offset += qed_dump_num_param(dump_buf + offset,
3613 				     dump,
3614 				     "num-lcids",
3615 				     qed_grc_get_param(p_hwfn,
3616 						DBG_GRC_PARAM_NUM_LCIDS));
3617 	offset += qed_dump_num_param(dump_buf + offset,
3618 				     dump,
3619 				     "num-ltids",
3620 				     qed_grc_get_param(p_hwfn,
3621 						DBG_GRC_PARAM_NUM_LTIDS));
3622 	offset += qed_dump_num_param(dump_buf + offset,
3623 				     dump, "num-ports", port_mode);
3624 
	/* Dump reset registers (dumped before taking blocks out of reset) */
3626 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3627 		offset += qed_grc_dump_reset_regs(p_hwfn,
3628 						  p_ptt,
3629 						  dump_buf + offset, dump);
3630 
3631 	/* Take all blocks out of reset (using reset registers) */
3632 	if (dump) {
3633 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
3634 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3635 	}
3636 
3637 	/* Disable all parities using MFW command */
3638 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3639 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3640 		if (!parities_masked) {
3641 			DP_NOTICE(p_hwfn,
3642 				  "Failed to mask parities using MFW\n");
3643 			if (qed_grc_get_param
3644 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3645 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3646 		}
3647 	}
3648 
3649 	/* Dump modified registers (dumped before modifying them) */
3650 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3651 		offset += qed_grc_dump_modified_regs(p_hwfn,
3652 						     p_ptt,
3653 						     dump_buf + offset, dump);
3654 
3655 	/* Stall storms */
3656 	if (dump &&
3657 	    (qed_grc_is_included(p_hwfn,
3658 				 DBG_GRC_PARAM_DUMP_IOR) ||
3659 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3660 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3661 
	/* Dump all regs */
3663 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3664 		/* Dump all blocks except MCP */
3665 		bool block_enable[MAX_BLOCK_ID];
3666 
3667 		for (i = 0; i < MAX_BLOCK_ID; i++)
3668 			block_enable[i] = true;
3669 		block_enable[BLOCK_MCP] = false;
3670 		offset += qed_grc_dump_registers(p_hwfn,
3671 						 p_ptt,
3672 						 dump_buf +
3673 						 offset,
3674 						 dump,
3675 						 block_enable, NULL, NULL);
3676 
3677 		/* Dump special registers */
3678 		offset += qed_grc_dump_special_regs(p_hwfn,
3679 						    p_ptt,
3680 						    dump_buf + offset, dump);
3681 	}
3682 
3683 	/* Dump memories */
3684 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3685 
3686 	/* Dump MCP */
3687 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3688 		offset += qed_grc_dump_mcp(p_hwfn,
3689 					   p_ptt, dump_buf + offset, dump);
3690 
3691 	/* Dump context */
3692 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3693 		offset += qed_grc_dump_ctx(p_hwfn,
3694 					   p_ptt, dump_buf + offset, dump);
3695 
3696 	/* Dump RSS memories */
3697 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3698 		offset += qed_grc_dump_rss(p_hwfn,
3699 					   p_ptt, dump_buf + offset, dump);
3700 
3701 	/* Dump Big RAM */
3702 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3703 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3704 			offset += qed_grc_dump_big_ram(p_hwfn,
3705 						       p_ptt,
3706 						       dump_buf + offset,
3707 						       dump, i);
3708 
3709 	/* Dump IORs */
3710 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
3711 		offset += qed_grc_dump_iors(p_hwfn,
3712 					    p_ptt, dump_buf + offset, dump);
3713 
3714 	/* Dump VFC */
3715 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
3716 		offset += qed_grc_dump_vfc(p_hwfn,
3717 					   p_ptt, dump_buf + offset, dump);
3718 
3719 	/* Dump PHY tbus */
3720 	if (qed_grc_is_included(p_hwfn,
3721 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3722 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
3723 		offset += qed_grc_dump_phy(p_hwfn,
3724 					   p_ptt, dump_buf + offset, dump);
3725 
	/* Dump static debug data */
3727 	if (qed_grc_is_included(p_hwfn,
3728 				DBG_GRC_PARAM_DUMP_STATIC) &&
3729 	    dev_data->bus.state == DBG_BUS_STATE_IDLE)
3730 		offset += qed_grc_dump_static_debug(p_hwfn,
3731 						    p_ptt,
3732 						    dump_buf + offset, dump);
3733 
3734 	/* Dump last section */
3735 	offset += qed_dump_last_section(dump_buf, offset, dump);
3736 	if (dump) {
3737 		/* Unstall storms */
3738 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3739 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3740 
3741 		/* Clear parity status */
3742 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3743 
3744 		/* Enable all parities using MFW command */
3745 		if (parities_masked)
3746 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3747 	}
3748 
3749 	*num_dumped_dwords = offset;
3750 
3751 	return DBG_STATUS_OK;
3752 }
3753 
3754 /* Writes the specified failing Idle Check rule to the specified buffer.
3755  * Returns the dumped size in dwords.
3756  */
3757 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3758 				     struct qed_ptt *p_ptt,
				     u32 *dump_buf,
3761 				     bool dump,
3762 				     u16 rule_id,
3763 				     const struct dbg_idle_chk_rule *rule,
3764 				     u16 fail_entry_id, u32 *cond_reg_values)
3765 {
3766 	const union dbg_idle_chk_reg *regs = &((const union dbg_idle_chk_reg *)
3767 					       s_dbg_arrays
3768 					       [BIN_BUF_DBG_IDLE_CHK_REGS].
3769 					       ptr)[rule->reg_offset];
3770 	const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
3771 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3772 	struct dbg_idle_chk_result_hdr *hdr =
3773 		(struct dbg_idle_chk_result_hdr *)dump_buf;
3774 	const struct dbg_idle_chk_info_reg *info_regs =
3775 		&regs[rule->num_cond_regs].info_reg;
3776 	u32 next_reg_offset = 0, i, offset = 0;
3777 	u8 reg_id;
3778 
3779 	/* Dump rule data */
3780 	if (dump) {
3781 		memset(hdr, 0, sizeof(*hdr));
3782 		hdr->rule_id = rule_id;
3783 		hdr->mem_entry_id = fail_entry_id;
3784 		hdr->severity = rule->severity;
3785 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
3786 	}
3787 
3788 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
3789 
3790 	/* Dump condition register values */
3791 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3792 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3793 
3794 		/* Write register header */
3795 		if (dump) {
3796 			struct dbg_idle_chk_result_reg_hdr *reg_hdr =
3797 			    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
3798 								   + offset);
3799 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3800 			memset(reg_hdr, 0,
3801 			       sizeof(struct dbg_idle_chk_result_reg_hdr));
3802 			reg_hdr->start_entry = reg->start_entry;
3803 			reg_hdr->size = reg->entry_size;
3804 			SET_FIELD(reg_hdr->data,
3805 				  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3806 				  reg->num_entries > 1 || reg->start_entry > 0
3807 				  ? 1 : 0);
3808 			SET_FIELD(reg_hdr->data,
3809 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3810 
3811 			/* Write register values */
3812 			for (i = 0; i < reg_hdr->size;
3813 			     i++, next_reg_offset++, offset++)
3814 				dump_buf[offset] =
3815 				    cond_reg_values[next_reg_offset];
3816 		} else {
3817 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3818 			    reg->entry_size;
3819 		}
3820 	}
3821 
3822 	/* Dump info register values */
3823 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3824 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3825 		u32 block_id;
3826 
3827 		if (!dump) {
3828 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3829 			continue;
3830 		}
3831 
3832 		/* Check if register's block is in reset */
3833 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3834 		if (block_id >= MAX_BLOCK_ID) {
3835 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
3836 			return 0;
3837 		}
3838 
3839 		if (!dev_data->block_in_reset[block_id]) {
3840 			bool eval_mode = GET_FIELD(reg->mode.data,
3841 						   DBG_MODE_HDR_EVAL_MODE) > 0;
3842 			bool mode_match = true;
3843 
3844 			/* Check mode */
3845 			if (eval_mode) {
3846 				u16 modes_buf_offset =
3847 					GET_FIELD(reg->mode.data,
3848 						DBG_MODE_HDR_MODES_BUF_OFFSET);
3849 				mode_match =
3850 					qed_is_mode_match(p_hwfn,
3851 							  &modes_buf_offset);
3852 			}
3853 
3854 			if (mode_match) {
3855 				u32 addr =
3856 				    GET_FIELD(reg->data,
3857 					      DBG_IDLE_CHK_INFO_REG_ADDRESS);
3858 
3859 				/* Write register header */
3860 				struct dbg_idle_chk_result_reg_hdr *reg_hdr =
3861 					(struct dbg_idle_chk_result_reg_hdr *)
3862 					(dump_buf + offset);
3863 
3864 				offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3865 				hdr->num_dumped_info_regs++;
3866 				memset(reg_hdr, 0, sizeof(*reg_hdr));
3867 				reg_hdr->size = reg->size;
3868 				SET_FIELD(reg_hdr->data,
3869 					  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3870 					  rule->num_cond_regs + reg_id);
3871 
3872 				/* Write register values */
3873 				offset +=
3874 				    qed_grc_dump_addr_range(p_hwfn,
3875 							    p_ptt,
3876 							    dump_buf + offset,
3877 							    dump,
3878 							    addr,
3879 							    reg->size);
3880 			}
3881 		}
3882 	}
3883 
3884 	return offset;
3885 }
3886 
3887 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3888 static u32
3889 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3890 			       u32 *dump_buf, bool dump,
3891 			       const struct dbg_idle_chk_rule *input_rules,
3892 			       u32 num_input_rules, u32 *num_failing_rules)
3893 {
3894 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3895 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3896 	u32 i, offset = 0;
3897 	u16 entry_id;
3898 	u8 reg_id;
3899 
3900 	*num_failing_rules = 0;
3901 	for (i = 0; i < num_input_rules; i++) {
3902 		const struct dbg_idle_chk_cond_reg *cond_regs;
3903 		const struct dbg_idle_chk_rule *rule;
3904 		const union dbg_idle_chk_reg *regs;
3905 		u16 num_reg_entries = 1;
3906 		bool check_rule = true;
3907 		const u32 *imm_values;
3908 
3909 		rule = &input_rules[i];
3910 		regs = &((const union dbg_idle_chk_reg *)
3911 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
3912 			[rule->reg_offset];
3913 		cond_regs = &regs[0].cond_reg;
3914 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
3915 			     [rule->imm_offset];
3916 
		/* Check if all condition register blocks are out of reset, and
		 * find the maximum number of entries (all condition registers
		 * that are memories must have the same size, which is > 1).
		 */
3921 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3922 		     reg_id++) {
3923 			u32 block_id = GET_FIELD(cond_regs[reg_id].data,
3924 						DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3925 
3926 			if (block_id >= MAX_BLOCK_ID) {
3927 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
3928 				return 0;
3929 			}
3930 
3931 			check_rule = !dev_data->block_in_reset[block_id];
3932 			if (cond_regs[reg_id].num_entries > num_reg_entries)
3933 				num_reg_entries = cond_regs[reg_id].num_entries;
3934 		}
3935 
3936 		if (!check_rule && dump)
3937 			continue;
3938 
3939 		if (!dump) {
3940 			u32 entry_dump_size =
3941 				qed_idle_chk_dump_failure(p_hwfn,
3942 							  p_ptt,
3943 							  dump_buf + offset,
3944 							  false,
3945 							  rule->rule_id,
3946 							  rule,
3947 							  0,
3948 							  NULL);
3949 
3950 			offset += num_reg_entries * entry_dump_size;
3951 			(*num_failing_rules) += num_reg_entries;
3952 			continue;
3953 		}
3954 
3955 		/* Go over all register entries (number of entries is the same
3956 		 * for all condition registers).
3957 		 */
3958 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3959 			/* Read current entry of all condition registers */
3960 			u32 next_reg_offset = 0;
3961 
3962 			for (reg_id = 0; reg_id < rule->num_cond_regs;
3963 			     reg_id++) {
3964 				const struct dbg_idle_chk_cond_reg *reg =
3965 					&cond_regs[reg_id];
3966 
				/* Find GRC address (if it's a memory, the
				 * address of the specific entry is calculated).
				 */
3970 				u32 addr =
3971 				    GET_FIELD(reg->data,
3972 					      DBG_IDLE_CHK_COND_REG_ADDRESS);
3973 
3974 				if (reg->num_entries > 1 ||
3975 				    reg->start_entry > 0) {
3976 					u32 padded_entry_size =
3977 					   reg->entry_size > 1 ?
3978 					   roundup_pow_of_two(reg->entry_size) :
3979 					   1;
3980 
3981 					addr += (reg->start_entry + entry_id) *
3982 						padded_entry_size;
3983 				}
3984 
3985 				/* Read registers */
3986 				if (next_reg_offset + reg->entry_size >=
3987 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
3988 					DP_NOTICE(p_hwfn,
3989 						  "idle check registers entry is too large\n");
3990 					return 0;
3991 				}
3992 
3993 				next_reg_offset +=
3994 				    qed_grc_dump_addr_range(p_hwfn,
3995 							    p_ptt,
3996 							    cond_reg_values +
3997 							    next_reg_offset,
3998 							    dump, addr,
3999 							    reg->entry_size);
4000 			}
4001 
4002 			/* Call rule's condition function - a return value of
4003 			 * true indicates failure.
4004 			 */
4005 			if ((*cond_arr[rule->cond_id])(cond_reg_values,
4006 						       imm_values)) {
4007 				offset +=
4008 				    qed_idle_chk_dump_failure(p_hwfn,
4009 							      p_ptt,
4010 							      dump_buf + offset,
4011 							      dump,
4012 							      rule->rule_id,
4013 							      rule,
4014 							      entry_id,
4015 							      cond_reg_values);
4016 				(*num_failing_rules)++;
4017 				break;
4018 			}
4019 		}
4020 	}
4021 
4022 	return offset;
4023 }
4024 
4025 /* Performs Idle Check Dump to the specified buffer.
4026  * Returns the dumped size in dwords.
4027  */
4028 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4029 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4030 {
4031 	u32 offset = 0, input_offset = 0, num_failing_rules = 0;
4032 	u32 num_failing_rules_offset;
4033 
4034 	/* Dump global params */
4035 	offset += qed_dump_common_global_params(p_hwfn,
4036 						p_ptt,
4037 						dump_buf + offset, dump, 1);
4038 	offset += qed_dump_str_param(dump_buf + offset,
4039 				     dump, "dump-type", "idle-chk");
4040 
4041 	/* Dump idle check section header with a single parameter */
4042 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4043 	num_failing_rules_offset = offset;
4044 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4045 	while (input_offset <
4046 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4047 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4048 			(const struct dbg_idle_chk_cond_hdr *)
4049 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4050 			[input_offset++];
4051 		bool eval_mode = GET_FIELD(cond_hdr->mode.data,
4052 					   DBG_MODE_HDR_EVAL_MODE) > 0;
4053 		bool mode_match = true;
4054 
4055 		/* Check mode */
4056 		if (eval_mode) {
4057 			u16 modes_buf_offset =
4058 				GET_FIELD(cond_hdr->mode.data,
4059 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4060 
4061 			mode_match = qed_is_mode_match(p_hwfn,
4062 						       &modes_buf_offset);
4063 		}
4064 
4065 		if (mode_match) {
4066 			u32 curr_failing_rules;
4067 
4068 			offset +=
4069 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4070 				p_ptt,
4071 				dump_buf + offset,
4072 				dump,
4073 				(const struct dbg_idle_chk_rule *)
4074 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4075 				ptr[input_offset],
4076 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4077 				&curr_failing_rules);
4078 			num_failing_rules += curr_failing_rules;
4079 		}
4080 
4081 		input_offset += cond_hdr->data_size;
4082 	}
4083 
4084 	/* Overwrite num_rules parameter */
4085 	if (dump)
4086 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4087 				   dump, "num_rules", num_failing_rules);
4088 
4089 	return offset;
4090 }
4091 
4092 /* Finds the meta data image in NVRAM. */
4093 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4094 					    struct qed_ptt *p_ptt,
4095 					    u32 image_type,
4096 					    u32 *nvram_offset_bytes,
4097 					    u32 *nvram_size_bytes)
4098 {
4099 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4100 	struct mcp_file_att file_att;
4101 
4102 	/* Call NVRAM get file command */
4103 	int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4104 					    p_ptt,
4105 					    DRV_MSG_CODE_NVM_GET_FILE_ATT,
4106 					    image_type,
4107 					    &ret_mcp_resp,
4108 					    &ret_mcp_param,
4109 					    &ret_txn_size,
4110 					    (u32 *)&file_att);
4111 
4112 	/* Check response */
4113 	if (nvm_result ||
4114 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4115 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4116 
4117 	/* Update return values */
4118 	*nvram_offset_bytes = file_att.nvm_start_addr;
4119 	*nvram_size_bytes = file_att.len;
4120 	DP_VERBOSE(p_hwfn,
4121 		   QED_MSG_DEBUG,
4122 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4123 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4124 
4125 	/* Check alignment */
4126 	if (*nvram_size_bytes & 0x3)
4127 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4128 	return DBG_STATUS_OK;
4129 }
4130 
4131 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4132 				      struct qed_ptt *p_ptt,
4133 				      u32 nvram_offset_bytes,
4134 				      u32 nvram_size_bytes, u32 *ret_buf)
4135 {
4136 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
4137 	u32 bytes_to_copy, read_offset = 0;
4138 	s32 bytes_left = nvram_size_bytes;
4139 
4140 	DP_VERBOSE(p_hwfn,
4141 		   QED_MSG_DEBUG,
4142 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4143 		   nvram_size_bytes);
4144 	do {
4145 		bytes_to_copy =
4146 		    (bytes_left >
4147 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4148 
4149 		/* Call NVRAM read command */
4150 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4151 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4152 				       (nvram_offset_bytes +
4153 					read_offset) |
4154 				       (bytes_to_copy <<
4155 					DRV_MB_PARAM_NVM_LEN_SHIFT),
4156 				       &ret_mcp_resp, &ret_mcp_param,
4157 				       &ret_read_size,
4158 				       (u32 *)((u8 *)ret_buf +
4159 					       read_offset)) != 0)
4160 			return DBG_STATUS_NVRAM_READ_FAILED;
4161 
4162 		/* Check response */
4163 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4164 			return DBG_STATUS_NVRAM_READ_FAILED;
4165 
4166 		/* Update read offset */
4167 		read_offset += ret_read_size;
4168 		bytes_left -= ret_read_size;
4169 	} while (bytes_left > 0);
4170 
4171 	return DBG_STATUS_OK;
4172 }
4173 
4174 /* Get info on the MCP Trace data in the scratchpad:
4175  * - trace_data_grc_addr - the GRC address of the trace data
4176  * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
4177  *	the header)
4178  */
4179 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4180 						   struct qed_ptt *p_ptt,
4181 						   u32 *trace_data_grc_addr,
4182 						   u32 *trace_data_size_bytes)
4183 {
4184 	/* Read MCP trace section offsize structure from MCP scratchpad */
4185 	u32 spad_trace_offsize = qed_rd(p_hwfn,
4186 					p_ptt,
4187 					MCP_SPAD_TRACE_OFFSIZE_ADDR);
4188 	u32 signature;
4189 
4190 	/* Extract MCP trace section GRC address from offsize structure (within
4191 	 * scratchpad).
4192 	 */
4193 	*trace_data_grc_addr =
4194 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4195 
4196 	/* Read signature from MCP trace section */
4197 	signature = qed_rd(p_hwfn, p_ptt,
4198 			   *trace_data_grc_addr +
4199 			   offsetof(struct mcp_trace, signature));
4200 	if (signature != MFW_TRACE_SIGNATURE)
4201 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4202 
4203 	/* Read trace size from MCP trace section */
4204 	*trace_data_size_bytes = qed_rd(p_hwfn,
4205 					p_ptt,
4206 					*trace_data_grc_addr +
4207 					offsetof(struct mcp_trace, size));
4208 	return DBG_STATUS_OK;
4209 }
4210 
4211 /* Reads MCP trace meta data image from NVRAM.
4212  * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
4213  *	file)
4214  * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
4215  *	Trace meta data starts (invalid when loaded from file)
4216  * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
4217  */
4218 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4219 						   struct qed_ptt *p_ptt,
4220 						   u32 trace_data_size_bytes,
4221 						   u32 *running_bundle_id,
4222 						   u32 *trace_meta_offset_bytes,
4223 						   u32 *trace_meta_size_bytes)
4224 {
4225 	/* Read MCP trace section offsize structure from MCP scratchpad */
4226 	u32 spad_trace_offsize = qed_rd(p_hwfn,
4227 					p_ptt,
4228 					MCP_SPAD_TRACE_OFFSIZE_ADDR);
4229 
4230 	/* Find running bundle ID */
4231 	u32 running_mfw_addr =
4232 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4233 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4234 	u32 nvram_image_type;
4235 
4236 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4237 	if (*running_bundle_id > 1)
4238 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4239 
4240 	/* Find image in NVRAM */
4241 	nvram_image_type =
4242 	    (*running_bundle_id ==
4243 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4244 
4245 	return qed_find_nvram_image(p_hwfn,
4246 				    p_ptt,
4247 				    nvram_image_type,
4248 				    trace_meta_offset_bytes,
4249 				    trace_meta_size_bytes);
4250 }
4251 
4252 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
4253  * buffer.
4254  */
4255 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4256 					       struct qed_ptt *p_ptt,
4257 					       u32 nvram_offset_in_bytes,
4258 					       u32 size_in_bytes, u32 *buf)
4259 {
4260 	u8 *byte_buf = (u8 *)buf;
4261 	u8 modules_num, i;
4262 	u32 signature;
4263 
4264 	/* Read meta data from NVRAM */
4265 	enum dbg_status status = qed_nvram_read(p_hwfn,
4266 						p_ptt,
4267 						nvram_offset_in_bytes,
4268 						size_in_bytes,
4269 						buf);
4270 
4271 	if (status != DBG_STATUS_OK)
4272 		return status;
4273 
4274 	/* Extract and check first signature */
4275 	signature = qed_read_unaligned_dword(byte_buf);
4276 	byte_buf += sizeof(u32);
4277 	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
4278 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4279 
4280 	/* Extract number of modules */
4281 	modules_num = *(byte_buf++);
4282 
4283 	/* Skip all modules */
4284 	for (i = 0; i < modules_num; i++) {
4285 		u8 module_len = *(byte_buf++);
4286 
4287 		byte_buf += module_len;
4288 	}
4289 
4290 	/* Extract and check second signature */
4291 	signature = qed_read_unaligned_dword(byte_buf);
4292 	byte_buf += sizeof(u32);
4293 	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
4294 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4295 	return DBG_STATUS_OK;
4296 }
4297 
4298 /* Dump MCP Trace */
4299 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4300 					  struct qed_ptt *p_ptt,
4301 					  u32 *dump_buf,
4302 					  bool dump, u32 *num_dumped_dwords)
4303 {
4304 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4305 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4306 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4307 	enum dbg_status status;
4308 	bool mcp_access;
4309 	int halted = 0;
4310 
4311 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4312 
4313 	*num_dumped_dwords = 0;
4314 
4315 	/* Get trace data info */
4316 	status = qed_mcp_trace_get_data_info(p_hwfn,
4317 					     p_ptt,
4318 					     &trace_data_grc_addr,
4319 					     &trace_data_size_bytes);
4320 	if (status != DBG_STATUS_OK)
4321 		return status;
4322 
4323 	/* Dump global params */
4324 	offset += qed_dump_common_global_params(p_hwfn,
4325 						p_ptt,
4326 						dump_buf + offset, dump, 1);
4327 	offset += qed_dump_str_param(dump_buf + offset,
4328 				     dump, "dump-type", "mcp-trace");
4329 
	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. If the halt fails, the MCP trace is taken anyway, with
	 * a small risk that it may be corrupt.
	 */
4334 	if (dump && mcp_access) {
4335 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4336 		if (!halted)
4337 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4338 	}
4339 
4340 	/* Find trace data size */
4341 	trace_data_size_dwords =
4342 		DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4343 			     BYTES_IN_DWORD);
4344 
4345 	/* Dump trace data section header and param */
4346 	offset += qed_dump_section_hdr(dump_buf + offset,
4347 				       dump, "mcp_trace_data", 1);
4348 	offset += qed_dump_num_param(dump_buf + offset,
4349 				     dump, "size", trace_data_size_dwords);
4350 
4351 	/* Read trace data from scratchpad into dump buffer */
4352 	offset += qed_grc_dump_addr_range(p_hwfn,
4353 					  p_ptt,
4354 					  dump_buf + offset,
4355 					  dump,
4356 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4357 					  trace_data_size_dwords);
4358 
4359 	/* Resume MCP (only if halt succeeded) */
4360 	if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
4361 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4362 
4363 	/* Dump trace meta section header */
4364 	offset += qed_dump_section_hdr(dump_buf + offset,
4365 				       dump, "mcp_trace_meta", 1);
4366 
4367 	/* Read trace meta info */
4368 	if (mcp_access) {
4369 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4370 						     p_ptt,
4371 						     trace_data_size_bytes,
4372 						     &running_bundle_id,
4373 						     &trace_meta_offset_bytes,
4374 						     &trace_meta_size_bytes);
4375 		if (status == DBG_STATUS_OK)
4376 			trace_meta_size_dwords =
4377 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4378 	}
4379 
4380 	/* Dump trace meta size param */
4381 	offset += qed_dump_num_param(dump_buf + offset,
4382 				     dump, "size", trace_meta_size_dwords);
4383 
4384 	/* Read trace meta image into dump buffer */
4385 	if (dump && trace_meta_size_dwords)
4386 		status = qed_mcp_trace_read_meta(p_hwfn,
4387 						 p_ptt,
4388 						 trace_meta_offset_bytes,
4389 						 trace_meta_size_bytes,
4390 						 dump_buf + offset);
4391 	if (status == DBG_STATUS_OK)
4392 		offset += trace_meta_size_dwords;
4393 
4394 	*num_dumped_dwords = offset;
4395 
	/* If there is no MCP access, indicate that the dump doesn't contain
	 * the meta data from NVRAM.
	 */
4399 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4400 }
4401 
4402 /* Dump GRC FIFO */
4403 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4404 					 struct qed_ptt *p_ptt,
4405 					 u32 *dump_buf,
4406 					 bool dump, u32 *num_dumped_dwords)
4407 {
4408 	u32 offset = 0, dwords_read, size_param_offset;
4409 	bool fifo_has_data;
4410 
4411 	*num_dumped_dwords = 0;
4412 
4413 	/* Dump global params */
4414 	offset += qed_dump_common_global_params(p_hwfn,
4415 						p_ptt,
4416 						dump_buf + offset, dump, 1);
4417 	offset += qed_dump_str_param(dump_buf + offset,
4418 				     dump, "dump-type", "reg-fifo");
4419 
	/* Dump FIFO data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
4423 	offset += qed_dump_section_hdr(dump_buf + offset,
4424 				       dump, "reg_fifo_data", 1);
4425 	size_param_offset = offset;
4426 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4427 
4428 	if (!dump) {
4429 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4430 		 * test how much data is available, except for reading it.
4431 		 */
4432 		offset += REG_FIFO_DEPTH_DWORDS;
4433 		*num_dumped_dwords = offset;
4434 		return DBG_STATUS_OK;
4435 	}
4436 
4437 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4438 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4439 
	/* Pull available data from the FIFO. Use DMAE since this is widebus
	 * memory and must be accessed atomically. Test dwords_read against the
	 * buffer size since more entries could be added to the FIFO while we
	 * are emptying it.
	 */
4445 	for (dwords_read = 0;
4446 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4447 	     dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
4448 	     REG_FIFO_ELEMENT_DWORDS) {
4449 		if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
4450 				      (u64)(uintptr_t)(&dump_buf[offset]),
4451 				      REG_FIFO_ELEMENT_DWORDS, 0))
4452 			return DBG_STATUS_DMAE_FAILED;
4453 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4454 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4455 	}
4456 
4457 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4458 			   dwords_read);
4459 
4460 	*num_dumped_dwords = offset;
4461 	return DBG_STATUS_OK;
4462 }
4463 
4464 /* Dump IGU FIFO */
4465 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4466 					 struct qed_ptt *p_ptt,
4467 					 u32 *dump_buf,
4468 					 bool dump, u32 *num_dumped_dwords)
4469 {
4470 	u32 offset = 0, dwords_read, size_param_offset;
4471 	bool fifo_has_data;
4472 
4473 	*num_dumped_dwords = 0;
4474 
4475 	/* Dump global params */
4476 	offset += qed_dump_common_global_params(p_hwfn,
4477 						p_ptt,
4478 						dump_buf + offset, dump, 1);
4479 	offset += qed_dump_str_param(dump_buf + offset,
4480 				     dump, "dump-type", "igu-fifo");
4481 
	/* Dump FIFO data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	 */
4485 	offset += qed_dump_section_hdr(dump_buf + offset,
4486 				       dump, "igu_fifo_data", 1);
4487 	size_param_offset = offset;
4488 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4489 
4490 	if (!dump) {
4491 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4492 		 * test how much data is available, except for reading it.
4493 		 */
4494 		offset += IGU_FIFO_DEPTH_DWORDS;
4495 		*num_dumped_dwords = offset;
4496 		return DBG_STATUS_OK;
4497 	}
4498 
4499 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4500 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4501 
	/* Pull available data from the FIFO. Use DMAE since this is widebus
	 * memory and must be accessed atomically. Test dwords_read against the
	 * buffer size since more entries could be added to the FIFO while we
	 * are emptying it.
	 */
4507 	for (dwords_read = 0;
4508 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4509 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
4510 	     IGU_FIFO_ELEMENT_DWORDS) {
4511 		if (qed_dmae_grc2host(p_hwfn, p_ptt,
4512 				      IGU_REG_ERROR_HANDLING_MEMORY,
4513 				      (u64)(uintptr_t)(&dump_buf[offset]),
4514 				      IGU_FIFO_ELEMENT_DWORDS, 0))
4515 			return DBG_STATUS_DMAE_FAILED;
4516 		fifo_has_data =	qed_rd(p_hwfn, p_ptt,
4517 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4518 	}
4519 
4520 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4521 			   dwords_read);
4522 
4523 	*num_dumped_dwords = offset;
4524 	return DBG_STATUS_OK;
4525 }
4526 
4527 /* Protection Override dump */
4528 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4529 						    struct qed_ptt *p_ptt,
4530 						    u32 *dump_buf,
4531 						    bool dump,
4532 						    u32 *num_dumped_dwords)
4533 {
4534 	u32 offset = 0, size_param_offset, override_window_dwords;
4535 
4536 	*num_dumped_dwords = 0;
4537 
4538 	/* Dump global params */
4539 	offset += qed_dump_common_global_params(p_hwfn,
4540 						p_ptt,
4541 						dump_buf + offset, dump, 1);
4542 	offset += qed_dump_str_param(dump_buf + offset,
4543 				     dump, "dump-type", "protection-override");
4544 
4545 	/* Dump data section header and param. The size param is 0 for now, and
4546 	 * is overwritten after reading the data.
4547 	 */
4548 	offset += qed_dump_section_hdr(dump_buf + offset,
4549 				       dump, "protection_override_data", 1);
4550 	size_param_offset = offset;
4551 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4552 
4553 	if (!dump) {
4554 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4555 		*num_dumped_dwords = offset;
4556 		return DBG_STATUS_OK;
4557 	}
4558 
4559 	/* Add override window info to buffer */
4560 	override_window_dwords =
4561 		qed_rd(p_hwfn, p_ptt,
4562 		       GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4563 		       PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4564 	if (qed_dmae_grc2host(p_hwfn, p_ptt,
4565 			      GRC_REG_PROTECTION_OVERRIDE_WINDOW,
4566 			      (u64)(uintptr_t)(dump_buf + offset),
4567 			      override_window_dwords, 0))
4568 		return DBG_STATUS_DMAE_FAILED;
4569 	offset += override_window_dwords;
4570 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4571 			   override_window_dwords);
4572 
4573 	*num_dumped_dwords = offset;
4574 	return DBG_STATUS_OK;
4575 }
4576 
4577 /* Performs FW Asserts Dump to the specified buffer.
4578  * Returns the dumped size in dwords.
4579  */
4580 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4581 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4582 {
4583 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4584 	struct fw_asserts_ram_section *asserts;
4585 	char storm_letter_str[2] = "?";
4586 	struct fw_info fw_info;
4587 	u32 offset = 0;
4588 	u8 storm_id;
4589 
4590 	/* Dump global params */
4591 	offset += qed_dump_common_global_params(p_hwfn,
4592 						p_ptt,
4593 						dump_buf + offset, dump, 1);
4594 	offset += qed_dump_str_param(dump_buf + offset,
4595 				     dump, "dump-type", "fw-asserts");
4596 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4597 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4598 		u32 last_list_idx, addr;
4599 
4600 		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
4601 			continue;
4602 
4603 		/* Read FW info for the current Storm */
4604 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4605 
4606 		asserts = &fw_info.fw_asserts_section;
4607 
4608 		/* Dump FW Asserts section header and params */
4609 		storm_letter_str[0] = s_storm_defs[storm_id].letter;
4610 		offset += qed_dump_section_hdr(dump_buf + offset, dump,
4611 					       "fw_asserts", 2);
4612 		offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
4613 					     storm_letter_str);
4614 		offset += qed_dump_num_param(dump_buf + offset, dump, "size",
4615 					     asserts->list_element_dword_size);
4616 
4617 		if (!dump) {
4618 			offset += asserts->list_element_dword_size;
4619 			continue;
4620 		}
4621 
4622 		/* Read and dump FW Asserts data */
4623 		fw_asserts_section_addr =
4624 			s_storm_defs[storm_id].sem_fast_mem_addr +
4625 			SEM_FAST_REG_INT_RAM +
4626 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4627 		next_list_idx_addr =
4628 			fw_asserts_section_addr +
4629 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4630 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
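		/* The asserts list behaves as a circular buffer:
		 * next_list_idx points at the entry that will be written
		 * next, so the most recent entry is the one just before it,
		 * wrapping to the last element when next_list_idx is 0.
		 */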
4631 		last_list_idx = (next_list_idx > 0
4632 				 ? next_list_idx
4633 				 : asserts->list_num_elements) - 1;
4634 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4635 		       asserts->list_dword_offset +
4636 		       last_list_idx * asserts->list_element_dword_size;
4637 		offset +=
4638 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4639 					    dump_buf + offset,
4640 					    dump, addr,
4641 					    asserts->list_element_dword_size);
4642 	}
4643 
4644 	/* Dump last section */
4645 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
4646 	return offset;
4647 }
4648 
4649 /***************************** Public Functions *******************************/
4650 
4651 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4652 {
4653 	/* Convert binary data to debug arrays */
4654 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4655 	u8 buf_id;
4656 
4657 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4658 		s_dbg_arrays[buf_id].ptr =
4659 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
4660 		s_dbg_arrays[buf_id].size_in_dwords =
4661 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
4662 	}
4663 
4664 	return DBG_STATUS_OK;
4665 }
4666 
4667 /* Assign default GRC param values */
4668 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4669 {
4670 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4671 	u32 i;
4672 
4673 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4674 		dev_data->grc.param_val[i] =
4675 		    s_grc_param_defs[i].default_val[dev_data->chip_id];
4676 }
4677 
4678 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4679 					      struct qed_ptt *p_ptt,
4680 					      u32 *buf_size)
4681 {
4682 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4683 
4684 	*buf_size = 0;
4685 	if (status != DBG_STATUS_OK)
4686 		return status;
4687 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4688 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
4689 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
4690 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
4691 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
4692 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
4693 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4694 }
4695 
4696 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4697 				 struct qed_ptt *p_ptt,
4698 				 u32 *dump_buf,
4699 				 u32 buf_size_in_dwords,
4700 				 u32 *num_dumped_dwords)
4701 {
4702 	u32 needed_buf_size_in_dwords;
4703 	enum dbg_status status;
4704 
4705 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
4706 					       &needed_buf_size_in_dwords);
4707 
4708 	*num_dumped_dwords = 0;
4709 	if (status != DBG_STATUS_OK)
4710 		return status;
4711 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4712 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4713 
4714 	/* GRC Dump */
4715 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4716 
4717 	/* Revert GRC params to their default */
4718 	qed_dbg_grc_set_params_default(p_hwfn);
4719 
4720 	return status;
4721 }
4722 
4723 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4724 						   struct qed_ptt *p_ptt,
4725 						   u32 *buf_size)
4726 {
4727 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4728 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4729 
4730 	*buf_size = 0;
4731 	if (status != DBG_STATUS_OK)
4732 		return status;
4733 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4734 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
4735 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
4736 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
4737 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
4738 	if (!dev_data->idle_chk.buf_size_set) {
4739 		dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
4740 								p_ptt,
4741 								NULL, false);
4742 		dev_data->idle_chk.buf_size_set = true;
4743 	}
4744 
4745 	*buf_size = dev_data->idle_chk.buf_size;
4746 	return DBG_STATUS_OK;
4747 }
4748 
4749 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
4750 				      struct qed_ptt *p_ptt,
4751 				      u32 *dump_buf,
4752 				      u32 buf_size_in_dwords,
4753 				      u32 *num_dumped_dwords)
4754 {
4755 	u32 needed_buf_size_in_dwords;
4756 	enum dbg_status status;
4757 
4758 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
4759 						    &needed_buf_size_in_dwords);
4760 
4761 	*num_dumped_dwords = 0;
4762 	if (status != DBG_STATUS_OK)
4763 		return status;
4764 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4765 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4766 
4767 	/* Update reset state */
4768 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
4769 
4770 	/* Idle Check Dump */
4771 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
4772 
4773 	/* Revert GRC params to their default */
4774 	qed_dbg_grc_set_params_default(p_hwfn);
4775 
4776 	return DBG_STATUS_OK;
4777 }
4778 
4779 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4780 						    struct qed_ptt *p_ptt,
4781 						    u32 *buf_size)
4782 {
4783 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4784 
4785 	*buf_size = 0;
4786 	if (status != DBG_STATUS_OK)
4787 		return status;
4788 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4789 }
4790 
4791 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4792 				       struct qed_ptt *p_ptt,
4793 				       u32 *dump_buf,
4794 				       u32 buf_size_in_dwords,
4795 				       u32 *num_dumped_dwords)
4796 {
4797 	u32 needed_buf_size_in_dwords;
4798 	enum dbg_status status;
4799 
	/* Validate buffer size */
4801 	status =
4802 	    qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
4803 						&needed_buf_size_in_dwords);
4804 
4805 	if (status != DBG_STATUS_OK &&
4806 	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
4807 		return status;
4808 
4809 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4810 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4811 
4812 	/* Update reset state */
4813 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
4814 
4815 	/* Perform dump */
4816 	status = qed_mcp_trace_dump(p_hwfn,
4817 				    p_ptt, dump_buf, true, num_dumped_dwords);
4818 
4819 	/* Revert GRC params to their default */
4820 	qed_dbg_grc_set_params_default(p_hwfn);
4821 
4822 	return status;
4823 }
4824 
4825 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4826 						   struct qed_ptt *p_ptt,
4827 						   u32 *buf_size)
4828 {
4829 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4830 
4831 	*buf_size = 0;
4832 	if (status != DBG_STATUS_OK)
4833 		return status;
4834 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4835 }
4836 
4837 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4838 				      struct qed_ptt *p_ptt,
4839 				      u32 *dump_buf,
4840 				      u32 buf_size_in_dwords,
4841 				      u32 *num_dumped_dwords)
4842 {
4843 	u32 needed_buf_size_in_dwords;
4844 	enum dbg_status status;
4845 
4846 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
4847 						    &needed_buf_size_in_dwords);
4848 
4849 	*num_dumped_dwords = 0;
4850 	if (status != DBG_STATUS_OK)
4851 		return status;
4852 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4853 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4854 
4855 	/* Update reset state */
4856 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
4857 
4858 	status = qed_reg_fifo_dump(p_hwfn,
4859 				   p_ptt, dump_buf, true, num_dumped_dwords);
4860 
4861 	/* Revert GRC params to their default */
4862 	qed_dbg_grc_set_params_default(p_hwfn);
4863 
4864 	return status;
4865 }
4866 
4867 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4868 						   struct qed_ptt *p_ptt,
4869 						   u32 *buf_size)
4870 {
4871 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4872 
4873 	*buf_size = 0;
4874 	if (status != DBG_STATUS_OK)
4875 		return status;
4876 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4877 }
4878 
4879 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4880 				      struct qed_ptt *p_ptt,
4881 				      u32 *dump_buf,
4882 				      u32 buf_size_in_dwords,
4883 				      u32 *num_dumped_dwords)
4884 {
4885 	u32 needed_buf_size_in_dwords;
4886 	enum dbg_status status;
4887 
4888 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
4889 						    &needed_buf_size_in_dwords);
4890 
4891 	*num_dumped_dwords = 0;
4892 	if (status != DBG_STATUS_OK)
4893 		return status;
4894 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4895 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4896 
4897 	/* Update reset state */
4898 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
4899 
4900 	status = qed_igu_fifo_dump(p_hwfn,
				   p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
4903 	qed_dbg_grc_set_params_default(p_hwfn);
4904 
4905 	return status;
4906 }
4907 
4908 enum dbg_status
4909 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4910 					      struct qed_ptt *p_ptt,
4911 					      u32 *buf_size)
4912 {
4913 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4914 
4915 	*buf_size = 0;
4916 	if (status != DBG_STATUS_OK)
4917 		return status;
4918 	return qed_protection_override_dump(p_hwfn,
4919 					    p_ptt, NULL, false, buf_size);
4920 }
4921 
4922 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
4923 						 struct qed_ptt *p_ptt,
4924 						 u32 *dump_buf,
4925 						 u32 buf_size_in_dwords,
4926 						 u32 *num_dumped_dwords)
4927 {
4928 	u32 needed_buf_size_in_dwords;
4929 	enum dbg_status status;
4930 
4931 	status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
4932 						&needed_buf_size_in_dwords);
4933 
4934 	*num_dumped_dwords = 0;
4935 	if (status != DBG_STATUS_OK)
4936 		return status;
4937 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4938 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4939 
4940 	/* Update reset state */
4941 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
4942 
4943 	status = qed_protection_override_dump(p_hwfn,
4944 					      p_ptt,
4945 					      dump_buf,
4946 					      true, num_dumped_dwords);
4947 
4948 	/* Revert GRC params to their default */
4949 	qed_dbg_grc_set_params_default(p_hwfn);
4950 
4951 	return status;
4952 }
4953 
4954 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4955 						     struct qed_ptt *p_ptt,
4956 						     u32 *buf_size)
4957 {
4958 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
4959 
4960 	*buf_size = 0;
4961 	if (status != DBG_STATUS_OK)
4962 		return status;
4963 
4964 	/* Update reset state */
4965 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
4966 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
4967 	return DBG_STATUS_OK;
4968 }
4969 
4970 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4971 					struct qed_ptt *p_ptt,
4972 					u32 *dump_buf,
4973 					u32 buf_size_in_dwords,
4974 					u32 *num_dumped_dwords)
4975 {
4976 	u32 needed_buf_size_in_dwords;
4977 	enum dbg_status status;
4978 
4979 	status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
4980 						&needed_buf_size_in_dwords);
4981 
4982 	*num_dumped_dwords = 0;
4983 	if (status != DBG_STATUS_OK)
4984 		return status;
4985 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4986 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4987 
4988 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
4989 	return DBG_STATUS_OK;
4990 }
4991 
4992 /******************************* Data Types **********************************/
4993 
4994 struct mcp_trace_format {
4995 	u32 data;
4996 #define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
4997 #define MCP_TRACE_FORMAT_MODULE_SHIFT	0
4998 #define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
4999 #define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
5000 #define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
5001 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
5002 #define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
5003 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
5004 #define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
5005 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
5006 #define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
5007 #define MCP_TRACE_FORMAT_LEN_SHIFT	24
5008 	char *format_str;
5009 };
5010 
5011 struct mcp_trace_meta {
5012 	u32 modules_num;
5013 	char **modules;
5014 	u32 formats_num;
5015 	struct mcp_trace_format *formats;
5016 };
5017 
5018 /* Reg fifo element */
5019 struct reg_fifo_element {
5020 	u64 data;
5021 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5022 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5023 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5024 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5025 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5026 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5027 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5028 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5029 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5030 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5031 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5032 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5033 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5034 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5035 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5036 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5037 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5038 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5039 };
5040 
5041 /* IGU fifo element */
5042 struct igu_fifo_element {
5043 	u32 dword0;
5044 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5045 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5046 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5047 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5048 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5049 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5050 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5051 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5052 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5053 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5054 	u32 dword1;
5055 	u32 dword2;
5056 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5057 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5058 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5059 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5060 	u32 reserved;
5061 };
5062 
5063 struct igu_fifo_wr_data {
5064 	u32 data;
5065 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5066 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5067 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5068 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5069 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5070 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5071 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5072 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5073 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5074 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5075 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5076 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5077 };
5078 
5079 struct igu_fifo_cleanup_wr_data {
5080 	u32 data;
5081 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5082 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5083 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5084 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5085 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5086 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5087 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5088 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5089 };
5090 
5091 /* Protection override element */
5092 struct protection_override_element {
5093 	u64 data;
5094 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5095 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5096 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5097 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5098 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5099 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5100 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5101 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5102 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5103 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5104 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5105 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5106 };
5107 
5108 enum igu_fifo_sources {
5109 	IGU_SRC_PXP0,
5110 	IGU_SRC_PXP1,
5111 	IGU_SRC_PXP2,
5112 	IGU_SRC_PXP3,
5113 	IGU_SRC_PXP4,
5114 	IGU_SRC_PXP5,
5115 	IGU_SRC_PXP6,
5116 	IGU_SRC_PXP7,
5117 	IGU_SRC_CAU,
5118 	IGU_SRC_ATTN,
5119 	IGU_SRC_GRC
5120 };
5121 
5122 enum igu_fifo_addr_types {
5123 	IGU_ADDR_TYPE_MSIX_MEM,
5124 	IGU_ADDR_TYPE_WRITE_PBA,
5125 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5126 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5127 	IGU_ADDR_TYPE_READ_INT,
5128 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5129 	IGU_ADDR_TYPE_RESERVED
5130 };
5131 
5132 struct igu_fifo_addr_data {
5133 	u16 start_addr;
5134 	u16 end_addr;
5135 	char *desc;
5136 	char *vf_desc;
5137 	enum igu_fifo_addr_types type;
5138 };
5139 
5140 /******************************** Constants **********************************/
5141 
5142 #define MAX_MSG_LEN				1024
5143 #define MCP_TRACE_MAX_MODULE_LEN		8
5144 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
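/* Width (in bits) of a single parameter-size field in an MCP Trace format
 * descriptor. The size fields appear to be packed back-to-back, so the width
 * is derived from the distance between two adjacent size-field shifts.
 */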
5145 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5146 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5147 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5148 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5149 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
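/* The address fields in reg_fifo and protection override elements appear to
 * be dword-based; the ADDR_FACTOR constants above convert them to byte
 * addresses when the elements are printed.
 */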
5150 
5151 /********************************* Macros ************************************/
5152 
5153 #define BYTES_TO_DWORDS(bytes)			((bytes) / BYTES_IN_DWORD)
5154 
5155 /***************************** Constant Arrays *******************************/
5156 
5157 /* Status string array */
5158 static const char * const s_status_str[] = {
5159 	"Operation completed successfully",
5160 	"Debug application version wasn't set",
5161 	"Unsupported debug application version",
5162 	"The debug block wasn't reset since the last recording",
5163 	"Invalid arguments",
5164 	"The debug output was already set",
5165 	"Invalid PCI buffer size",
5166 	"PCI buffer allocation failed",
5167 	"A PCI buffer wasn't allocated",
5168 	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5169 	"GRC/Timestamp input overlap in cycle dword 0",
5170 	"Cannot record Storm data since the entire recording cycle is used by HW",
5171 	"The Storm was already enabled",
5172 	"The specified Storm wasn't enabled",
5173 	"The block was already enabled",
5174 	"The specified block wasn't enabled",
5175 	"No input was enabled for recording",
5176 	"Filters and triggers are not allowed when recording in 64b units",
5177 	"The filter was already enabled",
5178 	"The trigger was already enabled",
5179 	"The trigger wasn't enabled",
5180 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5181 	"Cannot add more than 3 trigger states",
5182 	"Cannot add more than 4 constraints per filter or trigger state",
5183 	"The recording wasn't started",
5184 	"A trigger was configured, but it didn't trigger",
5185 	"No data was recorded",
5186 	"Dump buffer is too small",
5187 	"Dumped data is not aligned to chunks",
5188 	"Unknown chip",
5189 	"Failed allocating virtual memory",
5190 	"The input block is in reset",
5191 	"Invalid MCP trace signature found in NVRAM",
5192 	"Invalid bundle ID found in NVRAM",
5193 	"Failed getting NVRAM image",
5194 	"NVRAM image is not dword-aligned",
5195 	"Failed reading from NVRAM",
5196 	"Idle check parsing failed",
5197 	"MCP Trace data is corrupt",
5198 	"Dump doesn't contain meta data - it must be provided in an image file",
5199 	"Failed to halt MCP",
5200 	"Failed to resume MCP after halt",
5201 	"DMAE transaction failed",
5202 	"Failed to empty SEMI sync FIFO",
5203 	"IGU FIFO data is corrupt",
5204 	"MCP failed to mask parities",
5205 	"FW Asserts parsing failed",
5206 	"GRC FIFO data is corrupt",
5207 	"Protection Override data is corrupt",
5208 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5209 	"When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
5210 };
5211 
5212 /* Idle check severity names array */
5213 static const char * const s_idle_chk_severity_str[] = {
5214 	"Error",
5215 	"Error if no traffic",
5216 	"Warning"
5217 };
5218 
5219 /* MCP Trace level names array */
5220 static const char * const s_mcp_trace_level_str[] = {
5221 	"ERROR",
5222 	"TRACE",
5223 	"DEBUG"
5224 };
5225 
5226 /* Parsing strings */
5227 static const char * const s_access_strs[] = {
5228 	"read",
5229 	"write"
5230 };
5231 
5232 static const char * const s_privilege_strs[] = {
5233 	"VF",
5234 	"PDA",
5235 	"HV",
5236 	"UA"
5237 };
5238 
5239 static const char * const s_protection_strs[] = {
5240 	"(default)",
5241 	"(default)",
5242 	"(default)",
5243 	"(default)",
5244 	"override VF",
5245 	"override PDA",
5246 	"override HV",
5247 	"override UA"
5248 };
5249 
5250 static const char * const s_master_strs[] = {
5251 	"???",
5252 	"pxp",
5253 	"mcp",
5254 	"msdm",
5255 	"psdm",
5256 	"ysdm",
5257 	"usdm",
5258 	"tsdm",
5259 	"xsdm",
5260 	"dbu",
5261 	"dmae",
5262 	"???",
5263 	"???",
5264 	"???",
5265 	"???",
5266 	"???"
5267 };
5268 
5269 static const char * const s_reg_fifo_error_strs[] = {
5270 	"grc timeout",
5271 	"address doesn't belong to any block",
5272 	"reserved address in block or write to read-only address",
5273 	"privilege/protection mismatch",
5274 	"path isolation error"
5275 };
5276 
5277 static const char * const s_igu_fifo_source_strs[] = {
5278 	"TSTORM",
5279 	"MSTORM",
5280 	"USTORM",
5281 	"XSTORM",
5282 	"YSTORM",
5283 	"PSTORM",
5284 	"PCIE",
5285 	"NIG_QM_PBF",
5286 	"CAU",
5287 	"ATTN",
5288 	"GRC",
5289 };
5290 
5291 static const char * const s_igu_fifo_error_strs[] = {
5292 	"no error",
5293 	"length error",
5294 	"function disabled",
5295 	"VF sent command to attention address",
5296 	"host sent prod update command",
5297 	"read of 'during interrupt' register while in MIMD mode",
5298 	"access to PXP BAR reserved address",
5299 	"producer update command to attention index",
5300 	"unknown error",
5301 	"SB index not valid",
5302 	"SB relative index and FID not found",
5303 	"FID mismatch",
5304 	"command with error flag asserted (PCI error or CAU discard)",
5305 	"VF sent cleanup and RF cleanup is disabled",
5306 	"cleanup command on type bigger than 4"
5307 };
5308 
5309 /* IGU FIFO address data */
5310 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5311 	{0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
5312 	{0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5313 	{0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
5314 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
5315 	 IGU_ADDR_TYPE_WRITE_PBA},
5316 	{0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
5317 	{0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5318 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5319 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
5320 	{0x5f0, 0x5f0, "Attention bits update", NULL,
5321 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5322 	{0x5f1, 0x5f1, "Attention bits set", NULL,
5323 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5324 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
5325 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5326 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5327 	 IGU_ADDR_TYPE_READ_INT},
5328 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5329 	 IGU_ADDR_TYPE_READ_INT},
5330 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
5331 	 IGU_ADDR_TYPE_READ_INT},
5332 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
5333 	 IGU_ADDR_TYPE_READ_INT},
5334 	{0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
5335 	{0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
5336 };
5337 
5338 /******************************** Variables **********************************/
5339 
5340 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
5341  * (e.g. due to no NVRAM access).
5342  */
5343 static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
5344 
5345 /* Temporary buffer, used for print size calculations */
5346 static char s_temp_buf[MAX_MSG_LEN];
5347 
5348 /***************************** Public Functions *******************************/
5349 
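/* Sets the pointer to the binary debug data (parsing strings, idle check
 * parsing data, etc.) and splits it into the per-type debug arrays used by
 * the parsing functions below.
 */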
5350 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
5351 {
5352 	/* Convert binary data to debug arrays */
5353 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5354 	u8 buf_id;
5355 
5356 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5357 		s_dbg_arrays[buf_id].ptr =
5358 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5359 		s_dbg_arrays[buf_id].size_in_dwords =
5360 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5361 	}
5362 
5363 	return DBG_STATUS_OK;
5364 }
5365 
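/* Adds a value to a cyclic buffer offset, wrapping around the buffer size */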
5366 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5367 {
5368 	return (a + b) % size;
5369 }
5370 
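/* Subtracts a value from a cyclic buffer offset, wrapping around the buffer
 * size.
 */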
5371 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5372 {
5373 	return (size + a - b) % size;
5374 }
5375 
5376 /* Reads the specified number of bytes (up to 4) from the specified cyclic
5377  * buffer and returns them as a dword value. The specified buffer offset is
5378  * updated.
5379  */
5380 static u32 qed_read_from_cyclic_buf(void *buf,
5381 				    u32 *offset,
5382 				    u32 buf_size, u8 num_bytes_to_read)
5383 {
5384 	u8 *bytes_buf = (u8 *)buf;
5385 	u8 *val_ptr;
5386 	u32 val = 0;
5387 	u8 i;
5388 
5389 	val_ptr = (u8 *)&val;
5390 
5391 	for (i = 0; i < num_bytes_to_read; i++) {
5392 		val_ptr[i] = bytes_buf[*offset];
5393 		*offset = qed_cyclic_add(*offset, 1, buf_size);
5394 	}
5395 
5396 	return val;
5397 }
5398 
5399 /* Reads and returns the next byte from the specified buffer.
5400  * The specified buffer offset is updated.
5401  */
5402 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5403 {
5404 	return ((u8 *)buf)[(*offset)++];
5405 }
5406 
5407 /* Reads and returns the next dword from the specified buffer.
5408  * The specified buffer offset is updated.
5409  */
5410 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5411 {
5412 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5413 
5414 	*offset += 4;
5415 	return dword_val;
5416 }
5417 
5418 /* Reads the next string from the specified buffer, and copies it to the
5419  * specified pointer. The specified buffer offset is updated.
5420  */
5421 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5422 {
5423 	const char *source_str = &((const char *)buf)[*offset];
5424 
5425 	strncpy(dest, source_str, size);
5426 	dest[size - 1] = '\0';
5427 	*offset += size;
5428 }
5429 
5430 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5431  * If the specified buffer is NULL, a temporary buffer pointer is returned.
5432  */
5433 static char *qed_get_buf_ptr(void *buf, u32 offset)
5434 {
5435 	return buf ? (char *)buf + offset : s_temp_buf;
5436 }
5437 
5438 /* Reads a param from the specified buffer. Returns the number of dwords read.
5439  * If the returned param_str_val is NULL, the param is numeric and its value
5440  * is returned in param_num_val.
5441  * Otherwise, the param is a string and its pointer is returned in param_str_val.
5442  */
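/* Expected param encoding in the dump buffer: a null-terminated name string,
 * followed by a one-byte type flag (non-zero means a string param), followed
 * by either a null-terminated string value or a dword-aligned numeric value.
 */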
5443 static u32 qed_read_param(u32 *dump_buf,
5444 			  const char **param_name,
5445 			  const char **param_str_val, u32 *param_num_val)
5446 {
5447 	char *char_buf = (char *)dump_buf;
5448 	u32 offset = 0; /* In bytes */
5449 
5450 	/* Extract param name */
5451 	*param_name = char_buf;
5452 	offset += strlen(*param_name) + 1;
5453 
5454 	/* Check param type */
5455 	if (*(char_buf + offset++)) {
5456 		/* String param */
5457 		*param_str_val = char_buf + offset;
5458 		offset += strlen(*param_str_val) + 1;
5459 		if (offset & 0x3)
5460 			offset += (4 - (offset & 0x3));
5461 	} else {
5462 		/* Numeric param */
5463 		*param_str_val = NULL;
5464 		if (offset & 0x3)
5465 			offset += (4 - (offset & 0x3));
5466 		*param_num_val = *(u32 *)(char_buf + offset);
5467 		offset += 4;
5468 	}
5469 
5470 	return offset / 4;
5471 }
5472 
5473 /* Reads a section header from the specified buffer.
5474  * Returns the number of dwords read.
5475  */
5476 static u32 qed_read_section_hdr(u32 *dump_buf,
5477 				const char **section_name,
5478 				u32 *num_section_params)
5479 {
5480 	const char *param_str_val;
5481 
5482 	return qed_read_param(dump_buf,
5483 			      section_name, &param_str_val, num_section_params);
5484 }
5485 
5486 /* Reads section params from the specified buffer and prints them to the results
5487  * buffer. Returns the number of dwords read.
5488  */
5489 static u32 qed_print_section_params(u32 *dump_buf,
5490 				    u32 num_section_params,
5491 				    char *results_buf, u32 *num_chars_printed)
5492 {
5493 	u32 i, dump_offset = 0, results_offset = 0;
5494 
5495 	for (i = 0; i < num_section_params; i++) {
5496 		const char *param_name;
5497 		const char *param_str_val;
5498 		u32 param_num_val = 0;
5499 
5500 		dump_offset += qed_read_param(dump_buf + dump_offset,
5501 					      &param_name,
5502 					      &param_str_val, &param_num_val);
5503 		if (param_str_val)
5504 			/* String param */
5505 			results_offset +=
5506 				sprintf(qed_get_buf_ptr(results_buf,
5507 							results_offset),
5508 					"%s: %s\n", param_name, param_str_val);
5509 		else if (strcmp(param_name, "fw-timestamp"))
5510 			/* Numeric param */
5511 			results_offset +=
5512 				sprintf(qed_get_buf_ptr(results_buf,
5513 							results_offset),
5514 					"%s: %d\n", param_name, param_num_val);
5515 	}
5516 
5517 	results_offset +=
5518 	    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5519 	*num_chars_printed = results_offset;
5520 	return dump_offset;
5521 }
5522 
5523 const char *qed_dbg_get_status_str(enum dbg_status status)
5524 {
5525 	return (status <
5526 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
5527 }
5528 
5529 /* Parses the idle check rules and returns the number of characters printed.
5530  * In case of parsing error, returns 0.
5531  */
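/* Each dumped rule is expected to start with a dbg_idle_chk_result_hdr,
 * followed by a dbg_idle_chk_result_reg_hdr and the dumped dwords for every
 * register that was read for the rule.
 */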
5532 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
5533 					 u32 *dump_buf,
5534 					 u32 *dump_buf_end,
5535 					 u32 num_rules,
5536 					 bool print_fw_idle_chk,
5537 					 char *results_buf,
5538 					 u32 *num_errors, u32 *num_warnings)
5539 {
5540 	u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
5541 	u16 i, j;
5542 
5543 	*num_errors = 0;
5544 	*num_warnings = 0;
5545 
5546 	/* Go over dumped results */
5547 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
5548 	     rule_idx++) {
5549 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
5550 		struct dbg_idle_chk_result_hdr *hdr;
5551 		const char *parsing_str;
5552 		u32 parsing_str_offset;
5553 		const char *lsi_msg;
5554 		u8 curr_reg_id = 0;
5555 		bool has_fw_msg;
5556 
5557 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
5558 		rule_parsing_data =
5559 			(const struct dbg_idle_chk_rule_parsing_data *)
5560 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
5561 			ptr[hdr->rule_id];
5562 		parsing_str_offset =
5563 			GET_FIELD(rule_parsing_data->data,
5564 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
5565 		has_fw_msg =
5566 			GET_FIELD(rule_parsing_data->data,
5567 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
5568 		parsing_str = &((const char *)
5569 				s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
5570 				[parsing_str_offset];
5571 		lsi_msg = parsing_str;
5572 
5573 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
5574 			return 0;
5575 
5576 		/* Skip rule header */
5577 		dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
5578 
5579 		/* Update errors/warnings count */
5580 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
5581 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
5582 			(*num_errors)++;
5583 		else
5584 			(*num_warnings)++;
5585 
5586 		/* Print rule severity */
5587 		results_offset +=
5588 		    sprintf(qed_get_buf_ptr(results_buf,
5589 					    results_offset), "%s: ",
5590 			    s_idle_chk_severity_str[hdr->severity]);
5591 
5592 		/* Print rule message */
5593 		if (has_fw_msg)
5594 			parsing_str += strlen(parsing_str) + 1;
5595 		results_offset +=
5596 		    sprintf(qed_get_buf_ptr(results_buf,
5597 					    results_offset), "%s.",
5598 			    has_fw_msg &&
5599 			    print_fw_idle_chk ? parsing_str : lsi_msg);
5600 		parsing_str += strlen(parsing_str) + 1;
5601 
5602 		/* Print register values */
5603 		results_offset +=
5604 		    sprintf(qed_get_buf_ptr(results_buf,
5605 					    results_offset), " Registers:");
5606 		for (i = 0;
5607 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
5608 		     i++) {
5609 			struct dbg_idle_chk_result_reg_hdr *reg_hdr
5610 			    = (struct dbg_idle_chk_result_reg_hdr *)
5611 			    dump_buf;
5612 			bool is_mem =
5613 				GET_FIELD(reg_hdr->data,
5614 					  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
5615 			u8 reg_id =
5616 				GET_FIELD(reg_hdr->data,
5617 					  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
5618 
5619 			/* Skip reg header */
5620 			dump_buf +=
5621 			    (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
5622 
5623 			/* Skip register names until the required reg_id is
5624 			 * reached.
5625 			 */
5626 			for (; reg_id > curr_reg_id;
5627 			     curr_reg_id++,
5628 			     parsing_str += strlen(parsing_str) + 1);
5629 
5630 			results_offset +=
5631 			    sprintf(qed_get_buf_ptr(results_buf,
5632 						    results_offset), " %s",
5633 				    parsing_str);
5634 			if (i < hdr->num_dumped_cond_regs && is_mem)
5635 				results_offset +=
5636 				    sprintf(qed_get_buf_ptr(results_buf,
5637 							    results_offset),
5638 					    "[%d]", hdr->mem_entry_id +
5639 					    reg_hdr->start_entry);
5640 			results_offset +=
5641 			    sprintf(qed_get_buf_ptr(results_buf,
5642 						    results_offset), "=");
5643 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
5644 				results_offset +=
5645 				    sprintf(qed_get_buf_ptr(results_buf,
5646 							    results_offset),
5647 					    "0x%x", *dump_buf);
5648 				if (j < reg_hdr->size - 1)
5649 					results_offset +=
5650 					    sprintf(qed_get_buf_ptr
5651 						    (results_buf,
5652 						     results_offset), ",");
5653 			}
5654 		}
5655 
5656 		results_offset +=
5657 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
5658 	}
5659 
5660 	/* Check if end of dump buffer was exceeded */
5661 	if (dump_buf > dump_buf_end)
5662 		return 0;
5663 	return results_offset;
5664 }
5665 
5666 /* Parses an idle check dump buffer.
5667  * If results_buf is not NULL, the idle check results are printed to it.
5668  * In any case, the required results buffer size is assigned to
5669  * parsed_results_bytes.
5670  * The parsing status is returned.
5671  */
5672 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
5673 					       u32 *dump_buf,
5674 					       u32 num_dumped_dwords,
5675 					       char *results_buf,
5676 					       u32 *parsed_results_bytes,
5677 					       u32 *num_errors,
5678 					       u32 *num_warnings)
5679 {
5680 	const char *section_name, *param_name, *param_str_val;
5681 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
5682 	u32 num_section_params = 0, num_rules;
5683 	u32 results_offset = 0;	/* Offset in results_buf in bytes */
5684 
5685 	*parsed_results_bytes = 0;
5686 	*num_errors = 0;
5687 	*num_warnings = 0;
5688 	if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
5689 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
5690 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5691 
5692 	/* Read global_params section */
5693 	dump_buf += qed_read_section_hdr(dump_buf,
5694 					 &section_name, &num_section_params);
5695 	if (strcmp(section_name, "global_params"))
5696 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5697 
5698 	/* Print global params */
5699 	dump_buf += qed_print_section_params(dump_buf,
5700 					     num_section_params,
5701 					     results_buf, &results_offset);
5702 
5703 	/* Read idle_chk section */
5704 	dump_buf += qed_read_section_hdr(dump_buf,
5705 					 &section_name, &num_section_params);
5706 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
5707 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5708 
5709 	dump_buf += qed_read_param(dump_buf,
5710 				   &param_name, &param_str_val, &num_rules);
5711 	if (strcmp(param_name, "num_rules") != 0)
5712 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5713 
5714 	if (num_rules) {
5715 		u32 rules_print_size;
5716 
5717 		/* Print FW output */
5718 		results_offset +=
5719 		    sprintf(qed_get_buf_ptr(results_buf,
5720 					    results_offset),
5721 			    "FW_IDLE_CHECK:\n");
5722 		rules_print_size =
5723 			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
5724 						      dump_buf_end, num_rules,
5725 						      true,
5726 						      results_buf ?
5727 						      results_buf +
5728 						      results_offset : NULL,
5729 						      num_errors, num_warnings);
5730 		results_offset += rules_print_size;
5731 		if (rules_print_size == 0)
5732 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5733 
5734 		/* Print LSI output */
5735 		results_offset +=
5736 		    sprintf(qed_get_buf_ptr(results_buf,
5737 					    results_offset),
5738 			    "\nLSI_IDLE_CHECK:\n");
5739 		rules_print_size =
5740 			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
5741 						      dump_buf_end, num_rules,
5742 						      false,
5743 						      results_buf ?
5744 						      results_buf +
5745 						      results_offset : NULL,
5746 						      num_errors, num_warnings);
5747 		results_offset += rules_print_size;
5748 		if (rules_print_size == 0)
5749 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
5750 	}
5751 
5752 	/* Print errors/warnings count */
5753 	if (*num_errors) {
5754 		results_offset +=
5755 		    sprintf(qed_get_buf_ptr(results_buf,
5756 					    results_offset),
5757 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
5758 			    *num_errors, *num_warnings);
5759 	} else if (*num_warnings) {
5760 		results_offset +=
5761 		    sprintf(qed_get_buf_ptr(results_buf,
5762 					    results_offset),
5763 			    "\nIdle Check completed successfully (with %d warnings)\n",
5764 			    *num_warnings);
5765 	} else {
5766 		results_offset +=
5767 		    sprintf(qed_get_buf_ptr(results_buf,
5768 					    results_offset),
5769 			    "\nIdle Check completed successfully\n");
5770 	}
5771 
5772 	/* Add 1 for string NULL termination */
5773 	*parsed_results_bytes = results_offset + 1;
5774 	return DBG_STATUS_OK;
5775 }
5776 
5777 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
5778 						  u32 *dump_buf,
5779 						  u32 num_dumped_dwords,
5780 						  u32 *results_buf_size)
5781 {
5782 	u32 num_errors, num_warnings;
5783 
5784 	return qed_parse_idle_chk_dump(p_hwfn,
5785 				       dump_buf,
5786 				       num_dumped_dwords,
5787 				       NULL,
5788 				       results_buf_size,
5789 				       &num_errors, &num_warnings);
5790 }
5791 
5792 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
5793 					   u32 *dump_buf,
5794 					   u32 num_dumped_dwords,
5795 					   char *results_buf,
5796 					   u32 *num_errors, u32 *num_warnings)
5797 {
5798 	u32 parsed_buf_size;
5799 
5800 	return qed_parse_idle_chk_dump(p_hwfn,
5801 				       dump_buf,
5802 				       num_dumped_dwords,
5803 				       results_buf,
5804 				       &parsed_buf_size,
5805 				       num_errors, num_warnings);
5806 }
5807 
5808 /* Frees the specified MCP Trace meta data */
5809 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
5810 				    struct mcp_trace_meta *meta)
5811 {
5812 	u32 i;
5813 
5814 	/* Release modules */
5815 	if (meta->modules) {
5816 		for (i = 0; i < meta->modules_num; i++)
5817 			kfree(meta->modules[i]);
5818 		kfree(meta->modules);
5819 	}
5820 
5821 	/* Release formats */
5822 	if (meta->formats) {
5823 		for (i = 0; i < meta->formats_num; i++)
5824 			kfree(meta->formats[i].format_str);
5825 		kfree(meta->formats);
5826 	}
5827 }
5828 
5829 /* Allocates and fills MCP Trace meta data based on the specified meta data
5830  * dump buffer.
5831  * Returns debug status code.
5832  */
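/* Expected meta data image layout: a signature dword, a module count byte and
 * length-prefixed module name strings, a second signature dword, a format
 * count dword, and per-format descriptors (a data dword + a format string).
 */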
5833 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
5834 						const u32 *meta_buf,
5835 						struct mcp_trace_meta *meta)
5836 {
5837 	u8 *meta_buf_bytes = (u8 *)meta_buf;
5838 	u32 offset = 0, signature, i;
5839 
5840 	memset(meta, 0, sizeof(*meta));
5841 
5842 	/* Read first signature */
5843 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5844 	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
5845 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
5846 
5847 	/* Read number of modules and allocate memory for all the modules
5848 	 * pointers.
5849 	 */
5850 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
5851 	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
5852 	if (!meta->modules)
5853 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5854 
5855 	/* Allocate and read all module strings */
5856 	for (i = 0; i < meta->modules_num; i++) {
5857 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
5858 
5859 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
5860 		if (!(*(meta->modules + i))) {
5861 			/* Release only the modules allocated so far */
5862 			meta->modules_num = i;
5863 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5864 		}
5865 
5866 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
5867 				      *(meta->modules + i));
5868 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
5869 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
5870 	}
5871 
5872 	/* Read second signature */
5873 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5874 	if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
5875 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
5876 
5877 	/* Read number of formats and allocate memory for all formats */
5878 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
5879 	meta->formats = kzalloc(meta->formats_num *
5880 				sizeof(struct mcp_trace_format),
5881 				GFP_KERNEL);
5882 	if (!meta->formats)
5883 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5884 
5885 	/* Allocate and read all strings */
5886 	for (i = 0; i < meta->formats_num; i++) {
5887 		struct mcp_trace_format *format_ptr = &meta->formats[i];
5888 		u8 format_len;
5889 
5890 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
5891 							   &offset);
5892 		format_len =
5893 		    (format_ptr->data &
5894 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
5895 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
5896 		if (!format_ptr->format_str) {
5897 			/* Release only the formats allocated so far */
5898 			meta->formats_num = i;
5899 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
5900 		}
5901 
5902 		qed_read_str_from_buf(meta_buf_bytes,
5903 				      &offset,
5904 				      format_len, format_ptr->format_str);
5905 	}
5906 
5907 	return DBG_STATUS_OK;
5908 }
5909 
5910 /* Parses an MCP Trace dump buffer.
5911  * If results_buf is not NULL, the MCP Trace results are printed to it.
5912  * In any case, the required results buffer size is assigned to
5913  * parsed_results_bytes.
5914  * The parsing status is returned.
5915  */
5916 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5917 						u32 *dump_buf,
5918 						u32 num_dumped_dwords,
5919 						char *results_buf,
5920 						u32 *parsed_results_bytes)
5921 {
5922 	u32 results_offset = 0, param_mask, param_shift, param_num_val;
5923 	u32 num_section_params, offset, end_offset, bytes_left;
5924 	const char *section_name, *param_name, *param_str_val;
5925 	u32 trace_data_dwords, trace_meta_dwords;
5926 	struct mcp_trace_meta meta;
5927 	struct mcp_trace *trace;
5928 	enum dbg_status status;
5929 	const u32 *meta_buf;
5930 	u8 *trace_buf;
5931 
5932 	*parsed_results_bytes = 0;
5933 
5934 	/* Read global_params section */
5935 	dump_buf += qed_read_section_hdr(dump_buf,
5936 					 &section_name, &num_section_params);
5937 	if (strcmp(section_name, "global_params"))
5938 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
5939 
5940 	/* Print global params */
5941 	dump_buf += qed_print_section_params(dump_buf,
5942 					     num_section_params,
5943 					     results_buf, &results_offset);
5944 
5945 	/* Read trace_data section */
5946 	dump_buf += qed_read_section_hdr(dump_buf,
5947 					 &section_name, &num_section_params);
5948 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
5949 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
5950 	dump_buf += qed_read_param(dump_buf,
5951 				   &param_name, &param_str_val, &param_num_val);
5952 	if (strcmp(param_name, "size"))
5953 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
5954 	trace_data_dwords = param_num_val;
5955 
5956 	/* Prepare trace info */
5957 	trace = (struct mcp_trace *)dump_buf;
5958 	trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
5959 	offset = trace->trace_oldest;
5960 	end_offset = trace->trace_prod;
5961 	bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
5962 	dump_buf += trace_data_dwords;
5963 
5964 	/* Read meta_data section */
5965 	dump_buf += qed_read_section_hdr(dump_buf,
5966 					 &section_name, &num_section_params);
5967 	if (strcmp(section_name, "mcp_trace_meta"))
5968 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
5969 	dump_buf += qed_read_param(dump_buf,
5970 				   &param_name, &param_str_val, &param_num_val);
5971 	if (strcmp(param_name, "size") != 0)
5972 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
5973 	trace_meta_dwords = param_num_val;
5974 
5975 	/* Choose meta data buffer */
5976 	if (!trace_meta_dwords) {
5977 		/* Dump doesn't include meta data */
5978 		if (!s_mcp_trace_meta.ptr)
5979 			return DBG_STATUS_MCP_TRACE_NO_META;
5980 		meta_buf = s_mcp_trace_meta.ptr;
5981 	} else {
5982 		/* Dump includes meta data */
5983 		meta_buf = dump_buf;
5984 	}
5985 
5986 	/* Allocate meta data memory */
5987 	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
5988 	if (status != DBG_STATUS_OK)
5989 		goto free_mem;
5990 
5991 	/* Ignore the level and modules masks - just print everything that is
5992 	 * already in the buffer.
5993 	 */
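	/* Each trace entry starts with an MFW_TRACE_ENTRY_SIZE-byte header that
	 * holds the format (event) index and the total size of its parameters,
	 * followed by the parameter values (whose individual sizes are taken
	 * from the format descriptor).
	 */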
5994 	while (bytes_left) {
5995 		struct mcp_trace_format *format_ptr;
5996 		u8 format_level, format_module;
5997 		u32 params[3] = { 0, 0, 0 };
5998 		u32 header, format_idx, i;
5999 
6000 		if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
6001 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6002 			goto free_mem;
6003 		}
6004 
6005 		header = qed_read_from_cyclic_buf(trace_buf,
6006 						  &offset,
6007 						  trace->size,
6008 						  MFW_TRACE_ENTRY_SIZE);
6009 		bytes_left -= MFW_TRACE_ENTRY_SIZE;
6010 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6011 
6012 		/* Skip message if its index doesn't exist in the meta data */
6013 		if (format_idx >= meta.formats_num) {
6014 			u8 format_size =
6015 			    (u8)((header &
6016 				  MFW_TRACE_PRM_SIZE_MASK) >>
6017 				 MFW_TRACE_PRM_SIZE_SHIFT);
6018 
6019 			if (bytes_left < format_size) {
6020 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6021 				goto free_mem;
6022 			}
6023 
6024 			offset = qed_cyclic_add(offset,
6025 						format_size, trace->size);
6026 			bytes_left -= format_size;
6027 			continue;
6028 		}
6029 
6030 		format_ptr = &meta.formats[format_idx];
6031 		for (i = 0,
6032 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6033 		     MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6034 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6035 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6036 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6037 			/* Extract param size (0..3) */
6038 			u8 param_size =
6039 			    (u8)((format_ptr->data &
6040 				  param_mask) >> param_shift);
6041 
6042 			/* If the param size is zero, there are no other
6043 			 * parameters.
6044 			 */
6045 			if (!param_size)
6046 				break;
6047 
6048 			/* Size is encoded using 2 bits, where 3 is used to
6049 			 * encode 4.
6050 			 */
6051 			if (param_size == 3)
6052 				param_size = 4;
6053 			if (bytes_left < param_size) {
6054 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6055 				goto free_mem;
6056 			}
6057 
6058 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6059 							     &offset,
6060 							     trace->size,
6061 							     param_size);
6062 			bytes_left -= param_size;
6063 		}
6064 
6065 		format_level =
6066 		    (u8)((format_ptr->data &
6067 			  MCP_TRACE_FORMAT_LEVEL_MASK) >>
6068 			  MCP_TRACE_FORMAT_LEVEL_SHIFT);
6069 		format_module =
6070 		    (u8)((format_ptr->data &
6071 			  MCP_TRACE_FORMAT_MODULE_MASK) >>
6072 			 MCP_TRACE_FORMAT_MODULE_SHIFT);
6073 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
6074 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6075 			goto free_mem;
6076 		}
6077 
6078 		/* Print current message to results buffer */
6079 		results_offset +=
6080 		    sprintf(qed_get_buf_ptr(results_buf,
6081 					    results_offset), "%s %-8s: ",
6082 			    s_mcp_trace_level_str[format_level],
6083 			    meta.modules[format_module]);
6084 		results_offset +=
6085 		    sprintf(qed_get_buf_ptr(results_buf,
6086 					    results_offset),
6087 			    format_ptr->format_str, params[0], params[1],
6088 			    params[2]);
6089 	}
6090 
6091 free_mem:
6092 	*parsed_results_bytes = results_offset + 1;
6093 	qed_mcp_trace_free_meta(p_hwfn, &meta);
6094 	return status;
6095 }
6096 
6097 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
6098 						   u32 *dump_buf,
6099 						   u32 num_dumped_dwords,
6100 						   u32 *results_buf_size)
6101 {
6102 	return qed_parse_mcp_trace_dump(p_hwfn,
6103 					dump_buf,
6104 					num_dumped_dwords,
6105 					NULL, results_buf_size);
6106 }
6107 
6108 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
6109 					    u32 *dump_buf,
6110 					    u32 num_dumped_dwords,
6111 					    char *results_buf)
6112 {
6113 	u32 parsed_buf_size;
6114 
6115 	return qed_parse_mcp_trace_dump(p_hwfn,
6116 					dump_buf,
6117 					num_dumped_dwords,
6118 					results_buf, &parsed_buf_size);
6119 }
6120 
6121 /* Parses a Reg FIFO dump buffer.
6122  * If results_buf is not NULL, the Reg FIFO results are printed to it.
6123  * In any case, the required results buffer size is assigned to
6124  * parsed_results_bytes.
6125  * The parsing status is returned.
6126  */
6127 static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
6128 					       u32 *dump_buf,
6129 					       u32 num_dumped_dwords,
6130 					       char *results_buf,
6131 					       u32 *parsed_results_bytes)
6132 {
6133 	u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6134 	const char *section_name, *param_name, *param_str_val;
6135 	struct reg_fifo_element *elements;
6136 	u8 i, j, err_val, vf_val;
6137 	char vf_str[4];
6138 
6139 	/* Read global_params section */
6140 	dump_buf += qed_read_section_hdr(dump_buf,
6141 					 &section_name, &num_section_params);
6142 	if (strcmp(section_name, "global_params"))
6143 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6144 
6145 	/* Print global params */
6146 	dump_buf += qed_print_section_params(dump_buf,
6147 					     num_section_params,
6148 					     results_buf, &results_offset);
6149 
6150 	/* Read reg_fifo_data section */
6151 	dump_buf += qed_read_section_hdr(dump_buf,
6152 					 &section_name, &num_section_params);
6153 	if (strcmp(section_name, "reg_fifo_data"))
6154 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6155 	dump_buf += qed_read_param(dump_buf,
6156 				   &param_name, &param_str_val, &param_num_val);
6157 	if (strcmp(param_name, "size"))
6158 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6159 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6160 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6161 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6162 	elements = (struct reg_fifo_element *)dump_buf;
6163 
6164 	/* Decode elements */
6165 	for (i = 0; i < num_elements; i++) {
6166 		bool err_printed = false;
6167 
6168 		/* Discover if element belongs to a VF or a PF */
6169 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6170 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6171 			sprintf(vf_str, "%s", "N/A");
6172 		else
6173 			sprintf(vf_str, "%d", vf_val);
6174 
6175 		/* Add parsed element to parsed buffer */
6176 		results_offset +=
6177 		    sprintf(qed_get_buf_ptr(results_buf,
6178 					    results_offset),
6179 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6180 			    elements[i].data,
6181 			    (u32)GET_FIELD(elements[i].data,
6182 				      REG_FIFO_ELEMENT_ADDRESS) *
6183 				      REG_FIFO_ELEMENT_ADDR_FACTOR,
6184 				      s_access_strs[GET_FIELD(elements[i].data,
6185 						    REG_FIFO_ELEMENT_ACCESS)],
6186 			    (u32)GET_FIELD(elements[i].data,
6187 					   REG_FIFO_ELEMENT_PF), vf_str,
6188 			    (u32)GET_FIELD(elements[i].data,
6189 				      REG_FIFO_ELEMENT_PORT),
6190 				      s_privilege_strs[GET_FIELD(elements[i].
6191 				      data,
6192 				      REG_FIFO_ELEMENT_PRIVILEGE)],
6193 			    s_protection_strs[GET_FIELD(elements[i].data,
6194 						REG_FIFO_ELEMENT_PROTECTION)],
6195 			    s_master_strs[GET_FIELD(elements[i].data,
6196 						REG_FIFO_ELEMENT_MASTER)]);
6197 
6198 		/* Print errors */
6199 		for (j = 0,
6200 		     err_val = GET_FIELD(elements[i].data,
6201 					 REG_FIFO_ELEMENT_ERROR);
6202 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6203 		     j++, err_val >>= 1) {
6204 			if (!(err_val & 0x1))
6205 				continue;
6206 			if (err_printed)
6207 				results_offset +=
6208 					sprintf(qed_get_buf_ptr(results_buf,
6209 								results_offset),
6210 						", ");
6211 			results_offset +=
6212 				sprintf(qed_get_buf_ptr(results_buf,
6213 							results_offset), "%s",
6214 					s_reg_fifo_error_strs[j]);
6215 			err_printed = true;
6216 		}
6217 
6218 		results_offset +=
6219 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6220 	}
6221 
6222 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6223 						  results_offset),
6224 				  "fifo contained %d elements", num_elements);
6225 
6226 	/* Add 1 for string NULL termination */
6227 	*parsed_results_bytes = results_offset + 1;
6228 	return DBG_STATUS_OK;
6229 }
6230 
6231 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
6232 						  u32 *dump_buf,
6233 						  u32 num_dumped_dwords,
6234 						  u32 *results_buf_size)
6235 {
6236 	return qed_parse_reg_fifo_dump(p_hwfn,
6237 				       dump_buf,
6238 				       num_dumped_dwords,
6239 				       NULL, results_buf_size);
6240 }
6241 
6242 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
6243 					   u32 *dump_buf,
6244 					   u32 num_dumped_dwords,
6245 					   char *results_buf)
6246 {
6247 	u32 parsed_buf_size;
6248 
6249 	return qed_parse_reg_fifo_dump(p_hwfn,
6250 				       dump_buf,
6251 				       num_dumped_dwords,
6252 				       results_buf, &parsed_buf_size);
6253 }
6254 
6255 /* Parses an IGU FIFO dump buffer.
6256  * If results_buf is not NULL, the IGU FIFO results are printed to it.
6257  * In any case, the required results buffer size is assigned to
6258  * parsed_results_bytes.
6259  * The parsing status is returned.
6260  */
6261 static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
6262 					       u32 *dump_buf,
6263 					       u32 num_dumped_dwords,
6264 					       char *results_buf,
6265 					       u32 *parsed_results_bytes)
6266 {
6267 	u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6268 	const char *section_name, *param_name, *param_str_val;
6269 	struct igu_fifo_element *elements;
6270 	char parsed_addr_data[32];
6271 	char parsed_wr_data[256];
6272 	u8 i, j;
6273 
6274 	/* Read global_params section */
6275 	dump_buf += qed_read_section_hdr(dump_buf,
6276 					 &section_name, &num_section_params);
6277 	if (strcmp(section_name, "global_params"))
6278 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6279 
6280 	/* Print global params */
6281 	dump_buf += qed_print_section_params(dump_buf,
6282 					     num_section_params,
6283 					     results_buf, &results_offset);
6284 
6285 	/* Read igu_fifo_data section */
6286 	dump_buf += qed_read_section_hdr(dump_buf,
6287 					 &section_name, &num_section_params);
6288 	if (strcmp(section_name, "igu_fifo_data"))
6289 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6290 	dump_buf += qed_read_param(dump_buf,
6291 				   &param_name, &param_str_val, &param_num_val);
6292 	if (strcmp(param_name, "size"))
6293 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6294 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
6295 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6296 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
6297 	elements = (struct igu_fifo_element *)dump_buf;
6298 
6299 	/* Decode elements */
6300 	for (i = 0; i < num_elements; i++) {
6301 		/* dword12 (dword index 1 and 2) contains bits 32..95 of the
6302 		 * FIFO element.
6303 		 */
6304 		u64 dword12 =
6305 		    ((u64)elements[i].dword2 << 32) | elements[i].dword1;
6306 		bool is_wr_cmd = GET_FIELD(dword12,
6307 					   IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6308 		bool is_pf = GET_FIELD(elements[i].dword0,
6309 				       IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6310 		u16 cmd_addr = GET_FIELD(elements[i].dword0,
6311 					 IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6312 		u8 source = GET_FIELD(elements[i].dword0,
6313 				      IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6314 		u8 err_type = GET_FIELD(elements[i].dword0,
6315 					IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6316 		const struct igu_fifo_addr_data *addr_data = NULL;
6317 
6318 		if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6319 			return DBG_STATUS_IGU_FIFO_BAD_DATA;
6320 		if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6321 			return DBG_STATUS_IGU_FIFO_BAD_DATA;
6322 
6323 		/* Find address data */
6324 		for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
6325 		     j++)
6326 			if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
6327 			    cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
6328 				addr_data = &s_igu_fifo_addr_data[j];
6329 		if (!addr_data)
6330 			return DBG_STATUS_IGU_FIFO_BAD_DATA;
6331 
6332 		/* Prepare parsed address data */
6333 		switch (addr_data->type) {
6334 		case IGU_ADDR_TYPE_MSIX_MEM:
6335 			sprintf(parsed_addr_data,
6336 				" vector_num=0x%x", cmd_addr / 2);
6337 			break;
6338 		case IGU_ADDR_TYPE_WRITE_INT_ACK:
6339 		case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6340 			sprintf(parsed_addr_data,
6341 				" SB=0x%x", cmd_addr - addr_data->start_addr);
6342 			break;
6343 		default:
6344 			parsed_addr_data[0] = '\0';
6345 		}
6346 
6347 		/* Prepare parsed write data */
6348 		if (is_wr_cmd) {
6349 			u32 wr_data = GET_FIELD(dword12,
6350 					IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6351 			u32 prod_cons = GET_FIELD(wr_data,
6352 						  IGU_FIFO_WR_DATA_PROD_CONS);
6353 			u8 is_cleanup = GET_FIELD(wr_data,
6354 						  IGU_FIFO_WR_DATA_CMD_TYPE);
6355 
6356 			if (source == IGU_SRC_ATTN) {
6357 				sprintf(parsed_wr_data,
6358 					"prod: 0x%x, ", prod_cons);
6359 			} else {
6360 				if (is_cleanup) {
6361 					u8 cleanup_val = GET_FIELD(wr_data,
6362 								   IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6363 					u8 cleanup_type = GET_FIELD(wr_data,
6364 								    IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6365 
6366 					sprintf(parsed_wr_data,
6367 						"cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
6368 						cleanup_val ? "set" : "clear",
6369 						cleanup_type);
6370 				} else {
6371 					u8 update_flag = GET_FIELD(wr_data,
6372 								   IGU_FIFO_WR_DATA_UPDATE_FLAG);
6373 					u8 en_dis_int_for_sb =
6374 					    GET_FIELD(wr_data,
6375 						      IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6376 					u8 segment = GET_FIELD(wr_data,
6377 							       IGU_FIFO_WR_DATA_SEGMENT);
6378 					u8 timer_mask = GET_FIELD(wr_data,
6379 								  IGU_FIFO_WR_DATA_TIMER_MASK);
6380 
6381 					sprintf(parsed_wr_data,
6382 						"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
6383 						prod_cons,
6384 						update_flag ? "update" : "nop",
6385 						en_dis_int_for_sb
6386 						? (en_dis_int_for_sb ==
6387 						   1 ? "disable" : "nop") :
6388 						"enable",
6389 						segment ? "attn" : "regular",
6390 						timer_mask);
6391 				}
6392 			}
6393 		} else {
6394 			parsed_wr_data[0] = '\0';
6395 		}
6396 
6397 		/* Add parsed element to parsed buffer */
6398 		results_offset +=
6399 		    sprintf(qed_get_buf_ptr(results_buf,
6400 					    results_offset),
6401 			    "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
6402 			    elements[i].dword2, elements[i].dword1,
6403 			    elements[i].dword0,
6404 			    is_pf ? "pf" : "vf",
6405 			    GET_FIELD(elements[i].dword0,
6406 				      IGU_FIFO_ELEMENT_DWORD0_FID),
6407 			    s_igu_fifo_source_strs[source],
6408 			    is_wr_cmd ? "wr" : "rd", cmd_addr,
6409 			    (!is_pf && addr_data->vf_desc)
6410 			    ? addr_data->vf_desc : addr_data->desc,
6411 			    parsed_addr_data, parsed_wr_data,
6412 			    s_igu_fifo_error_strs[err_type]);
6413 	}
6414 
6415 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6416 						  results_offset),
6417 				  "fifo contained %d elements", num_elements);
6418 
6419 	/* Add 1 for string NULL termination */
6420 	*parsed_results_bytes = results_offset + 1;
6421 	return DBG_STATUS_OK;
6422 }
6423 
6424 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
6425 						  u32 *dump_buf,
6426 						  u32 num_dumped_dwords,
6427 						  u32 *results_buf_size)
6428 {
6429 	return qed_parse_igu_fifo_dump(p_hwfn,
6430 				       dump_buf,
6431 				       num_dumped_dwords,
6432 				       NULL, results_buf_size);
6433 }
6434 
6435 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
6436 					   u32 *dump_buf,
6437 					   u32 num_dumped_dwords,
6438 					   char *results_buf)
6439 {
6440 	u32 parsed_buf_size;
6441 
6442 	return qed_parse_igu_fifo_dump(p_hwfn,
6443 				       dump_buf,
6444 				       num_dumped_dwords,
6445 				       results_buf, &parsed_buf_size);
6446 }
6447 
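/* Parses a Protection Override dump buffer.
 * If results_buf is not NULL, the Protection Override results are printed to
 * it. In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */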
6448 static enum dbg_status
6449 qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
6450 				   u32 *dump_buf,
6451 				   u32 num_dumped_dwords,
6452 				   char *results_buf,
6453 				   u32 *parsed_results_bytes)
6454 {
6455 	u32 results_offset = 0, param_num_val, num_section_params, num_elements;
6456 	const char *section_name, *param_name, *param_str_val;
6457 	struct protection_override_element *elements;
6458 	u8 i;
6459 
6460 	/* Read global_params section */
6461 	dump_buf += qed_read_section_hdr(dump_buf,
6462 					 &section_name, &num_section_params);
6463 	if (strcmp(section_name, "global_params"))
6464 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6465 
6466 	/* Print global params */
6467 	dump_buf += qed_print_section_params(dump_buf,
6468 					     num_section_params,
6469 					     results_buf, &results_offset);
6470 
6471 	/* Read protection_override_data section */
6472 	dump_buf += qed_read_section_hdr(dump_buf,
6473 					 &section_name, &num_section_params);
6474 	if (strcmp(section_name, "protection_override_data"))
6475 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6476 	dump_buf += qed_read_param(dump_buf,
6477 				   &param_name, &param_str_val, &param_num_val);
6478 	if (strcmp(param_name, "size"))
6479 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6480 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
6481 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6482 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6483 	elements = (struct protection_override_element *)dump_buf;
6484 
6485 	/* Decode elements */
6486 	for (i = 0; i < num_elements; i++) {
6487 		u32 address = GET_FIELD(elements[i].data,
6488 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6489 					PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6490 
6491 		results_offset +=
6492 		    sprintf(qed_get_buf_ptr(results_buf,
6493 					    results_offset),
6494 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6495 			    i, address,
6496 			    (u32)GET_FIELD(elements[i].data,
6497 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6498 			    (u32)GET_FIELD(elements[i].data,
6499 				      PROTECTION_OVERRIDE_ELEMENT_READ),
6500 			    (u32)GET_FIELD(elements[i].data,
6501 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
6502 			    s_protection_strs[GET_FIELD(elements[i].data,
6503 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6504 			    s_protection_strs[GET_FIELD(elements[i].data,
6505 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6506 	}
6507 
6508 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6509 						  results_offset),
6510 				  "protection override contained %d elements",
6511 				  num_elements);
6512 
6513 	/* Add 1 for string NULL termination */
6514 	*parsed_results_bytes = results_offset + 1;
6515 	return DBG_STATUS_OK;
6516 }
6517 
6518 enum dbg_status
6519 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
6520 					     u32 *dump_buf,
6521 					     u32 num_dumped_dwords,
6522 					     u32 *results_buf_size)
6523 {
6524 	return qed_parse_protection_override_dump(p_hwfn,
6525 						  dump_buf,
6526 						  num_dumped_dwords,
6527 						  NULL, results_buf_size);
6528 }
6529 
6530 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
6531 						      u32 *dump_buf,
6532 						      u32 num_dumped_dwords,
6533 						      char *results_buf)
6534 {
6535 	u32 parsed_buf_size;
6536 
6537 	return qed_parse_protection_override_dump(p_hwfn,
6538 						  dump_buf,
6539 						  num_dumped_dwords,
6540 						  results_buf,
6541 						  &parsed_buf_size);
6542 }
6543 
6544 /* Parses a FW Asserts dump buffer.
6545  * If results_buf is not NULL, the FW Asserts results are printed to it.
6546  * In any case, the required results buffer size is assigned to
6547  * parsed_results_bytes.
6548  * The parsing status is returned.
6549  */
6550 static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
6551 						 u32 *dump_buf,
6552 						 u32 num_dumped_dwords,
6553 						 char *results_buf,
6554 						 u32 *parsed_results_bytes)
6555 {
6556 	u32 results_offset = 0, num_section_params, param_num_val, i;
6557 	const char *param_name, *param_str_val, *section_name;
6558 	bool last_section_found = false;
6559 
6560 	*parsed_results_bytes = 0;
6561 
6562 	/* Read global_params section */
6563 	dump_buf += qed_read_section_hdr(dump_buf,
6564 					 &section_name, &num_section_params);
6565 	if (strcmp(section_name, "global_params"))
6566 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6567 
6568 	/* Print global params */
6569 	dump_buf += qed_print_section_params(dump_buf,
6570 					     num_section_params,
6571 					     results_buf, &results_offset);
6572 	while (!last_section_found) {
6573 		const char *storm_letter = NULL;
6574 		u32 storm_dump_size = 0;
6575 
6576 		dump_buf += qed_read_section_hdr(dump_buf,
6577 						 &section_name,
6578 						 &num_section_params);
6579 		if (!strcmp(section_name, "last")) {
6580 			last_section_found = true;
6581 			continue;
6582 		} else if (strcmp(section_name, "fw_asserts")) {
6583 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6584 		}
6585 
6586 		/* Extract params */
6587 		for (i = 0; i < num_section_params; i++) {
6588 			dump_buf += qed_read_param(dump_buf,
6589 						   &param_name,
6590 						   &param_str_val,
6591 						   &param_num_val);
6592 			if (!strcmp(param_name, "storm"))
6593 				storm_letter = param_str_val;
6594 			else if (!strcmp(param_name, "size"))
6595 				storm_dump_size = param_num_val;
6596 			else
6597 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6598 		}
6599 
6600 		if (!storm_letter || !storm_dump_size)
6601 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
6602 
6603 		/* Print data */
6604 		results_offset += sprintf(qed_get_buf_ptr(results_buf,
6605 							  results_offset),
6606 					  "\n%sSTORM_ASSERT: size=%d\n",
6607 					  storm_letter, storm_dump_size);
6608 		for (i = 0; i < storm_dump_size; i++, dump_buf++)
6609 			results_offset +=
6610 			    sprintf(qed_get_buf_ptr(results_buf,
6611 						    results_offset),
6612 				    "%08x\n", *dump_buf);
6613 	}
6614 
6615 	/* Add 1 for string NULL termination */
6616 	*parsed_results_bytes = results_offset + 1;
6617 	return DBG_STATUS_OK;
6618 }
6619 
6620 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
6621 						    u32 *dump_buf,
6622 						    u32 num_dumped_dwords,
6623 						    u32 *results_buf_size)
6624 {
6625 	return qed_parse_fw_asserts_dump(p_hwfn,
6626 					 dump_buf,
6627 					 num_dumped_dwords,
6628 					 NULL, results_buf_size);
6629 }
6630 
6631 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
6632 					     u32 *dump_buf,
6633 					     u32 num_dumped_dwords,
6634 					     char *results_buf)
6635 {
6636 	u32 parsed_buf_size;
6637 
6638 	return qed_parse_fw_asserts_dump(p_hwfn,
6639 					 dump_buf,
6640 					 num_dumped_dwords,
6641 					 results_buf, &parsed_buf_size);
6642 }
6643 
6644 /* Wrapper for unifying the idle_chk and mcp_trace api */
6645 static enum dbg_status
6646 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
6647 				   u32 *dump_buf,
6648 				   u32 num_dumped_dwords,
6649 				   char *results_buf)
6650 {
6651 	u32 num_errors, num_warnings;
6652 
6653 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
6654 					  results_buf, &num_errors,
6655 					  &num_warnings);
6656 }
6657 
6658 /* Feature meta data lookup table */
6659 static struct {
6660 	char *name;
6661 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
6662 				    struct qed_ptt *p_ptt, u32 *size);
6663 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
6664 					struct qed_ptt *p_ptt, u32 *dump_buf,
6665 					u32 buf_size, u32 *dumped_dwords);
6666 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
6667 					 u32 *dump_buf, u32 num_dumped_dwords,
6668 					 char *results_buf);
6669 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
6670 					    u32 *dump_buf,
6671 					    u32 num_dumped_dwords,
6672 					    u32 *results_buf_size);
6673 } qed_features_lookup[] = {
6674 	{"grc", qed_dbg_grc_get_dump_buf_size,
6675 	 qed_dbg_grc_dump, NULL, NULL},
6676 	{"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
6677 	 qed_dbg_idle_chk_dump,
6678 	 qed_print_idle_chk_results_wrapper,
6679 	 qed_get_idle_chk_results_buf_size},
6680 	{"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
6681 	 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
6682 	 qed_get_mcp_trace_results_buf_size},
6683 	{"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
6684 	 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
6685 	 qed_get_reg_fifo_results_buf_size},
6686 	{"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
6687 	 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
6688 	 qed_get_igu_fifo_results_buf_size},
6689 	{"protection_override",
6690 	 qed_dbg_protection_override_get_dump_buf_size,
6691 	 qed_dbg_protection_override_dump,
6692 	 qed_print_protection_override_results,
6693 	 qed_get_protection_override_results_buf_size},
6694 	{"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
6695 	 qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results,
6696 	 qed_get_fw_asserts_results_buf_size},
6697 };
6704 
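/* Prints a formatted text buffer to the kernel log in fixed-size (80 char)
 * chunks, so that no single printk call receives an overly long string.
 */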
6705 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
6706 {
6707 	u32 i, precision = 80;
6708 
6709 	if (!p_text_buf)
6710 		return;
6711 
6712 	pr_notice("\n%.*s", precision, p_text_buf);
6713 	for (i = precision; i < text_size; i += precision)
6714 		pr_cont("%.*s", precision, p_text_buf + i);
6715 	pr_cont("\n");
6716 }
6717 
6718 #define QED_RESULTS_BUF_MIN_SIZE 16
6719 /* Generic function for decoding debug feature info */
6720 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
6721 				      enum qed_dbg_features feature_idx)
6722 {
6723 	struct qed_dbg_feature *feature =
6724 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
6725 	u32 text_size_bytes, null_char_pos, i;
6726 	enum dbg_status rc;
6727 	char *text_buf;
6728 
6729 	/* Check if feature supports formatting capability */
6730 	if (!qed_features_lookup[feature_idx].results_buf_size)
6731 		return DBG_STATUS_OK;
6732 
6733 	/* Obtain size of formatted output */
6734 	rc = qed_features_lookup[feature_idx].
6735 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
6736 				 feature->dumped_dwords, &text_size_bytes);
6737 	if (rc != DBG_STATUS_OK)
6738 		return rc;
6739 
	/* Make sure that the allocated size is a multiple of a dword (4 bytes) */
6741 	null_char_pos = text_size_bytes - 1;
6742 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
6743 
6744 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
6745 		DP_NOTICE(p_hwfn->cdev,
			  "Formatted size of feature is too small (%d). Aborting\n",
6747 			  text_size_bytes);
6748 		return DBG_STATUS_INVALID_ARGS;
6749 	}
6750 
6751 	/* Allocate temp text buf */
6752 	text_buf = vzalloc(text_size_bytes);
6753 	if (!text_buf)
6754 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6755 
6756 	/* Decode feature opcodes to string on temp buf */
6757 	rc = qed_features_lookup[feature_idx].
6758 		print_results(p_hwfn, (u32 *)feature->dump_buf,
6759 			      feature->dumped_dwords, text_buf);
6760 	if (rc != DBG_STATUS_OK) {
6761 		vfree(text_buf);
6762 		return rc;
6763 	}
6764 
6765 	/* Replace the original null character with a '\n' character.
6766 	 * The bytes that were added as a result of the dword alignment are also
6767 	 * padded with '\n' characters.
6768 	 */
6769 	for (i = null_char_pos; i < text_size_bytes; i++)
6770 		text_buf[i] = '\n';
6771 
6772 	/* Dump printable feature to log */
6773 	if (p_hwfn->cdev->dbg_params.print_data)
6774 		qed_dbg_print_feature(text_buf, text_size_bytes);
6775 
	/* Free the old dump_buf and point the dump_buf to the newly allocated
6777 	 * and formatted text buffer.
6778 	 */
6779 	vfree(feature->dump_buf);
6780 	feature->dump_buf = text_buf;
6781 	feature->buf_size = text_size_bytes;
6782 	feature->dumped_dwords = text_size_bytes / 4;
6783 	return rc;
6784 }
6785 
6786 /* Generic function for performing the dump of a debug feature. */
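/* The flow is: free any stale buffer left over from a previous dump, query
 * the required buffer size from the HSI, allocate it, perform the dump and,
 * when a parser is available, format the result via format_feature().
 */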
6787 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
6788 				    struct qed_ptt *p_ptt,
6789 				    enum qed_dbg_features feature_idx)
6790 {
6791 	struct qed_dbg_feature *feature =
6792 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
6793 	u32 buf_size_dwords;
6794 	enum dbg_status rc;
6795 
6796 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
6797 		  qed_features_lookup[feature_idx].name);
6798 
	/* If dump_buf was already allocated, free it (this can happen if a
	 * dump was performed but the file was never read).
	 * We can't reuse the buffer as-is since the required size may have
	 * changed.
	 */
6803 	if (feature->dump_buf) {
6804 		vfree(feature->dump_buf);
6805 		feature->dump_buf = NULL;
6806 	}
6807 
6808 	/* Get buffer size from hsi, allocate accordingly, and perform the
6809 	 * dump.
6810 	 */
6811 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
6812 						       &buf_size_dwords);
6813 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6814 		return rc;
6815 	feature->buf_size = buf_size_dwords * sizeof(u32);
6816 	feature->dump_buf = vmalloc(feature->buf_size);
6817 	if (!feature->dump_buf)
6818 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6819 
6820 	rc = qed_features_lookup[feature_idx].
6821 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
6822 			     feature->buf_size / sizeof(u32),
6823 			     &feature->dumped_dwords);
6824 
	/* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
	 * error. In this case the buffer holds valid binary data, but we
	 * won't be able to parse it (since parsing relies on data in NVRAM
	 * which is only accessible when the MFW is responsive). Skip the
	 * formatting but return success so that the binary data is provided.
	 */
6831 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
6832 		return DBG_STATUS_OK;
6833 
6834 	if (rc != DBG_STATUS_OK)
6835 		return rc;
6836 
6837 	/* Format output */
6838 	rc = format_feature(p_hwfn, feature_idx);
6839 	return rc;
6840 }
6841 
6842 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6843 {
6844 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
6845 }
6846 
6847 int qed_dbg_grc_size(struct qed_dev *cdev)
6848 {
6849 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
6850 }
6851 
6852 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6853 {
6854 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
6855 			       num_dumped_bytes);
6856 }
6857 
6858 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
6859 {
6860 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
6861 }
6862 
6863 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6864 {
6865 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
6866 			       num_dumped_bytes);
6867 }
6868 
6869 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
6870 {
6871 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
6872 }
6873 
6874 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
6875 {
6876 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
6877 			       num_dumped_bytes);
6878 }
6879 
6880 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
6881 {
6882 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
6883 }
6884 
6885 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
6886 				u32 *num_dumped_bytes)
6887 {
6888 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
6889 			       num_dumped_bytes);
6890 }
6891 
6892 int qed_dbg_protection_override_size(struct qed_dev *cdev)
6893 {
6894 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
6895 }
6896 
6897 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
6898 		       u32 *num_dumped_bytes)
6899 {
6900 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
6901 			       num_dumped_bytes);
6902 }
6903 
6904 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
6905 {
6906 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
6907 }
6908 
6909 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
6910 		      u32 *num_dumped_bytes)
6911 {
6912 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
6913 			       num_dumped_bytes);
6914 }
6915 
6916 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
6917 {
6918 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
6919 }
6920 
/* Defines the number of bytes allocated for recording the length of the
 * debugfs feature buffer.
 */
6924 #define REGDUMP_HEADER_SIZE			sizeof(u32)
6925 #define REGDUMP_HEADER_FEATURE_SHIFT		24
6926 #define REGDUMP_HEADER_ENGINE_SHIFT		31
6927 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
6928 enum debug_print_features {
6929 	OLD_MODE = 0,
6930 	IDLE_CHK = 1,
6931 	GRC_DUMP = 2,
6932 	MCP_TRACE = 3,
6933 	REG_FIFO = 4,
6934 	PROTECTION_OVERRIDE = 5,
6935 	IGU_FIFO = 6,
6936 	PHY = 7,
6937 	FW_ASSERTS = 8,
6938 };
6939 
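/* Regdump header layout, as implied by the shift definitions above:
 * bits [23:0]  - feature dump size in bytes
 * bits [29:24] - feature type (enum debug_print_features)
 * bit  30      - omit_engine flag
 * bit  31      - engine number
 */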
6940 static u32 qed_calc_regdump_header(enum debug_print_features feature,
6941 				   int engine, u32 feature_size, u8 omit_engine)
6942 {
6943 	/* Insert the engine, feature and mode inside the header and combine it
6944 	 * with feature size.
6945 	 */
6946 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
6947 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
6948 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
6949 }
6950 
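/* Fills 'buffer' with a sequence of records, each consisting of a single
 * header dword (see qed_calc_regdump_header()) followed by the feature data:
 * per engine - two idle_chk dumps, reg_fifo, igu_fifo, protection_override,
 * fw_asserts and grc; plus a single engine-common mcp_trace at the end.
 */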
6951 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
6952 {
6953 	u8 cur_engine, omit_engine = 0, org_engine;
6954 	u32 offset = 0, feature_size;
6955 	int rc;
6956 
6957 	if (cdev->num_hwfns == 1)
6958 		omit_engine = 1;
6959 
6960 	org_engine = qed_get_debug_engine(cdev);
6961 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
6962 		/* Collect idle_chks and grcDump for each hw function */
6963 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
6964 			   "obtaining idle_chk and grcdump for current engine\n");
6965 		qed_set_debug_engine(cdev, cur_engine);
6966 
6967 		/* First idle_chk */
6968 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
6969 				      REGDUMP_HEADER_SIZE, &feature_size);
6970 		if (!rc) {
6971 			*(u32 *)((u8 *)buffer + offset) =
6972 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
6973 						    feature_size, omit_engine);
6974 			offset += (feature_size + REGDUMP_HEADER_SIZE);
6975 		} else {
6976 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
6977 		}
6978 
6979 		/* Second idle_chk */
6980 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
6981 				      REGDUMP_HEADER_SIZE, &feature_size);
6982 		if (!rc) {
6983 			*(u32 *)((u8 *)buffer + offset) =
6984 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
6985 						    feature_size, omit_engine);
6986 			offset += (feature_size + REGDUMP_HEADER_SIZE);
6987 		} else {
6988 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
6989 		}
6990 
6991 		/* reg_fifo dump */
6992 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
6993 				      REGDUMP_HEADER_SIZE, &feature_size);
6994 		if (!rc) {
6995 			*(u32 *)((u8 *)buffer + offset) =
6996 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
6997 						    feature_size, omit_engine);
6998 			offset += (feature_size + REGDUMP_HEADER_SIZE);
6999 		} else {
7000 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7001 		}
7002 
7003 		/* igu_fifo dump */
7004 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7005 				      REGDUMP_HEADER_SIZE, &feature_size);
7006 		if (!rc) {
7007 			*(u32 *)((u8 *)buffer + offset) =
7008 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
7009 						    feature_size, omit_engine);
7010 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7011 		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
7013 		}
7014 
7015 		/* protection_override dump */
7016 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7017 						 REGDUMP_HEADER_SIZE,
7018 						 &feature_size);
7019 		if (!rc) {
7020 			*(u32 *)((u8 *)buffer + offset) =
7021 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
7022 						    cur_engine,
7023 						    feature_size, omit_engine);
7024 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7025 		} else {
7026 			DP_ERR(cdev,
7027 			       "qed_dbg_protection_override failed. rc = %d\n",
7028 			       rc);
7029 		}
7030 
7031 		/* fw_asserts dump */
7032 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7033 					REGDUMP_HEADER_SIZE, &feature_size);
7034 		if (!rc) {
7035 			*(u32 *)((u8 *)buffer + offset) =
7036 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7037 						    feature_size, omit_engine);
7038 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7039 		} else {
7040 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7041 			       rc);
7042 		}
7043 
		/* GRC dump - must be last because when the MCP is stuck,
		 * collecting it would clutter the idle_chk, reg_fifo, etc.
		 * dumps.
		 */
7047 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7048 				 REGDUMP_HEADER_SIZE, &feature_size);
7049 		if (!rc) {
7050 			*(u32 *)((u8 *)buffer + offset) =
7051 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
7052 						    feature_size, omit_engine);
7053 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7054 		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
7056 		}
7057 	}
7058 
7059 	/* mcp_trace */
7060 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7061 			       REGDUMP_HEADER_SIZE, &feature_size);
7062 	if (!rc) {
7063 		*(u32 *)((u8 *)buffer + offset) =
7064 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
7065 					    feature_size, omit_engine);
7066 		offset += (feature_size + REGDUMP_HEADER_SIZE);
7067 	} else {
7068 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7069 	}
7070 
7071 	qed_set_debug_engine(cdev, org_engine);
7072 
7073 	return 0;
7074 }
7075 
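/* Returns the buffer size (in bytes) required by qed_dbg_all_data(): one
 * header dword plus the feature size for every record described above.
 */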
7076 int qed_dbg_all_data_size(struct qed_dev *cdev)
7077 {
7078 	u8 cur_engine, org_engine;
7079 	u32 regs_len = 0;
7080 
7081 	org_engine = qed_get_debug_engine(cdev);
7082 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7083 		/* Engine specific */
7084 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7085 			   "calculating idle_chk and grcdump register length for current engine\n");
7086 		qed_set_debug_engine(cdev, cur_engine);
7087 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7088 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7089 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7090 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7091 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7092 			    REGDUMP_HEADER_SIZE +
7093 			    qed_dbg_protection_override_size(cdev) +
7094 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7095 	}
7096 
7097 	/* Engine common */
7098 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7099 	qed_set_debug_engine(cdev, org_engine);
7100 
7101 	return regs_len;
7102 }
7103 
7104 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
7105 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
7106 {
7107 	struct qed_hwfn *p_hwfn =
7108 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7109 	struct qed_dbg_feature *qed_feature =
7110 		&cdev->dbg_params.features[feature];
7111 	enum dbg_status dbg_rc;
7112 	struct qed_ptt *p_ptt;
7113 	int rc = 0;
7114 
7115 	/* Acquire ptt */
7116 	p_ptt = qed_ptt_acquire(p_hwfn);
7117 	if (!p_ptt)
7118 		return -EINVAL;
7119 
7120 	/* Get dump */
7121 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
7122 	if (dbg_rc != DBG_STATUS_OK) {
7123 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
7124 			   qed_dbg_get_status_str(dbg_rc));
7125 		*num_dumped_bytes = 0;
7126 		rc = -EINVAL;
7127 		goto out;
7128 	}
7129 
7130 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
7131 		   "copying debugfs feature to external buffer\n");
7132 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = qed_feature->dumped_dwords * sizeof(u32);
7135 
7136 out:
7137 	qed_ptt_release(p_hwfn, p_ptt);
7138 	return rc;
7139 }
7140 
7141 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
7142 {
7143 	struct qed_hwfn *p_hwfn =
7144 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7145 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
7146 	struct qed_dbg_feature *qed_feature =
7147 		&cdev->dbg_params.features[feature];
7148 	u32 buf_size_dwords;
7149 	enum dbg_status rc;
7150 
7151 	if (!p_ptt)
7152 		return -EINVAL;
7153 
7154 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
7155 						   &buf_size_dwords);
7156 	if (rc != DBG_STATUS_OK)
7157 		buf_size_dwords = 0;
7158 
7159 	qed_ptt_release(p_hwfn, p_ptt);
7160 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
7161 	return qed_feature->buf_size;
7162 }
7163 
7164 u8 qed_get_debug_engine(struct qed_dev *cdev)
7165 {
7166 	return cdev->dbg_params.engine_for_debug;
7167 }
7168 
7169 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
7170 {
7171 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
7172 		   engine_number);
7173 	cdev->dbg_params.engine_for_debug = engine_number;
7174 }
7175 
7176 void qed_dbg_pf_init(struct qed_dev *cdev)
7177 {
7178 	const u8 *dbg_values;
7179 
	/* Debug values are located after the init values in the firmware
	 * file. The offset of the debug values is stored in the first dword
	 * of the file.
	 */
7183 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
7184 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
7185 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
7186 }
7187 
7188 void qed_dbg_pf_exit(struct qed_dev *cdev)
7189 {
7190 	struct qed_dbg_feature *feature = NULL;
7191 	enum qed_dbg_features feature_idx;
7192 
7193 	/* Debug features' buffers may be allocated if debug feature was used
7194 	 * but dump wasn't called.
7195 	 */
7196 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
7197 		feature = &cdev->dbg_params.features[feature_idx];
7198 		if (feature->dump_buf) {
7199 			vfree(feature->dump_buf);
7200 			feature->dump_buf = NULL;
7201 		}
7202 	}
7203 }
7204