1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Memory groups enum */
19 enum mem_groups {
20 	MEM_GROUP_PXP_MEM,
21 	MEM_GROUP_DMAE_MEM,
22 	MEM_GROUP_CM_MEM,
23 	MEM_GROUP_QM_MEM,
24 	MEM_GROUP_DORQ_MEM,
25 	MEM_GROUP_BRB_RAM,
26 	MEM_GROUP_BRB_MEM,
27 	MEM_GROUP_PRS_MEM,
28 	MEM_GROUP_IOR,
29 	MEM_GROUP_BTB_RAM,
30 	MEM_GROUP_CONN_CFC_MEM,
31 	MEM_GROUP_TASK_CFC_MEM,
32 	MEM_GROUP_CAU_PI,
33 	MEM_GROUP_CAU_MEM,
34 	MEM_GROUP_PXP_ILT,
35 	MEM_GROUP_TM_MEM,
36 	MEM_GROUP_SDM_MEM,
37 	MEM_GROUP_PBUF,
38 	MEM_GROUP_RAM,
39 	MEM_GROUP_MULD_MEM,
40 	MEM_GROUP_BTB_MEM,
41 	MEM_GROUP_RDIF_CTX,
42 	MEM_GROUP_TDIF_CTX,
43 	MEM_GROUP_CFC_MEM,
44 	MEM_GROUP_IGU_MEM,
45 	MEM_GROUP_IGU_MSIX,
46 	MEM_GROUP_CAU_SB,
47 	MEM_GROUP_BMB_RAM,
48 	MEM_GROUP_BMB_MEM,
49 	MEM_GROUPS_NUM
50 };
51 
52 /* Memory group names */
53 static const char * const s_mem_group_names[] = {
54 	"PXP_MEM",
55 	"DMAE_MEM",
56 	"CM_MEM",
57 	"QM_MEM",
58 	"DORQ_MEM",
59 	"BRB_RAM",
60 	"BRB_MEM",
61 	"PRS_MEM",
62 	"IOR",
63 	"BTB_RAM",
64 	"CONN_CFC_MEM",
65 	"TASK_CFC_MEM",
66 	"CAU_PI",
67 	"CAU_MEM",
68 	"PXP_ILT",
69 	"TM_MEM",
70 	"SDM_MEM",
71 	"PBUF",
72 	"RAM",
73 	"MULD_MEM",
74 	"BTB_MEM",
75 	"RDIF_CTX",
76 	"TDIF_CTX",
77 	"CFC_MEM",
78 	"IGU_MEM",
79 	"IGU_MSIX",
80 	"CAU_SB",
81 	"BMB_RAM",
82 	"BMB_MEM",
83 };
84 
85 /* Idle check conditions */
86 
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91 
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96 
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 	return (r[0] & imm[0]) != imm[1];
100 }
101 
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 	return ((r[0] & imm[0]) >> imm[1]) !=
105 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107 
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112 
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 	return (r[0] & ~imm[0]) != imm[1];
116 }
117 
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 	return (r[0] & ~r[1]) != imm[0];
121 }
122 
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 	return r[0] != imm[0];
126 }
127 
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 	return r[0] != r[1] && r[2] == imm[0];
131 }
132 
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 	return r[0] != r[1] && r[2] > imm[0];
136 }
137 
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 	return r[0] != r[1];
141 }
142 
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 	return r[0] & imm[0];
146 }
147 
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 	return r[0] < (r[1] - imm[0]);
151 }
152 
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 	return r[0] > imm[0];
156 }
157 
158 /* Array of Idle Check conditions */
159 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
160 	cond0,
161 	cond1,
162 	cond2,
163 	cond3,
164 	cond4,
165 	cond5,
166 	cond6,
167 	cond7,
168 	cond8,
169 	cond9,
170 	cond10,
171 	cond11,
172 	cond12,
173 	cond13,
174 };
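/* Illustrative sketch (not lifted from the rule-evaluation code; reg_val,
 * expected_val and num_failing_rules are placeholder locals): an idle check
 * rule stores a condition index plus immediate values, and the rule fails
 * when the selected condition returns non-zero for the register values read
 * from the chip. Condition 1, for example, compares one register against one
 * immediate:
 *
 *	u32 reg_vals[] = { reg_val };
 *	u32 imms[] = { expected_val };
 *
 *	if (cond_arr[1](reg_vals, imms))
 *		num_failing_rules++;
 */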
175 
176 /******************************* Data Types **********************************/
177 
178 enum platform_ids {
179 	PLATFORM_ASIC,
180 	PLATFORM_RESERVED,
181 	PLATFORM_RESERVED2,
182 	PLATFORM_RESERVED3,
183 	MAX_PLATFORM_IDS
184 };
185 
186 struct chip_platform_defs {
187 	u8 num_ports;
188 	u8 num_pfs;
189 	u8 num_vfs;
190 };
191 
192 /* Chip constant definitions */
193 struct chip_defs {
194 	const char *name;
195 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
196 };
197 
198 /* Platform constant definitions */
199 struct platform_defs {
200 	const char *name;
201 	u32 delay_factor;
202 	u32 dmae_thresh;
203 	u32 log_thresh;
204 };
205 
206 /* Storm constant definitions.
207  * Addresses are in bytes, sizes are in quad-regs.
208  */
209 struct storm_defs {
210 	char letter;
211 	enum block_id block_id;
212 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
213 	bool has_vfc;
214 	u32 sem_fast_mem_addr;
215 	u32 sem_frame_mode_addr;
216 	u32 sem_slow_enable_addr;
217 	u32 sem_slow_mode_addr;
218 	u32 sem_slow_mode1_conf_addr;
219 	u32 sem_sync_dbg_empty_addr;
220 	u32 sem_slow_dbg_empty_addr;
221 	u32 cm_ctx_wr_addr;
222 	u32 cm_conn_ag_ctx_lid_size;
223 	u32 cm_conn_ag_ctx_rd_addr;
224 	u32 cm_conn_st_ctx_lid_size;
225 	u32 cm_conn_st_ctx_rd_addr;
226 	u32 cm_task_ag_ctx_lid_size;
227 	u32 cm_task_ag_ctx_rd_addr;
228 	u32 cm_task_st_ctx_lid_size;
229 	u32 cm_task_st_ctx_rd_addr;
230 };
231 
232 /* Block constant definitions */
233 struct block_defs {
234 	const char *name;
235 	bool exists[MAX_CHIP_IDS];
236 	bool associated_to_storm;
237 
238 	/* Valid only if associated_to_storm is true */
239 	u32 storm_id;
240 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
241 	u32 dbg_select_addr;
242 	u32 dbg_enable_addr;
243 	u32 dbg_shift_addr;
244 	u32 dbg_force_valid_addr;
245 	u32 dbg_force_frame_addr;
246 	bool has_reset_bit;
247 
248 	/* If true, block is taken out of reset before dump */
249 	bool unreset;
250 	enum dbg_reset_regs reset_reg;
251 
252 	/* Bit offset in reset register */
253 	u8 reset_bit_offset;
254 };
255 
256 /* Reset register definitions */
257 struct reset_reg_defs {
258 	u32 addr;
259 	bool exists[MAX_CHIP_IDS];
260 	u32 unreset_val[MAX_CHIP_IDS];
261 };
262 
263 struct grc_param_defs {
264 	u32 default_val[MAX_CHIP_IDS];
265 	u32 min;
266 	u32 max;
267 	bool is_preset;
268 	u32 exclude_all_preset_val;
269 	u32 crash_preset_val;
270 };
271 
272 /* Address is in 128b units. Width is in bits. */
273 struct rss_mem_defs {
274 	const char *mem_name;
275 	const char *type_name;
276 	u32 addr;
277 	u32 entry_width;
278 	u32 num_entries[MAX_CHIP_IDS];
279 };
280 
281 struct vfc_ram_defs {
282 	const char *mem_name;
283 	const char *type_name;
284 	u32 base_row;
285 	u32 num_rows;
286 };
287 
288 struct big_ram_defs {
289 	const char *instance_name;
290 	enum mem_groups mem_group_id;
291 	enum mem_groups ram_mem_group_id;
292 	enum dbg_grc_params grc_param;
293 	u32 addr_reg_addr;
294 	u32 data_reg_addr;
295 	u32 is_256b_reg_addr;
296 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
297 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
298 };
299 
300 struct phy_defs {
301 	const char *phy_name;
302 
303 	/* PHY base GRC address */
304 	u32 base_addr;
305 
306 	/* Relative address of indirect TBUS address register (bits 0..7) */
307 	u32 tbus_addr_lo_addr;
308 
309 	/* Relative address of indirect TBUS address register (bits 8..10) */
310 	u32 tbus_addr_hi_addr;
311 
312 	/* Relative address of indirect TBUS data register (bits 0..7) */
313 	u32 tbus_data_lo_addr;
314 
315 	/* Relative address of indirect TBUS data register (bits 8..11) */
316 	u32 tbus_data_hi_addr;
317 };
318 
319 /******************************** Constants **********************************/
320 
321 #define MAX_LCIDS			320
322 #define MAX_LTIDS			320
323 
324 #define NUM_IOR_SETS			2
325 #define IORS_PER_SET			176
326 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
327 
328 #define BYTES_IN_DWORD			sizeof(u32)
329 
330 /* In the macros below, size and offset are specified in bits */
331 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
332 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
333 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
334 #define FIELD_DWORD_OFFSET(type, field) \
335 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
336 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
337 #define FIELD_BIT_MASK(type, field) \
338 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
339 	 FIELD_DWORD_SHIFT(type, field))
340 
341 #define SET_VAR_FIELD(var, type, field, val) \
342 	do { \
343 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
344 		(~FIELD_BIT_MASK(type, field));	\
345 		var[FIELD_DWORD_OFFSET(type, field)] |= \
346 		(val) << FIELD_DWORD_SHIFT(type, field); \
347 	} while (0)
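/* Usage sketch, using the VFC CAM command layout defined further below
 * (VFC_CAM_CMD_ROW_OFFSET = 48, VFC_CAM_CMD_ROW_SIZE = 9):
 *
 *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * The field lands in dword 48 / 32 = 1 at shift 48 % 32 = 16, so the macro
 * clears bits 16..24 of cam_cmd[1] and ORs in (row << 16).
 */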
348 
349 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
350 	do { \
351 		for (i = 0; i < (arr_size); i++) \
352 			qed_wr(dev, ptt, addr,	(arr)[i]); \
353 	} while (0)
354 
355 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
356 	do { \
357 		for (i = 0; i < (arr_size); i++) \
358 			(arr)[i] = qed_rd(dev, ptt, addr); \
359 	} while (0)
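/* Note that ARR_REG_WR and ARR_REG_RD expand to a loop over a counter named
 * 'i', which must already be declared as an index variable in the calling
 * function.
 */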
360 
361 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
362 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
363 
364 /* Extra lines include a signature line + optional latency events line */
365 #define NUM_EXTRA_DBG_LINES(block_desc) \
366 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
367 #define NUM_DBG_LINES(block_desc) \
368 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
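/* For example, a block descriptor with num_of_lines == 3 and latency events
 * enabled yields NUM_DBG_LINES() == 3 + 1 (signature line) + 1 (latency
 * events line) == 5.
 */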
369 
370 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
371 #define RAM_LINES_TO_BYTES(lines) \
372 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
373 
374 #define REG_DUMP_LEN_SHIFT		24
375 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
376 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
377 
378 #define IDLE_CHK_RULE_SIZE_DWORDS \
379 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
380 
381 #define IDLE_CHK_RESULT_HDR_DWORDS \
382 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
383 
384 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
385 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
386 
387 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
388 
389 /* The sizes and offsets below are specified in bits */
390 #define VFC_CAM_CMD_STRUCT_SIZE		64
391 #define VFC_CAM_CMD_ROW_OFFSET		48
392 #define VFC_CAM_CMD_ROW_SIZE		9
393 #define VFC_CAM_ADDR_STRUCT_SIZE	16
394 #define VFC_CAM_ADDR_OP_OFFSET		0
395 #define VFC_CAM_ADDR_OP_SIZE		4
396 #define VFC_CAM_RESP_STRUCT_SIZE	256
397 #define VFC_RAM_ADDR_STRUCT_SIZE	16
398 #define VFC_RAM_ADDR_OP_OFFSET		0
399 #define VFC_RAM_ADDR_OP_SIZE		2
400 #define VFC_RAM_ADDR_ROW_OFFSET		2
401 #define VFC_RAM_ADDR_ROW_SIZE		10
402 #define VFC_RAM_RESP_STRUCT_SIZE	256
403 
404 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
405 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
406 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
407 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
408 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
409 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
410 
411 #define NUM_VFC_RAM_TYPES		4
412 
413 #define VFC_CAM_NUM_ROWS		512
414 
415 #define VFC_OPCODE_CAM_RD		14
416 #define VFC_OPCODE_RAM_RD		0
417 
418 #define NUM_RSS_MEM_TYPES		5
419 
420 #define NUM_BIG_RAM_TYPES		3
421 
422 #define NUM_PHY_TBUS_ADDRESSES		2048
423 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
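/* The dump presumably stores two data bytes (TBUS data lo + hi) per TBUS
 * address, i.e. half a dword per address: 2048 addresses * 2 bytes =
 * 4096 bytes = 1024 dwords.
 */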
424 
425 #define RESET_REG_UNRESET_OFFSET	4
426 
427 #define STALL_DELAY_MS			500
428 
429 #define STATIC_DEBUG_LINE_DWORDS	9
430 
431 #define NUM_COMMON_GLOBAL_PARAMS	8
432 
433 #define FW_IMG_MAIN			1
434 
435 #define REG_FIFO_ELEMENT_DWORDS		2
436 #define REG_FIFO_DEPTH_ELEMENTS		32
437 #define REG_FIFO_DEPTH_DWORDS \
438 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
439 
440 #define IGU_FIFO_ELEMENT_DWORDS		4
441 #define IGU_FIFO_DEPTH_ELEMENTS		64
442 #define IGU_FIFO_DEPTH_DWORDS \
443 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
444 
445 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
446 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
447 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
448 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
449 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
450 
451 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
452 	(MCP_REG_SCRATCH + \
453 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
454 
455 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
456 #define EMPTY_FW_IMAGE_STR		"???????????????"
457 
458 /***************************** Constant Arrays *******************************/
459 
460 struct dbg_array {
461 	const u32 *ptr;
462 	u32 size_in_dwords;
463 };
464 
465 /* Debug arrays */
466 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
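/* All entries start out as { NULL, 0 } and are expected to be populated at
 * init time (typically via qed_dbg_set_bin_ptr()) from the debug metadata
 * that accompanies the firmware image.
 */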
467 
468 /* Chip constant definitions array */
469 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
470 	{ "bb",
471 	  {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
472 	   {0, 0, 0},
473 	   {0, 0, 0},
474 	   {0, 0, 0} } },
475 	{ "ah",
476 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
477 	   {0, 0, 0},
478 	   {0, 0, 0},
479 	   {0, 0, 0} } },
480 	{ "reserved",
481 	   {{0, 0, 0},
482 	   {0, 0, 0},
483 	   {0, 0, 0},
484 	   {0, 0, 0} } }
485 };
486 
487 /* Storm constant definitions array */
488 static struct storm_defs s_storm_defs[] = {
489 	/* Tstorm */
490 	{'T', BLOCK_TSEM,
491 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
492 	  DBG_BUS_CLIENT_RBCT}, true,
493 	 TSEM_REG_FAST_MEMORY,
494 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
495 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
496 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
497 	 TCM_REG_CTX_RBC_ACCS,
498 	 4, TCM_REG_AGG_CON_CTX,
499 	 16, TCM_REG_SM_CON_CTX,
500 	 2, TCM_REG_AGG_TASK_CTX,
501 	 4, TCM_REG_SM_TASK_CTX},
502 
503 	/* Mstorm */
504 	{'M', BLOCK_MSEM,
505 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
506 	  DBG_BUS_CLIENT_RBCM}, false,
507 	 MSEM_REG_FAST_MEMORY,
508 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
509 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
510 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
511 	 MCM_REG_CTX_RBC_ACCS,
512 	 1, MCM_REG_AGG_CON_CTX,
513 	 10, MCM_REG_SM_CON_CTX,
514 	 2, MCM_REG_AGG_TASK_CTX,
515 	 7, MCM_REG_SM_TASK_CTX},
516 
517 	/* Ustorm */
518 	{'U', BLOCK_USEM,
519 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
520 	  DBG_BUS_CLIENT_RBCU}, false,
521 	 USEM_REG_FAST_MEMORY,
522 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
523 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
524 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
525 	 UCM_REG_CTX_RBC_ACCS,
526 	 2, UCM_REG_AGG_CON_CTX,
527 	 13, UCM_REG_SM_CON_CTX,
528 	 3, UCM_REG_AGG_TASK_CTX,
529 	 3, UCM_REG_SM_TASK_CTX},
530 
531 	/* Xstorm */
532 	{'X', BLOCK_XSEM,
533 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
534 	  DBG_BUS_CLIENT_RBCX}, false,
535 	 XSEM_REG_FAST_MEMORY,
536 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
537 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
538 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
539 	 XCM_REG_CTX_RBC_ACCS,
540 	 9, XCM_REG_AGG_CON_CTX,
541 	 15, XCM_REG_SM_CON_CTX,
542 	 0, 0,
543 	 0, 0},
544 
545 	/* Ystorm */
546 	{'Y', BLOCK_YSEM,
547 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
548 	  DBG_BUS_CLIENT_RBCY}, false,
549 	 YSEM_REG_FAST_MEMORY,
550 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
551 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
552 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
553 	 YCM_REG_CTX_RBC_ACCS,
554 	 2, YCM_REG_AGG_CON_CTX,
555 	 3, YCM_REG_SM_CON_CTX,
556 	 2, YCM_REG_AGG_TASK_CTX,
557 	 12, YCM_REG_SM_TASK_CTX},
558 
559 	/* Pstorm */
560 	{'P', BLOCK_PSEM,
561 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
562 	  DBG_BUS_CLIENT_RBCS}, true,
563 	 PSEM_REG_FAST_MEMORY,
564 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
565 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
566 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
567 	 PCM_REG_CTX_RBC_ACCS,
568 	 0, 0,
569 	 10, PCM_REG_SM_CON_CTX,
570 	 0, 0,
571 	 0, 0}
572 };
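/* Reading the positional initializers above: after the SEM debug registers
 * and cm_ctx_wr_addr, each storm entry ends with four (lid_size, rd_addr)
 * pairs for the connection AG, connection ST, task AG and task ST context
 * types, with sizes in quad-regs per the struct storm_defs comment. A pair
 * of zeros (e.g. the Xstorm and Pstorm task contexts) means that context
 * type is not dumped for that storm.
 */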
573 
574 /* Block definitions array */
575 
576 static struct block_defs block_grc_defs = {
577 	"grc",
578 	{true, true, true}, false, 0,
579 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
580 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
581 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
582 	GRC_REG_DBG_FORCE_FRAME,
583 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
584 };
585 
586 static struct block_defs block_miscs_defs = {
587 	"miscs", {true, true, true}, false, 0,
588 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
589 	0, 0, 0, 0, 0,
590 	false, false, MAX_DBG_RESET_REGS, 0
591 };
592 
593 static struct block_defs block_misc_defs = {
594 	"misc", {true, true, true}, false, 0,
595 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
596 	0, 0, 0, 0, 0,
597 	false, false, MAX_DBG_RESET_REGS, 0
598 };
599 
600 static struct block_defs block_dbu_defs = {
601 	"dbu", {true, true, true}, false, 0,
602 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
603 	0, 0, 0, 0, 0,
604 	false, false, MAX_DBG_RESET_REGS, 0
605 };
606 
607 static struct block_defs block_pglue_b_defs = {
608 	"pglue_b",
609 	{true, true, true}, false, 0,
610 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
611 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
612 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
613 	PGLUE_B_REG_DBG_FORCE_FRAME,
614 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
615 };
616 
617 static struct block_defs block_cnig_defs = {
618 	"cnig",
619 	{true, true, true}, false, 0,
620 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
621 	 DBG_BUS_CLIENT_RBCW},
622 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
623 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
624 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
625 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
626 };
627 
628 static struct block_defs block_cpmu_defs = {
629 	"cpmu", {true, true, true}, false, 0,
630 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
631 	0, 0, 0, 0, 0,
632 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
633 };
634 
635 static struct block_defs block_ncsi_defs = {
636 	"ncsi",
637 	{true, true, true}, false, 0,
638 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
639 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
640 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
641 	NCSI_REG_DBG_FORCE_FRAME,
642 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
643 };
644 
645 static struct block_defs block_opte_defs = {
646 	"opte", {true, true, false}, false, 0,
647 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
648 	0, 0, 0, 0, 0,
649 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
650 };
651 
652 static struct block_defs block_bmb_defs = {
653 	"bmb",
654 	{true, true, true}, false, 0,
655 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
656 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
657 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
658 	BMB_REG_DBG_FORCE_FRAME,
659 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
660 };
661 
662 static struct block_defs block_pcie_defs = {
663 	"pcie",
664 	{true, true, true}, false, 0,
665 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
666 	 DBG_BUS_CLIENT_RBCH},
667 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
668 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
669 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
670 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
671 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
672 	false, false, MAX_DBG_RESET_REGS, 0
673 };
674 
675 static struct block_defs block_mcp_defs = {
676 	"mcp", {true, true, true}, false, 0,
677 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
678 	0, 0, 0, 0, 0,
679 	false, false, MAX_DBG_RESET_REGS, 0
680 };
681 
682 static struct block_defs block_mcp2_defs = {
683 	"mcp2",
684 	{true, true, true}, false, 0,
685 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
686 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
687 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
688 	MCP2_REG_DBG_FORCE_FRAME,
689 	false, false, MAX_DBG_RESET_REGS, 0
690 };
691 
692 static struct block_defs block_pswhst_defs = {
693 	"pswhst",
694 	{true, true, true}, false, 0,
695 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
696 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
697 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
698 	PSWHST_REG_DBG_FORCE_FRAME,
699 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
700 };
701 
702 static struct block_defs block_pswhst2_defs = {
703 	"pswhst2",
704 	{true, true, true}, false, 0,
705 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
706 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
707 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
708 	PSWHST2_REG_DBG_FORCE_FRAME,
709 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
710 };
711 
712 static struct block_defs block_pswrd_defs = {
713 	"pswrd",
714 	{true, true, true}, false, 0,
715 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
716 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
717 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
718 	PSWRD_REG_DBG_FORCE_FRAME,
719 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
720 };
721 
722 static struct block_defs block_pswrd2_defs = {
723 	"pswrd2",
724 	{true, true, true}, false, 0,
725 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
726 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
727 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
728 	PSWRD2_REG_DBG_FORCE_FRAME,
729 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
730 };
731 
732 static struct block_defs block_pswwr_defs = {
733 	"pswwr",
734 	{true, true, true}, false, 0,
735 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
736 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
737 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
738 	PSWWR_REG_DBG_FORCE_FRAME,
739 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
740 };
741 
742 static struct block_defs block_pswwr2_defs = {
743 	"pswwr2", {true, true, true}, false, 0,
744 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
745 	0, 0, 0, 0, 0,
746 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
747 };
748 
749 static struct block_defs block_pswrq_defs = {
750 	"pswrq",
751 	{true, true, true}, false, 0,
752 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
753 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
754 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
755 	PSWRQ_REG_DBG_FORCE_FRAME,
756 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
757 };
758 
759 static struct block_defs block_pswrq2_defs = {
760 	"pswrq2",
761 	{true, true, true}, false, 0,
762 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
763 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
764 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
765 	PSWRQ2_REG_DBG_FORCE_FRAME,
766 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
767 };
768 
769 static struct block_defs block_pglcs_defs = {
770 	"pglcs",
771 	{true, true, true}, false, 0,
772 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
773 	 DBG_BUS_CLIENT_RBCH},
774 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
775 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
776 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
777 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
778 };
779 
780 static struct block_defs block_ptu_defs = {
781 	"ptu",
782 	{true, true, true}, false, 0,
783 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
784 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
785 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
786 	PTU_REG_DBG_FORCE_FRAME,
787 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
788 };
789 
790 static struct block_defs block_dmae_defs = {
791 	"dmae",
792 	{true, true, true}, false, 0,
793 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
794 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
795 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
796 	DMAE_REG_DBG_FORCE_FRAME,
797 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
798 };
799 
800 static struct block_defs block_tcm_defs = {
801 	"tcm",
802 	{true, true, true}, true, DBG_TSTORM_ID,
803 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
804 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
805 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
806 	TCM_REG_DBG_FORCE_FRAME,
807 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
808 };
809 
810 static struct block_defs block_mcm_defs = {
811 	"mcm",
812 	{true, true, true}, true, DBG_MSTORM_ID,
813 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
814 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
815 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
816 	MCM_REG_DBG_FORCE_FRAME,
817 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
818 };
819 
820 static struct block_defs block_ucm_defs = {
821 	"ucm",
822 	{true, true, true}, true, DBG_USTORM_ID,
823 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
824 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
825 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
826 	UCM_REG_DBG_FORCE_FRAME,
827 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
828 };
829 
830 static struct block_defs block_xcm_defs = {
831 	"xcm",
832 	{true, true, true}, true, DBG_XSTORM_ID,
833 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
834 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
835 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
836 	XCM_REG_DBG_FORCE_FRAME,
837 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
838 };
839 
840 static struct block_defs block_ycm_defs = {
841 	"ycm",
842 	{true, true, true}, true, DBG_YSTORM_ID,
843 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
844 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
845 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
846 	YCM_REG_DBG_FORCE_FRAME,
847 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
848 };
849 
850 static struct block_defs block_pcm_defs = {
851 	"pcm",
852 	{true, true, true}, true, DBG_PSTORM_ID,
853 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
854 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
855 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
856 	PCM_REG_DBG_FORCE_FRAME,
857 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
858 };
859 
860 static struct block_defs block_qm_defs = {
861 	"qm",
862 	{true, true, true}, false, 0,
863 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
864 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
865 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
866 	QM_REG_DBG_FORCE_FRAME,
867 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
868 };
869 
870 static struct block_defs block_tm_defs = {
871 	"tm",
872 	{true, true, true}, false, 0,
873 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
874 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
875 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
876 	TM_REG_DBG_FORCE_FRAME,
877 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
878 };
879 
880 static struct block_defs block_dorq_defs = {
881 	"dorq",
882 	{true, true, true}, false, 0,
883 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
884 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
885 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
886 	DORQ_REG_DBG_FORCE_FRAME,
887 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
888 };
889 
890 static struct block_defs block_brb_defs = {
891 	"brb",
892 	{true, true, true}, false, 0,
893 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
894 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
895 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
896 	BRB_REG_DBG_FORCE_FRAME,
897 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
898 };
899 
900 static struct block_defs block_src_defs = {
901 	"src",
902 	{true, true, true}, false, 0,
903 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
904 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
905 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
906 	SRC_REG_DBG_FORCE_FRAME,
907 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
908 };
909 
910 static struct block_defs block_prs_defs = {
911 	"prs",
912 	{true, true, true}, false, 0,
913 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
914 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
915 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
916 	PRS_REG_DBG_FORCE_FRAME,
917 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
918 };
919 
920 static struct block_defs block_tsdm_defs = {
921 	"tsdm",
922 	{true, true, true}, true, DBG_TSTORM_ID,
923 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
924 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
925 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
926 	TSDM_REG_DBG_FORCE_FRAME,
927 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
928 };
929 
930 static struct block_defs block_msdm_defs = {
931 	"msdm",
932 	{true, true, true}, true, DBG_MSTORM_ID,
933 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
934 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
935 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
936 	MSDM_REG_DBG_FORCE_FRAME,
937 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
938 };
939 
940 static struct block_defs block_usdm_defs = {
941 	"usdm",
942 	{true, true, true}, true, DBG_USTORM_ID,
943 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
944 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
945 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
946 	USDM_REG_DBG_FORCE_FRAME,
947 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
948 };
949 
950 static struct block_defs block_xsdm_defs = {
951 	"xsdm",
952 	{true, true, true}, true, DBG_XSTORM_ID,
953 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
954 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
955 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
956 	XSDM_REG_DBG_FORCE_FRAME,
957 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
958 };
959 
960 static struct block_defs block_ysdm_defs = {
961 	"ysdm",
962 	{true, true, true}, true, DBG_YSTORM_ID,
963 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
964 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
965 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
966 	YSDM_REG_DBG_FORCE_FRAME,
967 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
968 };
969 
970 static struct block_defs block_psdm_defs = {
971 	"psdm",
972 	{true, true, true}, true, DBG_PSTORM_ID,
973 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
974 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
975 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
976 	PSDM_REG_DBG_FORCE_FRAME,
977 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
978 };
979 
980 static struct block_defs block_tsem_defs = {
981 	"tsem",
982 	{true, true, true}, true, DBG_TSTORM_ID,
983 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
984 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
985 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
986 	TSEM_REG_DBG_FORCE_FRAME,
987 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
988 };
989 
990 static struct block_defs block_msem_defs = {
991 	"msem",
992 	{true, true, true}, true, DBG_MSTORM_ID,
993 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
994 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
995 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
996 	MSEM_REG_DBG_FORCE_FRAME,
997 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
998 };
999 
1000 static struct block_defs block_usem_defs = {
1001 	"usem",
1002 	{true, true, true}, true, DBG_USTORM_ID,
1003 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1004 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1005 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1006 	USEM_REG_DBG_FORCE_FRAME,
1007 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1008 };
1009 
1010 static struct block_defs block_xsem_defs = {
1011 	"xsem",
1012 	{true, true, true}, true, DBG_XSTORM_ID,
1013 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1014 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1015 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1016 	XSEM_REG_DBG_FORCE_FRAME,
1017 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1018 };
1019 
1020 static struct block_defs block_ysem_defs = {
1021 	"ysem",
1022 	{true, true, true}, true, DBG_YSTORM_ID,
1023 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1024 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1025 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1026 	YSEM_REG_DBG_FORCE_FRAME,
1027 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1028 };
1029 
1030 static struct block_defs block_psem_defs = {
1031 	"psem",
1032 	{true, true, true}, true, DBG_PSTORM_ID,
1033 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1034 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1035 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1036 	PSEM_REG_DBG_FORCE_FRAME,
1037 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1038 };
1039 
1040 static struct block_defs block_rss_defs = {
1041 	"rss",
1042 	{true, true, true}, false, 0,
1043 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1044 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1045 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1046 	RSS_REG_DBG_FORCE_FRAME,
1047 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1048 };
1049 
1050 static struct block_defs block_tmld_defs = {
1051 	"tmld",
1052 	{true, true, true}, false, 0,
1053 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1054 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1055 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1056 	TMLD_REG_DBG_FORCE_FRAME,
1057 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1058 };
1059 
1060 static struct block_defs block_muld_defs = {
1061 	"muld",
1062 	{true, true, true}, false, 0,
1063 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1064 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1065 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1066 	MULD_REG_DBG_FORCE_FRAME,
1067 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1068 };
1069 
1070 static struct block_defs block_yuld_defs = {
1071 	"yuld",
1072 	{true, true, false}, false, 0,
1073 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1074 	 MAX_DBG_BUS_CLIENTS},
1075 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1076 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1077 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1078 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1079 	15
1080 };
1081 
1082 static struct block_defs block_xyld_defs = {
1083 	"xyld",
1084 	{true, true, true}, false, 0,
1085 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1086 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1087 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1088 	XYLD_REG_DBG_FORCE_FRAME,
1089 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1090 };
1091 
1092 static struct block_defs block_ptld_defs = {
1093 	"ptld",
1094 	{false, false, true}, false, 0,
1095 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1096 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1097 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1098 	PTLD_REG_DBG_FORCE_FRAME_E5,
1099 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1100 	28
1101 };
1102 
1103 static struct block_defs block_ypld_defs = {
1104 	"ypld",
1105 	{false, false, true}, false, 0,
1106 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1107 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1108 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1109 	YPLD_REG_DBG_FORCE_FRAME_E5,
1110 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1111 	27
1112 };
1113 
1114 static struct block_defs block_prm_defs = {
1115 	"prm",
1116 	{true, true, true}, false, 0,
1117 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1118 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1119 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1120 	PRM_REG_DBG_FORCE_FRAME,
1121 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1122 };
1123 
1124 static struct block_defs block_pbf_pb1_defs = {
1125 	"pbf_pb1",
1126 	{true, true, true}, false, 0,
1127 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1128 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1129 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1130 	PBF_PB1_REG_DBG_FORCE_FRAME,
1131 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1132 	11
1133 };
1134 
1135 static struct block_defs block_pbf_pb2_defs = {
1136 	"pbf_pb2",
1137 	{true, true, true}, false, 0,
1138 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1139 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1140 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1141 	PBF_PB2_REG_DBG_FORCE_FRAME,
1142 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1143 	12
1144 };
1145 
1146 static struct block_defs block_rpb_defs = {
1147 	"rpb",
1148 	{true, true, true}, false, 0,
1149 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1150 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1151 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1152 	RPB_REG_DBG_FORCE_FRAME,
1153 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1154 };
1155 
1156 static struct block_defs block_btb_defs = {
1157 	"btb",
1158 	{true, true, true}, false, 0,
1159 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1160 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1161 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1162 	BTB_REG_DBG_FORCE_FRAME,
1163 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1164 };
1165 
1166 static struct block_defs block_pbf_defs = {
1167 	"pbf",
1168 	{true, true, true}, false, 0,
1169 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1170 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1171 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1172 	PBF_REG_DBG_FORCE_FRAME,
1173 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1174 };
1175 
1176 static struct block_defs block_rdif_defs = {
1177 	"rdif",
1178 	{true, true, true}, false, 0,
1179 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1180 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1181 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1182 	RDIF_REG_DBG_FORCE_FRAME,
1183 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1184 };
1185 
1186 static struct block_defs block_tdif_defs = {
1187 	"tdif",
1188 	{true, true, true}, false, 0,
1189 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1190 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1191 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1192 	TDIF_REG_DBG_FORCE_FRAME,
1193 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1194 };
1195 
1196 static struct block_defs block_cdu_defs = {
1197 	"cdu",
1198 	{true, true, true}, false, 0,
1199 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1200 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1201 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1202 	CDU_REG_DBG_FORCE_FRAME,
1203 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1204 };
1205 
1206 static struct block_defs block_ccfc_defs = {
1207 	"ccfc",
1208 	{true, true, true}, false, 0,
1209 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1210 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1211 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1212 	CCFC_REG_DBG_FORCE_FRAME,
1213 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1214 };
1215 
1216 static struct block_defs block_tcfc_defs = {
1217 	"tcfc",
1218 	{true, true, true}, false, 0,
1219 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1220 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1221 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1222 	TCFC_REG_DBG_FORCE_FRAME,
1223 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1224 };
1225 
1226 static struct block_defs block_igu_defs = {
1227 	"igu",
1228 	{true, true, true}, false, 0,
1229 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1230 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1231 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1232 	IGU_REG_DBG_FORCE_FRAME,
1233 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1234 };
1235 
1236 static struct block_defs block_cau_defs = {
1237 	"cau",
1238 	{true, true, true}, false, 0,
1239 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1240 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1241 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1242 	CAU_REG_DBG_FORCE_FRAME,
1243 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1244 };
1245 
1246 static struct block_defs block_rgfs_defs = {
1247 	"rgfs", {false, false, true}, false, 0,
1248 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1249 	0, 0, 0, 0, 0,
1250 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1251 };
1252 
1253 static struct block_defs block_rgsrc_defs = {
1254 	"rgsrc",
1255 	{false, false, true}, false, 0,
1256 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1257 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1258 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1259 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1260 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1261 	30
1262 };
1263 
1264 static struct block_defs block_tgfs_defs = {
1265 	"tgfs", {false, false, true}, false, 0,
1266 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1267 	0, 0, 0, 0, 0,
1268 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1269 };
1270 
1271 static struct block_defs block_tgsrc_defs = {
1272 	"tgsrc",
1273 	{false, false, true}, false, 0,
1274 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1275 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1276 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1277 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1278 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1279 	31
1280 };
1281 
1282 static struct block_defs block_umac_defs = {
1283 	"umac",
1284 	{true, true, true}, false, 0,
1285 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1286 	 DBG_BUS_CLIENT_RBCZ},
1287 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1288 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1289 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1290 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1291 };
1292 
1293 static struct block_defs block_xmac_defs = {
1294 	"xmac", {true, false, false}, false, 0,
1295 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1296 	0, 0, 0, 0, 0,
1297 	false, false, MAX_DBG_RESET_REGS, 0
1298 };
1299 
1300 static struct block_defs block_dbg_defs = {
1301 	"dbg", {true, true, true}, false, 0,
1302 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1303 	0, 0, 0, 0, 0,
1304 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1305 };
1306 
1307 static struct block_defs block_nig_defs = {
1308 	"nig",
1309 	{true, true, true}, false, 0,
1310 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1311 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1312 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1313 	NIG_REG_DBG_FORCE_FRAME,
1314 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1315 };
1316 
1317 static struct block_defs block_wol_defs = {
1318 	"wol",
1319 	{false, true, true}, false, 0,
1320 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1321 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1322 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1323 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1324 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1325 };
1326 
1327 static struct block_defs block_bmbn_defs = {
1328 	"bmbn",
1329 	{false, true, true}, false, 0,
1330 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1331 	 DBG_BUS_CLIENT_RBCB},
1332 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1333 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1334 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1335 	false, false, MAX_DBG_RESET_REGS, 0
1336 };
1337 
1338 static struct block_defs block_ipc_defs = {
1339 	"ipc", {true, true, true}, false, 0,
1340 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1341 	0, 0, 0, 0, 0,
1342 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1343 };
1344 
1345 static struct block_defs block_nwm_defs = {
1346 	"nwm",
1347 	{false, true, true}, false, 0,
1348 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1349 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1350 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1351 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1352 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1353 };
1354 
1355 static struct block_defs block_nws_defs = {
1356 	"nws",
1357 	{false, true, true}, false, 0,
1358 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1359 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1360 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1361 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1362 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1363 };
1364 
1365 static struct block_defs block_ms_defs = {
1366 	"ms",
1367 	{false, true, true}, false, 0,
1368 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1369 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1370 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1371 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1372 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1373 };
1374 
1375 static struct block_defs block_phy_pcie_defs = {
1376 	"phy_pcie",
1377 	{false, true, true}, false, 0,
1378 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1379 	 DBG_BUS_CLIENT_RBCH},
1380 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1381 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1382 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1383 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1384 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1385 	false, false, MAX_DBG_RESET_REGS, 0
1386 };
1387 
1388 static struct block_defs block_led_defs = {
1389 	"led", {false, true, true}, false, 0,
1390 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1391 	0, 0, 0, 0, 0,
1392 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1393 };
1394 
1395 static struct block_defs block_avs_wrap_defs = {
1396 	"avs_wrap", {false, true, false}, false, 0,
1397 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1398 	0, 0, 0, 0, 0,
1399 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1400 };
1401 
1402 static struct block_defs block_pxpreqbus_defs = {
1403 	"pxpreqbus", {false, false, false}, false, 0,
1404 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1405 	0, 0, 0, 0, 0,
1406 	false, false, MAX_DBG_RESET_REGS, 0
1407 };
1408 
1409 static struct block_defs block_misc_aeu_defs = {
1410 	"misc_aeu", {true, true, true}, false, 0,
1411 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1412 	0, 0, 0, 0, 0,
1413 	false, false, MAX_DBG_RESET_REGS, 0
1414 };
1415 
1416 static struct block_defs block_bar0_map_defs = {
1417 	"bar0_map", {true, true, true}, false, 0,
1418 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1419 	0, 0, 0, 0, 0,
1420 	false, false, MAX_DBG_RESET_REGS, 0
1421 };
1422 
1423 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1424 	&block_grc_defs,
1425 	&block_miscs_defs,
1426 	&block_misc_defs,
1427 	&block_dbu_defs,
1428 	&block_pglue_b_defs,
1429 	&block_cnig_defs,
1430 	&block_cpmu_defs,
1431 	&block_ncsi_defs,
1432 	&block_opte_defs,
1433 	&block_bmb_defs,
1434 	&block_pcie_defs,
1435 	&block_mcp_defs,
1436 	&block_mcp2_defs,
1437 	&block_pswhst_defs,
1438 	&block_pswhst2_defs,
1439 	&block_pswrd_defs,
1440 	&block_pswrd2_defs,
1441 	&block_pswwr_defs,
1442 	&block_pswwr2_defs,
1443 	&block_pswrq_defs,
1444 	&block_pswrq2_defs,
1445 	&block_pglcs_defs,
1446 	&block_dmae_defs,
1447 	&block_ptu_defs,
1448 	&block_tcm_defs,
1449 	&block_mcm_defs,
1450 	&block_ucm_defs,
1451 	&block_xcm_defs,
1452 	&block_ycm_defs,
1453 	&block_pcm_defs,
1454 	&block_qm_defs,
1455 	&block_tm_defs,
1456 	&block_dorq_defs,
1457 	&block_brb_defs,
1458 	&block_src_defs,
1459 	&block_prs_defs,
1460 	&block_tsdm_defs,
1461 	&block_msdm_defs,
1462 	&block_usdm_defs,
1463 	&block_xsdm_defs,
1464 	&block_ysdm_defs,
1465 	&block_psdm_defs,
1466 	&block_tsem_defs,
1467 	&block_msem_defs,
1468 	&block_usem_defs,
1469 	&block_xsem_defs,
1470 	&block_ysem_defs,
1471 	&block_psem_defs,
1472 	&block_rss_defs,
1473 	&block_tmld_defs,
1474 	&block_muld_defs,
1475 	&block_yuld_defs,
1476 	&block_xyld_defs,
1477 	&block_ptld_defs,
1478 	&block_ypld_defs,
1479 	&block_prm_defs,
1480 	&block_pbf_pb1_defs,
1481 	&block_pbf_pb2_defs,
1482 	&block_rpb_defs,
1483 	&block_btb_defs,
1484 	&block_pbf_defs,
1485 	&block_rdif_defs,
1486 	&block_tdif_defs,
1487 	&block_cdu_defs,
1488 	&block_ccfc_defs,
1489 	&block_tcfc_defs,
1490 	&block_igu_defs,
1491 	&block_cau_defs,
1492 	&block_rgfs_defs,
1493 	&block_rgsrc_defs,
1494 	&block_tgfs_defs,
1495 	&block_tgsrc_defs,
1496 	&block_umac_defs,
1497 	&block_xmac_defs,
1498 	&block_dbg_defs,
1499 	&block_nig_defs,
1500 	&block_wol_defs,
1501 	&block_bmbn_defs,
1502 	&block_ipc_defs,
1503 	&block_nwm_defs,
1504 	&block_nws_defs,
1505 	&block_ms_defs,
1506 	&block_phy_pcie_defs,
1507 	&block_led_defs,
1508 	&block_avs_wrap_defs,
1509 	&block_pxpreqbus_defs,
1510 	&block_misc_aeu_defs,
1511 	&block_bar0_map_defs,
1512 };
1513 
1514 static struct platform_defs s_platform_defs[] = {
1515 	{"asic", 1, 256, 32768},
1516 	{"reserved", 0, 0, 0},
1517 	{"reserved2", 0, 0, 0},
1518 	{"reserved3", 0, 0, 0}
1519 };
1520 
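/* Each entry below maps positionally onto struct grc_param_defs:
 * { {default_val per chip (bb, ah, reserved)}, min, max, is_preset,
 *   exclude_all_preset_val, crash_preset_val }
 */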
1521 static struct grc_param_defs s_grc_param_defs[] = {
1522 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1523 	{{1, 1, 1}, 0, 1, false, 1, 1},
1524 
1525 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1526 	{{1, 1, 1}, 0, 1, false, 1, 1},
1527 
1528 	/* DBG_GRC_PARAM_DUMP_USTORM */
1529 	{{1, 1, 1}, 0, 1, false, 1, 1},
1530 
1531 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1532 	{{1, 1, 1}, 0, 1, false, 1, 1},
1533 
1534 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1535 	{{1, 1, 1}, 0, 1, false, 1, 1},
1536 
1537 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1538 	{{1, 1, 1}, 0, 1, false, 1, 1},
1539 
1540 	/* DBG_GRC_PARAM_DUMP_REGS */
1541 	{{1, 1, 1}, 0, 1, false, 0, 1},
1542 
1543 	/* DBG_GRC_PARAM_DUMP_RAM */
1544 	{{1, 1, 1}, 0, 1, false, 0, 1},
1545 
1546 	/* DBG_GRC_PARAM_DUMP_PBUF */
1547 	{{1, 1, 1}, 0, 1, false, 0, 1},
1548 
1549 	/* DBG_GRC_PARAM_DUMP_IOR */
1550 	{{0, 0, 0}, 0, 1, false, 0, 1},
1551 
1552 	/* DBG_GRC_PARAM_DUMP_VFC */
1553 	{{0, 0, 0}, 0, 1, false, 0, 1},
1554 
1555 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1556 	{{1, 1, 1}, 0, 1, false, 0, 1},
1557 
1558 	/* DBG_GRC_PARAM_DUMP_ILT */
1559 	{{1, 1, 1}, 0, 1, false, 0, 1},
1560 
1561 	/* DBG_GRC_PARAM_DUMP_RSS */
1562 	{{1, 1, 1}, 0, 1, false, 0, 1},
1563 
1564 	/* DBG_GRC_PARAM_DUMP_CAU */
1565 	{{1, 1, 1}, 0, 1, false, 0, 1},
1566 
1567 	/* DBG_GRC_PARAM_DUMP_QM */
1568 	{{1, 1, 1}, 0, 1, false, 0, 1},
1569 
1570 	/* DBG_GRC_PARAM_DUMP_MCP */
1571 	{{1, 1, 1}, 0, 1, false, 0, 1},
1572 
1573 	/* DBG_GRC_PARAM_RESERVED */
1574 	{{1, 1, 1}, 0, 1, false, 0, 1},
1575 
1576 	/* DBG_GRC_PARAM_DUMP_CFC */
1577 	{{1, 1, 1}, 0, 1, false, 0, 1},
1578 
1579 	/* DBG_GRC_PARAM_DUMP_IGU */
1580 	{{1, 1, 1}, 0, 1, false, 0, 1},
1581 
1582 	/* DBG_GRC_PARAM_DUMP_BRB */
1583 	{{0, 0, 0}, 0, 1, false, 0, 1},
1584 
1585 	/* DBG_GRC_PARAM_DUMP_BTB */
1586 	{{0, 0, 0}, 0, 1, false, 0, 1},
1587 
1588 	/* DBG_GRC_PARAM_DUMP_BMB */
1589 	{{0, 0, 0}, 0, 1, false, 0, 1},
1590 
1591 	/* DBG_GRC_PARAM_DUMP_NIG */
1592 	{{1, 1, 1}, 0, 1, false, 0, 1},
1593 
1594 	/* DBG_GRC_PARAM_DUMP_MULD */
1595 	{{1, 1, 1}, 0, 1, false, 0, 1},
1596 
1597 	/* DBG_GRC_PARAM_DUMP_PRS */
1598 	{{1, 1, 1}, 0, 1, false, 0, 1},
1599 
1600 	/* DBG_GRC_PARAM_DUMP_DMAE */
1601 	{{1, 1, 1}, 0, 1, false, 0, 1},
1602 
1603 	/* DBG_GRC_PARAM_DUMP_TM */
1604 	{{1, 1, 1}, 0, 1, false, 0, 1},
1605 
1606 	/* DBG_GRC_PARAM_DUMP_SDM */
1607 	{{1, 1, 1}, 0, 1, false, 0, 1},
1608 
1609 	/* DBG_GRC_PARAM_DUMP_DIF */
1610 	{{1, 1, 1}, 0, 1, false, 0, 1},
1611 
1612 	/* DBG_GRC_PARAM_DUMP_STATIC */
1613 	{{1, 1, 1}, 0, 1, false, 0, 1},
1614 
1615 	/* DBG_GRC_PARAM_UNSTALL */
1616 	{{0, 0, 0}, 0, 1, false, 0, 0},
1617 
1618 	/* DBG_GRC_PARAM_NUM_LCIDS */
1619 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1620 	 MAX_LCIDS},
1621 
1622 	/* DBG_GRC_PARAM_NUM_LTIDS */
1623 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1624 	 MAX_LTIDS},
1625 
1626 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1627 	{{0, 0, 0}, 0, 1, true, 0, 0},
1628 
1629 	/* DBG_GRC_PARAM_CRASH */
1630 	{{0, 0, 0}, 0, 1, true, 0, 0},
1631 
1632 	/* DBG_GRC_PARAM_PARITY_SAFE */
1633 	{{0, 0, 0}, 0, 1, false, 1, 0},
1634 
1635 	/* DBG_GRC_PARAM_DUMP_CM */
1636 	{{1, 1, 1}, 0, 1, false, 0, 1},
1637 
1638 	/* DBG_GRC_PARAM_DUMP_PHY */
1639 	{{1, 1, 1}, 0, 1, false, 0, 1},
1640 
1641 	/* DBG_GRC_PARAM_NO_MCP */
1642 	{{0, 0, 0}, 0, 1, false, 0, 0},
1643 
1644 	/* DBG_GRC_PARAM_NO_FW_VER */
1645 	{{0, 0, 0}, 0, 1, false, 0, 0}
1646 };
1647 
1648 static struct rss_mem_defs s_rss_mem_defs[] = {
1649 	{ "rss_mem_cid", "rss_cid", 0, 32,
1650 	  {256, 320, 512} },
1651 
1652 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1653 	  {128, 208, 257} },
1654 
1655 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1656 	  {128, 208, 257} },
1657 
1658 	{ "rss_mem_info", "rss_info", 3072, 16,
1659 	  {128, 208, 256} },
1660 
1661 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1662 	  {16384, 26624, 32768} }
1663 };
1664 
1665 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1666 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1667 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1668 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1669 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1670 };
1671 
1672 static struct big_ram_defs s_big_ram_defs[] = {
1673 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1674 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1675 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1676 	  {153600, 180224, 282624} },
1677 
1678 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1679 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1680 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1681 	  {92160, 117760, 168960} },
1682 
1683 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1684 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1685 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1686 	  {36864, 36864, 36864} }
1687 };
1688 
1689 static struct reset_reg_defs s_reset_regs_defs[] = {
1690 	/* DBG_RESET_REG_MISCS_PL_UA */
1691 	{ MISCS_REG_RESET_PL_UA,
1692 	  {true, true, true}, {0x0, 0x0, 0x0} },
1693 
1694 	/* DBG_RESET_REG_MISCS_PL_HV */
1695 	{ MISCS_REG_RESET_PL_HV,
1696 	  {true, true, true}, {0x0, 0x400, 0x600} },
1697 
1698 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1699 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1700 	  {false, true, true}, {0x0, 0x0, 0x0} },
1701 
1702 	/* DBG_RESET_REG_MISC_PL_UA */
1703 	{ MISC_REG_RESET_PL_UA,
1704 	  {true, true, true}, {0x0, 0x0, 0x0} },
1705 
1706 	/* DBG_RESET_REG_MISC_PL_HV */
1707 	{ MISC_REG_RESET_PL_HV,
1708 	  {true, true, true}, {0x0, 0x0, 0x0} },
1709 
1710 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1711 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1712 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1713 
1714 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1715 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1716 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1717 
1718 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1719 	{ MISC_REG_RESET_PL_PDA_VAUX,
1720 	  {true, true, true}, {0x2, 0x2, 0x2} },
1721 };
1722 
1723 static struct phy_defs s_phy_defs[] = {
1724 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1725 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1726 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1727 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1728 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1729 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1730 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1731 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1732 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1733 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1734 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1735 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1736 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1737 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1738 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1739 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1740 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1741 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1742 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1743 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1744 };
1745 
1746 /**************************** Private Functions ******************************/
1747 
1748 /* Reads and returns a single dword from the specified unaligned buffer */
1749 static u32 qed_read_unaligned_dword(u8 *buf)
1750 {
1751 	u32 dword;
1752 
	memcpy(&dword, buf, sizeof(dword));
1754 	return dword;
1755 }
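
/* Note: memcpy() is used above instead of a direct u32 load so that the read
 * is safe even when buf is not 4-byte aligned (a cast-and-dereference would
 * be an unaligned access, which faults on strict-alignment architectures).
 */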
1756 
1757 /* Returns the value of the specified GRC param */
1758 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1759 			     enum dbg_grc_params grc_param)
1760 {
1761 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1762 
1763 	return dev_data->grc.param_val[grc_param];
1764 }
1765 
1766 /* Initializes the GRC parameters */
1767 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1768 {
1769 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1770 
1771 	if (!dev_data->grc.params_initialized) {
1772 		qed_dbg_grc_set_params_default(p_hwfn);
1773 		dev_data->grc.params_initialized = 1;
1774 	}
1775 }
1776 
1777 /* Initializes debug data for the specified device */
1778 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1779 					struct qed_ptt *p_ptt)
1780 {
1781 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1782 
1783 	if (dev_data->initialized)
1784 		return DBG_STATUS_OK;
1785 
1786 	if (QED_IS_K2(p_hwfn->cdev)) {
1787 		dev_data->chip_id = CHIP_K2;
1788 		dev_data->mode_enable[MODE_K2] = 1;
1789 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1790 		dev_data->chip_id = CHIP_BB;
1791 		dev_data->mode_enable[MODE_BB] = 1;
1792 	} else {
1793 		return DBG_STATUS_UNKNOWN_CHIP;
1794 	}
1795 
1796 	dev_data->platform_id = PLATFORM_ASIC;
1797 	dev_data->mode_enable[MODE_ASIC] = 1;
1798 
1799 	/* Initializes the GRC parameters */
1800 	qed_dbg_grc_init_params(p_hwfn);
1801 
1802 	dev_data->use_dmae = true;
1803 	dev_data->num_regs_read = 0;
1804 	dev_data->initialized = 1;
1805 
1806 	return DBG_STATUS_OK;
1807 }
1808 
1809 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1810 						    enum block_id block_id)
1811 {
1812 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1813 
1814 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1815 						       MAX_CHIP_IDS +
1816 						       dev_data->chip_id];
1817 }
1818 
1819 /* Reads the FW info structure for the specified Storm from the chip,
1820  * and writes it to the specified fw_info pointer.
1821  */
1822 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1823 			     struct qed_ptt *p_ptt,
1824 			     u8 storm_id, struct fw_info *fw_info)
1825 {
1826 	struct storm_defs *storm = &s_storm_defs[storm_id];
1827 	struct fw_info_location fw_info_location;
1828 	u32 addr, i, *dest;
1829 
1830 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1831 	memset(fw_info, 0, sizeof(*fw_info));
1832 
1833 	/* Read first the address that points to fw_info location.
1834 	 * The address is located in the last line of the Storm RAM.
1835 	 */
1836 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1837 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1838 	       sizeof(fw_info_location);
1839 	dest = (u32 *)&fw_info_location;
1840 
1841 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1842 	     i++, addr += BYTES_IN_DWORD)
1843 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1844 
1845 	/* Read FW version info from Storm RAM */
1846 	if (fw_info_location.size > 0 && fw_info_location.size <=
1847 	    sizeof(*fw_info)) {
1848 		addr = fw_info_location.grc_addr;
1849 		dest = (u32 *)fw_info;
1850 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1851 		     i++, addr += BYTES_IN_DWORD)
1852 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1853 	}
1854 }
1855 
1856 /* Dumps the specified string to the specified buffer.
1857  * Returns the dumped size in bytes.
1858  */
1859 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1860 {
1861 	if (dump)
1862 		strcpy(dump_buf, str);
1863 
1864 	return (u32)strlen(str) + 1;
1865 }
1866 
1867 /* Dumps zeros to align the specified buffer to dwords.
1868  * Returns the dumped size in bytes.
1869  */
1870 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1871 {
1872 	u8 offset_in_dword, align_size;
1873 
1874 	offset_in_dword = (u8)(byte_offset & 0x3);
1875 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1876 
1877 	if (dump && align_size)
1878 		memset(dump_buf, 0, align_size);
1879 
1880 	return align_size;
1881 }
1882 
1883 /* Writes the specified string param to the specified buffer.
1884  * Returns the dumped size in dwords.
1885  */
1886 static u32 qed_dump_str_param(u32 *dump_buf,
1887 			      bool dump,
1888 			      const char *param_name, const char *param_val)
1889 {
1890 	char *char_buf = (char *)dump_buf;
1891 	u32 offset = 0;
1892 
1893 	/* Dump param name */
1894 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1895 
1896 	/* Indicate a string param value */
1897 	if (dump)
1898 		*(char_buf + offset) = 1;
1899 	offset++;
1900 
1901 	/* Dump param value */
1902 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1903 
1904 	/* Align buffer to next dword */
1905 	offset += qed_dump_align(char_buf + offset, dump, offset);
1906 
1907 	return BYTES_TO_DWORDS(offset);
1908 }
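
/* Illustration of the resulting layout (derived from the code above; the
 * param itself is hypothetical): dumping the string param "chip" = "bb"
 * produces the bytes
 *
 *   'c' 'h' 'i' 'p' '\0' 0x01 'b' 'b' '\0' 0x00 0x00 0x00
 *
 * i.e. a NUL-terminated name, a type byte of 1 (string value), a
 * NUL-terminated value and zero padding to the next dword boundary, for a
 * returned size of 3 dwords.
 */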
1909 
1910 /* Writes the specified numeric param to the specified buffer.
1911  * Returns the dumped size in dwords.
1912  */
1913 static u32 qed_dump_num_param(u32 *dump_buf,
1914 			      bool dump, const char *param_name, u32 param_val)
1915 {
1916 	char *char_buf = (char *)dump_buf;
1917 	u32 offset = 0;
1918 
1919 	/* Dump param name */
1920 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1921 
1922 	/* Indicate a numeric param value */
1923 	if (dump)
1924 		*(char_buf + offset) = 0;
1925 	offset++;
1926 
1927 	/* Align buffer to next dword */
1928 	offset += qed_dump_align(char_buf + offset, dump, offset);
1929 
1930 	/* Dump param value (and change offset from bytes to dwords) */
1931 	offset = BYTES_TO_DWORDS(offset);
1932 	if (dump)
1933 		*(dump_buf + offset) = param_val;
1934 	offset++;
1935 
1936 	return offset;
1937 }
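
/* Illustration of the resulting layout (derived from the code above; the
 * param itself is hypothetical): dumping the numeric param "count" = 3
 * produces
 *
 *   'c' 'o' 'u' 'n' 't' '\0' 0x00 0x00   (name, type byte of 0, pad to dword)
 *   0x00000003                           (value in its own dword)
 *
 * for a returned size of 3 dwords.
 */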
1938 
1939 /* Reads the FW version and writes it as a param to the specified buffer.
1940  * Returns the dumped size in dwords.
1941  */
1942 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1943 				 struct qed_ptt *p_ptt,
1944 				 u32 *dump_buf, bool dump)
1945 {
1946 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1947 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1948 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1949 	struct fw_info fw_info = { {0}, {0} };
1950 	u32 offset = 0;
1951 
1952 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1953 		/* Read FW image/version from PRAM in a non-reset SEMI */
1954 		bool found = false;
1955 		u8 storm_id;
1956 
1957 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1958 		     storm_id++) {
1959 			struct storm_defs *storm = &s_storm_defs[storm_id];
1960 
1961 			/* Read FW version/image */
1962 			if (dev_data->block_in_reset[storm->block_id])
1963 				continue;
1964 
1965 			/* Read FW info for the current Storm */
1966 			qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1967 
1968 			/* Create FW version/image strings */
1969 			if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1970 				     "%d_%d_%d_%d", fw_info.ver.num.major,
1971 				     fw_info.ver.num.minor, fw_info.ver.num.rev,
1972 				     fw_info.ver.num.eng) < 0)
1973 				DP_NOTICE(p_hwfn,
1974 					  "Unexpected debug error: invalid FW version string\n");
1975 			switch (fw_info.ver.image_id) {
1976 			case FW_IMG_MAIN:
1977 				strcpy(fw_img_str, "main");
1978 				break;
1979 			default:
1980 				strcpy(fw_img_str, "unknown");
1981 				break;
1982 			}
1983 
1984 			found = true;
1985 		}
1986 	}
1987 
1988 	/* Dump FW version, image and timestamp */
1989 	offset += qed_dump_str_param(dump_buf + offset,
1990 				     dump, "fw-version", fw_ver_str);
1991 	offset += qed_dump_str_param(dump_buf + offset,
1992 				     dump, "fw-image", fw_img_str);
1993 	offset += qed_dump_num_param(dump_buf + offset,
1994 				     dump,
1995 				     "fw-timestamp", fw_info.ver.timestamp);
1996 
1997 	return offset;
1998 }
1999 
2000 /* Reads the MFW version and writes it as a param to the specified buffer.
2001  * Returns the dumped size in dwords.
2002  */
2003 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2004 				  struct qed_ptt *p_ptt,
2005 				  u32 *dump_buf, bool dump)
2006 {
2007 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2008 
2009 	if (dump &&
2010 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2011 		u32 global_section_offsize, global_section_addr, mfw_ver;
2012 		u32 public_data_addr, global_section_offsize_addr;
2013 
2014 		/* Find MCP public data GRC address. Needs to be ORed with
2015 		 * MCP_REG_SCRATCH due to a HW bug.
2016 		 */
2017 		public_data_addr = qed_rd(p_hwfn,
2018 					  p_ptt,
2019 					  MISC_REG_SHARED_MEM_ADDR) |
2020 				   MCP_REG_SCRATCH;
2021 
2022 		/* Find MCP public global section offset */
2023 		global_section_offsize_addr = public_data_addr +
2024 					      offsetof(struct mcp_public_data,
2025 						       sections) +
2026 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2027 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2028 						global_section_offsize_addr);
2029 		global_section_addr =
2030 			MCP_REG_SCRATCH +
2031 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2032 
2033 		/* Read MFW version from MCP public global section */
2034 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2035 				 global_section_addr +
2036 				 offsetof(struct public_global, mfw_ver));
2037 
2038 		/* Dump MFW version param */
2039 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2040 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2041 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2042 			DP_NOTICE(p_hwfn,
2043 				  "Unexpected debug error: invalid MFW version string\n");
2044 	}
2045 
2046 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2047 }
2048 
2049 /* Writes a section header to the specified buffer.
2050  * Returns the dumped size in dwords.
2051  */
2052 static u32 qed_dump_section_hdr(u32 *dump_buf,
2053 				bool dump, const char *name, u32 num_params)
2054 {
2055 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2056 }
2057 
2058 /* Writes the common global params to the specified buffer.
2059  * Returns the dumped size in dwords.
2060  */
2061 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2062 					 struct qed_ptt *p_ptt,
2063 					 u32 *dump_buf,
2064 					 bool dump,
2065 					 u8 num_specific_global_params)
2066 {
2067 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2068 	u32 offset = 0;
2069 	u8 num_params;
2070 
2071 	/* Dump global params section header */
2072 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2073 	offset += qed_dump_section_hdr(dump_buf + offset,
2074 				       dump, "global_params", num_params);
2075 
2076 	/* Store params */
2077 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2078 	offset += qed_dump_mfw_ver_param(p_hwfn,
2079 					 p_ptt, dump_buf + offset, dump);
2080 	offset += qed_dump_num_param(dump_buf + offset,
2081 				     dump, "tools-version", TOOLS_VERSION);
2082 	offset += qed_dump_str_param(dump_buf + offset,
2083 				     dump,
2084 				     "chip",
2085 				     s_chip_defs[dev_data->chip_id].name);
2086 	offset += qed_dump_str_param(dump_buf + offset,
2087 				     dump,
2088 				     "platform",
2089 				     s_platform_defs[dev_data->platform_id].
2090 				     name);
2091 	offset +=
2092 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2093 			       p_hwfn->abs_pf_id);
2094 
2095 	return offset;
2096 }
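
/* For reference, the common global params dumped above are fw-version,
 * fw-image, fw-timestamp, mfw-version, tools-version, chip, platform and
 * pci-func; NUM_COMMON_GLOBAL_PARAMS is expected to match that count, and
 * callers pass num_specific_global_params for any params they append after
 * this call.
 */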
2097 
2098 /* Writes the "last" section (including CRC) to the specified buffer at the
2099  * given offset. Returns the dumped size in dwords.
2100  */
2101 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2102 {
2103 	u32 start_offset = offset;
2104 
2105 	/* Dump CRC section header */
2106 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2107 
2108 	/* Calculate CRC32 and add it to the dword after the "last" section */
2109 	if (dump)
2110 		*(dump_buf + offset) = ~crc32(0xffffffff,
2111 					      (u8 *)dump_buf,
2112 					      DWORDS_TO_BYTES(offset));
2113 
2114 	offset++;
2115 
2116 	return offset - start_offset;
2117 }
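
/* A dump consumer can validate the image by recomputing the CRC over all
 * dwords that precede the final one, e.g. (sketch only, not driver code;
 * size_in_dwords denotes the total dump size including this section):
 *
 *   u32 crc = ~crc32(0xffffffff, (u8 *)dump_buf,
 *		      DWORDS_TO_BYTES(size_in_dwords - 1));
 *   bool valid = (crc == dump_buf[size_in_dwords - 1]);
 */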
2118 
/* Updates the reset state of all blocks */
2120 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2121 					  struct qed_ptt *p_ptt)
2122 {
2123 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2124 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2125 	u32 i;
2126 
2127 	/* Read reset registers */
2128 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2129 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2130 			reg_val[i] = qed_rd(p_hwfn,
2131 					    p_ptt, s_reset_regs_defs[i].addr);
2132 
2133 	/* Check if blocks are in reset */
2134 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2135 		struct block_defs *block = s_block_defs[i];
2136 
2137 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2138 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2139 	}
2140 }
2141 
2142 /* Enable / disable the Debug block */
2143 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2144 				     struct qed_ptt *p_ptt, bool enable)
2145 {
2146 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2147 }
2148 
2149 /* Resets the Debug block */
2150 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2151 				    struct qed_ptt *p_ptt)
2152 {
2153 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2154 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2155 
2156 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2157 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2158 	new_reset_reg_val =
2159 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2160 
2161 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2162 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2163 }
2164 
2165 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2166 				     struct qed_ptt *p_ptt,
2167 				     enum dbg_bus_frame_modes mode)
2168 {
2169 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2170 }
2171 
2172 /* Enable / disable Debug Bus clients according to the specified mask
2173  * (1 = enable, 0 = disable).
2174  */
2175 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2176 				   struct qed_ptt *p_ptt, u32 client_mask)
2177 {
2178 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2179 }
2180 
2181 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2182 {
2183 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2184 	bool arg1, arg2;
2185 	const u32 *ptr;
2186 	u8 tree_val;
2187 
2188 	/* Get next element from modes tree buffer */
2189 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2190 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2191 
2192 	switch (tree_val) {
2193 	case INIT_MODE_OP_NOT:
2194 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2195 	case INIT_MODE_OP_OR:
2196 	case INIT_MODE_OP_AND:
2197 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2198 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2199 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2200 							arg2) : (arg1 && arg2);
2201 	default:
2202 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2203 	}
2204 }
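
/* The modes tree buffer is a prefix (pre-order) encoding of a boolean
 * expression: each byte is either an operator (INIT_MODE_OP_NOT / _OR /
 * _AND) followed by its operand sub-expression(s), or a mode index biased
 * by MAX_INIT_MODE_OPS. For example (illustrative bytes, not taken from a
 * real binary), the sequence
 *
 *   { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + MODE_ASIC,
 *     INIT_MODE_OP_NOT, MAX_INIT_MODE_OPS + MODE_K2 }
 *
 * evaluates to "ASIC && !K2".
 */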
2205 
2206 /* Returns true if the specified entity (indicated by GRC param) should be
2207  * included in the dump, false otherwise.
2208  */
2209 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2210 				enum dbg_grc_params grc_param)
2211 {
2212 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2213 }
2214 
/* Returns true if the specified Storm should be included in the dump, false
2216  * otherwise.
2217  */
2218 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2219 				      enum dbg_storms storm)
2220 {
2221 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2222 }
2223 
2224 /* Returns true if the specified memory should be included in the dump, false
2225  * otherwise.
2226  */
2227 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2228 				    enum block_id block_id, u8 mem_group_id)
2229 {
2230 	struct block_defs *block = s_block_defs[block_id];
2231 	u8 i;
2232 
2233 	/* Check Storm match */
2234 	if (block->associated_to_storm &&
2235 	    !qed_grc_is_storm_included(p_hwfn,
2236 				       (enum dbg_storms)block->storm_id))
2237 		return false;
2238 
2239 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2240 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2241 
2242 		if (mem_group_id == big_ram->mem_group_id ||
2243 		    mem_group_id == big_ram->ram_mem_group_id)
2244 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2245 	}
2246 
2247 	switch (mem_group_id) {
2248 	case MEM_GROUP_PXP_ILT:
2249 	case MEM_GROUP_PXP_MEM:
2250 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2251 	case MEM_GROUP_RAM:
2252 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2253 	case MEM_GROUP_PBUF:
2254 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2255 	case MEM_GROUP_CAU_MEM:
2256 	case MEM_GROUP_CAU_SB:
2257 	case MEM_GROUP_CAU_PI:
2258 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2259 	case MEM_GROUP_QM_MEM:
2260 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2261 	case MEM_GROUP_CFC_MEM:
2262 	case MEM_GROUP_CONN_CFC_MEM:
2263 	case MEM_GROUP_TASK_CFC_MEM:
2264 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2265 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2266 	case MEM_GROUP_IGU_MEM:
2267 	case MEM_GROUP_IGU_MSIX:
2268 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2269 	case MEM_GROUP_MULD_MEM:
2270 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2271 	case MEM_GROUP_PRS_MEM:
2272 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2273 	case MEM_GROUP_DMAE_MEM:
2274 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2275 	case MEM_GROUP_TM_MEM:
2276 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2277 	case MEM_GROUP_SDM_MEM:
2278 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2279 	case MEM_GROUP_TDIF_CTX:
2280 	case MEM_GROUP_RDIF_CTX:
2281 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2282 	case MEM_GROUP_CM_MEM:
2283 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2284 	case MEM_GROUP_IOR:
2285 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2286 	default:
2287 		return true;
2288 	}
2289 }
2290 
/* Stalls or unstalls all Storms that are included in the dump */
2292 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2293 				 struct qed_ptt *p_ptt, bool stall)
2294 {
2295 	u32 reg_addr;
2296 	u8 storm_id;
2297 
2298 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2299 		if (!qed_grc_is_storm_included(p_hwfn,
2300 					       (enum dbg_storms)storm_id))
2301 			continue;
2302 
2303 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2304 		    SEM_FAST_REG_STALL_0_BB_K2;
2305 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2306 	}
2307 
2308 	msleep(STALL_DELAY_MS);
2309 }
2310 
2311 /* Takes all blocks out of reset */
2312 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2313 				   struct qed_ptt *p_ptt)
2314 {
2315 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2316 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2317 	u32 block_id, i;
2318 
2319 	/* Fill reset regs values */
2320 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2321 		struct block_defs *block = s_block_defs[block_id];
2322 
2323 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2324 		    block->unreset)
2325 			reg_val[block->reset_reg] |=
2326 			    BIT(block->reset_bit_offset);
2327 	}
2328 
2329 	/* Write reset registers */
2330 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2331 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2332 			continue;
2333 
2334 		reg_val[i] |=
2335 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2336 
2337 		if (reg_val[i])
2338 			qed_wr(p_hwfn,
2339 			       p_ptt,
2340 			       s_reset_regs_defs[i].addr +
2341 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2342 	}
2343 }
2344 
2345 /* Returns the attention block data of the specified block */
2346 static const struct dbg_attn_block_type_data *
2347 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2348 {
2349 	const struct dbg_attn_block *base_attn_block_arr =
2350 		(const struct dbg_attn_block *)
2351 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2352 
2353 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2354 }
2355 
2356 /* Returns the attention registers of the specified block */
2357 static const struct dbg_attn_reg *
2358 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2359 			u8 *num_attn_regs)
2360 {
2361 	const struct dbg_attn_block_type_data *block_type_data =
2362 		qed_get_block_attn_data(block_id, attn_type);
2363 
2364 	*num_attn_regs = block_type_data->num_regs;
2365 
2366 	return &((const struct dbg_attn_reg *)
2367 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2368 							  regs_offset];
2369 }
2370 
2371 /* For each block, clear the status of all parities */
2372 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2373 				   struct qed_ptt *p_ptt)
2374 {
2375 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2376 	const struct dbg_attn_reg *attn_reg_arr;
2377 	u8 reg_idx, num_attn_regs;
2378 	u32 block_id;
2379 
2380 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2381 		if (dev_data->block_in_reset[block_id])
2382 			continue;
2383 
2384 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2385 						       ATTN_TYPE_PARITY,
2386 						       &num_attn_regs);
2387 
2388 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2389 			const struct dbg_attn_reg *reg_data =
2390 				&attn_reg_arr[reg_idx];
2391 			u16 modes_buf_offset;
2392 			bool eval_mode;
2393 
2394 			/* Check mode */
2395 			eval_mode = GET_FIELD(reg_data->mode.data,
2396 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2397 			modes_buf_offset =
2398 				GET_FIELD(reg_data->mode.data,
2399 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2400 
2401 			/* If Mode match: clear parity status */
2402 			if (!eval_mode ||
2403 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2404 				qed_rd(p_hwfn, p_ptt,
2405 				       DWORDS_TO_BYTES(reg_data->
2406 						       sts_clr_address));
2407 		}
2408 	}
2409 }
2410 
2411 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2412  * The following parameters are dumped:
2413  * - count:	 no. of dumped entries
2414  * - split:	 split type
2415  * - id:	 split ID (dumped only if split_id >= 0)
 * - param_name/param_val: a user parameter name/value pair (dumped only if
 *		 both are non-NULL).
2418  */
2419 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2420 				 bool dump,
2421 				 u32 num_reg_entries,
2422 				 const char *split_type,
2423 				 int split_id,
2424 				 const char *param_name, const char *param_val)
2425 {
2426 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2427 	u32 offset = 0;
2428 
2429 	offset += qed_dump_section_hdr(dump_buf + offset,
2430 				       dump, "grc_regs", num_params);
2431 	offset += qed_dump_num_param(dump_buf + offset,
2432 				     dump, "count", num_reg_entries);
2433 	offset += qed_dump_str_param(dump_buf + offset,
2434 				     dump, "split", split_type);
2435 	if (split_id >= 0)
2436 		offset += qed_dump_num_param(dump_buf + offset,
2437 					     dump, "id", split_id);
2438 	if (param_name && param_val)
2439 		offset += qed_dump_str_param(dump_buf + offset,
2440 					     dump, param_name, param_val);
2441 
2442 	return offset;
2443 }
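
/* Example of the header emitted above for a per-port split (illustrative
 * values): a "grc_regs" section with 3 params, followed by count = <no. of
 * entries>, split = "port" and id = <port number>. A user name/value pair,
 * when supplied, raises the param count to 4.
 */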
2444 
2445 /* Reads the specified registers into the specified buffer.
2446  * The addr and len arguments are specified in dwords.
2447  */
2448 void qed_read_regs(struct qed_hwfn *p_hwfn,
2449 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2450 {
2451 	u32 i;
2452 
2453 	for (i = 0; i < len; i++)
2454 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2455 }
2456 
2457 /* Dumps the GRC registers in the specified address range.
2458  * Returns the dumped size in dwords.
2459  * The addr and len arguments are specified in dwords.
2460  */
2461 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2462 				   struct qed_ptt *p_ptt,
2463 				   u32 *dump_buf,
2464 				   bool dump, u32 addr, u32 len, bool wide_bus)
2465 {
2466 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2467 
2468 	if (!dump)
2469 		return len;
2470 
2471 	/* Print log if needed */
2472 	dev_data->num_regs_read += len;
2473 	if (dev_data->num_regs_read >=
2474 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2475 		DP_VERBOSE(p_hwfn,
2476 			   QED_MSG_DEBUG,
2477 			   "Dumping %d registers...\n",
2478 			   dev_data->num_regs_read);
2479 		dev_data->num_regs_read = 0;
2480 	}
2481 
2482 	/* Try reading using DMAE */
2483 	if (dev_data->use_dmae &&
2484 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2485 	     wide_bus)) {
2486 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2487 				       (u64)(uintptr_t)(dump_buf), len, 0))
2488 			return len;
2489 		dev_data->use_dmae = 0;
2490 		DP_VERBOSE(p_hwfn,
2491 			   QED_MSG_DEBUG,
2492 			   "Failed reading from chip using DMAE, using GRC instead\n");
2493 	}
2494 
2495 	/* Read registers */
2496 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2497 
2498 	return len;
2499 }
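
/* Note: wide-bus memories are read via DMAE even below the length
 * threshold, presumably so that each wide entry is sampled in a single
 * transaction rather than as separate GRC dword reads that could observe
 * the entry at different times. After a single DMAE failure the code
 * permanently falls back to GRC reads for the remainder of the dump.
 */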
2500 
2501 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2502  * The addr and len arguments are specified in dwords.
2503  */
2504 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2505 				      bool dump, u32 addr, u32 len)
2506 {
2507 	if (dump)
2508 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2509 
2510 	return 1;
2511 }
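
/* Illustration (assuming REG_DUMP_LEN_SHIFT is 24, which is an assumption
 * here): an entry starting at dword address 0x1000 with a length of 2
 * dwords is encoded as the single header dword 0x02001000, i.e.
 * addr | (len << REG_DUMP_LEN_SHIFT).
 */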
2512 
2513 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2514  * The addr and len arguments are specified in dwords.
2515  */
2516 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2517 				  struct qed_ptt *p_ptt,
2518 				  u32 *dump_buf,
2519 				  bool dump, u32 addr, u32 len, bool wide_bus)
2520 {
2521 	u32 offset = 0;
2522 
2523 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2524 	offset += qed_grc_dump_addr_range(p_hwfn,
2525 					  p_ptt,
2526 					  dump_buf + offset,
2527 					  dump, addr, len, wide_bus);
2528 
2529 	return offset;
2530 }
2531 
2532 /* Dumps GRC registers sequence with skip cycle.
2533  * Returns the dumped size in dwords.
2534  * - addr:	start GRC address in dwords
2535  * - total_len:	total no. of dwords to dump
 * - read_len:	no. of consecutive dwords to read
2537  * - skip_len:	no. of dwords to skip (and fill with zeros)
2538  */
2539 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2540 				       struct qed_ptt *p_ptt,
2541 				       u32 *dump_buf,
2542 				       bool dump,
2543 				       u32 addr,
2544 				       u32 total_len,
2545 				       u32 read_len, u32 skip_len)
2546 {
2547 	u32 offset = 0, reg_offset = 0;
2548 
2549 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2550 
2551 	if (!dump)
2552 		return offset + total_len;
2553 
2554 	while (reg_offset < total_len) {
2555 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2556 
2557 		offset += qed_grc_dump_addr_range(p_hwfn,
2558 						  p_ptt,
2559 						  dump_buf + offset,
2560 						  dump, addr, curr_len, false);
2561 		reg_offset += curr_len;
2562 		addr += curr_len;
2563 
2564 		if (reg_offset < total_len) {
			/* Clamp the skip to the remaining dwords */
			curr_len = min_t(u32, skip_len, total_len - reg_offset);
2566 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2567 			offset += curr_len;
2568 			reg_offset += curr_len;
2569 			addr += curr_len;
2570 		}
2571 	}
2572 
2573 	return offset;
2574 }
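
/* Example of the skip cycle (as used for the RDIF/TDIF debug error info
 * dump below, with read_len = 7 and skip_len = 1): the output still spans
 * total_len dwords, but every 8th dword is written as zero instead of
 * being read from the chip.
 */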
2575 
/* Dumps GRC register entries. Returns the dumped size in dwords. */
2577 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2578 				     struct qed_ptt *p_ptt,
2579 				     struct dbg_array input_regs_arr,
2580 				     u32 *dump_buf,
2581 				     bool dump,
2582 				     bool block_enable[MAX_BLOCK_ID],
2583 				     u32 *num_dumped_reg_entries)
2584 {
2585 	u32 i, offset = 0, input_offset = 0;
2586 	bool mode_match = true;
2587 
2588 	*num_dumped_reg_entries = 0;
2589 
2590 	while (input_offset < input_regs_arr.size_in_dwords) {
2591 		const struct dbg_dump_cond_hdr *cond_hdr =
2592 		    (const struct dbg_dump_cond_hdr *)
2593 		    &input_regs_arr.ptr[input_offset++];
2594 		u16 modes_buf_offset;
2595 		bool eval_mode;
2596 
2597 		/* Check mode/block */
2598 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2599 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2600 		if (eval_mode) {
2601 			modes_buf_offset =
2602 				GET_FIELD(cond_hdr->mode.data,
2603 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2604 			mode_match = qed_is_mode_match(p_hwfn,
2605 						       &modes_buf_offset);
2606 		}
2607 
2608 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2609 			input_offset += cond_hdr->data_size;
2610 			continue;
2611 		}
2612 
2613 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2614 			const struct dbg_dump_reg *reg =
2615 			    (const struct dbg_dump_reg *)
2616 			    &input_regs_arr.ptr[input_offset];
2617 			u32 addr, len;
2618 			bool wide_bus;
2619 
2620 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2621 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2622 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2623 			offset += qed_grc_dump_reg_entry(p_hwfn,
2624 							 p_ptt,
2625 							 dump_buf + offset,
2626 							 dump,
2627 							 addr,
2628 							 len,
2629 							 wide_bus);
2630 			(*num_dumped_reg_entries)++;
2631 		}
2632 	}
2633 
2634 	return offset;
2635 }
2636 
/* Dumps a single split of GRC registers (header + entries).
 * Returns the dumped size in dwords.
 */
2638 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2639 				   struct qed_ptt *p_ptt,
2640 				   struct dbg_array input_regs_arr,
2641 				   u32 *dump_buf,
2642 				   bool dump,
2643 				   bool block_enable[MAX_BLOCK_ID],
2644 				   const char *split_type_name,
2645 				   u32 split_id,
2646 				   const char *param_name,
2647 				   const char *param_val)
2648 {
2649 	u32 num_dumped_reg_entries, offset;
2650 
2651 	/* Calculate register dump header size (and skip it for now) */
2652 	offset = qed_grc_dump_regs_hdr(dump_buf,
2653 				       false,
2654 				       0,
2655 				       split_type_name,
2656 				       split_id, param_name, param_val);
2657 
2658 	/* Dump registers */
2659 	offset += qed_grc_dump_regs_entries(p_hwfn,
2660 					    p_ptt,
2661 					    input_regs_arr,
2662 					    dump_buf + offset,
2663 					    dump,
2664 					    block_enable,
2665 					    &num_dumped_reg_entries);
2666 
2667 	/* Write register dump header */
2668 	if (dump && num_dumped_reg_entries > 0)
2669 		qed_grc_dump_regs_hdr(dump_buf,
2670 				      dump,
2671 				      num_dumped_reg_entries,
2672 				      split_type_name,
2673 				      split_id, param_name, param_val);
2674 
2675 	return num_dumped_reg_entries > 0 ? offset : 0;
2676 }
2677 
2678 /* Dumps registers according to the input registers array. Returns the dumped
2679  * size in dwords.
2680  */
2681 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2682 				  struct qed_ptt *p_ptt,
2683 				  u32 *dump_buf,
2684 				  bool dump,
2685 				  bool block_enable[MAX_BLOCK_ID],
2686 				  const char *param_name, const char *param_val)
2687 {
2688 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2689 	struct chip_platform_defs *chip_platform;
2690 	u32 offset = 0, input_offset = 0;
2691 	struct chip_defs *chip;
2692 	u8 port_id, pf_id, vf_id;
2693 	u16 fid;
2694 
2695 	chip = &s_chip_defs[dev_data->chip_id];
2696 	chip_platform = &chip->per_platform[dev_data->platform_id];
2697 
2698 	while (input_offset <
2699 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2700 		const struct dbg_dump_split_hdr *split_hdr;
2701 		struct dbg_array curr_input_regs_arr;
2702 		u32 split_data_size;
2703 		u8 split_type_id;
2704 
2705 		split_hdr =
2706 			(const struct dbg_dump_split_hdr *)
2707 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2708 		split_type_id =
2709 			GET_FIELD(split_hdr->hdr,
2710 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2711 		split_data_size =
2712 			GET_FIELD(split_hdr->hdr,
2713 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2714 		curr_input_regs_arr.ptr =
2715 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2716 		curr_input_regs_arr.size_in_dwords = split_data_size;
2717 
2718 		switch (split_type_id) {
2719 		case SPLIT_TYPE_NONE:
2720 			offset += qed_grc_dump_split_data(p_hwfn,
2721 							  p_ptt,
2722 							  curr_input_regs_arr,
2723 							  dump_buf + offset,
2724 							  dump,
2725 							  block_enable,
2726 							  "eng",
2727 							  (u32)(-1),
2728 							  param_name,
2729 							  param_val);
2730 			break;
2731 
2732 		case SPLIT_TYPE_PORT:
2733 			for (port_id = 0; port_id < chip_platform->num_ports;
2734 			     port_id++) {
2735 				if (dump)
2736 					qed_port_pretend(p_hwfn, p_ptt,
2737 							 port_id);
2738 				offset +=
2739 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2740 							    curr_input_regs_arr,
2741 							    dump_buf + offset,
2742 							    dump, block_enable,
2743 							    "port", port_id,
2744 							    param_name,
2745 							    param_val);
2746 			}
2747 			break;
2748 
2749 		case SPLIT_TYPE_PF:
2750 		case SPLIT_TYPE_PORT_PF:
2751 			for (pf_id = 0; pf_id < chip_platform->num_pfs;
2752 			     pf_id++) {
2753 				u8 pfid_shift =
2754 					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2755 
2756 				if (dump) {
2757 					fid = pf_id << pfid_shift;
2758 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2759 				}
2760 
2761 				offset +=
2762 				    qed_grc_dump_split_data(p_hwfn,
2763 							    p_ptt,
2764 							    curr_input_regs_arr,
2765 							    dump_buf + offset,
2766 							    dump,
2767 							    block_enable,
2768 							    "pf",
2769 							    pf_id,
2770 							    param_name,
2771 							    param_val);
2772 			}
2773 			break;
2774 
2775 		case SPLIT_TYPE_VF:
2776 			for (vf_id = 0; vf_id < chip_platform->num_vfs;
2777 			     vf_id++) {
2778 				u8 vfvalid_shift =
2779 					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2780 				u8 vfid_shift =
2781 					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2782 
2783 				if (dump) {
2784 					fid = BIT(vfvalid_shift) |
2785 					      (vf_id << vfid_shift);
2786 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2787 				}
2788 
2789 				offset +=
2790 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2791 							    curr_input_regs_arr,
2792 							    dump_buf + offset,
2793 							    dump, block_enable,
2794 							    "vf", vf_id,
2795 							    param_name,
2796 							    param_val);
2797 			}
2798 			break;
2799 
2800 		default:
2801 			break;
2802 		}
2803 
2804 		input_offset += split_data_size;
2805 	}
2806 
2807 	/* Pretend to original PF */
2808 	if (dump) {
2809 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2810 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2811 	}
2812 
2813 	return offset;
2814 }
2815 
2816 /* Dump reset registers. Returns the dumped size in dwords. */
2817 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2818 				   struct qed_ptt *p_ptt,
2819 				   u32 *dump_buf, bool dump)
2820 {
2821 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2822 	u32 i, offset = 0, num_regs = 0;
2823 
2824 	/* Calculate header size */
2825 	offset += qed_grc_dump_regs_hdr(dump_buf,
2826 					false, 0, "eng", -1, NULL, NULL);
2827 
2828 	/* Write reset registers */
2829 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2830 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2831 			continue;
2832 
2833 		offset += qed_grc_dump_reg_entry(p_hwfn,
2834 						 p_ptt,
2835 						 dump_buf + offset,
2836 						 dump,
2837 						 BYTES_TO_DWORDS
2838 						 (s_reset_regs_defs[i].addr), 1,
2839 						 false);
2840 		num_regs++;
2841 	}
2842 
2843 	/* Write header */
2844 	if (dump)
2845 		qed_grc_dump_regs_hdr(dump_buf,
2846 				      true, num_regs, "eng", -1, NULL, NULL);
2847 
2848 	return offset;
2849 }
2850 
2851 /* Dump registers that are modified during GRC Dump and therefore must be
2852  * dumped first. Returns the dumped size in dwords.
2853  */
2854 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2855 				      struct qed_ptt *p_ptt,
2856 				      u32 *dump_buf, bool dump)
2857 {
2858 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2859 	u32 block_id, offset = 0, num_reg_entries = 0;
2860 	const struct dbg_attn_reg *attn_reg_arr;
2861 	u8 storm_id, reg_idx, num_attn_regs;
2862 
2863 	/* Calculate header size */
2864 	offset += qed_grc_dump_regs_hdr(dump_buf,
2865 					false, 0, "eng", -1, NULL, NULL);
2866 
2867 	/* Write parity registers */
2868 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2869 		if (dev_data->block_in_reset[block_id] && dump)
2870 			continue;
2871 
2872 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2873 						       ATTN_TYPE_PARITY,
2874 						       &num_attn_regs);
2875 
2876 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2877 			const struct dbg_attn_reg *reg_data =
2878 				&attn_reg_arr[reg_idx];
2879 			u16 modes_buf_offset;
2880 			bool eval_mode;
2881 			u32 addr;
2882 
2883 			/* Check mode */
2884 			eval_mode = GET_FIELD(reg_data->mode.data,
2885 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2886 			modes_buf_offset =
2887 				GET_FIELD(reg_data->mode.data,
2888 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2889 			if (eval_mode &&
2890 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2891 				continue;
2892 
2893 			/* Mode match: read & dump registers */
2894 			addr = reg_data->mask_address;
2895 			offset += qed_grc_dump_reg_entry(p_hwfn,
2896 							 p_ptt,
2897 							 dump_buf + offset,
2898 							 dump,
2899 							 addr,
2900 							 1, false);
2901 			addr = GET_FIELD(reg_data->data,
2902 					 DBG_ATTN_REG_STS_ADDRESS);
2903 			offset += qed_grc_dump_reg_entry(p_hwfn,
2904 							 p_ptt,
2905 							 dump_buf + offset,
2906 							 dump,
2907 							 addr,
2908 							 1, false);
2909 			num_reg_entries += 2;
2910 		}
2911 	}
2912 
2913 	/* Write Storm stall status registers */
2914 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2915 		struct storm_defs *storm = &s_storm_defs[storm_id];
2916 		u32 addr;
2917 
2918 		if (dev_data->block_in_reset[storm->block_id] && dump)
2919 			continue;
2920 
		addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				       SEM_FAST_REG_STALLED);
2924 		offset += qed_grc_dump_reg_entry(p_hwfn,
2925 						 p_ptt,
2926 						 dump_buf + offset,
2927 						 dump,
2928 						 addr,
2929 						 1,
2930 						 false);
2931 		num_reg_entries++;
2932 	}
2933 
2934 	/* Write header */
2935 	if (dump)
2936 		qed_grc_dump_regs_hdr(dump_buf,
2937 				      true,
2938 				      num_reg_entries, "eng", -1, NULL, NULL);
2939 
2940 	return offset;
2941 }
2942 
2943 /* Dumps registers that can't be represented in the debug arrays */
2944 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2945 				     struct qed_ptt *p_ptt,
2946 				     u32 *dump_buf, bool dump)
2947 {
2948 	u32 offset = 0, addr;
2949 
2950 	offset += qed_grc_dump_regs_hdr(dump_buf,
2951 					dump, 2, "eng", -1, NULL, NULL);
2952 
	/* Dump RDIF/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
	 * skipped).
	 */
2956 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2957 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2958 					      p_ptt,
2959 					      dump_buf + offset,
2960 					      dump,
2961 					      addr,
2962 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2963 					      7,
2964 					      1);
2965 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2966 	offset +=
2967 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2968 					p_ptt,
2969 					dump_buf + offset,
2970 					dump,
2971 					addr,
2972 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2973 					7,
2974 					1);
2975 
2976 	return offset;
2977 }
2978 
2979 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2980  * dwords. The following parameters are dumped:
2981  * - name:	   dumped only if it's not NULL.
2982  * - addr:	   in dwords, dumped only if name is NULL.
2983  * - len:	   in dwords, always dumped.
 * - width:	   dumped only if non-zero.
 * - packed:	   dumped only if true.
2986  * - mem_group:	   always dumped.
2987  * - is_storm:	   true only if the memory is related to a Storm.
2988  * - storm_letter: valid only if is_storm is true.
2989  *
2990  */
2991 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2992 				u32 *dump_buf,
2993 				bool dump,
2994 				const char *name,
2995 				u32 addr,
2996 				u32 len,
2997 				u32 bit_width,
2998 				bool packed,
2999 				const char *mem_group,
3000 				bool is_storm, char storm_letter)
3001 {
3002 	u8 num_params = 3;
3003 	u32 offset = 0;
3004 	char buf[64];
3005 
3006 	if (!len)
3007 		DP_NOTICE(p_hwfn,
3008 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3009 
3010 	if (bit_width)
3011 		num_params++;
3012 	if (packed)
3013 		num_params++;
3014 
3015 	/* Dump section header */
3016 	offset += qed_dump_section_hdr(dump_buf + offset,
3017 				       dump, "grc_mem", num_params);
3018 
3019 	if (name) {
3020 		/* Dump name */
3021 		if (is_storm) {
3022 			strcpy(buf, "?STORM_");
3023 			buf[0] = storm_letter;
3024 			strcpy(buf + strlen(buf), name);
3025 		} else {
3026 			strcpy(buf, name);
3027 		}
3028 
3029 		offset += qed_dump_str_param(dump_buf + offset,
3030 					     dump, "name", buf);
3031 	} else {
3032 		/* Dump address */
3033 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3034 
3035 		offset += qed_dump_num_param(dump_buf + offset,
3036 					     dump, "addr", addr_in_bytes);
3037 	}
3038 
3039 	/* Dump len */
3040 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3041 
3042 	/* Dump bit width */
3043 	if (bit_width)
3044 		offset += qed_dump_num_param(dump_buf + offset,
3045 					     dump, "width", bit_width);
3046 
3047 	/* Dump packed */
3048 	if (packed)
3049 		offset += qed_dump_num_param(dump_buf + offset,
3050 					     dump, "packed", 1);
3051 
3052 	/* Dump reg type */
3053 	if (is_storm) {
3054 		strcpy(buf, "?STORM_");
3055 		buf[0] = storm_letter;
3056 		strcpy(buf + strlen(buf), mem_group);
3057 	} else {
3058 		strcpy(buf, mem_group);
3059 	}
3060 
3061 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3062 
3063 	return offset;
3064 }
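
/* Illustration (hypothetical values): a 256-dword memory dumped by address
 * yields a "grc_mem" section with 3 params, e.g. addr = 0x140000 (bytes),
 * len = 256 and type = "RAM". A named memory replaces addr with a name
 * param, and a non-zero width / a true packed flag add one param each.
 */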
3065 
/* Dumps a single GRC memory. If name is NULL, the memory is dumped by address.
3067  * Returns the dumped size in dwords.
3068  * The addr and len arguments are specified in dwords.
3069  */
3070 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3071 			    struct qed_ptt *p_ptt,
3072 			    u32 *dump_buf,
3073 			    bool dump,
3074 			    const char *name,
3075 			    u32 addr,
3076 			    u32 len,
3077 			    bool wide_bus,
3078 			    u32 bit_width,
3079 			    bool packed,
3080 			    const char *mem_group,
3081 			    bool is_storm, char storm_letter)
3082 {
3083 	u32 offset = 0;
3084 
3085 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3086 				       dump_buf + offset,
3087 				       dump,
3088 				       name,
3089 				       addr,
3090 				       len,
3091 				       bit_width,
3092 				       packed,
3093 				       mem_group, is_storm, storm_letter);
3094 	offset += qed_grc_dump_addr_range(p_hwfn,
3095 					  p_ptt,
3096 					  dump_buf + offset,
3097 					  dump, addr, len, wide_bus);
3098 
3099 	return offset;
3100 }
3101 
/* Dumps GRC memory entries. Returns the dumped size in dwords. */
3103 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3104 				    struct qed_ptt *p_ptt,
3105 				    struct dbg_array input_mems_arr,
3106 				    u32 *dump_buf, bool dump)
3107 {
3108 	u32 i, offset = 0, input_offset = 0;
3109 	bool mode_match = true;
3110 
3111 	while (input_offset < input_mems_arr.size_in_dwords) {
3112 		const struct dbg_dump_cond_hdr *cond_hdr;
3113 		u16 modes_buf_offset;
3114 		u32 num_entries;
3115 		bool eval_mode;
3116 
3117 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3118 			   &input_mems_arr.ptr[input_offset++];
3119 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3120 
3121 		/* Check required mode */
3122 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3123 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3124 		if (eval_mode) {
3125 			modes_buf_offset =
3126 				GET_FIELD(cond_hdr->mode.data,
3127 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3128 			mode_match = qed_is_mode_match(p_hwfn,
3129 						       &modes_buf_offset);
3130 		}
3131 
3132 		if (!mode_match) {
3133 			input_offset += cond_hdr->data_size;
3134 			continue;
3135 		}
3136 
3137 		for (i = 0; i < num_entries;
3138 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3139 			const struct dbg_dump_mem *mem =
3140 				(const struct dbg_dump_mem *)
3141 				&input_mems_arr.ptr[input_offset];
3142 			u8 mem_group_id = GET_FIELD(mem->dword0,
3143 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3144 			bool is_storm = false, mem_wide_bus;
3145 			enum dbg_grc_params grc_param;
3146 			char storm_letter = 'a';
3147 			enum block_id block_id;
3148 			u32 mem_addr, mem_len;
3149 
3150 			if (mem_group_id >= MEM_GROUPS_NUM) {
3151 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3152 				return 0;
3153 			}
3154 
3155 			block_id = (enum block_id)cond_hdr->block_id;
3156 			if (!qed_grc_is_mem_included(p_hwfn,
3157 						     block_id,
3158 						     mem_group_id))
3159 				continue;
3160 
3161 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3162 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3163 			mem_wide_bus = GET_FIELD(mem->dword1,
3164 						 DBG_DUMP_MEM_WIDE_BUS);
3165 
3166 			/* Update memory length for CCFC/TCFC memories
3167 			 * according to number of LCIDs/LTIDs.
3168 			 */
3169 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3170 				if (mem_len % MAX_LCIDS) {
3171 					DP_NOTICE(p_hwfn,
3172 						  "Invalid CCFC connection memory size\n");
3173 					return 0;
3174 				}
3175 
3176 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3177 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3178 					  (mem_len / MAX_LCIDS);
3179 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3180 				if (mem_len % MAX_LTIDS) {
3181 					DP_NOTICE(p_hwfn,
3182 						  "Invalid TCFC task memory size\n");
3183 					return 0;
3184 				}
3185 
3186 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3187 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3188 					  (mem_len / MAX_LTIDS);
3189 			}
3190 
3191 			/* If memory is associated with Storm, update Storm
3192 			 * details.
3193 			 */
3194 			if (s_block_defs
3195 			    [cond_hdr->block_id]->associated_to_storm) {
3196 				is_storm = true;
3197 				storm_letter =
3198 				    s_storm_defs[s_block_defs
3199 						 [cond_hdr->block_id]->
3200 						 storm_id].letter;
3201 			}
3202 
3203 			/* Dump memory */
3204 			offset += qed_grc_dump_mem(p_hwfn,
3205 						p_ptt,
3206 						dump_buf + offset,
3207 						dump,
3208 						NULL,
3209 						mem_addr,
3210 						mem_len,
3211 						mem_wide_bus,
3212 						0,
3213 						false,
3214 						s_mem_group_names[mem_group_id],
3215 						is_storm,
3216 						storm_letter);
3217 		}
3218 	}
3219 
3220 	return offset;
3221 }
3222 
3223 /* Dumps GRC memories according to the input array dump_mem.
3224  * Returns the dumped size in dwords.
3225  */
3226 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3227 				 struct qed_ptt *p_ptt,
3228 				 u32 *dump_buf, bool dump)
3229 {
3230 	u32 offset = 0, input_offset = 0;
3231 
3232 	while (input_offset <
3233 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3234 		const struct dbg_dump_split_hdr *split_hdr;
3235 		struct dbg_array curr_input_mems_arr;
3236 		u32 split_data_size;
3237 		u8 split_type_id;
3238 
3239 		split_hdr = (const struct dbg_dump_split_hdr *)
3240 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3241 		split_type_id =
3242 			GET_FIELD(split_hdr->hdr,
3243 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3244 		split_data_size =
3245 			GET_FIELD(split_hdr->hdr,
3246 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3247 		curr_input_mems_arr.ptr =
3248 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3249 		curr_input_mems_arr.size_in_dwords = split_data_size;
3250 
3251 		switch (split_type_id) {
3252 		case SPLIT_TYPE_NONE:
3253 			offset += qed_grc_dump_mem_entries(p_hwfn,
3254 							   p_ptt,
3255 							   curr_input_mems_arr,
3256 							   dump_buf + offset,
3257 							   dump);
3258 			break;
3259 
3260 		default:
3261 			DP_NOTICE(p_hwfn,
3262 				  "Dumping split memories is currently not supported\n");
3263 			break;
3264 		}
3265 
3266 		input_offset += split_data_size;
3267 	}
3268 
3269 	return offset;
3270 }
3271 
3272 /* Dumps GRC context data for the specified Storm.
3273  * Returns the dumped size in dwords.
3274  * The lid_size argument is specified in quad-regs.
3275  */
3276 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3277 				 struct qed_ptt *p_ptt,
3278 				 u32 *dump_buf,
3279 				 bool dump,
3280 				 const char *name,
3281 				 u32 num_lids,
3282 				 u32 lid_size,
3283 				 u32 rd_reg_addr,
3284 				 u8 storm_id)
3285 {
3286 	struct storm_defs *storm = &s_storm_defs[storm_id];
3287 	u32 i, lid, total_size, offset = 0;
3288 
3289 	if (!lid_size)
3290 		return 0;
3291 
3292 	lid_size *= BYTES_IN_DWORD;
3293 	total_size = num_lids * lid_size;
3294 
3295 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3296 				       dump_buf + offset,
3297 				       dump,
3298 				       name,
3299 				       0,
3300 				       total_size,
3301 				       lid_size * 32,
3302 				       false, name, true, storm->letter);
3303 
3304 	if (!dump)
3305 		return offset + total_size;
3306 
3307 	/* Dump context data */
3308 	for (lid = 0; lid < num_lids; lid++) {
3309 		for (i = 0; i < lid_size; i++, offset++) {
3310 			qed_wr(p_hwfn,
3311 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3312 			*(dump_buf + offset) = qed_rd(p_hwfn,
3313 						      p_ptt, rd_reg_addr);
3314 		}
3315 	}
3316 
3317 	return offset;
3318 }
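
/* Note on the read sequence above: the value written to cm_ctx_wr_addr
 * selects what the CM block exposes at rd_reg_addr; per the expression
 * (i << 9) | lid, the lid occupies the low 9 bits and the dword index
 * within that lid occupies the bits above it, so each context is read out
 * one dword at a time.
 */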
3319 
3320 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3321 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3322 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3323 {
3324 	enum dbg_grc_params grc_param;
3325 	u32 offset = 0;
3326 	u8 storm_id;
3327 
3328 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3329 		struct storm_defs *storm = &s_storm_defs[storm_id];
3330 
3331 		if (!qed_grc_is_storm_included(p_hwfn,
3332 					       (enum dbg_storms)storm_id))
3333 			continue;
3334 
		/* Dump Conn AG context */
3336 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3337 		offset +=
3338 			qed_grc_dump_ctx_data(p_hwfn,
3339 					      p_ptt,
3340 					      dump_buf + offset,
3341 					      dump,
3342 					      "CONN_AG_CTX",
3343 					      qed_grc_get_param(p_hwfn,
3344 								grc_param),
3345 					      storm->cm_conn_ag_ctx_lid_size,
3346 					      storm->cm_conn_ag_ctx_rd_addr,
3347 					      storm_id);
3348 
		/* Dump Conn ST context */
3350 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3351 		offset +=
3352 			qed_grc_dump_ctx_data(p_hwfn,
3353 					      p_ptt,
3354 					      dump_buf + offset,
3355 					      dump,
3356 					      "CONN_ST_CTX",
3357 					      qed_grc_get_param(p_hwfn,
3358 								grc_param),
3359 					      storm->cm_conn_st_ctx_lid_size,
3360 					      storm->cm_conn_st_ctx_rd_addr,
3361 					      storm_id);
3362 
		/* Dump Task AG context */
3364 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3365 		offset +=
3366 			qed_grc_dump_ctx_data(p_hwfn,
3367 					      p_ptt,
3368 					      dump_buf + offset,
3369 					      dump,
3370 					      "TASK_AG_CTX",
3371 					      qed_grc_get_param(p_hwfn,
3372 								grc_param),
3373 					      storm->cm_task_ag_ctx_lid_size,
3374 					      storm->cm_task_ag_ctx_rd_addr,
3375 					      storm_id);
3376 
		/* Dump Task ST context */
3378 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3379 		offset +=
3380 			qed_grc_dump_ctx_data(p_hwfn,
3381 					      p_ptt,
3382 					      dump_buf + offset,
3383 					      dump,
3384 					      "TASK_ST_CTX",
3385 					      qed_grc_get_param(p_hwfn,
3386 								grc_param),
3387 					      storm->cm_task_st_ctx_lid_size,
3388 					      storm->cm_task_st_ctx_rd_addr,
3389 					      storm_id);
3390 	}
3391 
3392 	return offset;
3393 }
3394 
/* Dumps GRC IOR data. Returns the dumped size in dwords. */
3396 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3397 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3398 {
3399 	char buf[10] = "IOR_SET_?";
3400 	u32 addr, offset = 0;
3401 	u8 storm_id, set_id;
3402 
3403 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3404 		struct storm_defs *storm = &s_storm_defs[storm_id];
3405 
3406 		if (!qed_grc_is_storm_included(p_hwfn,
3407 					       (enum dbg_storms)storm_id))
3408 			continue;
3409 
3410 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3411 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3412 					       SEM_FAST_REG_STORM_REG_FILE) +
3413 			       IOR_SET_OFFSET(set_id);
3414 			buf[strlen(buf) - 1] = '0' + set_id;
3415 			offset += qed_grc_dump_mem(p_hwfn,
3416 						   p_ptt,
3417 						   dump_buf + offset,
3418 						   dump,
3419 						   buf,
3420 						   addr,
3421 						   IORS_PER_SET,
3422 						   false,
3423 						   32,
3424 						   false,
3425 						   "ior",
3426 						   true,
3427 						   storm->letter);
3428 		}
3429 	}
3430 
3431 	return offset;
3432 }
3433 
3434 /* Dump VFC CAM. Returns the dumped size in dwords. */
3435 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3436 				struct qed_ptt *p_ptt,
3437 				u32 *dump_buf, bool dump, u8 storm_id)
3438 {
3439 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3440 	struct storm_defs *storm = &s_storm_defs[storm_id];
3441 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3442 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
	u32 row, offset = 0;
3444 
3445 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3446 				       dump_buf + offset,
3447 				       dump,
3448 				       "vfc_cam",
3449 				       0,
3450 				       total_size,
3451 				       256,
3452 				       false, "vfc_cam", true, storm->letter);
3453 
3454 	if (!dump)
3455 		return offset + total_size;
3456 
3457 	/* Prepare CAM address */
3458 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3459 
3460 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3461 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3462 		/* Write VFC CAM command */
3463 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3464 		ARR_REG_WR(p_hwfn,
3465 			   p_ptt,
3466 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3467 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3468 
3469 		/* Write VFC CAM address */
3470 		ARR_REG_WR(p_hwfn,
3471 			   p_ptt,
3472 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3473 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3474 
3475 		/* Read VFC CAM read response */
3476 		ARR_REG_RD(p_hwfn,
3477 			   p_ptt,
3478 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3479 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3480 	}
3481 
3482 	return offset;
3483 }
3484 
3485 /* Dump VFC RAM. Returns the dumped size in dwords. */
3486 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3487 				struct qed_ptt *p_ptt,
3488 				u32 *dump_buf,
3489 				bool dump,
3490 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3491 {
3492 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3493 	struct storm_defs *storm = &s_storm_defs[storm_id];
3494 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3495 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
	u32 row, offset = 0;
3497 
3498 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3499 				       dump_buf + offset,
3500 				       dump,
3501 				       ram_defs->mem_name,
3502 				       0,
3503 				       total_size,
3504 				       256,
3505 				       false,
3506 				       ram_defs->type_name,
3507 				       true, storm->letter);
3508 
3509 	/* Prepare RAM address */
3510 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3511 
3512 	if (!dump)
3513 		return offset + total_size;
3514 
3515 	for (row = ram_defs->base_row;
3516 	     row < ram_defs->base_row + ram_defs->num_rows;
3517 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3518 		/* Write VFC RAM command */
3519 		ARR_REG_WR(p_hwfn,
3520 			   p_ptt,
3521 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3522 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3523 
3524 		/* Write VFC RAM address */
3525 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3526 		ARR_REG_WR(p_hwfn,
3527 			   p_ptt,
3528 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3529 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3530 
3531 		/* Read VFC RAM read response */
3532 		ARR_REG_RD(p_hwfn,
3533 			   p_ptt,
3534 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3535 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3536 	}
3537 
3538 	return offset;
3539 }
3540 
3541 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3542 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3543 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3544 {
3545 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3546 	u8 storm_id, i;
3547 	u32 offset = 0;
3548 
3549 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3550 		if (!qed_grc_is_storm_included(p_hwfn,
3551 					       (enum dbg_storms)storm_id) ||
3552 		    !s_storm_defs[storm_id].has_vfc ||
3553 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3554 		     PLATFORM_ASIC))
3555 			continue;
3556 
3557 		/* Read CAM */
3558 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3559 					       p_ptt,
3560 					       dump_buf + offset,
3561 					       dump, storm_id);
3562 
3563 		/* Read RAM */
3564 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3565 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3566 						       p_ptt,
3567 						       dump_buf + offset,
3568 						       dump,
3569 						       storm_id,
3570 						       &s_vfc_ram_defs[i]);
3571 	}
3572 
3573 	return offset;
3574 }
3575 
3576 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3577 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3578 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3579 {
3580 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3581 	u32 offset = 0;
3582 	u8 rss_mem_id;
3583 
3584 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3585 		u32 rss_addr, num_entries, total_dwords;
3586 		struct rss_mem_defs *rss_defs;
3587 		u32 addr, num_dwords_to_read;
3588 		bool packed;
3589 
3590 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3591 		rss_addr = rss_defs->addr;
3592 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3593 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3594 		packed = (rss_defs->entry_width == 16);
3595 
3596 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3597 					       dump_buf + offset,
3598 					       dump,
3599 					       rss_defs->mem_name,
3600 					       0,
3601 					       total_dwords,
3602 					       rss_defs->entry_width,
3603 					       packed,
3604 					       rss_defs->type_name, false, 0);
3605 
3606 		/* Dump RSS data */
3607 		if (!dump) {
3608 			offset += total_dwords;
3609 			continue;
3610 		}
3611 
3612 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3613 		while (total_dwords) {
3614 			num_dwords_to_read = min_t(u32,
3615 						   RSS_REG_RSS_RAM_DATA_SIZE,
3616 						   total_dwords);
3617 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3618 			offset += qed_grc_dump_addr_range(p_hwfn,
3619 							  p_ptt,
3620 							  dump_buf + offset,
3621 							  dump,
3622 							  addr,
3623 							  num_dwords_to_read,
3624 							  false);
3625 			total_dwords -= num_dwords_to_read;
3626 			rss_addr++;
3627 		}
3628 	}
3629 
3630 	return offset;
3631 }
3632 
3633 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3634 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3635 				struct qed_ptt *p_ptt,
3636 				u32 *dump_buf, bool dump, u8 big_ram_id)
3637 {
3638 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3639 	u32 block_size, ram_size, offset = 0, reg_val, i;
3640 	char mem_name[12] = "???_BIG_RAM";
3641 	char type_name[8] = "???_RAM";
3642 	struct big_ram_defs *big_ram;
3643 
3644 	big_ram = &s_big_ram_defs[big_ram_id];
3645 	ram_size = big_ram->ram_size[dev_data->chip_id];
3646 
3647 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
	block_size = (reg_val &
		      BIT(big_ram->is_256b_bit_offset[dev_data->chip_id])) ?
		      256 : 128;
3651 
3652 	strscpy(type_name, big_ram->instance_name, sizeof(type_name));
3653 	strscpy(mem_name, big_ram->instance_name, sizeof(mem_name));
3654 
3655 	/* Dump memory header */
3656 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3657 				       dump_buf + offset,
3658 				       dump,
3659 				       mem_name,
3660 				       0,
3661 				       ram_size,
3662 				       block_size * 8,
3663 				       false, type_name, false, 0);
3664 
3665 	/* Read and dump Big RAM data */
3666 	if (!dump)
3667 		return offset + ram_size;
3668 
3669 	/* Dump Big RAM */
3670 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3671 	     i++) {
3672 		u32 addr, len;
3673 
3674 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3675 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3676 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3677 		offset += qed_grc_dump_addr_range(p_hwfn,
3678 						  p_ptt,
3679 						  dump_buf + offset,
3680 						  dump,
3681 						  addr,
3682 						  len,
3683 						  false);
3684 	}
3685 
3686 	return offset;
3687 }
3688 
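/* Dumps GRC MCP data. Returns the dumped size in dwords. */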
3689 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3690 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3691 {
3692 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3693 	u32 offset = 0, addr;
3694 	bool halted = false;
3695 
3696 	/* Halt MCP */
3697 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3698 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3699 		if (!halted)
3700 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3701 	}
3702 
3703 	/* Dump MCP scratchpad */
3704 	offset += qed_grc_dump_mem(p_hwfn,
3705 				   p_ptt,
3706 				   dump_buf + offset,
3707 				   dump,
3708 				   NULL,
3709 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3710 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3711 				   false, 0, false, "MCP", false, 0);
3712 
3713 	/* Dump MCP cpu_reg_file */
3714 	offset += qed_grc_dump_mem(p_hwfn,
3715 				   p_ptt,
3716 				   dump_buf + offset,
3717 				   dump,
3718 				   NULL,
3719 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3720 				   MCP_REG_CPU_REG_FILE_SIZE,
3721 				   false, 0, false, "MCP", false, 0);
3722 
3723 	/* Dump MCP registers */
3724 	block_enable[BLOCK_MCP] = true;
3725 	offset += qed_grc_dump_registers(p_hwfn,
3726 					 p_ptt,
3727 					 dump_buf + offset,
3728 					 dump, block_enable, "block", "MCP");
3729 
3730 	/* Dump required non-MCP registers */
3731 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3732 					dump, 1, "eng", -1, "block", "MCP");
3733 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3734 	offset += qed_grc_dump_reg_entry(p_hwfn,
3735 					 p_ptt,
3736 					 dump_buf + offset,
3737 					 dump,
3738 					 addr,
3739 					 1,
3740 					 false);
3741 
3742 	/* Release MCP */
3743 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3744 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3745 
3746 	return offset;
3747 }
3748 
3749 /* Dumps the tbus indirect memory for all PHYs. */
3750 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3751 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3752 {
3753 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3754 	char mem_name[32];
3755 	u8 phy_id;
3756 
3757 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3758 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3759 		struct phy_defs *phy_defs;
3760 		u8 *bytes_buf;
3761 
3762 		phy_defs = &s_phy_defs[phy_id];
3763 		addr_lo_addr = phy_defs->base_addr +
3764 			       phy_defs->tbus_addr_lo_addr;
3765 		addr_hi_addr = phy_defs->base_addr +
3766 			       phy_defs->tbus_addr_hi_addr;
3767 		data_lo_addr = phy_defs->base_addr +
3768 			       phy_defs->tbus_data_lo_addr;
3769 		data_hi_addr = phy_defs->base_addr +
3770 			       phy_defs->tbus_data_hi_addr;
3771 
3772 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3773 			     phy_defs->phy_name) < 0)
3774 			DP_NOTICE(p_hwfn,
3775 				  "Unexpected debug error: invalid PHY memory name\n");
3776 
3777 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3778 					       dump_buf + offset,
3779 					       dump,
3780 					       mem_name,
3781 					       0,
3782 					       PHY_DUMP_SIZE_DWORDS,
3783 					       16, true, mem_name, false, 0);
3784 
3785 		if (!dump) {
3786 			offset += PHY_DUMP_SIZE_DWORDS;
3787 			continue;
3788 		}
3789 
3790 		bytes_buf = (u8 *)(dump_buf + offset);
3791 		for (tbus_hi_offset = 0;
3792 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3793 		     tbus_hi_offset++) {
3794 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3795 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3796 			     tbus_lo_offset++) {
3797 				qed_wr(p_hwfn,
3798 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3799 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3800 							    p_ptt,
3801 							    data_lo_addr);
3802 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3803 							    p_ptt,
3804 							    data_hi_addr);
3805 			}
3806 		}
3807 
3808 		offset += PHY_DUMP_SIZE_DWORDS;
3809 	}
3810 
3811 	return offset;
3812 }
3813 
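/* Configures the specified debug line of the given block by writing its
 * debug-bus select, enable, shift, force-valid and force-frame registers.
 */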
3814 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3815 				struct qed_ptt *p_ptt,
3816 				enum block_id block_id,
3817 				u8 line_id,
3818 				u8 enable_mask,
3819 				u8 right_shift,
3820 				u8 force_valid_mask, u8 force_frame_mask)
3821 {
3822 	struct block_defs *block = s_block_defs[block_id];
3823 
3824 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3825 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3826 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3827 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3828 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3829 }
3830 
3831 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3832 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3833 				     struct qed_ptt *p_ptt,
3834 				     u32 *dump_buf, bool dump)
3835 {
3836 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3837 	u32 block_id, line_id, offset = 0;
3838 
3839 	/* Don't dump static debug if a debug bus recording is in progress */
3840 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3841 		return 0;
3842 
3843 	if (dump) {
3844 		/* Disable all blocks debug output */
3845 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3846 			struct block_defs *block = s_block_defs[block_id];
3847 
3848 			if (block->dbg_client_id[dev_data->chip_id] !=
3849 			    MAX_DBG_BUS_CLIENTS)
3850 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3851 				       0);
3852 		}
3853 
3854 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3855 		qed_bus_set_framing_mode(p_hwfn,
3856 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3857 		qed_wr(p_hwfn,
3858 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3859 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3860 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3861 	}
3862 
3863 	/* Dump all static debug lines for each relevant block */
3864 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3865 		struct block_defs *block = s_block_defs[block_id];
3866 		struct dbg_bus_block *block_desc;
3867 		u32 block_dwords, addr, len;
3868 		u8 dbg_client_id;
3869 
3870 		if (block->dbg_client_id[dev_data->chip_id] ==
3871 		    MAX_DBG_BUS_CLIENTS)
3872 			continue;
3873 
3874 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3875 						    (enum block_id)block_id);
3876 		block_dwords = NUM_DBG_LINES(block_desc) *
3877 			       STATIC_DEBUG_LINE_DWORDS;
3878 
3879 		/* Dump static section params */
3880 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3881 					       dump_buf + offset,
3882 					       dump,
3883 					       block->name,
3884 					       0,
3885 					       block_dwords,
3886 					       32, false, "STATIC", false, 0);
3887 
3888 		if (!dump) {
3889 			offset += block_dwords;
3890 			continue;
3891 		}
3892 
		/* If all lines are invalid, dump zeros */
3894 		if (dev_data->block_in_reset[block_id]) {
3895 			memset(dump_buf + offset, 0,
3896 			       DWORDS_TO_BYTES(block_dwords));
3897 			offset += block_dwords;
3898 			continue;
3899 		}
3900 
3901 		/* Enable block's client */
3902 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3903 		qed_bus_enable_clients(p_hwfn,
3904 				       p_ptt,
3905 				       BIT(dbg_client_id));
3906 
3907 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3908 		len = STATIC_DEBUG_LINE_DWORDS;
3909 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3910 		     line_id++) {
3911 			/* Configure debug line ID */
3912 			qed_config_dbg_line(p_hwfn,
3913 					    p_ptt,
3914 					    (enum block_id)block_id,
3915 					    (u8)line_id, 0xf, 0, 0, 0);
3916 
3917 			/* Read debug line info */
3918 			offset += qed_grc_dump_addr_range(p_hwfn,
3919 							  p_ptt,
3920 							  dump_buf + offset,
3921 							  dump,
3922 							  addr,
3923 							  len,
3924 							  true);
3925 		}
3926 
3927 		/* Disable block's client and debug output */
3928 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3929 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3930 	}
3931 
3932 	if (dump) {
3933 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3934 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3935 	}
3936 
3937 	return offset;
3938 }
3939 
3940 /* Performs GRC Dump to the specified buffer.
3941  * Returns the dumped size in dwords.
3942  */
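/* The resulting GRC dump image is laid out as follows: common global params,
 * the "dump-type"/"num-lcids"/"num-ltids"/"num-ports" params, reset registers,
 * modified registers, block registers (all blocks except MCP) plus special
 * registers, memories, MCP, CM context, RSS memories, Big RAMs, IORs, VFC,
 * PHY tbus, static debug data, and a terminating last section. Most parts
 * are included only if their corresponding GRC parameter is set.
 */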
3943 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3944 				    struct qed_ptt *p_ptt,
3945 				    u32 *dump_buf,
3946 				    bool dump, u32 *num_dumped_dwords)
3947 {
3948 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3949 	bool parities_masked = false;
3950 	u8 i, port_mode = 0;
3951 	u32 offset = 0;
3952 
3953 	*num_dumped_dwords = 0;
3954 
3955 	if (dump) {
3956 		/* Find port mode */
3957 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3958 		case 0:
3959 			port_mode = 1;
3960 			break;
3961 		case 1:
3962 			port_mode = 2;
3963 			break;
3964 		case 2:
3965 			port_mode = 4;
3966 			break;
3967 		}
3968 
3969 		/* Update reset state */
3970 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3971 	}
3972 
3973 	/* Dump global params */
3974 	offset += qed_dump_common_global_params(p_hwfn,
3975 						p_ptt,
3976 						dump_buf + offset, dump, 4);
3977 	offset += qed_dump_str_param(dump_buf + offset,
3978 				     dump, "dump-type", "grc-dump");
3979 	offset += qed_dump_num_param(dump_buf + offset,
3980 				     dump,
3981 				     "num-lcids",
3982 				     qed_grc_get_param(p_hwfn,
3983 						DBG_GRC_PARAM_NUM_LCIDS));
3984 	offset += qed_dump_num_param(dump_buf + offset,
3985 				     dump,
3986 				     "num-ltids",
3987 				     qed_grc_get_param(p_hwfn,
3988 						DBG_GRC_PARAM_NUM_LTIDS));
3989 	offset += qed_dump_num_param(dump_buf + offset,
3990 				     dump, "num-ports", port_mode);
3991 
	/* Dump reset registers (dumped before taking blocks out of reset) */
3993 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3994 		offset += qed_grc_dump_reset_regs(p_hwfn,
3995 						  p_ptt,
3996 						  dump_buf + offset, dump);
3997 
3998 	/* Take all blocks out of reset (using reset registers) */
3999 	if (dump) {
4000 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4001 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4002 	}
4003 
4004 	/* Disable all parities using MFW command */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4007 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4008 		if (!parities_masked) {
4009 			DP_NOTICE(p_hwfn,
4010 				  "Failed to mask parities using MFW\n");
			if (qed_grc_get_param(p_hwfn,
					      DBG_GRC_PARAM_PARITY_SAFE))
4013 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4014 		}
4015 	}
4016 
4017 	/* Dump modified registers (dumped before modifying them) */
4018 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4019 		offset += qed_grc_dump_modified_regs(p_hwfn,
4020 						     p_ptt,
4021 						     dump_buf + offset, dump);
4022 
4023 	/* Stall storms */
4024 	if (dump &&
4025 	    (qed_grc_is_included(p_hwfn,
4026 				 DBG_GRC_PARAM_DUMP_IOR) ||
4027 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4028 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4029 
	/* Dump all regs */
4031 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4032 		bool block_enable[MAX_BLOCK_ID];
4033 
4034 		/* Dump all blocks except MCP */
4035 		for (i = 0; i < MAX_BLOCK_ID; i++)
4036 			block_enable[i] = true;
4037 		block_enable[BLOCK_MCP] = false;
4038 		offset += qed_grc_dump_registers(p_hwfn,
4039 						 p_ptt,
4040 						 dump_buf +
4041 						 offset,
4042 						 dump,
4043 						 block_enable, NULL, NULL);
4044 
4045 		/* Dump special registers */
4046 		offset += qed_grc_dump_special_regs(p_hwfn,
4047 						    p_ptt,
4048 						    dump_buf + offset, dump);
4049 	}
4050 
4051 	/* Dump memories */
4052 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4053 
4054 	/* Dump MCP */
4055 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4056 		offset += qed_grc_dump_mcp(p_hwfn,
4057 					   p_ptt, dump_buf + offset, dump);
4058 
4059 	/* Dump context */
4060 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4061 		offset += qed_grc_dump_ctx(p_hwfn,
4062 					   p_ptt, dump_buf + offset, dump);
4063 
4064 	/* Dump RSS memories */
4065 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4066 		offset += qed_grc_dump_rss(p_hwfn,
4067 					   p_ptt, dump_buf + offset, dump);
4068 
4069 	/* Dump Big RAM */
4070 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4071 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4072 			offset += qed_grc_dump_big_ram(p_hwfn,
4073 						       p_ptt,
4074 						       dump_buf + offset,
4075 						       dump, i);
4076 
4077 	/* Dump IORs */
4078 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4079 		offset += qed_grc_dump_iors(p_hwfn,
4080 					    p_ptt, dump_buf + offset, dump);
4081 
4082 	/* Dump VFC */
4083 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4084 		offset += qed_grc_dump_vfc(p_hwfn,
4085 					   p_ptt, dump_buf + offset, dump);
4086 
4087 	/* Dump PHY tbus */
	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) &&
	    dev_data->chip_id == CHIP_K2 &&
	    dev_data->platform_id == PLATFORM_ASIC)
4091 		offset += qed_grc_dump_phy(p_hwfn,
4092 					   p_ptt, dump_buf + offset, dump);
4093 
	/* Dump static debug data */
4095 	if (qed_grc_is_included(p_hwfn,
4096 				DBG_GRC_PARAM_DUMP_STATIC) &&
4097 	    dev_data->bus.state == DBG_BUS_STATE_IDLE)
4098 		offset += qed_grc_dump_static_debug(p_hwfn,
4099 						    p_ptt,
4100 						    dump_buf + offset, dump);
4101 
4102 	/* Dump last section */
4103 	offset += qed_dump_last_section(dump_buf, offset, dump);
4104 
4105 	if (dump) {
4106 		/* Unstall storms */
4107 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4108 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4109 
4110 		/* Clear parity status */
4111 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4112 
4113 		/* Enable all parities using MFW command */
4114 		if (parities_masked)
4115 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4116 	}
4117 
4118 	*num_dumped_dwords = offset;
4119 
4120 	return DBG_STATUS_OK;
4121 }
4122 
4123 /* Writes the specified failing Idle Check rule to the specified buffer.
4124  * Returns the dumped size in dwords.
4125  */
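/* Each failing-rule record consists of an idle check result header (rule ID,
 * failing entry ID, severity and number of condition registers), followed by
 * a register header plus the sampled values for every condition register, and
 * then for every info register that is readable (block out of reset, mode
 * match).
 */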
4126 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4127 				     struct qed_ptt *p_ptt,
				     u32 *dump_buf,
4130 				     bool dump,
4131 				     u16 rule_id,
4132 				     const struct dbg_idle_chk_rule *rule,
4133 				     u16 fail_entry_id, u32 *cond_reg_values)
4134 {
4135 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4136 	const struct dbg_idle_chk_cond_reg *cond_regs;
4137 	const struct dbg_idle_chk_info_reg *info_regs;
4138 	u32 i, next_reg_offset = 0, offset = 0;
4139 	struct dbg_idle_chk_result_hdr *hdr;
4140 	const union dbg_idle_chk_reg *regs;
4141 	u8 reg_id;
4142 
4143 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4144 	regs = &((const union dbg_idle_chk_reg *)
4145 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4146 	cond_regs = &regs[0].cond_reg;
4147 	info_regs = &regs[rule->num_cond_regs].info_reg;
4148 
4149 	/* Dump rule data */
4150 	if (dump) {
4151 		memset(hdr, 0, sizeof(*hdr));
4152 		hdr->rule_id = rule_id;
4153 		hdr->mem_entry_id = fail_entry_id;
4154 		hdr->severity = rule->severity;
4155 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4156 	}
4157 
4158 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4159 
4160 	/* Dump condition register values */
4161 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4162 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4163 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4164 
4165 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4166 			  (dump_buf + offset);
4167 
4168 		/* Write register header */
4169 		if (!dump) {
4170 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4171 			    reg->entry_size;
4172 			continue;
4173 		}
4174 
4175 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4176 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4177 		reg_hdr->start_entry = reg->start_entry;
4178 		reg_hdr->size = reg->entry_size;
4179 		SET_FIELD(reg_hdr->data,
4180 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4181 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4182 		SET_FIELD(reg_hdr->data,
4183 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4184 
4185 		/* Write register values */
4186 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4187 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4188 	}
4189 
4190 	/* Dump info register values */
4191 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4192 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4193 		u32 block_id;
4194 
4195 		/* Check if register's block is in reset */
4196 		if (!dump) {
4197 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4198 			continue;
4199 		}
4200 
4201 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4202 		if (block_id >= MAX_BLOCK_ID) {
4203 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4204 			return 0;
4205 		}
4206 
4207 		if (!dev_data->block_in_reset[block_id]) {
4208 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4209 			bool wide_bus, eval_mode, mode_match = true;
4210 			u16 modes_buf_offset;
4211 			u32 addr;
4212 
4213 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4214 				  (dump_buf + offset);
4215 
4216 			/* Check mode */
4217 			eval_mode = GET_FIELD(reg->mode.data,
4218 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4219 			if (eval_mode) {
4220 				modes_buf_offset =
4221 				    GET_FIELD(reg->mode.data,
4222 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4223 				mode_match =
4224 					qed_is_mode_match(p_hwfn,
4225 							  &modes_buf_offset);
4226 			}
4227 
4228 			if (!mode_match)
4229 				continue;
4230 
4231 			addr = GET_FIELD(reg->data,
4232 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4233 			wide_bus = GET_FIELD(reg->data,
4234 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4235 
4236 			/* Write register header */
4237 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4238 			hdr->num_dumped_info_regs++;
4239 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4240 			reg_hdr->size = reg->size;
4241 			SET_FIELD(reg_hdr->data,
4242 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4243 				  rule->num_cond_regs + reg_id);
4244 
4245 			/* Write register values */
4246 			offset += qed_grc_dump_addr_range(p_hwfn,
4247 							  p_ptt,
4248 							  dump_buf + offset,
4249 							  dump,
4250 							  addr,
4251 							  reg->size, wide_bus);
4252 		}
4253 	}
4254 
4255 	return offset;
4256 }
4257 
4258 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4259 static u32
4260 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4261 			       u32 *dump_buf, bool dump,
4262 			       const struct dbg_idle_chk_rule *input_rules,
4263 			       u32 num_input_rules, u32 *num_failing_rules)
4264 {
4265 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4266 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4267 	u32 i, offset = 0;
4268 	u16 entry_id;
4269 	u8 reg_id;
4270 
4271 	*num_failing_rules = 0;
4272 
4273 	for (i = 0; i < num_input_rules; i++) {
4274 		const struct dbg_idle_chk_cond_reg *cond_regs;
4275 		const struct dbg_idle_chk_rule *rule;
4276 		const union dbg_idle_chk_reg *regs;
4277 		u16 num_reg_entries = 1;
4278 		bool check_rule = true;
4279 		const u32 *imm_values;
4280 
4281 		rule = &input_rules[i];
4282 		regs = &((const union dbg_idle_chk_reg *)
4283 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4284 			[rule->reg_offset];
4285 		cond_regs = &regs[0].cond_reg;
4286 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4287 			     [rule->imm_offset];
4288 
4289 		/* Check if all condition register blocks are out of reset, and
4290 		 * find maximal number of entries (all condition registers that
4291 		 * are memories must have the same size, which is > 1).
4292 		 */
4293 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4294 		     reg_id++) {
4295 			u32 block_id =
4296 				GET_FIELD(cond_regs[reg_id].data,
4297 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4298 
4299 			if (block_id >= MAX_BLOCK_ID) {
4300 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4301 				return 0;
4302 			}
4303 
4304 			check_rule = !dev_data->block_in_reset[block_id];
4305 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4306 				num_reg_entries = cond_regs[reg_id].num_entries;
4307 		}
4308 
4309 		if (!check_rule && dump)
4310 			continue;
4311 
4312 		if (!dump) {
4313 			u32 entry_dump_size =
4314 				qed_idle_chk_dump_failure(p_hwfn,
4315 							  p_ptt,
4316 							  dump_buf + offset,
4317 							  false,
4318 							  rule->rule_id,
4319 							  rule,
4320 							  0,
4321 							  NULL);
4322 
4323 			offset += num_reg_entries * entry_dump_size;
4324 			(*num_failing_rules) += num_reg_entries;
4325 			continue;
4326 		}
4327 
4328 		/* Go over all register entries (number of entries is the same
4329 		 * for all condition registers).
4330 		 */
4331 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4332 			u32 next_reg_offset = 0;
4333 
4334 			/* Read current entry of all condition registers */
4335 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4336 			     reg_id++) {
4337 				const struct dbg_idle_chk_cond_reg *reg =
4338 					&cond_regs[reg_id];
4339 				u32 padded_entry_size, addr;
4340 				bool wide_bus;
4341 
4342 				/* Find GRC address (if it's a memory, the
4343 				 * address of the specific entry is calculated).
4344 				 */
4345 				addr = GET_FIELD(reg->data,
4346 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4347 				wide_bus =
4348 				    GET_FIELD(reg->data,
4349 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4350 				if (reg->num_entries > 1 ||
4351 				    reg->start_entry > 0) {
4352 					padded_entry_size =
4353 					   reg->entry_size > 1 ?
4354 					   roundup_pow_of_two(reg->entry_size) :
4355 					   1;
4356 					addr += (reg->start_entry + entry_id) *
4357 						padded_entry_size;
4358 				}
4359 
4360 				/* Read registers */
4361 				if (next_reg_offset + reg->entry_size >=
4362 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4363 					DP_NOTICE(p_hwfn,
4364 						  "idle check registers entry is too large\n");
4365 					return 0;
4366 				}
4367 
4368 				next_reg_offset +=
4369 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4370 							    cond_reg_values +
4371 							    next_reg_offset,
4372 							    dump, addr,
4373 							    reg->entry_size,
4374 							    wide_bus);
4375 			}
4376 
4377 			/* Call rule condition function.
			 * If it returns true, it's a failure.
4379 			 */
4380 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4381 							imm_values)) {
4382 				offset += qed_idle_chk_dump_failure(p_hwfn,
4383 							p_ptt,
4384 							dump_buf + offset,
4385 							dump,
4386 							rule->rule_id,
4387 							rule,
4388 							entry_id,
4389 							cond_reg_values);
4390 				(*num_failing_rules)++;
4391 			}
4392 		}
4393 	}
4394 
4395 	return offset;
4396 }
4397 
4398 /* Performs Idle Check Dump to the specified buffer.
4399  * Returns the dumped size in dwords.
4400  */
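/* The idle check dump image consists of the common global params, a
 * "dump-type" param, an "idle_chk" section whose "num_rules" param is
 * overwritten at the end with the number of failing rules that were found,
 * the failing-rule records themselves, and a terminating last section.
 */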
4401 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4402 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4403 {
4404 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4405 	u32 num_failing_rules = 0;
4406 
4407 	/* Dump global params */
4408 	offset += qed_dump_common_global_params(p_hwfn,
4409 						p_ptt,
4410 						dump_buf + offset, dump, 1);
4411 	offset += qed_dump_str_param(dump_buf + offset,
4412 				     dump, "dump-type", "idle-chk");
4413 
4414 	/* Dump idle check section header with a single parameter */
4415 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4416 	num_failing_rules_offset = offset;
4417 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4418 
4419 	while (input_offset <
4420 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4421 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4422 			(const struct dbg_idle_chk_cond_hdr *)
4423 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4424 			[input_offset++];
4425 		bool eval_mode, mode_match = true;
4426 		u32 curr_failing_rules;
4427 		u16 modes_buf_offset;
4428 
4429 		/* Check mode */
4430 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4431 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4432 		if (eval_mode) {
4433 			modes_buf_offset =
4434 				GET_FIELD(cond_hdr->mode.data,
4435 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4436 			mode_match = qed_is_mode_match(p_hwfn,
4437 						       &modes_buf_offset);
4438 		}
4439 
4440 		if (mode_match) {
4441 			offset +=
4442 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4443 				p_ptt,
4444 				dump_buf + offset,
4445 				dump,
4446 				(const struct dbg_idle_chk_rule *)
				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
				[input_offset],
4449 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4450 				&curr_failing_rules);
4451 			num_failing_rules += curr_failing_rules;
4452 		}
4453 
4454 		input_offset += cond_hdr->data_size;
4455 	}
4456 
4457 	/* Overwrite num_rules parameter */
4458 	if (dump)
4459 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4460 				   dump, "num_rules", num_failing_rules);
4461 
4462 	/* Dump last section */
4463 	offset += qed_dump_last_section(dump_buf, offset, dump);
4464 
4465 	return offset;
4466 }
4467 
4468 /* Finds the meta data image in NVRAM */
4469 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4470 					    struct qed_ptt *p_ptt,
4471 					    u32 image_type,
4472 					    u32 *nvram_offset_bytes,
4473 					    u32 *nvram_size_bytes)
4474 {
4475 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4476 	struct mcp_file_att file_att;
4477 	int nvm_result;
4478 
4479 	/* Call NVRAM get file command */
4480 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4481 					p_ptt,
4482 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4483 					image_type,
4484 					&ret_mcp_resp,
4485 					&ret_mcp_param,
4486 					&ret_txn_size, (u32 *)&file_att);
4487 
4488 	/* Check response */
4489 	if (nvm_result ||
4490 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4491 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4492 
4493 	/* Update return values */
4494 	*nvram_offset_bytes = file_att.nvm_start_addr;
4495 	*nvram_size_bytes = file_att.len;
4496 
4497 	DP_VERBOSE(p_hwfn,
4498 		   QED_MSG_DEBUG,
4499 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4500 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4501 
4502 	/* Check alignment */
4503 	if (*nvram_size_bytes & 0x3)
4504 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4505 
4506 	return DBG_STATUS_OK;
4507 }
4508 
4509 /* Reads data from NVRAM */
4510 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4511 				      struct qed_ptt *p_ptt,
4512 				      u32 nvram_offset_bytes,
4513 				      u32 nvram_size_bytes, u32 *ret_buf)
4514 {
4515 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4516 	s32 bytes_left = nvram_size_bytes;
4517 	u32 read_offset = 0;
4518 
4519 	DP_VERBOSE(p_hwfn,
4520 		   QED_MSG_DEBUG,
4521 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4522 		   nvram_size_bytes);
4523 
4524 	do {
		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ?
				MCP_DRV_NVM_BUF_LEN : bytes_left;
4528 
4529 		/* Call NVRAM read command */
4530 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4531 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4532 				       (nvram_offset_bytes +
4533 					read_offset) |
4534 				       (bytes_to_copy <<
4535 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4536 				       &ret_mcp_resp, &ret_mcp_param,
4537 				       &ret_read_size,
4538 				       (u32 *)((u8 *)ret_buf + read_offset)))
4539 			return DBG_STATUS_NVRAM_READ_FAILED;
4540 
4541 		/* Check response */
4542 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4543 			return DBG_STATUS_NVRAM_READ_FAILED;
4544 
4545 		/* Update read offset */
4546 		read_offset += ret_read_size;
4547 		bytes_left -= ret_read_size;
4548 	} while (bytes_left > 0);
4549 
4550 	return DBG_STATUS_OK;
4551 }
4552 
4553 /* Get info on the MCP Trace data in the scratchpad:
4554  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4555  * - trace_data_size (OUT): trace data size in bytes (without the header)
4556  */
4557 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4558 						   struct qed_ptt *p_ptt,
4559 						   u32 *trace_data_grc_addr,
4560 						   u32 *trace_data_size)
4561 {
4562 	u32 spad_trace_offsize, signature;
4563 
4564 	/* Read trace section offsize structure from MCP scratchpad */
4565 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4566 
4567 	/* Extract trace section address from offsize (in scratchpad) */
4568 	*trace_data_grc_addr =
4569 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4570 
4571 	/* Read signature from MCP trace section */
4572 	signature = qed_rd(p_hwfn, p_ptt,
4573 			   *trace_data_grc_addr +
4574 			   offsetof(struct mcp_trace, signature));
4575 
4576 	if (signature != MFW_TRACE_SIGNATURE)
4577 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4578 
4579 	/* Read trace size from MCP trace section */
4580 	*trace_data_size = qed_rd(p_hwfn,
4581 				  p_ptt,
4582 				  *trace_data_grc_addr +
4583 				  offsetof(struct mcp_trace, size));
4584 
4585 	return DBG_STATUS_OK;
4586 }
4587 
4588 /* Reads MCP trace meta data image from NVRAM
4589  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4590  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4591  *			      loaded from file).
4592  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4593  */
4594 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4595 						   struct qed_ptt *p_ptt,
4596 						   u32 trace_data_size_bytes,
4597 						   u32 *running_bundle_id,
4598 						   u32 *trace_meta_offset,
4599 						   u32 *trace_meta_size)
4600 {
4601 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4602 
4603 	/* Read MCP trace section offsize structure from MCP scratchpad */
4604 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4605 
4606 	/* Find running bundle ID */
4607 	running_mfw_addr =
4608 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4609 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4610 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4611 	if (*running_bundle_id > 1)
4612 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4613 
4614 	/* Find image in NVRAM */
	nvram_image_type = (*running_bundle_id == DIR_ID_1) ?
			   NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4618 	return qed_find_nvram_image(p_hwfn,
4619 				    p_ptt,
4620 				    nvram_image_type,
4621 				    trace_meta_offset, trace_meta_size);
4622 }
4623 
4624 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
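/* As parsed below, the meta image starts with an NVM_MAGIC_VALUE signature
 * dword, followed by a modules-count byte and, per module, a length byte plus
 * that many bytes, and then a second NVM_MAGIC_VALUE signature. Whatever
 * follows the second signature is not validated here.
 */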
4625 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4626 					       struct qed_ptt *p_ptt,
4627 					       u32 nvram_offset_in_bytes,
4628 					       u32 size_in_bytes, u32 *buf)
4629 {
4630 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4631 	enum dbg_status status;
4632 	u32 signature;
4633 
4634 	/* Read meta data from NVRAM */
4635 	status = qed_nvram_read(p_hwfn,
4636 				p_ptt,
4637 				nvram_offset_in_bytes, size_in_bytes, buf);
4638 	if (status != DBG_STATUS_OK)
4639 		return status;
4640 
4641 	/* Extract and check first signature */
4642 	signature = qed_read_unaligned_dword(byte_buf);
4643 	byte_buf += sizeof(signature);
4644 	if (signature != NVM_MAGIC_VALUE)
4645 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4646 
4647 	/* Extract number of modules */
4648 	modules_num = *(byte_buf++);
4649 
4650 	/* Skip all modules */
4651 	for (i = 0; i < modules_num; i++) {
4652 		module_len = *(byte_buf++);
4653 		byte_buf += module_len;
4654 	}
4655 
4656 	/* Extract and check second signature */
4657 	signature = qed_read_unaligned_dword(byte_buf);
4658 	byte_buf += sizeof(signature);
4659 	if (signature != NVM_MAGIC_VALUE)
4660 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4661 
4662 	return DBG_STATUS_OK;
4663 }
4664 
4665 /* Dump MCP Trace */
4666 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4667 					  struct qed_ptt *p_ptt,
4668 					  u32 *dump_buf,
4669 					  bool dump, u32 *num_dumped_dwords)
4670 {
4671 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4672 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4673 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4674 	enum dbg_status status;
4675 	bool mcp_access;
4676 	int halted = 0;
4677 
4678 	*num_dumped_dwords = 0;
4679 
4680 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4681 
4682 	/* Get trace data info */
4683 	status = qed_mcp_trace_get_data_info(p_hwfn,
4684 					     p_ptt,
4685 					     &trace_data_grc_addr,
4686 					     &trace_data_size_bytes);
4687 	if (status != DBG_STATUS_OK)
4688 		return status;
4689 
4690 	/* Dump global params */
4691 	offset += qed_dump_common_global_params(p_hwfn,
4692 						p_ptt,
4693 						dump_buf + offset, dump, 1);
4694 	offset += qed_dump_str_param(dump_buf + offset,
4695 				     dump, "dump-type", "mcp-trace");
4696 
	/* Halt MCP while reading from the scratchpad so the read data will be
	 * consistent. If the halt fails, the MCP trace is taken anyway, with a
	 * small risk that it may be corrupt.
	 */
4701 	if (dump && mcp_access) {
4702 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4703 		if (!halted)
4704 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4705 	}
4706 
4707 	/* Find trace data size */
4708 	trace_data_size_dwords =
4709 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4710 			 BYTES_IN_DWORD);
4711 
4712 	/* Dump trace data section header and param */
4713 	offset += qed_dump_section_hdr(dump_buf + offset,
4714 				       dump, "mcp_trace_data", 1);
4715 	offset += qed_dump_num_param(dump_buf + offset,
4716 				     dump, "size", trace_data_size_dwords);
4717 
4718 	/* Read trace data from scratchpad into dump buffer */
4719 	offset += qed_grc_dump_addr_range(p_hwfn,
4720 					  p_ptt,
4721 					  dump_buf + offset,
4722 					  dump,
4723 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4724 					  trace_data_size_dwords, false);
4725 
4726 	/* Resume MCP (only if halt succeeded) */
4727 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4728 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4729 
4730 	/* Dump trace meta section header */
4731 	offset += qed_dump_section_hdr(dump_buf + offset,
4732 				       dump, "mcp_trace_meta", 1);
4733 
4734 	/* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
4735 	if (mcp_access) {
4736 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4737 						     p_ptt,
4738 						     trace_data_size_bytes,
4739 						     &running_bundle_id,
4740 						     &trace_meta_offset_bytes,
4741 						     &trace_meta_size_bytes);
4742 		if (status == DBG_STATUS_OK)
4743 			trace_meta_size_dwords =
4744 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4745 	}
4746 
4747 	/* Dump trace meta size param */
4748 	offset += qed_dump_num_param(dump_buf + offset,
4749 				     dump, "size", trace_meta_size_dwords);
4750 
4751 	/* Read trace meta image into dump buffer */
4752 	if (dump && trace_meta_size_dwords)
4753 		status = qed_mcp_trace_read_meta(p_hwfn,
4754 						 p_ptt,
4755 						 trace_meta_offset_bytes,
4756 						 trace_meta_size_bytes,
4757 						 dump_buf + offset);
4758 	if (status == DBG_STATUS_OK)
4759 		offset += trace_meta_size_dwords;
4760 
4761 	/* Dump last section */
4762 	offset += qed_dump_last_section(dump_buf, offset, dump);
4763 
4764 	*num_dumped_dwords = offset;
4765 
	/* If no MCP access, indicate that the dump doesn't contain the meta
4767 	 * data from NVRAM.
4768 	 */
4769 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4770 }
4771 
4772 /* Dump GRC FIFO */
4773 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4774 					 struct qed_ptt *p_ptt,
4775 					 u32 *dump_buf,
4776 					 bool dump, u32 *num_dumped_dwords)
4777 {
4778 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4779 	bool fifo_has_data;
4780 
4781 	*num_dumped_dwords = 0;
4782 
4783 	/* Dump global params */
4784 	offset += qed_dump_common_global_params(p_hwfn,
4785 						p_ptt,
4786 						dump_buf + offset, dump, 1);
4787 	offset += qed_dump_str_param(dump_buf + offset,
4788 				     dump, "dump-type", "reg-fifo");
4789 
4790 	/* Dump fifo data section header and param. The size param is 0 for
4791 	 * now, and is overwritten after reading the FIFO.
4792 	 */
4793 	offset += qed_dump_section_hdr(dump_buf + offset,
4794 				       dump, "reg_fifo_data", 1);
4795 	size_param_offset = offset;
4796 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4797 
4798 	if (!dump) {
4799 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4800 		 * test how much data is available, except for reading it.
4801 		 */
4802 		offset += REG_FIFO_DEPTH_DWORDS;
4803 		goto out;
4804 	}
4805 
4806 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4807 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4808 
	/* Pull available data from the FIFO. Use DMAE since this is wide-bus
	 * memory and must be accessed atomically. Test for dwords_read not
	 * passing the buffer size, since more entries could be added to the
	 * buffer as we are emptying it.
	 */
4814 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4815 	len = REG_FIFO_ELEMENT_DWORDS;
4816 	for (dwords_read = 0;
4817 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4818 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4819 		offset += qed_grc_dump_addr_range(p_hwfn,
4820 						  p_ptt,
4821 						  dump_buf + offset,
4822 						  true,
4823 						  addr,
4824 						  len,
4825 						  true);
4826 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4827 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4828 	}
4829 
4830 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4831 			   dwords_read);
4832 out:
4833 	/* Dump last section */
4834 	offset += qed_dump_last_section(dump_buf, offset, dump);
4835 
4836 	*num_dumped_dwords = offset;
4837 
4838 	return DBG_STATUS_OK;
4839 }
4840 
4841 /* Dump IGU FIFO */
4842 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4843 					 struct qed_ptt *p_ptt,
4844 					 u32 *dump_buf,
4845 					 bool dump, u32 *num_dumped_dwords)
4846 {
4847 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4848 	bool fifo_has_data;
4849 
4850 	*num_dumped_dwords = 0;
4851 
4852 	/* Dump global params */
4853 	offset += qed_dump_common_global_params(p_hwfn,
4854 						p_ptt,
4855 						dump_buf + offset, dump, 1);
4856 	offset += qed_dump_str_param(dump_buf + offset,
4857 				     dump, "dump-type", "igu-fifo");
4858 
4859 	/* Dump fifo data section header and param. The size param is 0 for
4860 	 * now, and is overwritten after reading the FIFO.
4861 	 */
4862 	offset += qed_dump_section_hdr(dump_buf + offset,
4863 				       dump, "igu_fifo_data", 1);
4864 	size_param_offset = offset;
4865 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4866 
4867 	if (!dump) {
4868 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4869 		 * test how much data is available, except for reading it.
4870 		 */
4871 		offset += IGU_FIFO_DEPTH_DWORDS;
4872 		goto out;
4873 	}
4874 
4875 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4876 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4877 
	/* Pull available data from the FIFO. Use DMAE since this is wide-bus
	 * memory and must be accessed atomically. Test for dwords_read not
	 * passing the buffer size, since more entries could be added to the
	 * buffer as we are emptying it.
	 */
4883 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4884 	len = IGU_FIFO_ELEMENT_DWORDS;
4885 	for (dwords_read = 0;
4886 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4887 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4888 		offset += qed_grc_dump_addr_range(p_hwfn,
4889 						  p_ptt,
4890 						  dump_buf + offset,
4891 						  true,
4892 						  addr,
4893 						  len,
4894 						  true);
4895 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4896 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4897 	}
4898 
4899 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4900 			   dwords_read);
4901 out:
4902 	/* Dump last section */
4903 	offset += qed_dump_last_section(dump_buf, offset, dump);
4904 
4905 	*num_dumped_dwords = offset;
4906 
4907 	return DBG_STATUS_OK;
4908 }
4909 
4910 /* Protection Override dump */
4911 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4912 						    struct qed_ptt *p_ptt,
4913 						    u32 *dump_buf,
4914 						    bool dump,
4915 						    u32 *num_dumped_dwords)
4916 {
4917 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4918 
4919 	*num_dumped_dwords = 0;
4920 
4921 	/* Dump global params */
4922 	offset += qed_dump_common_global_params(p_hwfn,
4923 						p_ptt,
4924 						dump_buf + offset, dump, 1);
4925 	offset += qed_dump_str_param(dump_buf + offset,
4926 				     dump, "dump-type", "protection-override");
4927 
4928 	/* Dump data section header and param. The size param is 0 for now,
4929 	 * and is overwritten after reading the data.
4930 	 */
4931 	offset += qed_dump_section_hdr(dump_buf + offset,
4932 				       dump, "protection_override_data", 1);
4933 	size_param_offset = offset;
4934 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4935 
4936 	if (!dump) {
4937 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4938 		goto out;
4939 	}
4940 
4941 	/* Add override window info to buffer */
4942 	override_window_dwords =
4943 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4944 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4945 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4946 	offset += qed_grc_dump_addr_range(p_hwfn,
4947 					  p_ptt,
4948 					  dump_buf + offset,
4949 					  true,
4950 					  addr,
4951 					  override_window_dwords,
4952 					  true);
4953 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4954 			   override_window_dwords);
4955 out:
4956 	/* Dump last section */
4957 	offset += qed_dump_last_section(dump_buf, offset, dump);
4958 
4959 	*num_dumped_dwords = offset;
4960 
4961 	return DBG_STATUS_OK;
4962 }
4963 
4964 /* Performs FW Asserts Dump to the specified buffer.
4965  * Returns the dumped size in dwords.
4966  */
4967 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4968 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4969 {
4970 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4971 	struct fw_asserts_ram_section *asserts;
4972 	char storm_letter_str[2] = "?";
4973 	struct fw_info fw_info;
4974 	u32 offset = 0;
4975 	u8 storm_id;
4976 
4977 	/* Dump global params */
4978 	offset += qed_dump_common_global_params(p_hwfn,
4979 						p_ptt,
4980 						dump_buf + offset, dump, 1);
4981 	offset += qed_dump_str_param(dump_buf + offset,
4982 				     dump, "dump-type", "fw-asserts");
4983 
4984 	/* Find Storm dump size */
4985 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4986 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4987 		struct storm_defs *storm = &s_storm_defs[storm_id];
4988 		u32 last_list_idx, addr;
4989 
4990 		if (dev_data->block_in_reset[storm->block_id])
4991 			continue;
4992 
4993 		/* Read FW info for the current Storm */
4994 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4995 
4996 		asserts = &fw_info.fw_asserts_section;
4997 
4998 		/* Dump FW Asserts section header and params */
4999 		storm_letter_str[0] = storm->letter;
5000 		offset += qed_dump_section_hdr(dump_buf + offset,
5001 					       dump, "fw_asserts", 2);
5002 		offset += qed_dump_str_param(dump_buf + offset,
5003 					     dump, "storm", storm_letter_str);
5004 		offset += qed_dump_num_param(dump_buf + offset,
5005 					     dump,
5006 					     "size",
5007 					     asserts->list_element_dword_size);
5008 
5009 		/* Read and dump FW Asserts data */
5010 		if (!dump) {
5011 			offset += asserts->list_element_dword_size;
5012 			continue;
5013 		}
5014 
5015 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5016 			SEM_FAST_REG_INT_RAM +
5017 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5018 		next_list_idx_addr = fw_asserts_section_addr +
5019 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5020 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5021 		last_list_idx = (next_list_idx > 0 ?
5022 				 next_list_idx :
5023 				 asserts->list_num_elements) - 1;
5024 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5025 		       asserts->list_dword_offset +
5026 		       last_list_idx * asserts->list_element_dword_size;
5027 		offset +=
5028 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5029 					    dump_buf + offset,
5030 					    dump, addr,
5031 					    asserts->list_element_dword_size,
5032 					    false);
5033 	}
5034 
5035 	/* Dump last section */
5036 	offset += qed_dump_last_section(dump_buf, offset, dump);
5037 
5038 	return offset;
5039 }
5040 
5041 /***************************** Public Functions *******************************/
5042 
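/* As parsed below, the debug binary data begins with an array of
 * MAX_BIN_DBG_BUFFER_TYPE struct bin_buffer_hdr entries, each holding the
 * byte offset (relative to bin_ptr) and byte length of its buffer.
 */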
5043 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5044 {
5045 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5046 	u8 buf_id;
5047 
	/* Convert binary data to debug arrays */
5049 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5050 		s_dbg_arrays[buf_id].ptr =
5051 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5052 		s_dbg_arrays[buf_id].size_in_dwords =
5053 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5054 	}
5055 
5056 	return DBG_STATUS_OK;
5057 }
5058 
5059 /* Assign default GRC param values */
5060 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5061 {
5062 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5063 	u32 i;
5064 
5065 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5066 		dev_data->grc.param_val[i] =
5067 		    s_grc_param_defs[i].default_val[dev_data->chip_id];
5068 }
5069 
5070 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5071 					      struct qed_ptt *p_ptt,
5072 					      u32 *buf_size)
5073 {
5074 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5075 
5076 	*buf_size = 0;
5077 
5078 	if (status != DBG_STATUS_OK)
5079 		return status;
5080 
5081 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5082 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5083 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5084 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5085 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5086 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5087 
5088 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5089 }
5090 
5091 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5092 				 struct qed_ptt *p_ptt,
5093 				 u32 *dump_buf,
5094 				 u32 buf_size_in_dwords,
5095 				 u32 *num_dumped_dwords)
5096 {
5097 	u32 needed_buf_size_in_dwords;
5098 	enum dbg_status status;
5099 
5100 	*num_dumped_dwords = 0;
5101 
5102 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5103 					       p_ptt,
5104 					       &needed_buf_size_in_dwords);
5105 	if (status != DBG_STATUS_OK)
5106 		return status;
5107 
5108 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5109 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5110 
5111 	/* GRC Dump */
5112 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5113 
5114 	/* Revert GRC params to their default */
5115 	qed_dbg_grc_set_params_default(p_hwfn);
5116 
5117 	return status;
5118 }
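
/* A minimal caller sketch (illustrative only; the allocator, error handling
 * and the consume_grc_dump() consumer are assumptions of this example, not
 * part of this API): the buffer is sized first, allocated in dwords, and only
 * then dumped.
 *
 *	u32 size_dwords, dumped_dwords;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords) !=
 *	    DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size_dwords * sizeof(u32));
 *	if (!buf)
 *		return;
 *	if (qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
 *			     &dumped_dwords) == DBG_STATUS_OK)
 *		consume_grc_dump(buf, dumped_dwords);
 *	vfree(buf);
 *
 * The same size-then-dump pattern applies to the idle-chk, mcp-trace,
 * reg-fifo, igu-fifo and protection-override entry points below.
 */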
5119 
5120 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5121 						   struct qed_ptt *p_ptt,
5122 						   u32 *buf_size)
5123 {
5124 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5125 	struct idle_chk_data *idle_chk;
5126 	enum dbg_status status;
5127 
5128 	idle_chk = &dev_data->idle_chk;
5129 	*buf_size = 0;
5130 
5131 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5132 	if (status != DBG_STATUS_OK)
5133 		return status;
5134 
5135 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5136 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5137 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5138 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5139 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5140 
5141 	if (!idle_chk->buf_size_set) {
5142 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5143 						       p_ptt, NULL, false);
5144 		idle_chk->buf_size_set = true;
5145 	}
5146 
5147 	*buf_size = idle_chk->buf_size;
5148 
5149 	return DBG_STATUS_OK;
5150 }
5151 
5152 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5153 				      struct qed_ptt *p_ptt,
5154 				      u32 *dump_buf,
5155 				      u32 buf_size_in_dwords,
5156 				      u32 *num_dumped_dwords)
5157 {
5158 	u32 needed_buf_size_in_dwords;
5159 	enum dbg_status status;
5160 
5161 	*num_dumped_dwords = 0;
5162 
5163 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5164 						    p_ptt,
5165 						    &needed_buf_size_in_dwords);
5166 	if (status != DBG_STATUS_OK)
5167 		return status;
5168 
5169 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5170 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5171 
5172 	/* Update reset state */
5173 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5174 
5175 	/* Idle Check Dump */
5176 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5177 
5178 	/* Revert GRC params to their default */
5179 	qed_dbg_grc_set_params_default(p_hwfn);
5180 
5181 	return DBG_STATUS_OK;
5182 }
5183 
5184 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5185 						    struct qed_ptt *p_ptt,
5186 						    u32 *buf_size)
5187 {
5188 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5189 
5190 	*buf_size = 0;
5191 
5192 	if (status != DBG_STATUS_OK)
5193 		return status;
5194 
5195 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5196 }
5197 
5198 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5199 				       struct qed_ptt *p_ptt,
5200 				       u32 *dump_buf,
5201 				       u32 buf_size_in_dwords,
5202 				       u32 *num_dumped_dwords)
5203 {
5204 	u32 needed_buf_size_in_dwords;
5205 	enum dbg_status status;
5206 
5207 	status =
5208 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5209 						    p_ptt,
5210 						    &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK &&
	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5213 		return status;
5214 
5215 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5216 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5217 
5218 	/* Update reset state */
5219 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5220 
5221 	/* Perform dump */
5222 	status = qed_mcp_trace_dump(p_hwfn,
5223 				    p_ptt, dump_buf, true, num_dumped_dwords);
5224 
5225 	/* Revert GRC params to their default */
5226 	qed_dbg_grc_set_params_default(p_hwfn);
5227 
5228 	return status;
5229 }
5230 
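/* Returns (in *buf_size) the required buffer size, in dwords, for a GRC
 * register FIFO dump. The *_get_dump_buf_size()/*_dump() pairs in this file
 * share the same calling pattern; a minimal usage sketch (assuming the caller
 * already acquired a PTT and frees the buffer with vfree() when done):
 *
 *	u32 size_dw, dumped_dw;
 *	u32 *buf;
 *
 *	if (qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
 *	    DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size_dw * sizeof(u32));
 *	if (buf)
 *		qed_dbg_reg_fifo_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw);
 */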
5231 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5232 						   struct qed_ptt *p_ptt,
5233 						   u32 *buf_size)
5234 {
5235 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5236 
5237 	*buf_size = 0;
5238 
5239 	if (status != DBG_STATUS_OK)
5240 		return status;
5241 
5242 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5243 }
5244 
5245 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5246 				      struct qed_ptt *p_ptt,
5247 				      u32 *dump_buf,
5248 				      u32 buf_size_in_dwords,
5249 				      u32 *num_dumped_dwords)
5250 {
5251 	u32 needed_buf_size_in_dwords;
5252 	enum dbg_status status;
5253 
5254 	*num_dumped_dwords = 0;
5255 
5256 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5257 						    p_ptt,
5258 						    &needed_buf_size_in_dwords);
5259 	if (status != DBG_STATUS_OK)
5260 		return status;
5261 
5262 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5263 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5264 
5265 	/* Update reset state */
5266 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5267 
5268 	status = qed_reg_fifo_dump(p_hwfn,
5269 				   p_ptt, dump_buf, true, num_dumped_dwords);
5270 
5271 	/* Revert GRC params to their default */
5272 	qed_dbg_grc_set_params_default(p_hwfn);
5273 
5274 	return status;
5275 }
5276 
5277 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5278 						   struct qed_ptt *p_ptt,
5279 						   u32 *buf_size)
5280 {
5281 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5282 
5283 	*buf_size = 0;
5284 
5285 	if (status != DBG_STATUS_OK)
5286 		return status;
5287 
5288 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5289 }
5290 
5291 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5292 				      struct qed_ptt *p_ptt,
5293 				      u32 *dump_buf,
5294 				      u32 buf_size_in_dwords,
5295 				      u32 *num_dumped_dwords)
5296 {
5297 	u32 needed_buf_size_in_dwords;
5298 	enum dbg_status status;
5299 
5300 	*num_dumped_dwords = 0;
5301 
5302 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5303 						    p_ptt,
5304 						    &needed_buf_size_in_dwords);
5305 	if (status != DBG_STATUS_OK)
5306 		return status;
5307 
5308 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5309 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5310 
5311 	/* Update reset state */
5312 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5313 
5314 	status = qed_igu_fifo_dump(p_hwfn,
5315 				   p_ptt, dump_buf, true, num_dumped_dwords);
5316 	/* Revert GRC params to their default */
5317 	qed_dbg_grc_set_params_default(p_hwfn);
5318 
5319 	return status;
5320 }
5321 
5322 enum dbg_status
5323 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5324 					      struct qed_ptt *p_ptt,
5325 					      u32 *buf_size)
5326 {
5327 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5328 
5329 	*buf_size = 0;
5330 
5331 	if (status != DBG_STATUS_OK)
5332 		return status;
5333 
5334 	return qed_protection_override_dump(p_hwfn,
5335 					    p_ptt, NULL, false, buf_size);
5336 }
5337 
5338 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5339 						 struct qed_ptt *p_ptt,
5340 						 u32 *dump_buf,
5341 						 u32 buf_size_in_dwords,
5342 						 u32 *num_dumped_dwords)
5343 {
5344 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5345 	enum dbg_status status;
5346 
5347 	*num_dumped_dwords = 0;
5348 
5349 	status =
5350 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5351 							      p_ptt,
5352 							      p_size);
5353 	if (status != DBG_STATUS_OK)
5354 		return status;
5355 
5356 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5357 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5358 
5359 	/* Update reset state */
5360 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5361 
5362 	status = qed_protection_override_dump(p_hwfn,
5363 					      p_ptt,
5364 					      dump_buf,
5365 					      true, num_dumped_dwords);
5366 
5367 	/* Revert GRC params to their default */
5368 	qed_dbg_grc_set_params_default(p_hwfn);
5369 
5370 	return status;
5371 }
5372 
5373 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5374 						     struct qed_ptt *p_ptt,
5375 						     u32 *buf_size)
5376 {
5377 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5378 
5379 	*buf_size = 0;
5380 
5381 	if (status != DBG_STATUS_OK)
5382 		return status;
5383 
5384 	/* Update reset state */
5385 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5386 
5387 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5388 
5389 	return DBG_STATUS_OK;
5390 }
5391 
5392 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5393 					struct qed_ptt *p_ptt,
5394 					u32 *dump_buf,
5395 					u32 buf_size_in_dwords,
5396 					u32 *num_dumped_dwords)
5397 {
5398 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5399 	enum dbg_status status;
5400 
5401 	*num_dumped_dwords = 0;
5402 
5403 	status =
5404 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5405 						     p_ptt,
5406 						     p_size);
5407 	if (status != DBG_STATUS_OK)
5408 		return status;
5409 
5410 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5411 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5412 
5413 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5414 
5415 	/* Revert GRC params to their default */
5416 	qed_dbg_grc_set_params_default(p_hwfn);
5417 
5418 	return DBG_STATUS_OK;
5419 }
5420 
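/* Reads the attention registers of the specified block and attention type
 * into the provided results structure. Registers whose mode doesn't match and
 * registers with a zero status value are skipped. If clear_status is set, the
 * status-clear address is read instead of the regular status address.
 */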
5421 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5422 				  struct qed_ptt *p_ptt,
5423 				  enum block_id block_id,
5424 				  enum dbg_attn_type attn_type,
5425 				  bool clear_status,
5426 				  struct dbg_attn_block_result *results)
5427 {
5428 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5429 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5430 	const struct dbg_attn_reg *attn_reg_arr;
5431 
5432 	if (status != DBG_STATUS_OK)
5433 		return status;
5434 
5435 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5436 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5437 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5438 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5439 
5440 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5441 					       attn_type, &num_attn_regs);
5442 
5443 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5444 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5445 		struct dbg_attn_reg_result *reg_result;
5446 		u32 sts_addr, sts_val;
5447 		u16 modes_buf_offset;
5448 		bool eval_mode;
5449 
5450 		/* Check mode */
5451 		eval_mode = GET_FIELD(reg_data->mode.data,
5452 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5453 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5454 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5455 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5456 			continue;
5457 
5458 		/* Mode match - read attention status register */
5459 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5460 					   reg_data->sts_clr_address :
5461 					   GET_FIELD(reg_data->data,
5462 						     DBG_ATTN_REG_STS_ADDRESS));
5463 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5464 		if (!sts_val)
5465 			continue;
5466 
5467 		/* Non-zero attention status - add to results */
5468 		reg_result = &results->reg_results[num_result_regs];
5469 		SET_FIELD(reg_result->data,
5470 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5471 		SET_FIELD(reg_result->data,
5472 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5473 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5474 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5475 		reg_result->sts_val = sts_val;
5476 		reg_result->mask_val = qed_rd(p_hwfn,
5477 					      p_ptt,
5478 					      DWORDS_TO_BYTES
5479 					      (reg_data->mask_address));
5480 		num_result_regs++;
5481 	}
5482 
5483 	results->block_id = (u8)block_id;
5484 	results->names_offset =
5485 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5486 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5487 	SET_FIELD(results->data,
5488 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5489 
5490 	return DBG_STATUS_OK;
5491 }
5492 
5493 /******************************* Data Types **********************************/
5494 
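/* Block name and associated block ID, used for name-based block lookups */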
5495 struct block_info {
5496 	const char *name;
5497 	enum block_id id;
5498 };
5499 
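/* MCP Trace format descriptor, parsed from the MCP Trace meta data */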
5500 struct mcp_trace_format {
5501 	u32 data;
5502 #define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
5503 #define MCP_TRACE_FORMAT_MODULE_SHIFT	0
5504 #define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
5505 #define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
5506 #define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
5507 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
5508 #define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
5509 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
5510 #define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
5511 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
5512 #define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
5513 #define MCP_TRACE_FORMAT_LEN_SHIFT	24
5514 
5515 	char *format_str;
5516 };
5517 
5518 /* Meta data structure, generated by a Perl script during the MFW build.
5519  * Therefore, the structs mcp_trace_meta and mcp_trace_format are duplicated
5520  * in the Perl script.
5521  */
5522 struct mcp_trace_meta {
5523 	u32 modules_num;
5524 	char **modules;
5525 	u32 formats_num;
5526 	struct mcp_trace_format *formats;
5527 };
5528 
5529 /* REG fifo element */
5530 struct reg_fifo_element {
5531 	u64 data;
5532 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5533 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5534 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5535 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5536 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5537 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5538 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5539 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5540 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5541 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5542 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5543 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5544 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5545 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5546 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5547 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5548 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5549 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5550 };
5551 
5552 /* IGU fifo element */
5553 struct igu_fifo_element {
5554 	u32 dword0;
5555 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5556 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5557 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5558 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5559 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5560 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5561 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5562 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5563 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5564 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5565 	u32 dword1;
5566 	u32 dword2;
5567 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5568 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5569 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5570 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5571 	u32 reserved;
5572 };
5573 
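/* IGU FIFO element write data (producer/consumer update command) */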
5574 struct igu_fifo_wr_data {
5575 	u32 data;
5576 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5577 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5578 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5579 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5580 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5581 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5582 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5583 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5584 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5585 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5586 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5587 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5588 };
5589 
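/* IGU FIFO element write data (cleanup command) */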
5590 struct igu_fifo_cleanup_wr_data {
5591 	u32 data;
5592 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5593 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5594 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5595 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5596 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5597 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5598 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5599 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5600 };
5601 
5602 /* Protection override element */
5603 struct protection_override_element {
5604 	u64 data;
5605 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5606 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5607 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5608 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5609 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5610 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5611 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5612 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5613 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5614 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5615 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5616 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5617 };
5618 
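/* IGU FIFO command sources */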
5619 enum igu_fifo_sources {
5620 	IGU_SRC_PXP0,
5621 	IGU_SRC_PXP1,
5622 	IGU_SRC_PXP2,
5623 	IGU_SRC_PXP3,
5624 	IGU_SRC_PXP4,
5625 	IGU_SRC_PXP5,
5626 	IGU_SRC_PXP6,
5627 	IGU_SRC_PXP7,
5628 	IGU_SRC_CAU,
5629 	IGU_SRC_ATTN,
5630 	IGU_SRC_GRC
5631 };
5632 
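/* IGU FIFO command address types */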
5633 enum igu_fifo_addr_types {
5634 	IGU_ADDR_TYPE_MSIX_MEM,
5635 	IGU_ADDR_TYPE_WRITE_PBA,
5636 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5637 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5638 	IGU_ADDR_TYPE_READ_INT,
5639 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5640 	IGU_ADDR_TYPE_RESERVED
5641 };
5642 
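/* IGU FIFO address range descriptor: maps a command address range to its
 * description (PF and VF variants) and address type.
 */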
5643 struct igu_fifo_addr_data {
5644 	u16 start_addr;
5645 	u16 end_addr;
5646 	char *desc;
5647 	char *vf_desc;
5648 	enum igu_fifo_addr_types type;
5649 };
5650 
5651 /******************************** Constants **********************************/
5652 
5653 #define MAX_MSG_LEN				1024
5654 
5655 #define MCP_TRACE_MAX_MODULE_LEN		8
5656 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5657 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5658 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5659 
5660 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5661 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5662 
5663 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5664 
5665 /***************************** Constant Arrays *******************************/
5666 
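/* Debug array descriptor: a pointer to the array data and its size in dwords */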
5667 struct user_dbg_array {
5668 	const u32 *ptr;
5669 	u32 size_in_dwords;
5670 };
5671 
5672 /* Debug arrays */
5673 static struct user_dbg_array
5674 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5675 
5676 /* Block names array */
5677 static struct block_info s_block_info_arr[] = {
5678 	{"grc", BLOCK_GRC},
5679 	{"miscs", BLOCK_MISCS},
5680 	{"misc", BLOCK_MISC},
5681 	{"dbu", BLOCK_DBU},
5682 	{"pglue_b", BLOCK_PGLUE_B},
5683 	{"cnig", BLOCK_CNIG},
5684 	{"cpmu", BLOCK_CPMU},
5685 	{"ncsi", BLOCK_NCSI},
5686 	{"opte", BLOCK_OPTE},
5687 	{"bmb", BLOCK_BMB},
5688 	{"pcie", BLOCK_PCIE},
5689 	{"mcp", BLOCK_MCP},
5690 	{"mcp2", BLOCK_MCP2},
5691 	{"pswhst", BLOCK_PSWHST},
5692 	{"pswhst2", BLOCK_PSWHST2},
5693 	{"pswrd", BLOCK_PSWRD},
5694 	{"pswrd2", BLOCK_PSWRD2},
5695 	{"pswwr", BLOCK_PSWWR},
5696 	{"pswwr2", BLOCK_PSWWR2},
5697 	{"pswrq", BLOCK_PSWRQ},
5698 	{"pswrq2", BLOCK_PSWRQ2},
5699 	{"pglcs", BLOCK_PGLCS},
5700 	{"ptu", BLOCK_PTU},
5701 	{"dmae", BLOCK_DMAE},
5702 	{"tcm", BLOCK_TCM},
5703 	{"mcm", BLOCK_MCM},
5704 	{"ucm", BLOCK_UCM},
5705 	{"xcm", BLOCK_XCM},
5706 	{"ycm", BLOCK_YCM},
5707 	{"pcm", BLOCK_PCM},
5708 	{"qm", BLOCK_QM},
5709 	{"tm", BLOCK_TM},
5710 	{"dorq", BLOCK_DORQ},
5711 	{"brb", BLOCK_BRB},
5712 	{"src", BLOCK_SRC},
5713 	{"prs", BLOCK_PRS},
5714 	{"tsdm", BLOCK_TSDM},
5715 	{"msdm", BLOCK_MSDM},
5716 	{"usdm", BLOCK_USDM},
5717 	{"xsdm", BLOCK_XSDM},
5718 	{"ysdm", BLOCK_YSDM},
5719 	{"psdm", BLOCK_PSDM},
5720 	{"tsem", BLOCK_TSEM},
5721 	{"msem", BLOCK_MSEM},
5722 	{"usem", BLOCK_USEM},
5723 	{"xsem", BLOCK_XSEM},
5724 	{"ysem", BLOCK_YSEM},
5725 	{"psem", BLOCK_PSEM},
5726 	{"rss", BLOCK_RSS},
5727 	{"tmld", BLOCK_TMLD},
5728 	{"muld", BLOCK_MULD},
5729 	{"yuld", BLOCK_YULD},
5730 	{"xyld", BLOCK_XYLD},
5731 	{"ptld", BLOCK_PTLD},
5732 	{"ypld", BLOCK_YPLD},
5733 	{"prm", BLOCK_PRM},
5734 	{"pbf_pb1", BLOCK_PBF_PB1},
5735 	{"pbf_pb2", BLOCK_PBF_PB2},
5736 	{"rpb", BLOCK_RPB},
5737 	{"btb", BLOCK_BTB},
5738 	{"pbf", BLOCK_PBF},
5739 	{"rdif", BLOCK_RDIF},
5740 	{"tdif", BLOCK_TDIF},
5741 	{"cdu", BLOCK_CDU},
5742 	{"ccfc", BLOCK_CCFC},
5743 	{"tcfc", BLOCK_TCFC},
5744 	{"igu", BLOCK_IGU},
5745 	{"cau", BLOCK_CAU},
5746 	{"rgfs", BLOCK_RGFS},
5747 	{"rgsrc", BLOCK_RGSRC},
5748 	{"tgfs", BLOCK_TGFS},
5749 	{"tgsrc", BLOCK_TGSRC},
5750 	{"umac", BLOCK_UMAC},
5751 	{"xmac", BLOCK_XMAC},
5752 	{"dbg", BLOCK_DBG},
5753 	{"nig", BLOCK_NIG},
5754 	{"wol", BLOCK_WOL},
5755 	{"bmbn", BLOCK_BMBN},
5756 	{"ipc", BLOCK_IPC},
5757 	{"nwm", BLOCK_NWM},
5758 	{"nws", BLOCK_NWS},
5759 	{"ms", BLOCK_MS},
5760 	{"phy_pcie", BLOCK_PHY_PCIE},
5761 	{"led", BLOCK_LED},
5762 	{"avs_wrap", BLOCK_AVS_WRAP},
5763 	{"pxpreqbus", BLOCK_PXPREQBUS},
5764 	{"misc_aeu", BLOCK_MISC_AEU},
5765 	{"bar0_map", BLOCK_BAR0_MAP}
5766 };
5767 
5768 /* Status string array */
5769 static const char * const s_status_str[] = {
5770 	/* DBG_STATUS_OK */
5771 	"Operation completed successfully",
5772 
5773 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5774 	"Debug application version wasn't set",
5775 
5776 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5777 	"Unsupported debug application version",
5778 
5779 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5780 	"The debug block wasn't reset since the last recording",
5781 
5782 	/* DBG_STATUS_INVALID_ARGS */
5783 	"Invalid arguments",
5784 
5785 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5786 	"The debug output was already set",
5787 
5788 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5789 	"Invalid PCI buffer size",
5790 
5791 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5792 	"PCI buffer allocation failed",
5793 
5794 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5795 	"A PCI buffer wasn't allocated",
5796 
5797 	/* DBG_STATUS_TOO_MANY_INPUTS */
5798 	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5799 
5800 	/* DBG_STATUS_INPUT_OVERLAP */
5801 	"Overlapping debug bus inputs",
5802 
5803 	/* DBG_STATUS_HW_ONLY_RECORDING */
5804 	"Cannot record Storm data since the entire recording cycle is used by HW",
5805 
5806 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5807 	"The Storm was already enabled",
5808 
5809 	/* DBG_STATUS_STORM_NOT_ENABLED */
5810 	"The specified Storm wasn't enabled",
5811 
5812 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5813 	"The block was already enabled",
5814 
5815 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5816 	"The specified block wasn't enabled",
5817 
5818 	/* DBG_STATUS_NO_INPUT_ENABLED */
5819 	"No input was enabled for recording",
5820 
5821 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5822 	"Filters and triggers are not allowed when recording in 64b units",
5823 
5824 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5825 	"The filter was already enabled",
5826 
5827 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5828 	"The trigger was already enabled",
5829 
5830 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5831 	"The trigger wasn't enabled",
5832 
5833 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5834 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5835 
5836 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5837 	"Cannot add more than 3 trigger states",
5838 
5839 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5840 	"Cannot add more than 4 constraints per filter or trigger state",
5841 
5842 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5843 	"The recording wasn't started",
5844 
5845 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5846 	"A trigger was configured, but it didn't trigger",
5847 
5848 	/* DBG_STATUS_NO_DATA_RECORDED */
5849 	"No data was recorded",
5850 
5851 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5852 	"Dump buffer is too small",
5853 
5854 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5855 	"Dumped data is not aligned to chunks",
5856 
5857 	/* DBG_STATUS_UNKNOWN_CHIP */
5858 	"Unknown chip",
5859 
5860 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5861 	"Failed allocating virtual memory",
5862 
5863 	/* DBG_STATUS_BLOCK_IN_RESET */
5864 	"The input block is in reset",
5865 
5866 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5867 	"Invalid MCP trace signature found in NVRAM",
5868 
5869 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5870 	"Invalid bundle ID found in NVRAM",
5871 
5872 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5873 	"Failed getting NVRAM image",
5874 
5875 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5876 	"NVRAM image is not dword-aligned",
5877 
5878 	/* DBG_STATUS_NVRAM_READ_FAILED */
5879 	"Failed reading from NVRAM",
5880 
5881 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5882 	"Idle check parsing failed",
5883 
5884 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5885 	"MCP Trace data is corrupt",
5886 
5887 	/* DBG_STATUS_MCP_TRACE_NO_META */
5888 	"Dump doesn't contain meta data - it must be provided in an image file",
5889 
5890 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5891 	"Failed to halt MCP",
5892 
5893 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5894 	"Failed to resume MCP after halt",
5895 
5896 	/* DBG_STATUS_RESERVED2 */
5897 	"Reserved debug status - shouldn't be returned",
5898 
5899 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5900 	"Failed to empty SEMI sync FIFO",
5901 
5902 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5903 	"IGU FIFO data is corrupt",
5904 
5905 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5906 	"MCP failed to mask parities",
5907 
5908 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5909 	"FW Asserts parsing failed",
5910 
5911 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5912 	"GRC FIFO data is corrupt",
5913 
5914 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5915 	"Protection Override data is corrupt",
5916 
5917 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5918 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5919 
5920 	/* DBG_STATUS_FILTER_BUG */
5921 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5922 
5923 	/* DBG_STATUS_NON_MATCHING_LINES */
5924 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5925 
5926 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5927 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5928 
5929 	/* DBG_STATUS_DBG_BUS_IN_USE */
5930 	"The debug bus is in use"
5931 };
5932 
5933 /* Idle check severity names array */
5934 static const char * const s_idle_chk_severity_str[] = {
5935 	"Error",
5936 	"Error if no traffic",
5937 	"Warning"
5938 };
5939 
5940 /* MCP Trace level names array */
5941 static const char * const s_mcp_trace_level_str[] = {
5942 	"ERROR",
5943 	"TRACE",
5944 	"DEBUG"
5945 };
5946 
5947 /* Access type names array */
5948 static const char * const s_access_strs[] = {
5949 	"read",
5950 	"write"
5951 };
5952 
5953 /* Privilege type names array */
5954 static const char * const s_privilege_strs[] = {
5955 	"VF",
5956 	"PDA",
5957 	"HV",
5958 	"UA"
5959 };
5960 
5961 /* Protection type names array */
5962 static const char * const s_protection_strs[] = {
5963 	"(default)",
5964 	"(default)",
5965 	"(default)",
5966 	"(default)",
5967 	"override VF",
5968 	"override PDA",
5969 	"override HV",
5970 	"override UA"
5971 };
5972 
5973 /* Master type names array */
5974 static const char * const s_master_strs[] = {
5975 	"???",
5976 	"pxp",
5977 	"mcp",
5978 	"msdm",
5979 	"psdm",
5980 	"ysdm",
5981 	"usdm",
5982 	"tsdm",
5983 	"xsdm",
5984 	"dbu",
5985 	"dmae",
5986 	"???",
5987 	"???",
5988 	"???",
5989 	"???",
5990 	"???"
5991 };
5992 
5993 /* REG FIFO error messages array */
5994 static const char * const s_reg_fifo_error_strs[] = {
5995 	"grc timeout",
5996 	"address doesn't belong to any block",
5997 	"reserved address in block or write to read-only address",
5998 	"privilege/protection mismatch",
5999 	"path isolation error"
6000 };
6001 
6002 /* IGU FIFO sources array */
6003 static const char * const s_igu_fifo_source_strs[] = {
6004 	"TSTORM",
6005 	"MSTORM",
6006 	"USTORM",
6007 	"XSTORM",
6008 	"YSTORM",
6009 	"PSTORM",
6010 	"PCIE",
6011 	"NIG_QM_PBF",
6012 	"CAU",
6013 	"ATTN",
6014 	"GRC",
6015 };
6016 
6017 /* IGU FIFO error messages */
6018 static const char * const s_igu_fifo_error_strs[] = {
6019 	"no error",
6020 	"length error",
6021 	"function disabled",
6022 	"VF sent command to attention address",
6023 	"host sent prod update command",
6024 	"read of 'during interrupt' register while in MIMD mode",
6025 	"access to PXP BAR reserved address",
6026 	"producer update command to attention index",
6027 	"unknown error",
6028 	"SB index not valid",
6029 	"SB relative index and FID not found",
6030 	"FID mismatch",
6031 	"command with error flag asserted (PCI error or CAU discard)",
6032 	"VF sent cleanup and RF cleanup is disabled",
6033 	"cleanup command on type bigger than 4"
6034 };
6035 
6036 /* IGU FIFO address data */
6037 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6038 	{0x0, 0x101, "MSI-X Memory", NULL,
6039 	 IGU_ADDR_TYPE_MSIX_MEM},
6040 	{0x102, 0x1ff, "reserved", NULL,
6041 	 IGU_ADDR_TYPE_RESERVED},
6042 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6043 	 IGU_ADDR_TYPE_WRITE_PBA},
6044 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6045 	 IGU_ADDR_TYPE_WRITE_PBA},
6046 	{0x202, 0x202, "Write PBA[128]", "reserved",
6047 	 IGU_ADDR_TYPE_WRITE_PBA},
6048 	{0x203, 0x3ff, "reserved", NULL,
6049 	 IGU_ADDR_TYPE_RESERVED},
6050 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6051 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6052 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6053 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6054 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6055 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6056 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6057 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6058 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6059 	 IGU_ADDR_TYPE_READ_INT},
6060 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6061 	 IGU_ADDR_TYPE_READ_INT},
6062 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6063 	 IGU_ADDR_TYPE_READ_INT},
6064 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6065 	 IGU_ADDR_TYPE_READ_INT},
6066 	{0x5f7, 0x5ff, "reserved", NULL,
6067 	 IGU_ADDR_TYPE_RESERVED},
6068 	{0x600, 0x7ff, "Producer update", NULL,
6069 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6070 };
6071 
6072 /******************************** Variables **********************************/
6073 
6074 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
6075  * (e.g. due to no NVRAM access).
6076  */
6077 static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
6078 
6079 /* Temporary buffer, used for print size calculations */
6080 static char s_temp_buf[MAX_MSG_LEN];
6081 
6082 /**************************** Private Functions ******************************/
6083 
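/* Cyclic buffer helpers: advance/rewind an offset with wraparound at 'size'.
 * For example, qed_cyclic_sub(2, 5, 16) == 13.
 */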
6084 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6085 {
6086 	return (a + b) % size;
6087 }
6088 
6089 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6090 {
6091 	return (size + a - b) % size;
6092 }
6093 
6094 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6095  * bytes) and returns them as a dword value. The specified buffer offset is
6096  * updated.
6097  */
6098 static u32 qed_read_from_cyclic_buf(void *buf,
6099 				    u32 *offset,
6100 				    u32 buf_size, u8 num_bytes_to_read)
6101 {
6102 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6103 	u32 val = 0;
6104 
6105 	val_ptr = (u8 *)&val;
6106 
6107 	for (i = 0; i < num_bytes_to_read; i++) {
6108 		val_ptr[i] = bytes_buf[*offset];
6109 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6110 	}
6111 
6112 	return val;
6113 }
6114 
6115 /* Reads and returns the next byte from the specified buffer.
6116  * The specified buffer offset is updated.
6117  */
6118 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6119 {
6120 	return ((u8 *)buf)[(*offset)++];
6121 }
6122 
6123 /* Reads and returns the next dword from the specified buffer.
6124  * The specified buffer offset is updated.
6125  */
6126 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6127 {
6128 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6129 
6130 	*offset += 4;
6131 
6132 	return dword_val;
6133 }
6134 
6135 /* Reads the next string from the specified buffer, and copies it to the
6136  * specified pointer. The specified buffer offset is updated.
6137  */
6138 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6139 {
6140 	const char *source_str = &((const char *)buf)[*offset];
6141 
6142 	strncpy(dest, source_str, size);
6143 	dest[size - 1] = '\0';
6144 	*offset += size;
6145 }
6146 
6147 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6148  * If the specified buffer is NULL, a temporary buffer pointer is returned.
6149  */
6150 static char *qed_get_buf_ptr(void *buf, u32 offset)
6151 {
6152 	return buf ? (char *)buf + offset : s_temp_buf;
6153 }
6154 
6155 /* Reads a param from the specified buffer. Returns the number of dwords read.
6156  * If the returned param_str_val is NULL, the param is numeric and its value
6157  * is returned in param_num_val.
6158  * Otherwise, the param is a string and its pointer is returned in param_str_val.
6159  */
6160 static u32 qed_read_param(u32 *dump_buf,
6161 			  const char **param_name,
6162 			  const char **param_str_val, u32 *param_num_val)
6163 {
6164 	char *char_buf = (char *)dump_buf;
6165 	size_t offset = 0;
6166 
6167 	/* Extract param name */
6168 	*param_name = char_buf;
6169 	offset += strlen(*param_name) + 1;
6170 
6171 	/* Check param type */
6172 	if (*(char_buf + offset++)) {
6173 		/* String param */
6174 		*param_str_val = char_buf + offset;
6175 		*param_num_val = 0;
6176 		offset += strlen(*param_str_val) + 1;
6177 		if (offset & 0x3)
6178 			offset += (4 - (offset & 0x3));
6179 	} else {
6180 		/* Numeric param */
6181 		*param_str_val = NULL;
6182 		if (offset & 0x3)
6183 			offset += (4 - (offset & 0x3));
6184 		*param_num_val = *(u32 *)(char_buf + offset);
6185 		offset += 4;
6186 	}
6187 
6188 	return offset / 4;
6189 }
6190 
6191 /* Reads a section header from the specified buffer.
6192  * Returns the number of dwords read.
6193  */
6194 static u32 qed_read_section_hdr(u32 *dump_buf,
6195 				const char **section_name,
6196 				u32 *num_section_params)
6197 {
6198 	const char *param_str_val;
6199 
6200 	return qed_read_param(dump_buf,
6201 			      section_name, &param_str_val, num_section_params);
6202 }
6203 
6204 /* Reads section params from the specified buffer and prints them to the results
6205  * buffer. Returns the number of dwords read.
6206  */
6207 static u32 qed_print_section_params(u32 *dump_buf,
6208 				    u32 num_section_params,
6209 				    char *results_buf, u32 *num_chars_printed)
6210 {
6211 	u32 i, dump_offset = 0, results_offset = 0;
6212 
6213 	for (i = 0; i < num_section_params; i++) {
6214 		const char *param_name, *param_str_val;
6215 		u32 param_num_val = 0;
6216 
6217 		dump_offset += qed_read_param(dump_buf + dump_offset,
6218 					      &param_name,
6219 					      &param_str_val, &param_num_val);
6220 
6221 		if (param_str_val)
6222 			results_offset +=
6223 				sprintf(qed_get_buf_ptr(results_buf,
6224 							results_offset),
6225 					"%s: %s\n", param_name, param_str_val);
6226 		else if (strcmp(param_name, "fw-timestamp"))
6227 			results_offset +=
6228 				sprintf(qed_get_buf_ptr(results_buf,
6229 							results_offset),
6230 					"%s: %d\n", param_name, param_num_val);
6231 	}
6232 
6233 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6234 				  "\n");
6235 
6236 	*num_chars_printed = results_offset;
6237 
6238 	return dump_offset;
6239 }
6240 
6241 /* Parses the idle check rules and returns the number of characters printed.
6242  * In case of parsing error, returns 0.
6243  */
6244 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6245 					 u32 *dump_buf_end,
6246 					 u32 num_rules,
6247 					 bool print_fw_idle_chk,
6248 					 char *results_buf,
6249 					 u32 *num_errors, u32 *num_warnings)
6250 {
6251 	/* Offset in results_buf in bytes */
6252 	u32 results_offset = 0;
6253 
6254 	u32 rule_idx;
6255 	u16 i, j;
6256 
6257 	*num_errors = 0;
6258 	*num_warnings = 0;
6259 
6260 	/* Go over dumped results */
6261 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6262 	     rule_idx++) {
6263 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6264 		struct dbg_idle_chk_result_hdr *hdr;
6265 		const char *parsing_str, *lsi_msg;
6266 		u32 parsing_str_offset;
6267 		bool has_fw_msg;
6268 		u8 curr_reg_id;
6269 
6270 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6271 		rule_parsing_data =
6272 			(const struct dbg_idle_chk_rule_parsing_data *)
6273 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6274 			ptr[hdr->rule_id];
6275 		parsing_str_offset =
6276 			GET_FIELD(rule_parsing_data->data,
6277 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6278 		has_fw_msg =
6279 			GET_FIELD(rule_parsing_data->data,
6280 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6281 		parsing_str =
6282 			&((const char *)
6283 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6284 			[parsing_str_offset];
6285 		lsi_msg = parsing_str;
6286 		curr_reg_id = 0;
6287 
6288 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6289 			return 0;
6290 
6291 		/* Skip rule header */
6292 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6293 
6294 		/* Update errors/warnings count */
6295 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6296 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6297 			(*num_errors)++;
6298 		else
6299 			(*num_warnings)++;
6300 
6301 		/* Print rule severity */
6302 		results_offset +=
6303 		    sprintf(qed_get_buf_ptr(results_buf,
6304 					    results_offset), "%s: ",
6305 			    s_idle_chk_severity_str[hdr->severity]);
6306 
6307 		/* Print rule message */
6308 		if (has_fw_msg)
6309 			parsing_str += strlen(parsing_str) + 1;
6310 		results_offset +=
6311 		    sprintf(qed_get_buf_ptr(results_buf,
6312 					    results_offset), "%s.",
6313 			    has_fw_msg &&
6314 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6315 		parsing_str += strlen(parsing_str) + 1;
6316 
6317 		/* Print register values */
6318 		results_offset +=
6319 		    sprintf(qed_get_buf_ptr(results_buf,
6320 					    results_offset), " Registers:");
6321 		for (i = 0;
6322 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6323 		     i++) {
6324 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6325 			bool is_mem;
6326 			u8 reg_id;
6327 
6328 			reg_hdr =
6329 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6330 			is_mem = GET_FIELD(reg_hdr->data,
6331 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6332 			reg_id = GET_FIELD(reg_hdr->data,
6333 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6334 
6335 			/* Skip reg header */
6336 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6337 
6338 			/* Skip register names until the required reg_id is
6339 			 * reached.
6340 			 */
6341 			for (; reg_id > curr_reg_id;
6342 			     curr_reg_id++,
6343 			     parsing_str += strlen(parsing_str) + 1);
6344 
6345 			results_offset +=
6346 			    sprintf(qed_get_buf_ptr(results_buf,
6347 						    results_offset), " %s",
6348 				    parsing_str);
6349 			if (i < hdr->num_dumped_cond_regs && is_mem)
6350 				results_offset +=
6351 				    sprintf(qed_get_buf_ptr(results_buf,
6352 							    results_offset),
6353 					    "[%d]", hdr->mem_entry_id +
6354 					    reg_hdr->start_entry);
6355 			results_offset +=
6356 			    sprintf(qed_get_buf_ptr(results_buf,
6357 						    results_offset), "=");
6358 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6359 				results_offset +=
6360 				    sprintf(qed_get_buf_ptr(results_buf,
6361 							    results_offset),
6362 					    "0x%x", *dump_buf);
6363 				if (j < reg_hdr->size - 1)
6364 					results_offset +=
6365 					    sprintf(qed_get_buf_ptr
6366 						    (results_buf,
6367 						     results_offset), ",");
6368 			}
6369 		}
6370 
6371 		results_offset +=
6372 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6373 	}
6374 
6375 	/* Check if end of dump buffer was exceeded */
6376 	if (dump_buf > dump_buf_end)
6377 		return 0;
6378 
6379 	return results_offset;
6380 }
6381 
6382 /* Parses an idle check dump buffer.
6383  * If results_buf is not NULL, the idle check results are printed to it.
6384  * In any case, the required results buffer size is assigned to
6385  * parsed_results_bytes.
6386  * The parsing status is returned.
6387  */
6388 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6389 					       u32 num_dumped_dwords,
6390 					       char *results_buf,
6391 					       u32 *parsed_results_bytes,
6392 					       u32 *num_errors,
6393 					       u32 *num_warnings)
6394 {
6395 	const char *section_name, *param_name, *param_str_val;
6396 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6397 	u32 num_section_params = 0, num_rules;
6398 
6399 	/* Offset in results_buf in bytes */
6400 	u32 results_offset = 0;
6401 
6402 	*parsed_results_bytes = 0;
6403 	*num_errors = 0;
6404 	*num_warnings = 0;
6405 
6406 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6407 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6408 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6409 
6410 	/* Read global_params section */
6411 	dump_buf += qed_read_section_hdr(dump_buf,
6412 					 &section_name, &num_section_params);
6413 	if (strcmp(section_name, "global_params"))
6414 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6415 
6416 	/* Print global params */
6417 	dump_buf += qed_print_section_params(dump_buf,
6418 					     num_section_params,
6419 					     results_buf, &results_offset);
6420 
6421 	/* Read idle_chk section */
6422 	dump_buf += qed_read_section_hdr(dump_buf,
6423 					 &section_name, &num_section_params);
6424 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6425 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6426 	dump_buf += qed_read_param(dump_buf,
6427 				   &param_name, &param_str_val, &num_rules);
6428 	if (strcmp(param_name, "num_rules"))
6429 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6430 
6431 	if (num_rules) {
6432 		u32 rules_print_size;
6433 
6434 		/* Print FW output */
6435 		results_offset +=
6436 		    sprintf(qed_get_buf_ptr(results_buf,
6437 					    results_offset),
6438 			    "FW_IDLE_CHECK:\n");
6439 		rules_print_size =
6440 			qed_parse_idle_chk_dump_rules(dump_buf,
6441 						      dump_buf_end,
6442 						      num_rules,
6443 						      true,
6444 						      results_buf ?
6445 						      results_buf +
6446 						      results_offset :
6447 						      NULL,
6448 						      num_errors,
6449 						      num_warnings);
6450 		results_offset += rules_print_size;
6451 		if (!rules_print_size)
6452 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6453 
6454 		/* Print LSI output */
6455 		results_offset +=
6456 		    sprintf(qed_get_buf_ptr(results_buf,
6457 					    results_offset),
6458 			    "\nLSI_IDLE_CHECK:\n");
6459 		rules_print_size =
6460 			qed_parse_idle_chk_dump_rules(dump_buf,
6461 						      dump_buf_end,
6462 						      num_rules,
6463 						      false,
6464 						      results_buf ?
6465 						      results_buf +
6466 						      results_offset :
6467 						      NULL,
6468 						      num_errors,
6469 						      num_warnings);
6470 		results_offset += rules_print_size;
6471 		if (!rules_print_size)
6472 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6473 	}
6474 
6475 	/* Print errors/warnings count */
6476 	if (*num_errors)
6477 		results_offset +=
6478 		    sprintf(qed_get_buf_ptr(results_buf,
6479 					    results_offset),
6480 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6481 			    *num_errors, *num_warnings);
6482 	else if (*num_warnings)
6483 		results_offset +=
6484 		    sprintf(qed_get_buf_ptr(results_buf,
6485 					    results_offset),
6486 			    "\nIdle Check completed successfully (with %d warnings)\n",
6487 			    *num_warnings);
6488 	else
6489 		results_offset +=
6490 		    sprintf(qed_get_buf_ptr(results_buf,
6491 					    results_offset),
6492 			    "\nIdle Check completed successfully\n");
6493 
6494 	/* Add 1 for string NULL termination */
6495 	*parsed_results_bytes = results_offset + 1;
6496 
6497 	return DBG_STATUS_OK;
6498 }
6499 
6500 /* Frees the specified MCP Trace meta data */
6501 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6502 				    struct mcp_trace_meta *meta)
6503 {
6504 	u32 i;
6505 
6506 	/* Release modules */
6507 	if (meta->modules) {
6508 		for (i = 0; i < meta->modules_num; i++)
6509 			kfree(meta->modules[i]);
6510 		kfree(meta->modules);
6511 	}
6512 
6513 	/* Release formats */
6514 	if (meta->formats) {
6515 		for (i = 0; i < meta->formats_num; i++)
6516 			kfree(meta->formats[i].format_str);
6517 		kfree(meta->formats);
6518 	}
6519 }
6520 
6521 /* Allocates and fills MCP Trace meta data based on the specified meta data
6522  * dump buffer.
6523  * Returns debug status code.
6524  */
6525 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6526 						const u32 *meta_buf,
6527 						struct mcp_trace_meta *meta)
6528 {
6529 	u8 *meta_buf_bytes = (u8 *)meta_buf;
6530 	u32 offset = 0, signature, i;
6531 
6532 	memset(meta, 0, sizeof(*meta));
6533 
6534 	/* Read first signature */
6535 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6536 	if (signature != NVM_MAGIC_VALUE)
6537 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6538 
6539 	/* Read no. of modules and allocate memory for their pointers */
6540 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6541 	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6542 	if (!meta->modules)
6543 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6544 
6545 	/* Allocate and read all module strings */
6546 	for (i = 0; i < meta->modules_num; i++) {
6547 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6548 
6549 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6550 		if (!(*(meta->modules + i))) {
6551 			/* Free only the modules allocated so far */
6552 			meta->modules_num = i;
6553 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6554 		}
6555 
6556 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6557 				      *(meta->modules + i));
6558 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6559 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6560 	}
6561 
6562 	/* Read second signature */
6563 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6564 	if (signature != NVM_MAGIC_VALUE)
6565 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6566 
6567 	/* Read number of formats and allocate memory for all formats */
6568 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6569 	meta->formats = kzalloc(meta->formats_num *
6570 				sizeof(struct mcp_trace_format),
6571 				GFP_KERNEL);
6572 	if (!meta->formats)
6573 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6574 
6575 	/* Allocate and read all strings */
6576 	for (i = 0; i < meta->formats_num; i++) {
6577 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6578 		u8 format_len;
6579 
6580 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6581 							   &offset);
6582 		format_len =
6583 		    (format_ptr->data &
6584 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6585 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6586 		if (!format_ptr->format_str) {
6587 			/* Free only the format strings allocated so far */
6588 			meta->formats_num = i;
6589 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6590 		}
6591 
6592 		qed_read_str_from_buf(meta_buf_bytes,
6593 				      &offset,
6594 				      format_len, format_ptr->format_str);
6595 	}
6596 
6597 	return DBG_STATUS_OK;
6598 }
6599 
6600 /* Parses an MCP Trace dump buffer.
6601  * If results_buf is not NULL, the MCP Trace results are printed to it.
6602  * In any case, the required results buffer size is assigned to
6603  * parsed_results_bytes.
6604  * The parsing status is returned.
6605  */
6606 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6607 						u32 *dump_buf,
6608 						char *results_buf,
6609 						u32 *parsed_results_bytes)
6610 {
6611 	u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
6612 	u32 param_mask, param_shift, param_num_val, num_section_params;
6613 	const char *section_name, *param_name, *param_str_val;
6614 	u32 offset, results_offset = 0;
6615 	struct mcp_trace_meta meta;
6616 	struct mcp_trace *trace;
6617 	enum dbg_status status;
6618 	const u32 *meta_buf;
6619 	u8 *trace_buf;
6620 
6621 	*parsed_results_bytes = 0;
6622 
6623 	/* Read global_params section */
6624 	dump_buf += qed_read_section_hdr(dump_buf,
6625 					 &section_name, &num_section_params);
6626 	if (strcmp(section_name, "global_params"))
6627 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6628 
6629 	/* Print global params */
6630 	dump_buf += qed_print_section_params(dump_buf,
6631 					     num_section_params,
6632 					     results_buf, &results_offset);
6633 
6634 	/* Read trace_data section */
6635 	dump_buf += qed_read_section_hdr(dump_buf,
6636 					 &section_name, &num_section_params);
6637 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6638 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6639 	dump_buf += qed_read_param(dump_buf,
6640 				   &param_name, &param_str_val, &param_num_val);
6641 	if (strcmp(param_name, "size"))
6642 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6643 	trace_data_dwords = param_num_val;
6644 
6645 	/* Prepare trace info */
6646 	trace = (struct mcp_trace *)dump_buf;
6647 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6648 	offset = trace->trace_oldest;
6649 	end_offset = trace->trace_prod;
6650 	bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
6651 	dump_buf += trace_data_dwords;
6652 
6653 	/* Read meta_data section */
6654 	dump_buf += qed_read_section_hdr(dump_buf,
6655 					 &section_name, &num_section_params);
6656 	if (strcmp(section_name, "mcp_trace_meta"))
6657 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6658 	dump_buf += qed_read_param(dump_buf,
6659 				   &param_name, &param_str_val, &param_num_val);
6660 	if (strcmp(param_name, "size"))
6661 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6662 	trace_meta_dwords = param_num_val;
6663 
6664 	/* Choose meta data buffer */
6665 	if (!trace_meta_dwords) {
6666 		/* Dump doesn't include meta data */
6667 		if (!s_mcp_trace_meta.ptr)
6668 			return DBG_STATUS_MCP_TRACE_NO_META;
6669 		meta_buf = s_mcp_trace_meta.ptr;
6670 	} else {
6671 		/* Dump includes meta data */
6672 		meta_buf = dump_buf;
6673 	}
6674 
6675 	/* Allocate meta data memory */
6676 	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
6677 	if (status != DBG_STATUS_OK)
6678 		goto free_mem;
6679 
6680 	/* Ignore the level and modules masks - just print everything that is
6681 	 * already in the buffer.
6682 	 */
6683 	while (bytes_left) {
6684 		struct mcp_trace_format *format_ptr;
6685 		u8 format_level, format_module;
6686 		u32 params[3] = { 0, 0, 0 };
6687 		u32 header, format_idx, i;
6688 
6689 		if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
6690 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6691 			goto free_mem;
6692 		}
6693 
6694 		header = qed_read_from_cyclic_buf(trace_buf,
6695 						  &offset,
6696 						  trace->size,
6697 						  MFW_TRACE_ENTRY_SIZE);
6698 		bytes_left -= MFW_TRACE_ENTRY_SIZE;
6699 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6700 
6701 		/* Skip message if its index doesn't exist in the meta data */
6702 		if (format_idx >= meta.formats_num) {
6703 			u8 format_size =
6704 			    (u8)((header &
6705 				  MFW_TRACE_PRM_SIZE_MASK) >>
6706 				 MFW_TRACE_PRM_SIZE_SHIFT);
6707 
6708 			if (bytes_left < format_size) {
6709 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6710 				goto free_mem;
6711 			}
6712 
6713 			offset = qed_cyclic_add(offset,
6714 						format_size, trace->size);
6715 			bytes_left -= format_size;
6716 			continue;
6717 		}
6718 
6719 		format_ptr = &meta.formats[format_idx];
6720 
6721 		for (i = 0,
6722 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6723 		     MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6724 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6725 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6726 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6727 			/* Extract param size (0..3) */
6728 			u8 param_size =
6729 			    (u8)((format_ptr->data &
6730 				  param_mask) >> param_shift);
6731 
6732 			/* If the param size is zero, there are no other
6733 			 * parameters.
6734 			 */
6735 			if (!param_size)
6736 				break;
6737 
6738 			/* Size is encoded using 2 bits, where 3 is used to
6739 			 * encode 4.
6740 			 */
6741 			if (param_size == 3)
6742 				param_size = 4;
6743 
6744 			if (bytes_left < param_size) {
6745 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6746 				goto free_mem;
6747 			}
6748 
6749 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6750 							     &offset,
6751 							     trace->size,
6752 							     param_size);
6753 
6754 			bytes_left -= param_size;
6755 		}
6756 
6757 		format_level =
6758 		    (u8)((format_ptr->data &
6759 			  MCP_TRACE_FORMAT_LEVEL_MASK) >>
6760 			 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6761 		format_module =
6762 		    (u8)((format_ptr->data &
6763 			  MCP_TRACE_FORMAT_MODULE_MASK) >>
6764 			 MCP_TRACE_FORMAT_MODULE_SHIFT);
6765 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
6766 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6767 			goto free_mem;
6768 		}
6769 
6770 		/* Print current message to results buffer */
6771 		results_offset +=
6772 		    sprintf(qed_get_buf_ptr(results_buf,
6773 					    results_offset), "%s %-8s: ",
6774 			    s_mcp_trace_level_str[format_level],
6775 			    meta.modules[format_module]);
6776 		results_offset +=
6777 		    sprintf(qed_get_buf_ptr(results_buf,
6778 					    results_offset),
6779 			    format_ptr->format_str, params[0], params[1],
6780 			    params[2]);
6781 	}
6782 
6783 free_mem:
6784 	*parsed_results_bytes = results_offset + 1;
6785 	qed_mcp_trace_free_meta(p_hwfn, &meta);
6786 	return status;
6787 }
6788 
6789 /* Parses a Reg FIFO dump buffer.
6790  * If results_buf is not NULL, the Reg FIFO results are printed to it.
6791  * In any case, the required results buffer size is assigned to
6792  * parsed_results_bytes.
6793  * The parsing status is returned.
6794  */
6795 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6796 					       char *results_buf,
6797 					       u32 *parsed_results_bytes)
6798 {
6799 	const char *section_name, *param_name, *param_str_val;
6800 	u32 param_num_val, num_section_params, num_elements;
6801 	struct reg_fifo_element *elements;
6802 	u8 i, j, err_val, vf_val;
6803 	u32 results_offset = 0;
6804 	char vf_str[4];
6805 
6806 	/* Read global_params section */
6807 	dump_buf += qed_read_section_hdr(dump_buf,
6808 					 &section_name, &num_section_params);
6809 	if (strcmp(section_name, "global_params"))
6810 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6811 
6812 	/* Print global params */
6813 	dump_buf += qed_print_section_params(dump_buf,
6814 					     num_section_params,
6815 					     results_buf, &results_offset);
6816 
6817 	/* Read reg_fifo_data section */
6818 	dump_buf += qed_read_section_hdr(dump_buf,
6819 					 &section_name, &num_section_params);
6820 	if (strcmp(section_name, "reg_fifo_data"))
6821 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6822 	dump_buf += qed_read_param(dump_buf,
6823 				   &param_name, &param_str_val, &param_num_val);
6824 	if (strcmp(param_name, "size"))
6825 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6826 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6827 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6828 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6829 	elements = (struct reg_fifo_element *)dump_buf;
6830 
6831 	/* Decode elements */
6832 	for (i = 0; i < num_elements; i++) {
6833 		bool err_printed = false;
6834 
6835 		/* Discover if element belongs to a VF or a PF */
6836 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6837 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6838 			sprintf(vf_str, "%s", "N/A");
6839 		else
6840 			sprintf(vf_str, "%d", vf_val);
6841 
6842 		/* Add parsed element to parsed buffer */
6843 		results_offset +=
6844 		    sprintf(qed_get_buf_ptr(results_buf,
6845 					    results_offset),
6846 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6847 			    elements[i].data,
6848 			    (u32)GET_FIELD(elements[i].data,
6849 					   REG_FIFO_ELEMENT_ADDRESS) *
6850 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6851 			    s_access_strs[GET_FIELD(elements[i].data,
6852 						    REG_FIFO_ELEMENT_ACCESS)],
6853 			    (u32)GET_FIELD(elements[i].data,
6854 					   REG_FIFO_ELEMENT_PF),
6855 			    vf_str,
6856 			    (u32)GET_FIELD(elements[i].data,
6857 					   REG_FIFO_ELEMENT_PORT),
6858 			    s_privilege_strs[GET_FIELD(elements[i].data,
6859 						REG_FIFO_ELEMENT_PRIVILEGE)],
6860 			    s_protection_strs[GET_FIELD(elements[i].data,
6861 						REG_FIFO_ELEMENT_PROTECTION)],
6862 			    s_master_strs[GET_FIELD(elements[i].data,
6863 						REG_FIFO_ELEMENT_MASTER)]);
6864 
6865 		/* Print errors */
6866 		for (j = 0,
6867 		     err_val = GET_FIELD(elements[i].data,
6868 					 REG_FIFO_ELEMENT_ERROR);
6869 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6870 		     j++, err_val >>= 1) {
6871 			if (err_val & 0x1) {
6872 				if (err_printed)
6873 					results_offset +=
6874 					    sprintf(qed_get_buf_ptr
6875 						    (results_buf,
6876 						     results_offset), ", ");
6877 				results_offset +=
6878 				    sprintf(qed_get_buf_ptr
6879 					    (results_buf, results_offset), "%s",
6880 					    s_reg_fifo_error_strs[j]);
6881 				err_printed = true;
6882 			}
6883 		}
6884 
6885 		results_offset +=
6886 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6887 	}
6888 
6889 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6890 						  results_offset),
6891 				  "fifo contained %d elements", num_elements);
6892 
6893 	/* Add 1 for string NULL termination */
6894 	*parsed_results_bytes = results_offset + 1;
6895 
6896 	return DBG_STATUS_OK;
6897 }
6898 
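/* Parses a single IGU FIFO element and prints it to the results buffer.
 * The parsing status is returned.
 */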
6899 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6900 						  *element, char
6901 						  *results_buf,
6902 						  u32 *results_offset)
6903 {
6904 	const struct igu_fifo_addr_data *found_addr = NULL;
6905 	u8 source, err_type, i, is_cleanup;
6906 	char parsed_addr_data[32];
6907 	char parsed_wr_data[256];
6908 	u32 wr_data, prod_cons;
6909 	bool is_wr_cmd, is_pf;
6910 	u16 cmd_addr;
6911 	u64 dword12;
6912 
6913 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6914 	 * FIFO element.
6915 	 */
6916 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
6917 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6918 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6919 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6920 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6921 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6922 
6923 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6924 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6925 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6926 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6927 
6928 	/* Find address data */
6929 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6930 		const struct igu_fifo_addr_data *curr_addr =
6931 			&s_igu_fifo_addr_data[i];
6932 
6933 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6934 		    curr_addr->end_addr)
6935 			found_addr = curr_addr;
6936 	}
6937 
6938 	if (!found_addr)
6939 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6940 
6941 	/* Prepare parsed address data */
6942 	switch (found_addr->type) {
6943 	case IGU_ADDR_TYPE_MSIX_MEM:
6944 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6945 		break;
6946 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
6947 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6948 		sprintf(parsed_addr_data,
6949 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
6950 		break;
6951 	default:
6952 		parsed_addr_data[0] = '\0';
6953 	}
6954 
6955 	if (!is_wr_cmd) {
6956 		parsed_wr_data[0] = '\0';
6957 		goto out;
6958 	}
6959 
6960 	/* Prepare parsed write data */
6961 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6962 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6963 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6964 
6965 	if (source == IGU_SRC_ATTN) {
6966 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
6967 	} else {
6968 		if (is_cleanup) {
6969 			u8 cleanup_val, cleanup_type;
6970 
6971 			cleanup_val =
6972 				GET_FIELD(wr_data,
6973 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6974 			cleanup_type =
6975 			    GET_FIELD(wr_data,
6976 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6977 
6978 			sprintf(parsed_wr_data,
6979 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6980 				cleanup_val ? "set" : "clear",
6981 				cleanup_type);
6982 		} else {
6983 			u8 update_flag, en_dis_int_for_sb, segment;
6984 			u8 timer_mask;
6985 
6986 			update_flag = GET_FIELD(wr_data,
6987 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
6988 			en_dis_int_for_sb =
6989 				GET_FIELD(wr_data,
6990 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6991 			segment = GET_FIELD(wr_data,
6992 					    IGU_FIFO_WR_DATA_SEGMENT);
6993 			timer_mask = GET_FIELD(wr_data,
6994 					       IGU_FIFO_WR_DATA_TIMER_MASK);
6995 
6996 			sprintf(parsed_wr_data,
6997 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
6998 				prod_cons,
6999 				update_flag ? "update" : "nop",
7000 				en_dis_int_for_sb ?
7001 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7002 				"enable",
7003 				segment ? "attn" : "regular",
7004 				timer_mask);
7005 		}
7006 	}
7007 out:
7008 	/* Add parsed element to parsed buffer */
7009 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7010 						   *results_offset),
7011 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7012 				   element->dword2, element->dword1,
7013 				   element->dword0,
7014 				   is_pf ? "pf" : "vf",
7015 				   GET_FIELD(element->dword0,
7016 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7017 				   s_igu_fifo_source_strs[source],
7018 				   is_wr_cmd ? "wr" : "rd",
7019 				   cmd_addr,
7020 				   (!is_pf && found_addr->vf_desc)
7021 				   ? found_addr->vf_desc
7022 				   : found_addr->desc,
7023 				   parsed_addr_data,
7024 				   parsed_wr_data,
7025 				   s_igu_fifo_error_strs[err_type]);
7026 
7027 	return DBG_STATUS_OK;
7028 }
7029 
7030 /* Parses an IGU FIFO dump buffer.
 * If results_buf is not NULL, the IGU FIFO results are printed to it.
7032  * In any case, the required results buffer size is assigned to
7033  * parsed_results_bytes.
7034  * The parsing status is returned.
7035  */
7036 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7037 					       char *results_buf,
7038 					       u32 *parsed_results_bytes)
7039 {
7040 	const char *section_name, *param_name, *param_str_val;
7041 	u32 param_num_val, num_section_params, num_elements;
7042 	struct igu_fifo_element *elements;
7043 	enum dbg_status status;
7044 	u32 results_offset = 0;
7045 	u8 i;
7046 
7047 	/* Read global_params section */
7048 	dump_buf += qed_read_section_hdr(dump_buf,
7049 					 &section_name, &num_section_params);
7050 	if (strcmp(section_name, "global_params"))
7051 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7052 
7053 	/* Print global params */
7054 	dump_buf += qed_print_section_params(dump_buf,
7055 					     num_section_params,
7056 					     results_buf, &results_offset);
7057 
7058 	/* Read igu_fifo_data section */
7059 	dump_buf += qed_read_section_hdr(dump_buf,
7060 					 &section_name, &num_section_params);
7061 	if (strcmp(section_name, "igu_fifo_data"))
7062 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7063 	dump_buf += qed_read_param(dump_buf,
7064 				   &param_name, &param_str_val, &param_num_val);
7065 	if (strcmp(param_name, "size"))
7066 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7067 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7068 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7069 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7070 	elements = (struct igu_fifo_element *)dump_buf;
7071 
7072 	/* Decode elements */
7073 	for (i = 0; i < num_elements; i++) {
7074 		status = qed_parse_igu_fifo_element(&elements[i],
7075 						    results_buf,
7076 						    &results_offset);
7077 		if (status != DBG_STATUS_OK)
7078 			return status;
7079 	}
7080 
7081 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7082 						  results_offset),
7083 				  "fifo contained %d elements", num_elements);
7084 
7085 	/* Add 1 for string NULL termination */
7086 	*parsed_results_bytes = results_offset + 1;
7087 
7088 	return DBG_STATUS_OK;
7089 }
7090 
7091 static enum dbg_status
7092 qed_parse_protection_override_dump(u32 *dump_buf,
7093 				   char *results_buf,
7094 				   u32 *parsed_results_bytes)
7095 {
7096 	const char *section_name, *param_name, *param_str_val;
7097 	u32 param_num_val, num_section_params, num_elements;
7098 	struct protection_override_element *elements;
7099 	u32 results_offset = 0;
7100 	u8 i;
7101 
7102 	/* Read global_params section */
7103 	dump_buf += qed_read_section_hdr(dump_buf,
7104 					 &section_name, &num_section_params);
7105 	if (strcmp(section_name, "global_params"))
7106 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7107 
7108 	/* Print global params */
7109 	dump_buf += qed_print_section_params(dump_buf,
7110 					     num_section_params,
7111 					     results_buf, &results_offset);
7112 
7113 	/* Read protection_override_data section */
7114 	dump_buf += qed_read_section_hdr(dump_buf,
7115 					 &section_name, &num_section_params);
7116 	if (strcmp(section_name, "protection_override_data"))
7117 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7118 	dump_buf += qed_read_param(dump_buf,
7119 				   &param_name, &param_str_val, &param_num_val);
7120 	if (strcmp(param_name, "size"))
7121 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7122 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7123 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7124 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7125 	elements = (struct protection_override_element *)dump_buf;
7126 
7127 	/* Decode elements */
7128 	for (i = 0; i < num_elements; i++) {
7129 		u32 address = GET_FIELD(elements[i].data,
7130 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7131 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7132 
7133 		results_offset +=
7134 		    sprintf(qed_get_buf_ptr(results_buf,
7135 					    results_offset),
7136 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7137 			    i, address,
7138 			    (u32)GET_FIELD(elements[i].data,
7139 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7140 			    (u32)GET_FIELD(elements[i].data,
7141 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7142 			    (u32)GET_FIELD(elements[i].data,
7143 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7144 			    s_protection_strs[GET_FIELD(elements[i].data,
7145 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7146 			    s_protection_strs[GET_FIELD(elements[i].data,
7147 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7148 	}
7149 
7150 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7151 						  results_offset),
7152 				  "protection override contained %d elements",
7153 				  num_elements);
7154 
7155 	/* Add 1 for string NULL termination */
7156 	*parsed_results_bytes = results_offset + 1;
7157 
7158 	return DBG_STATUS_OK;
7159 }
7160 
7161 /* Parses a FW Asserts dump buffer.
 * If results_buf is not NULL, the FW Asserts results are printed to it.
7163  * In any case, the required results buffer size is assigned to
7164  * parsed_results_bytes.
7165  * The parsing status is returned.
7166  */
7167 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7168 						 char *results_buf,
7169 						 u32 *parsed_results_bytes)
7170 {
7171 	u32 num_section_params, param_num_val, i, results_offset = 0;
7172 	const char *param_name, *param_str_val, *section_name;
7173 	bool last_section_found = false;
7174 
7175 	*parsed_results_bytes = 0;
7176 
7177 	/* Read global_params section */
7178 	dump_buf += qed_read_section_hdr(dump_buf,
7179 					 &section_name, &num_section_params);
7180 	if (strcmp(section_name, "global_params"))
7181 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7182 
7183 	/* Print global params */
7184 	dump_buf += qed_print_section_params(dump_buf,
7185 					     num_section_params,
7186 					     results_buf, &results_offset);
7187 
7188 	while (!last_section_found) {
7189 		dump_buf += qed_read_section_hdr(dump_buf,
7190 						 &section_name,
7191 						 &num_section_params);
7192 		if (!strcmp(section_name, "fw_asserts")) {
7193 			/* Extract params */
7194 			const char *storm_letter = NULL;
7195 			u32 storm_dump_size = 0;
7196 
7197 			for (i = 0; i < num_section_params; i++) {
7198 				dump_buf += qed_read_param(dump_buf,
7199 							   &param_name,
7200 							   &param_str_val,
7201 							   &param_num_val);
7202 				if (!strcmp(param_name, "storm"))
7203 					storm_letter = param_str_val;
7204 				else if (!strcmp(param_name, "size"))
7205 					storm_dump_size = param_num_val;
7206 				else
7207 					return
7208 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7209 			}
7210 
7211 			if (!storm_letter || !storm_dump_size)
7212 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7213 
7214 			/* Print data */
7215 			results_offset +=
7216 			    sprintf(qed_get_buf_ptr(results_buf,
7217 						    results_offset),
7218 				    "\n%sSTORM_ASSERT: size=%d\n",
7219 				    storm_letter, storm_dump_size);
7220 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7221 				results_offset +=
7222 				    sprintf(qed_get_buf_ptr(results_buf,
7223 							    results_offset),
7224 					    "%08x\n", *dump_buf);
7225 		} else if (!strcmp(section_name, "last")) {
7226 			last_section_found = true;
7227 		} else {
7228 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7229 		}
7230 	}
7231 
7232 	/* Add 1 for string NULL termination */
7233 	*parsed_results_bytes = results_offset + 1;
7234 
7235 	return DBG_STATUS_OK;
7236 }
7237 
7238 /***************************** Public Functions *******************************/
7239 
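/* Note on the expected layout: bin_ptr points at an array of
 * struct bin_buffer_hdr entries (one per binary debug buffer type), each
 * giving the byte offset (relative to bin_ptr) and byte length of the
 * corresponding debug array; lengths are converted to dwords below.
 */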
7240 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7241 {
7242 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7243 	u8 buf_id;
7244 
7245 	/* Convert binary data to debug arrays */
7246 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7247 		s_user_dbg_arrays[buf_id].ptr =
7248 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7249 		s_user_dbg_arrays[buf_id].size_in_dwords =
7250 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7251 	}
7252 
7253 	return DBG_STATUS_OK;
7254 }
7255 
7256 const char *qed_dbg_get_status_str(enum dbg_status status)
7257 {
7258 	return (status <
7259 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7260 }
7261 
7262 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7263 						  u32 *dump_buf,
7264 						  u32 num_dumped_dwords,
7265 						  u32 *results_buf_size)
7266 {
7267 	u32 num_errors, num_warnings;
7268 
7269 	return qed_parse_idle_chk_dump(dump_buf,
7270 				       num_dumped_dwords,
7271 				       NULL,
7272 				       results_buf_size,
7273 				       &num_errors, &num_warnings);
7274 }
7275 
7276 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7277 					   u32 *dump_buf,
7278 					   u32 num_dumped_dwords,
7279 					   char *results_buf,
7280 					   u32 *num_errors,
7281 					   u32 *num_warnings)
7282 {
7283 	u32 parsed_buf_size;
7284 
7285 	return qed_parse_idle_chk_dump(dump_buf,
7286 				       num_dumped_dwords,
7287 				       results_buf,
7288 				       &parsed_buf_size,
7289 				       num_errors, num_warnings);
7290 }
7291 
7292 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7293 {
7294 	s_mcp_trace_meta.ptr = data;
7295 	s_mcp_trace_meta.size_in_dwords = size;
7296 }
7297 
7298 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7299 						   u32 *dump_buf,
7300 						   u32 num_dumped_dwords,
7301 						   u32 *results_buf_size)
7302 {
7303 	return qed_parse_mcp_trace_dump(p_hwfn,
7304 					dump_buf, NULL, results_buf_size);
7305 }
7306 
7307 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7308 					    u32 *dump_buf,
7309 					    u32 num_dumped_dwords,
7310 					    char *results_buf)
7311 {
7312 	u32 parsed_buf_size;
7313 
7314 	return qed_parse_mcp_trace_dump(p_hwfn,
7315 					dump_buf,
7316 					results_buf, &parsed_buf_size);
7317 }
7318 
7319 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7320 						  u32 *dump_buf,
7321 						  u32 num_dumped_dwords,
7322 						  u32 *results_buf_size)
7323 {
7324 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7325 }
7326 
7327 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7328 					   u32 *dump_buf,
7329 					   u32 num_dumped_dwords,
7330 					   char *results_buf)
7331 {
7332 	u32 parsed_buf_size;
7333 
7334 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7335 }
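
/* A minimal caller-side sketch of the two-step formatted-results API
 * (illustrative only; p_hwfn, dump_buf and dwords stand for values obtained
 * from an earlier reg_fifo dump):
 *
 *	u32 size;
 *	char *buf;
 *
 *	if (qed_get_reg_fifo_results_buf_size(p_hwfn, dump_buf, dwords,
 *					      &size) != DBG_STATUS_OK)
 *		return;
 *	buf = vmalloc(size);
 *	if (buf) {
 *		qed_print_reg_fifo_results(p_hwfn, dump_buf, dwords, buf);
 *		vfree(buf);
 *	}
 */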
7336 
7337 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7338 						  u32 *dump_buf,
7339 						  u32 num_dumped_dwords,
7340 						  u32 *results_buf_size)
7341 {
7342 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7343 }
7344 
7345 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7346 					   u32 *dump_buf,
7347 					   u32 num_dumped_dwords,
7348 					   char *results_buf)
7349 {
7350 	u32 parsed_buf_size;
7351 
7352 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7353 }
7354 
7355 enum dbg_status
7356 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7357 					     u32 *dump_buf,
7358 					     u32 num_dumped_dwords,
7359 					     u32 *results_buf_size)
7360 {
7361 	return qed_parse_protection_override_dump(dump_buf,
7362 						  NULL, results_buf_size);
7363 }
7364 
7365 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7366 						      u32 *dump_buf,
7367 						      u32 num_dumped_dwords,
7368 						      char *results_buf)
7369 {
7370 	u32 parsed_buf_size;
7371 
7372 	return qed_parse_protection_override_dump(dump_buf,
7373 						  results_buf,
7374 						  &parsed_buf_size);
7375 }
7376 
7377 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7378 						    u32 *dump_buf,
7379 						    u32 num_dumped_dwords,
7380 						    u32 *results_buf_size)
7381 {
7382 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7383 }
7384 
7385 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7386 					     u32 *dump_buf,
7387 					     u32 num_dumped_dwords,
7388 					     char *results_buf)
7389 {
7390 	u32 parsed_buf_size;
7391 
7392 	return qed_parse_fw_asserts_dump(dump_buf,
7393 					 results_buf, &parsed_buf_size);
7394 }
7395 
7396 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7397 				   struct dbg_attn_block_result *results)
7398 {
7399 	struct user_dbg_array *block_attn, *pstrings;
7400 	const u32 *block_attn_name_offsets;
7401 	enum dbg_attn_type attn_type;
7402 	const char *block_name;
7403 	u8 num_regs, i, j;
7404 
7405 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7406 	attn_type = (enum dbg_attn_type)
7407 		    GET_FIELD(results->data,
7408 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7409 	block_name = s_block_info_arr[results->block_id].name;
7410 
7411 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7412 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7413 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7414 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7415 
7416 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7417 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7418 
7419 	/* Go over registers with a non-zero attention status */
7420 	for (i = 0; i < num_regs; i++) {
7421 		struct dbg_attn_bit_mapping *bit_mapping;
7422 		struct dbg_attn_reg_result *reg_result;
7423 		u8 num_reg_attn, bit_idx = 0;
7424 
7425 		reg_result = &results->reg_results[i];
7426 		num_reg_attn = GET_FIELD(reg_result->data,
7427 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7428 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7429 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7430 				block_attn->ptr)[reg_result->block_attn_offset];
7431 
7432 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7433 
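		/* Each bit_mapping entry either describes the attention bit
		 * at the current index, or (when flagged as an unused-bit
		 * count) holds the number of unused bits to skip.
		 */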
7434 		/* Go over attention status bits */
7435 		for (j = 0; j < num_reg_attn; j++) {
7436 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7437 						     DBG_ATTN_BIT_MAPPING_VAL);
7438 			const char *attn_name, *attn_type_str, *masked_str;
7439 			u32 attn_name_offset, sts_addr;
7440 
7441 			/* Check if bit mask should be advanced (due to unused
7442 			 * bits).
7443 			 */
7444 			if (GET_FIELD(bit_mapping[j].data,
7445 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7446 				bit_idx += (u8)attn_idx_val;
7447 				continue;
7448 			}
7449 
7450 			/* Check current bit index */
7451 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7452 				bit_idx++;
7453 				continue;
7454 			}
7455 
7456 			/* Find attention name */
7457 			attn_name_offset =
7458 				block_attn_name_offsets[attn_idx_val];
7459 			attn_name = &((const char *)
7460 				      pstrings->ptr)[attn_name_offset];
7461 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7462 					"Interrupt" : "Parity";
7463 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7464 				     " [masked]" : "";
7465 			sts_addr = GET_FIELD(reg_result->data,
7466 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7467 			DP_NOTICE(p_hwfn,
7468 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7469 				  block_name, attn_type_str, attn_name,
7470 				  sts_addr, bit_idx, masked_str);
7471 
7472 			bit_idx++;
7473 		}
7474 	}
7475 
7476 	return DBG_STATUS_OK;
7477 }
7478 
/* Wrapper for unifying the idle_chk and mcp_trace API */
7480 static enum dbg_status
7481 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7482 				   u32 *dump_buf,
7483 				   u32 num_dumped_dwords,
7484 				   char *results_buf)
7485 {
	u32 num_errors, num_warnings;

	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					  results_buf, &num_errors,
					  &num_warnings);
7491 }
7492 
7493 /* Feature meta data lookup table */
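/* Each entry provides the per-feature callbacks used below: get_size returns
 * the required dump size in dwords, perform_dump fills the caller's buffer,
 * and print_results/results_buf_size format the raw dump into text. Features
 * without a text formatter (e.g. "grc") leave the last two callbacks NULL,
 * so format_feature() skips formatting for them.
 */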
7494 static struct {
7495 	char *name;
7496 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7497 				    struct qed_ptt *p_ptt, u32 *size);
7498 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7499 					struct qed_ptt *p_ptt, u32 *dump_buf,
7500 					u32 buf_size, u32 *dumped_dwords);
7501 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7502 					 u32 *dump_buf, u32 num_dumped_dwords,
7503 					 char *results_buf);
7504 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7505 					    u32 *dump_buf,
7506 					    u32 num_dumped_dwords,
7507 					    u32 *results_buf_size);
} qed_features_lookup[] = {
	{"grc", qed_dbg_grc_get_dump_buf_size,
	 qed_dbg_grc_dump, NULL, NULL},
	{"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
	 qed_dbg_idle_chk_dump,
	 qed_print_idle_chk_results_wrapper,
	 qed_get_idle_chk_results_buf_size},
	{"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
	 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
	 qed_get_mcp_trace_results_buf_size},
	{"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
	 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
	 qed_get_reg_fifo_results_buf_size},
	{"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
	 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
	 qed_get_igu_fifo_results_buf_size},
	{"protection_override",
	 qed_dbg_protection_override_get_dump_buf_size,
	 qed_dbg_protection_override_dump,
	 qed_print_protection_override_results,
	 qed_get_protection_override_results_buf_size},
	{"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
	 qed_dbg_fw_asserts_dump,
	 qed_print_fw_asserts_results,
	 qed_get_fw_asserts_results_buf_size},
};
7539 
7540 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7541 {
7542 	u32 i, precision = 80;
7543 
7544 	if (!p_text_buf)
7545 		return;
7546 
7547 	pr_notice("\n%.*s", precision, p_text_buf);
7548 	for (i = precision; i < text_size; i += precision)
7549 		pr_cont("%.*s", precision, p_text_buf + i);
7550 	pr_cont("\n");
7551 }
7552 
7553 #define QED_RESULTS_BUF_MIN_SIZE 16
7554 /* Generic function for decoding debug feature info */
7555 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7556 				      enum qed_dbg_features feature_idx)
7557 {
7558 	struct qed_dbg_feature *feature =
7559 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7560 	u32 text_size_bytes, null_char_pos, i;
7561 	enum dbg_status rc;
7562 	char *text_buf;
7563 
7564 	/* Check if feature supports formatting capability */
7565 	if (!qed_features_lookup[feature_idx].results_buf_size)
7566 		return DBG_STATUS_OK;
7567 
7568 	/* Obtain size of formatted output */
7569 	rc = qed_features_lookup[feature_idx].
7570 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7571 				 feature->dumped_dwords, &text_size_bytes);
7572 	if (rc != DBG_STATUS_OK)
7573 		return rc;
7574 
7575 	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
7576 	null_char_pos = text_size_bytes - 1;
7577 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
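	/* E.g. a required size of 1022 bytes (null character at index 1021)
	 * is rounded up to 1024; indices 1021..1023 are later overwritten
	 * with '\n'.
	 */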
7578 
7579 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7580 		DP_NOTICE(p_hwfn->cdev,
			  "Formatted size of feature is too small (%d bytes). Aborting\n",
7582 			  text_size_bytes);
7583 		return DBG_STATUS_INVALID_ARGS;
7584 	}
7585 
7586 	/* Allocate temp text buf */
7587 	text_buf = vzalloc(text_size_bytes);
7588 	if (!text_buf)
7589 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7590 
7591 	/* Decode feature opcodes to string on temp buf */
7592 	rc = qed_features_lookup[feature_idx].
7593 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7594 			      feature->dumped_dwords, text_buf);
7595 	if (rc != DBG_STATUS_OK) {
7596 		vfree(text_buf);
7597 		return rc;
7598 	}
7599 
7600 	/* Replace the original null character with a '\n' character.
7601 	 * The bytes that were added as a result of the dword alignment are also
7602 	 * padded with '\n' characters.
7603 	 */
7604 	for (i = null_char_pos; i < text_size_bytes; i++)
7605 		text_buf[i] = '\n';
7606 
7607 	/* Dump printable feature to log */
7608 	if (p_hwfn->cdev->dbg_params.print_data)
7609 		qed_dbg_print_feature(text_buf, text_size_bytes);
7610 
	/* Free the old dump_buf and point dump_buf at the newly allocated
	 * and formatted text buffer.
	 */
7614 	vfree(feature->dump_buf);
7615 	feature->dump_buf = text_buf;
7616 	feature->buf_size = text_size_bytes;
7617 	feature->dumped_dwords = text_size_bytes / 4;
7618 	return rc;
7619 }
7620 
7621 /* Generic function for performing the dump of a debug feature. */
7622 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7623 				    struct qed_ptt *p_ptt,
7624 				    enum qed_dbg_features feature_idx)
7625 {
7626 	struct qed_dbg_feature *feature =
7627 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7628 	u32 buf_size_dwords;
7629 	enum dbg_status rc;
7630 
7631 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7632 		  qed_features_lookup[feature_idx].name);
7633 
	/* If dump_buf was already allocated, free it (this can happen if a
	 * dump was requested but the file was never read).
	 * We can't reuse the buffer as-is since the required size may have
	 * changed.
	 */
7638 	if (feature->dump_buf) {
7639 		vfree(feature->dump_buf);
7640 		feature->dump_buf = NULL;
7641 	}
7642 
7643 	/* Get buffer size from hsi, allocate accordingly, and perform the
7644 	 * dump.
7645 	 */
7646 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7647 						       &buf_size_dwords);
7648 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7649 		return rc;
7650 	feature->buf_size = buf_size_dwords * sizeof(u32);
7651 	feature->dump_buf = vmalloc(feature->buf_size);
7652 	if (!feature->dump_buf)
7653 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7654 
7655 	rc = qed_features_lookup[feature_idx].
7656 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7657 			     feature->buf_size / sizeof(u32),
7658 			     &feature->dumped_dwords);
7659 
	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
	 * error. In this case the buffer holds valid binary data, but we
	 * won't be able to parse it (since parsing relies on data in NVRAM
	 * which is only accessible when the MFW is responsive). Skip the
	 * formatting but return success so that the binary data is provided.
	 */
7666 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7667 		return DBG_STATUS_OK;
7668 
7669 	if (rc != DBG_STATUS_OK)
7670 		return rc;
7671 
7672 	/* Format output */
7673 	rc = format_feature(p_hwfn, feature_idx);
7674 	return rc;
7675 }
7676 
7677 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7678 {
7679 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7680 }
7681 
7682 int qed_dbg_grc_size(struct qed_dev *cdev)
7683 {
7684 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7685 }
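
/* A minimal usage sketch (illustrative only, not called from this file):
 * query the size, allocate, then dump. cdev stands for the caller's device.
 *
 *	int len = qed_dbg_grc_size(cdev);
 *	u32 bytes;
 *	void *buf;
 *
 *	if (len <= 0)
 *		return;
 *	buf = vzalloc(len);
 *	if (buf && !qed_dbg_grc(cdev, buf, &bytes))
 *		... the first "bytes" bytes of buf hold the dump ...
 *	vfree(buf);
 */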
7686 
7687 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7688 {
7689 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7690 			       num_dumped_bytes);
7691 }
7692 
7693 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7694 {
7695 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7696 }
7697 
7698 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7699 {
7700 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7701 			       num_dumped_bytes);
7702 }
7703 
7704 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7705 {
7706 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7707 }
7708 
7709 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7710 {
7711 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7712 			       num_dumped_bytes);
7713 }
7714 
7715 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7716 {
7717 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7718 }
7719 
7720 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7721 				u32 *num_dumped_bytes)
7722 {
7723 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7724 			       num_dumped_bytes);
7725 }
7726 
7727 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7728 {
7729 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7730 }
7731 
7732 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7733 		       u32 *num_dumped_bytes)
7734 {
7735 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7736 			       num_dumped_bytes);
7737 }
7738 
7739 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7740 {
7741 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7742 }
7743 
7744 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7745 		      u32 *num_dumped_bytes)
7746 {
7747 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7748 			       num_dumped_bytes);
7749 }
7750 
7751 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7752 {
7753 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7754 }
7755 
/* Defines the number of bytes allocated for recording the length of the
 * debugfs feature buffer.
 */
7759 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7760 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7761 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7762 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7763 enum debug_print_features {
7764 	OLD_MODE = 0,
7765 	IDLE_CHK = 1,
7766 	GRC_DUMP = 2,
7767 	MCP_TRACE = 3,
7768 	REG_FIFO = 4,
7769 	PROTECTION_OVERRIDE = 5,
7770 	IGU_FIFO = 6,
7771 	PHY = 7,
7772 	FW_ASSERTS = 8,
7773 };
7774 
7775 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7776 				   int engine, u32 feature_size, u8 omit_engine)
7777 {
	/* Insert the engine, feature and omit-engine flag into the header and
	 * combine them with the feature size.
	 */
7781 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7782 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7783 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7784 }
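
/* Resulting header layout: feature size in bytes in the low bits, the
 * feature type starting at bit 24, the omit-engine flag at bit 30 and the
 * engine ID at bit 31. For example (values illustrative), IDLE_CHK (1) on
 * engine 0 with a 0x100-byte dump and omit_engine set yields 0x41000100.
 */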
7785 
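/* qed_dbg_all_data() fills the caller's buffer with a sequence of records,
 * each a u32 regdump header followed by the raw feature data: per engine it
 * stores idle_chk (twice), reg_fifo, igu_fifo, protection_override,
 * fw_asserts and grc, followed by a single engine-common mcp_trace record.
 */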
7786 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7787 {
7788 	u8 cur_engine, omit_engine = 0, org_engine;
7789 	u32 offset = 0, feature_size;
7790 	int rc;
7791 
7792 	if (cdev->num_hwfns == 1)
7793 		omit_engine = 1;
7794 
7795 	org_engine = qed_get_debug_engine(cdev);
7796 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chk and GRC dumps for each HW function */
7798 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7799 			   "obtaining idle_chk and grcdump for current engine\n");
7800 		qed_set_debug_engine(cdev, cur_engine);
7801 
7802 		/* First idle_chk */
7803 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7804 				      REGDUMP_HEADER_SIZE, &feature_size);
7805 		if (!rc) {
7806 			*(u32 *)((u8 *)buffer + offset) =
7807 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7808 						    feature_size, omit_engine);
7809 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7810 		} else {
7811 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7812 		}
7813 
7814 		/* Second idle_chk */
7815 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7816 				      REGDUMP_HEADER_SIZE, &feature_size);
7817 		if (!rc) {
7818 			*(u32 *)((u8 *)buffer + offset) =
7819 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7820 						    feature_size, omit_engine);
7821 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7822 		} else {
7823 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7824 		}
7825 
7826 		/* reg_fifo dump */
7827 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7828 				      REGDUMP_HEADER_SIZE, &feature_size);
7829 		if (!rc) {
7830 			*(u32 *)((u8 *)buffer + offset) =
7831 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
7832 						    feature_size, omit_engine);
7833 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7834 		} else {
7835 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7836 		}
7837 
7838 		/* igu_fifo dump */
7839 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7840 				      REGDUMP_HEADER_SIZE, &feature_size);
7841 		if (!rc) {
7842 			*(u32 *)((u8 *)buffer + offset) =
7843 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
7844 						    feature_size, omit_engine);
7845 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7846 		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
7848 		}
7849 
7850 		/* protection_override dump */
7851 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7852 						 REGDUMP_HEADER_SIZE,
7853 						 &feature_size);
7854 		if (!rc) {
7855 			*(u32 *)((u8 *)buffer + offset) =
7856 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
7857 						    cur_engine,
7858 						    feature_size, omit_engine);
7859 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7860 		} else {
7861 			DP_ERR(cdev,
7862 			       "qed_dbg_protection_override failed. rc = %d\n",
7863 			       rc);
7864 		}
7865 
7866 		/* fw_asserts dump */
7867 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7868 					REGDUMP_HEADER_SIZE, &feature_size);
7869 		if (!rc) {
7870 			*(u32 *)((u8 *)buffer + offset) =
7871 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7872 						    feature_size, omit_engine);
7873 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7874 		} else {
7875 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7876 			       rc);
7877 		}
7878 
		/* GRC dump - must be last because when the MCP is stuck it
		 * will clutter the idle_chk, reg_fifo, ... results.
		 */
7882 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7883 				 REGDUMP_HEADER_SIZE, &feature_size);
7884 		if (!rc) {
7885 			*(u32 *)((u8 *)buffer + offset) =
7886 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
7887 						    feature_size, omit_engine);
7888 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7889 		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
7891 		}
7892 	}
7893 
7894 	/* mcp_trace */
7895 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7896 			       REGDUMP_HEADER_SIZE, &feature_size);
7897 	if (!rc) {
7898 		*(u32 *)((u8 *)buffer + offset) =
7899 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
7900 					    feature_size, omit_engine);
7901 		offset += (feature_size + REGDUMP_HEADER_SIZE);
7902 	} else {
7903 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7904 	}
7905 
7906 	qed_set_debug_engine(cdev, org_engine);
7907 
7908 	return 0;
7909 }
7910 
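/* Sizes the buffer that qed_dbg_all_data() fills: the same sequence of
 * regdump headers and per-feature sizes, summed over all engines, plus the
 * engine-common mcp_trace record.
 */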
7911 int qed_dbg_all_data_size(struct qed_dev *cdev)
7912 {
7913 	u8 cur_engine, org_engine;
7914 	u32 regs_len = 0;
7915 
7916 	org_engine = qed_get_debug_engine(cdev);
7917 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7918 		/* Engine specific */
7919 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7920 			   "calculating idle_chk and grcdump register length for current engine\n");
7921 		qed_set_debug_engine(cdev, cur_engine);
7922 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7923 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7924 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7925 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7926 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7927 			    REGDUMP_HEADER_SIZE +
7928 			    qed_dbg_protection_override_size(cdev) +
7929 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7930 	}
7931 
7932 	/* Engine common */
7933 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7934 	qed_set_debug_engine(cdev, org_engine);
7935 
7936 	return regs_len;
7937 }
7938 
7939 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
7940 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
7941 {
7942 	struct qed_hwfn *p_hwfn =
7943 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7944 	struct qed_dbg_feature *qed_feature =
7945 		&cdev->dbg_params.features[feature];
7946 	enum dbg_status dbg_rc;
7947 	struct qed_ptt *p_ptt;
7948 	int rc = 0;
7949 
7950 	/* Acquire ptt */
7951 	p_ptt = qed_ptt_acquire(p_hwfn);
7952 	if (!p_ptt)
7953 		return -EINVAL;
7954 
7955 	/* Get dump */
7956 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
7957 	if (dbg_rc != DBG_STATUS_OK) {
7958 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
7959 			   qed_dbg_get_status_str(dbg_rc));
7960 		*num_dumped_bytes = 0;
7961 		rc = -EINVAL;
7962 		goto out;
7963 	}
7964 
7965 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
7966 		   "copying debugfs feature to external buffer\n");
7967 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
7968 	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
7969 			    4;
7970 
7971 out:
7972 	qed_ptt_release(p_hwfn, p_ptt);
7973 	return rc;
7974 }
7975 
7976 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
7977 {
7978 	struct qed_hwfn *p_hwfn =
7979 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7980 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
7981 	struct qed_dbg_feature *qed_feature =
7982 		&cdev->dbg_params.features[feature];
7983 	u32 buf_size_dwords;
7984 	enum dbg_status rc;
7985 
7986 	if (!p_ptt)
7987 		return -EINVAL;
7988 
7989 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
7990 						   &buf_size_dwords);
7991 	if (rc != DBG_STATUS_OK)
7992 		buf_size_dwords = 0;
7993 
7994 	qed_ptt_release(p_hwfn, p_ptt);
7995 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
7996 	return qed_feature->buf_size;
7997 }
7998 
7999 u8 qed_get_debug_engine(struct qed_dev *cdev)
8000 {
8001 	return cdev->dbg_params.engine_for_debug;
8002 }
8003 
8004 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8005 {
8006 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8007 		   engine_number);
8008 	cdev->dbg_params.engine_for_debug = engine_number;
8009 }
8010 
8011 void qed_dbg_pf_init(struct qed_dev *cdev)
8012 {
8013 	const u8 *dbg_values;
8014 
8015 	/* Debug values are after init values.
8016 	 * The offset is the first dword of the file.
8017 	 */
8018 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8019 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8020 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8021 }
8022 
8023 void qed_dbg_pf_exit(struct qed_dev *cdev)
8024 {
8025 	struct qed_dbg_feature *feature = NULL;
8026 	enum qed_dbg_features feature_idx;
8027 
8028 	/* Debug features' buffers may be allocated if debug feature was used
8029 	 * but dump wasn't called.
8030 	 */
8031 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8032 		feature = &cdev->dbg_params.features[feature_idx];
8033 		if (feature->dump_buf) {
8034 			vfree(feature->dump_buf);
8035 			feature->dump_buf = NULL;
8036 		}
8037 	}
8038 }
8039