xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_debug.c (revision d003c346bf75f01d240c80000baf2fbf28e53782)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Memory groups enum */
19 enum mem_groups {
20 	MEM_GROUP_PXP_MEM,
21 	MEM_GROUP_DMAE_MEM,
22 	MEM_GROUP_CM_MEM,
23 	MEM_GROUP_QM_MEM,
24 	MEM_GROUP_DORQ_MEM,
25 	MEM_GROUP_BRB_RAM,
26 	MEM_GROUP_BRB_MEM,
27 	MEM_GROUP_PRS_MEM,
28 	MEM_GROUP_IOR,
29 	MEM_GROUP_BTB_RAM,
30 	MEM_GROUP_CONN_CFC_MEM,
31 	MEM_GROUP_TASK_CFC_MEM,
32 	MEM_GROUP_CAU_PI,
33 	MEM_GROUP_CAU_MEM,
34 	MEM_GROUP_PXP_ILT,
35 	MEM_GROUP_TM_MEM,
36 	MEM_GROUP_SDM_MEM,
37 	MEM_GROUP_PBUF,
38 	MEM_GROUP_RAM,
39 	MEM_GROUP_MULD_MEM,
40 	MEM_GROUP_BTB_MEM,
41 	MEM_GROUP_RDIF_CTX,
42 	MEM_GROUP_TDIF_CTX,
43 	MEM_GROUP_CFC_MEM,
44 	MEM_GROUP_IGU_MEM,
45 	MEM_GROUP_IGU_MSIX,
46 	MEM_GROUP_CAU_SB,
47 	MEM_GROUP_BMB_RAM,
48 	MEM_GROUP_BMB_MEM,
49 	MEM_GROUPS_NUM
50 };
51 
52 /* Memory groups names */
53 static const char * const s_mem_group_names[] = {
54 	"PXP_MEM",
55 	"DMAE_MEM",
56 	"CM_MEM",
57 	"QM_MEM",
58 	"DORQ_MEM",
59 	"BRB_RAM",
60 	"BRB_MEM",
61 	"PRS_MEM",
62 	"IOR",
63 	"BTB_RAM",
64 	"CONN_CFC_MEM",
65 	"TASK_CFC_MEM",
66 	"CAU_PI",
67 	"CAU_MEM",
68 	"PXP_ILT",
69 	"TM_MEM",
70 	"SDM_MEM",
71 	"PBUF",
72 	"RAM",
73 	"MULD_MEM",
74 	"BTB_MEM",
75 	"RDIF_CTX",
76 	"TDIF_CTX",
77 	"CFC_MEM",
78 	"IGU_MEM",
79 	"IGU_MSIX",
80 	"CAU_SB",
81 	"BMB_RAM",
82 	"BMB_MEM",
83 };
84 
85 /* Idle check conditions */
86 
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91 
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96 
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 	return (r[0] & imm[0]) != imm[1];
100 }
101 
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 	return ((r[0] & imm[0]) >> imm[1]) !=
105 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107 
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112 
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 	return (r[0] & ~imm[0]) != imm[1];
116 }
117 
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 	return (r[0] & ~r[1]) != imm[0];
121 }
122 
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 	return r[0] != imm[0];
126 }
127 
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 	return r[0] != r[1] && r[2] == imm[0];
131 }
132 
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 	return r[0] != r[1] && r[2] > imm[0];
136 }
137 
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 	return r[0] != r[1];
141 }
142 
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 	return r[0] & imm[0];
146 }
147 
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 	return r[0] < (r[1] - imm[0]);
151 }
152 
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 	return r[0] > imm[0];
156 }
157 
158 /* Array of Idle Check conditions */
159 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
160 	cond0,
161 	cond1,
162 	cond2,
163 	cond3,
164 	cond4,
165 	cond5,
166 	cond6,
167 	cond7,
168 	cond8,
169 	cond9,
170 	cond10,
171 	cond11,
172 	cond12,
173 	cond13,
174 };
175 
176 /******************************* Data Types **********************************/
177 
178 enum platform_ids {
179 	PLATFORM_ASIC,
180 	PLATFORM_RESERVED,
181 	PLATFORM_RESERVED2,
182 	PLATFORM_RESERVED3,
183 	MAX_PLATFORM_IDS
184 };
185 
186 /* Chip constant definitions */
187 struct chip_defs {
188 	const char *name;
189 };
190 
191 /* Platform constant definitions */
192 struct platform_defs {
193 	const char *name;
194 	u32 delay_factor;
195 	u32 dmae_thresh;
196 	u32 log_thresh;
197 };
198 
199 /* Storm constant definitions.
200  * Addresses are in bytes, sizes are in quad-regs.
201  */
202 struct storm_defs {
203 	char letter;
204 	enum block_id block_id;
205 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
206 	bool has_vfc;
207 	u32 sem_fast_mem_addr;
208 	u32 sem_frame_mode_addr;
209 	u32 sem_slow_enable_addr;
210 	u32 sem_slow_mode_addr;
211 	u32 sem_slow_mode1_conf_addr;
212 	u32 sem_sync_dbg_empty_addr;
213 	u32 sem_slow_dbg_empty_addr;
214 	u32 cm_ctx_wr_addr;
215 	u32 cm_conn_ag_ctx_lid_size;
216 	u32 cm_conn_ag_ctx_rd_addr;
217 	u32 cm_conn_st_ctx_lid_size;
218 	u32 cm_conn_st_ctx_rd_addr;
219 	u32 cm_task_ag_ctx_lid_size;
220 	u32 cm_task_ag_ctx_rd_addr;
221 	u32 cm_task_st_ctx_lid_size;
222 	u32 cm_task_st_ctx_rd_addr;
223 };
224 
225 /* Block constant definitions */
226 struct block_defs {
227 	const char *name;
228 	bool exists[MAX_CHIP_IDS];
229 	bool associated_to_storm;
230 
231 	/* Valid only if associated_to_storm is true */
232 	u32 storm_id;
233 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
234 	u32 dbg_select_addr;
235 	u32 dbg_enable_addr;
236 	u32 dbg_shift_addr;
237 	u32 dbg_force_valid_addr;
238 	u32 dbg_force_frame_addr;
239 	bool has_reset_bit;
240 
241 	/* If true, block is taken out of reset before dump */
242 	bool unreset;
243 	enum dbg_reset_regs reset_reg;
244 
245 	/* Bit offset in reset register */
246 	u8 reset_bit_offset;
247 };
248 
249 /* Reset register definitions */
250 struct reset_reg_defs {
251 	u32 addr;
252 	bool exists[MAX_CHIP_IDS];
253 	u32 unreset_val[MAX_CHIP_IDS];
254 };
255 
256 struct grc_param_defs {
257 	u32 default_val[MAX_CHIP_IDS];
258 	u32 min;
259 	u32 max;
260 	bool is_preset;
261 	bool is_persistent;
262 	u32 exclude_all_preset_val;
263 	u32 crash_preset_val;
264 };
265 
266 /* Address is in 128b units. Width is in bits. */
267 struct rss_mem_defs {
268 	const char *mem_name;
269 	const char *type_name;
270 	u32 addr;
271 	u32 entry_width;
272 	u32 num_entries[MAX_CHIP_IDS];
273 };
274 
275 struct vfc_ram_defs {
276 	const char *mem_name;
277 	const char *type_name;
278 	u32 base_row;
279 	u32 num_rows;
280 };
281 
282 struct big_ram_defs {
283 	const char *instance_name;
284 	enum mem_groups mem_group_id;
285 	enum mem_groups ram_mem_group_id;
286 	enum dbg_grc_params grc_param;
287 	u32 addr_reg_addr;
288 	u32 data_reg_addr;
289 	u32 is_256b_reg_addr;
290 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
291 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
292 };
293 
294 struct phy_defs {
295 	const char *phy_name;
296 
297 	/* PHY base GRC address */
298 	u32 base_addr;
299 
300 	/* Relative address of indirect TBUS address register (bits 0..7) */
301 	u32 tbus_addr_lo_addr;
302 
303 	/* Relative address of indirect TBUS address register (bits 8..10) */
304 	u32 tbus_addr_hi_addr;
305 
306 	/* Relative address of indirect TBUS data register (bits 0..7) */
307 	u32 tbus_data_lo_addr;
308 
309 	/* Relative address of indirect TBUS data register (bits 8..11) */
310 	u32 tbus_data_hi_addr;
311 };
312 
313 /* Split type definitions */
314 struct split_type_defs {
315 	const char *name;
316 };
317 
318 /******************************** Constants **********************************/
319 
320 #define MAX_LCIDS			320
321 #define MAX_LTIDS			320
322 
323 #define NUM_IOR_SETS			2
324 #define IORS_PER_SET			176
325 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
326 
327 #define BYTES_IN_DWORD			sizeof(u32)
328 
329 /* In the macros below, size and offset are specified in bits */
330 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
331 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
332 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
333 #define FIELD_DWORD_OFFSET(type, field) \
334 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
335 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
336 #define FIELD_BIT_MASK(type, field) \
337 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
338 	 FIELD_DWORD_SHIFT(type, field))
339 
340 #define SET_VAR_FIELD(var, type, field, val) \
341 	do { \
342 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
343 		(~FIELD_BIT_MASK(type, field));	\
344 		var[FIELD_DWORD_OFFSET(type, field)] |= \
345 		(val) << FIELD_DWORD_SHIFT(type, field); \
346 	} while (0)
347 
348 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
349 	do { \
350 		for (i = 0; i < (arr_size); i++) \
351 			qed_wr(dev, ptt, addr,	(arr)[i]); \
352 	} while (0)
353 
354 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
355 	do { \
356 		for (i = 0; i < (arr_size); i++) \
357 			(arr)[i] = qed_rd(dev, ptt, addr); \
358 	} while (0)
359 
360 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
361 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
362 
363 /* Extra lines include a signature line + optional latency events line */
364 #define NUM_EXTRA_DBG_LINES(block_desc) \
365 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
366 #define NUM_DBG_LINES(block_desc) \
367 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
368 
369 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
370 #define RAM_LINES_TO_BYTES(lines) \
371 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
372 
373 #define REG_DUMP_LEN_SHIFT		24
374 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
375 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
376 
377 #define IDLE_CHK_RULE_SIZE_DWORDS \
378 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
379 
380 #define IDLE_CHK_RESULT_HDR_DWORDS \
381 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
382 
383 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
384 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
385 
386 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
387 
388 /* The sizes and offsets below are specified in bits */
389 #define VFC_CAM_CMD_STRUCT_SIZE		64
390 #define VFC_CAM_CMD_ROW_OFFSET		48
391 #define VFC_CAM_CMD_ROW_SIZE		9
392 #define VFC_CAM_ADDR_STRUCT_SIZE	16
393 #define VFC_CAM_ADDR_OP_OFFSET		0
394 #define VFC_CAM_ADDR_OP_SIZE		4
395 #define VFC_CAM_RESP_STRUCT_SIZE	256
396 #define VFC_RAM_ADDR_STRUCT_SIZE	16
397 #define VFC_RAM_ADDR_OP_OFFSET		0
398 #define VFC_RAM_ADDR_OP_SIZE		2
399 #define VFC_RAM_ADDR_ROW_OFFSET		2
400 #define VFC_RAM_ADDR_ROW_SIZE		10
401 #define VFC_RAM_RESP_STRUCT_SIZE	256
402 
403 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
404 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
405 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
406 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
407 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
408 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
409 
410 #define NUM_VFC_RAM_TYPES		4
411 
412 #define VFC_CAM_NUM_ROWS		512
413 
414 #define VFC_OPCODE_CAM_RD		14
415 #define VFC_OPCODE_RAM_RD		0
416 
417 #define NUM_RSS_MEM_TYPES		5
418 
419 #define NUM_BIG_RAM_TYPES		3
420 #define BIG_RAM_NAME_LEN		3
421 
422 #define NUM_PHY_TBUS_ADDRESSES		2048
423 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
424 
425 #define RESET_REG_UNRESET_OFFSET	4
426 
427 #define STALL_DELAY_MS			500
428 
429 #define STATIC_DEBUG_LINE_DWORDS	9
430 
431 #define NUM_COMMON_GLOBAL_PARAMS	8
432 
433 #define FW_IMG_MAIN			1
434 
435 #define REG_FIFO_ELEMENT_DWORDS		2
436 #define REG_FIFO_DEPTH_ELEMENTS		32
437 #define REG_FIFO_DEPTH_DWORDS \
438 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
439 
440 #define IGU_FIFO_ELEMENT_DWORDS		4
441 #define IGU_FIFO_DEPTH_ELEMENTS		64
442 #define IGU_FIFO_DEPTH_DWORDS \
443 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
444 
445 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
446 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
447 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
448 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
449 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
450 
451 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
452 	(MCP_REG_SCRATCH + \
453 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
454 
455 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
456 #define EMPTY_FW_IMAGE_STR		"???????????????"
457 
458 /***************************** Constant Arrays *******************************/
459 
460 struct dbg_array {
461 	const u32 *ptr;
462 	u32 size_in_dwords;
463 };
464 
465 /* Debug arrays */
466 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
467 
468 /* Chip constant definitions array */
469 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
470 	{"bb"},
471 	{"ah"},
472 	{"reserved"},
473 };
474 
475 /* Storm constant definitions array */
476 static struct storm_defs s_storm_defs[] = {
477 	/* Tstorm */
478 	{'T', BLOCK_TSEM,
479 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
480 	  DBG_BUS_CLIENT_RBCT}, true,
481 	 TSEM_REG_FAST_MEMORY,
482 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
483 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
484 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
485 	 TCM_REG_CTX_RBC_ACCS,
486 	 4, TCM_REG_AGG_CON_CTX,
487 	 16, TCM_REG_SM_CON_CTX,
488 	 2, TCM_REG_AGG_TASK_CTX,
489 	 4, TCM_REG_SM_TASK_CTX},
490 
491 	/* Mstorm */
492 	{'M', BLOCK_MSEM,
493 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
494 	  DBG_BUS_CLIENT_RBCM}, false,
495 	 MSEM_REG_FAST_MEMORY,
496 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
497 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
498 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
499 	 MCM_REG_CTX_RBC_ACCS,
500 	 1, MCM_REG_AGG_CON_CTX,
501 	 10, MCM_REG_SM_CON_CTX,
502 	 2, MCM_REG_AGG_TASK_CTX,
503 	 7, MCM_REG_SM_TASK_CTX},
504 
505 	/* Ustorm */
506 	{'U', BLOCK_USEM,
507 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
508 	  DBG_BUS_CLIENT_RBCU}, false,
509 	 USEM_REG_FAST_MEMORY,
510 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
511 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
512 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
513 	 UCM_REG_CTX_RBC_ACCS,
514 	 2, UCM_REG_AGG_CON_CTX,
515 	 13, UCM_REG_SM_CON_CTX,
516 	 3, UCM_REG_AGG_TASK_CTX,
517 	 3, UCM_REG_SM_TASK_CTX},
518 
519 	/* Xstorm */
520 	{'X', BLOCK_XSEM,
521 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
522 	  DBG_BUS_CLIENT_RBCX}, false,
523 	 XSEM_REG_FAST_MEMORY,
524 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
525 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
526 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
527 	 XCM_REG_CTX_RBC_ACCS,
528 	 9, XCM_REG_AGG_CON_CTX,
529 	 15, XCM_REG_SM_CON_CTX,
530 	 0, 0,
531 	 0, 0},
532 
533 	/* Ystorm */
534 	{'Y', BLOCK_YSEM,
535 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
536 	  DBG_BUS_CLIENT_RBCY}, false,
537 	 YSEM_REG_FAST_MEMORY,
538 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
539 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
540 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
541 	 YCM_REG_CTX_RBC_ACCS,
542 	 2, YCM_REG_AGG_CON_CTX,
543 	 3, YCM_REG_SM_CON_CTX,
544 	 2, YCM_REG_AGG_TASK_CTX,
545 	 12, YCM_REG_SM_TASK_CTX},
546 
547 	/* Pstorm */
548 	{'P', BLOCK_PSEM,
549 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
550 	  DBG_BUS_CLIENT_RBCS}, true,
551 	 PSEM_REG_FAST_MEMORY,
552 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
553 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
554 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
555 	 PCM_REG_CTX_RBC_ACCS,
556 	 0, 0,
557 	 10, PCM_REG_SM_CON_CTX,
558 	 0, 0,
559 	 0, 0}
560 };
561 
562 /* Block definitions array */
563 
564 static struct block_defs block_grc_defs = {
565 	"grc",
566 	{true, true, true}, false, 0,
567 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
568 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
569 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
570 	GRC_REG_DBG_FORCE_FRAME,
571 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
572 };
573 
574 static struct block_defs block_miscs_defs = {
575 	"miscs", {true, true, true}, false, 0,
576 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
577 	0, 0, 0, 0, 0,
578 	false, false, MAX_DBG_RESET_REGS, 0
579 };
580 
581 static struct block_defs block_misc_defs = {
582 	"misc", {true, true, true}, false, 0,
583 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
584 	0, 0, 0, 0, 0,
585 	false, false, MAX_DBG_RESET_REGS, 0
586 };
587 
588 static struct block_defs block_dbu_defs = {
589 	"dbu", {true, true, true}, false, 0,
590 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
591 	0, 0, 0, 0, 0,
592 	false, false, MAX_DBG_RESET_REGS, 0
593 };
594 
595 static struct block_defs block_pglue_b_defs = {
596 	"pglue_b",
597 	{true, true, true}, false, 0,
598 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
599 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
600 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
601 	PGLUE_B_REG_DBG_FORCE_FRAME,
602 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
603 };
604 
605 static struct block_defs block_cnig_defs = {
606 	"cnig",
607 	{true, true, true}, false, 0,
608 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
609 	 DBG_BUS_CLIENT_RBCW},
610 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
611 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
612 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
613 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
614 };
615 
616 static struct block_defs block_cpmu_defs = {
617 	"cpmu", {true, true, true}, false, 0,
618 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
619 	0, 0, 0, 0, 0,
620 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
621 };
622 
623 static struct block_defs block_ncsi_defs = {
624 	"ncsi",
625 	{true, true, true}, false, 0,
626 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
627 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
628 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
629 	NCSI_REG_DBG_FORCE_FRAME,
630 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
631 };
632 
633 static struct block_defs block_opte_defs = {
634 	"opte", {true, true, false}, false, 0,
635 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
636 	0, 0, 0, 0, 0,
637 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
638 };
639 
640 static struct block_defs block_bmb_defs = {
641 	"bmb",
642 	{true, true, true}, false, 0,
643 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
644 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
645 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
646 	BMB_REG_DBG_FORCE_FRAME,
647 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
648 };
649 
650 static struct block_defs block_pcie_defs = {
651 	"pcie",
652 	{true, true, true}, false, 0,
653 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
654 	 DBG_BUS_CLIENT_RBCH},
655 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
656 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
657 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
658 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
659 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
660 	false, false, MAX_DBG_RESET_REGS, 0
661 };
662 
663 static struct block_defs block_mcp_defs = {
664 	"mcp", {true, true, true}, false, 0,
665 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
666 	0, 0, 0, 0, 0,
667 	false, false, MAX_DBG_RESET_REGS, 0
668 };
669 
670 static struct block_defs block_mcp2_defs = {
671 	"mcp2",
672 	{true, true, true}, false, 0,
673 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
674 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
675 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
676 	MCP2_REG_DBG_FORCE_FRAME,
677 	false, false, MAX_DBG_RESET_REGS, 0
678 };
679 
680 static struct block_defs block_pswhst_defs = {
681 	"pswhst",
682 	{true, true, true}, false, 0,
683 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
684 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
685 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
686 	PSWHST_REG_DBG_FORCE_FRAME,
687 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
688 };
689 
690 static struct block_defs block_pswhst2_defs = {
691 	"pswhst2",
692 	{true, true, true}, false, 0,
693 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
694 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
695 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
696 	PSWHST2_REG_DBG_FORCE_FRAME,
697 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
698 };
699 
700 static struct block_defs block_pswrd_defs = {
701 	"pswrd",
702 	{true, true, true}, false, 0,
703 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
704 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
705 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
706 	PSWRD_REG_DBG_FORCE_FRAME,
707 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
708 };
709 
710 static struct block_defs block_pswrd2_defs = {
711 	"pswrd2",
712 	{true, true, true}, false, 0,
713 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
714 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
715 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
716 	PSWRD2_REG_DBG_FORCE_FRAME,
717 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
718 };
719 
720 static struct block_defs block_pswwr_defs = {
721 	"pswwr",
722 	{true, true, true}, false, 0,
723 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
724 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
725 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
726 	PSWWR_REG_DBG_FORCE_FRAME,
727 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
728 };
729 
730 static struct block_defs block_pswwr2_defs = {
731 	"pswwr2", {true, true, true}, false, 0,
732 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
733 	0, 0, 0, 0, 0,
734 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
735 };
736 
737 static struct block_defs block_pswrq_defs = {
738 	"pswrq",
739 	{true, true, true}, false, 0,
740 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
741 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
742 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
743 	PSWRQ_REG_DBG_FORCE_FRAME,
744 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
745 };
746 
747 static struct block_defs block_pswrq2_defs = {
748 	"pswrq2",
749 	{true, true, true}, false, 0,
750 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
751 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
752 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
753 	PSWRQ2_REG_DBG_FORCE_FRAME,
754 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
755 };
756 
757 static struct block_defs block_pglcs_defs = {
758 	"pglcs",
759 	{true, true, true}, false, 0,
760 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
761 	 DBG_BUS_CLIENT_RBCH},
762 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
763 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
764 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
765 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
766 };
767 
768 static struct block_defs block_ptu_defs = {
769 	"ptu",
770 	{true, true, true}, false, 0,
771 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
772 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
773 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
774 	PTU_REG_DBG_FORCE_FRAME,
775 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
776 };
777 
778 static struct block_defs block_dmae_defs = {
779 	"dmae",
780 	{true, true, true}, false, 0,
781 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
782 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
783 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
784 	DMAE_REG_DBG_FORCE_FRAME,
785 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
786 };
787 
788 static struct block_defs block_tcm_defs = {
789 	"tcm",
790 	{true, true, true}, true, DBG_TSTORM_ID,
791 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
792 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
793 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
794 	TCM_REG_DBG_FORCE_FRAME,
795 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
796 };
797 
798 static struct block_defs block_mcm_defs = {
799 	"mcm",
800 	{true, true, true}, true, DBG_MSTORM_ID,
801 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
802 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
803 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
804 	MCM_REG_DBG_FORCE_FRAME,
805 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
806 };
807 
808 static struct block_defs block_ucm_defs = {
809 	"ucm",
810 	{true, true, true}, true, DBG_USTORM_ID,
811 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
812 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
813 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
814 	UCM_REG_DBG_FORCE_FRAME,
815 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
816 };
817 
818 static struct block_defs block_xcm_defs = {
819 	"xcm",
820 	{true, true, true}, true, DBG_XSTORM_ID,
821 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
822 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
823 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
824 	XCM_REG_DBG_FORCE_FRAME,
825 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
826 };
827 
828 static struct block_defs block_ycm_defs = {
829 	"ycm",
830 	{true, true, true}, true, DBG_YSTORM_ID,
831 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
832 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
833 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
834 	YCM_REG_DBG_FORCE_FRAME,
835 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
836 };
837 
838 static struct block_defs block_pcm_defs = {
839 	"pcm",
840 	{true, true, true}, true, DBG_PSTORM_ID,
841 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
842 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
843 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
844 	PCM_REG_DBG_FORCE_FRAME,
845 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
846 };
847 
848 static struct block_defs block_qm_defs = {
849 	"qm",
850 	{true, true, true}, false, 0,
851 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
852 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
853 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
854 	QM_REG_DBG_FORCE_FRAME,
855 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
856 };
857 
858 static struct block_defs block_tm_defs = {
859 	"tm",
860 	{true, true, true}, false, 0,
861 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
862 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
863 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
864 	TM_REG_DBG_FORCE_FRAME,
865 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
866 };
867 
868 static struct block_defs block_dorq_defs = {
869 	"dorq",
870 	{true, true, true}, false, 0,
871 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
872 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
873 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
874 	DORQ_REG_DBG_FORCE_FRAME,
875 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
876 };
877 
878 static struct block_defs block_brb_defs = {
879 	"brb",
880 	{true, true, true}, false, 0,
881 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
882 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
883 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
884 	BRB_REG_DBG_FORCE_FRAME,
885 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
886 };
887 
888 static struct block_defs block_src_defs = {
889 	"src",
890 	{true, true, true}, false, 0,
891 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
892 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
893 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
894 	SRC_REG_DBG_FORCE_FRAME,
895 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
896 };
897 
898 static struct block_defs block_prs_defs = {
899 	"prs",
900 	{true, true, true}, false, 0,
901 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
902 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
903 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
904 	PRS_REG_DBG_FORCE_FRAME,
905 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
906 };
907 
908 static struct block_defs block_tsdm_defs = {
909 	"tsdm",
910 	{true, true, true}, true, DBG_TSTORM_ID,
911 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
912 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
913 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
914 	TSDM_REG_DBG_FORCE_FRAME,
915 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
916 };
917 
918 static struct block_defs block_msdm_defs = {
919 	"msdm",
920 	{true, true, true}, true, DBG_MSTORM_ID,
921 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
922 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
923 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
924 	MSDM_REG_DBG_FORCE_FRAME,
925 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
926 };
927 
928 static struct block_defs block_usdm_defs = {
929 	"usdm",
930 	{true, true, true}, true, DBG_USTORM_ID,
931 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
932 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
933 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
934 	USDM_REG_DBG_FORCE_FRAME,
935 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
936 };
937 
938 static struct block_defs block_xsdm_defs = {
939 	"xsdm",
940 	{true, true, true}, true, DBG_XSTORM_ID,
941 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
942 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
943 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
944 	XSDM_REG_DBG_FORCE_FRAME,
945 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
946 };
947 
948 static struct block_defs block_ysdm_defs = {
949 	"ysdm",
950 	{true, true, true}, true, DBG_YSTORM_ID,
951 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
952 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
953 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
954 	YSDM_REG_DBG_FORCE_FRAME,
955 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
956 };
957 
958 static struct block_defs block_psdm_defs = {
959 	"psdm",
960 	{true, true, true}, true, DBG_PSTORM_ID,
961 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
962 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
963 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
964 	PSDM_REG_DBG_FORCE_FRAME,
965 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
966 };
967 
968 static struct block_defs block_tsem_defs = {
969 	"tsem",
970 	{true, true, true}, true, DBG_TSTORM_ID,
971 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
972 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
973 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
974 	TSEM_REG_DBG_FORCE_FRAME,
975 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
976 };
977 
978 static struct block_defs block_msem_defs = {
979 	"msem",
980 	{true, true, true}, true, DBG_MSTORM_ID,
981 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
982 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
983 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
984 	MSEM_REG_DBG_FORCE_FRAME,
985 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
986 };
987 
988 static struct block_defs block_usem_defs = {
989 	"usem",
990 	{true, true, true}, true, DBG_USTORM_ID,
991 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
992 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
993 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
994 	USEM_REG_DBG_FORCE_FRAME,
995 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
996 };
997 
998 static struct block_defs block_xsem_defs = {
999 	"xsem",
1000 	{true, true, true}, true, DBG_XSTORM_ID,
1001 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1002 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1003 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1004 	XSEM_REG_DBG_FORCE_FRAME,
1005 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1006 };
1007 
1008 static struct block_defs block_ysem_defs = {
1009 	"ysem",
1010 	{true, true, true}, true, DBG_YSTORM_ID,
1011 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1012 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1013 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1014 	YSEM_REG_DBG_FORCE_FRAME,
1015 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1016 };
1017 
1018 static struct block_defs block_psem_defs = {
1019 	"psem",
1020 	{true, true, true}, true, DBG_PSTORM_ID,
1021 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1022 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1023 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1024 	PSEM_REG_DBG_FORCE_FRAME,
1025 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1026 };
1027 
1028 static struct block_defs block_rss_defs = {
1029 	"rss",
1030 	{true, true, true}, false, 0,
1031 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1032 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1033 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1034 	RSS_REG_DBG_FORCE_FRAME,
1035 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1036 };
1037 
1038 static struct block_defs block_tmld_defs = {
1039 	"tmld",
1040 	{true, true, true}, false, 0,
1041 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1042 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1043 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1044 	TMLD_REG_DBG_FORCE_FRAME,
1045 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1046 };
1047 
1048 static struct block_defs block_muld_defs = {
1049 	"muld",
1050 	{true, true, true}, false, 0,
1051 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1052 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1053 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1054 	MULD_REG_DBG_FORCE_FRAME,
1055 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1056 };
1057 
1058 static struct block_defs block_yuld_defs = {
1059 	"yuld",
1060 	{true, true, false}, false, 0,
1061 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1062 	 MAX_DBG_BUS_CLIENTS},
1063 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1064 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1065 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1066 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1067 	15
1068 };
1069 
1070 static struct block_defs block_xyld_defs = {
1071 	"xyld",
1072 	{true, true, true}, false, 0,
1073 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1074 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1075 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1076 	XYLD_REG_DBG_FORCE_FRAME,
1077 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1078 };
1079 
1080 static struct block_defs block_ptld_defs = {
1081 	"ptld",
1082 	{false, false, true}, false, 0,
1083 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1084 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1085 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1086 	PTLD_REG_DBG_FORCE_FRAME_E5,
1087 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1088 	28
1089 };
1090 
1091 static struct block_defs block_ypld_defs = {
1092 	"ypld",
1093 	{false, false, true}, false, 0,
1094 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1095 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1096 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1097 	YPLD_REG_DBG_FORCE_FRAME_E5,
1098 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1099 	27
1100 };
1101 
1102 static struct block_defs block_prm_defs = {
1103 	"prm",
1104 	{true, true, true}, false, 0,
1105 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1106 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1107 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1108 	PRM_REG_DBG_FORCE_FRAME,
1109 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1110 };
1111 
1112 static struct block_defs block_pbf_pb1_defs = {
1113 	"pbf_pb1",
1114 	{true, true, true}, false, 0,
1115 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1116 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1117 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1118 	PBF_PB1_REG_DBG_FORCE_FRAME,
1119 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1120 	11
1121 };
1122 
1123 static struct block_defs block_pbf_pb2_defs = {
1124 	"pbf_pb2",
1125 	{true, true, true}, false, 0,
1126 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1127 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1128 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1129 	PBF_PB2_REG_DBG_FORCE_FRAME,
1130 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1131 	12
1132 };
1133 
1134 static struct block_defs block_rpb_defs = {
1135 	"rpb",
1136 	{true, true, true}, false, 0,
1137 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1138 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1139 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1140 	RPB_REG_DBG_FORCE_FRAME,
1141 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1142 };
1143 
1144 static struct block_defs block_btb_defs = {
1145 	"btb",
1146 	{true, true, true}, false, 0,
1147 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1148 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1149 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1150 	BTB_REG_DBG_FORCE_FRAME,
1151 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1152 };
1153 
1154 static struct block_defs block_pbf_defs = {
1155 	"pbf",
1156 	{true, true, true}, false, 0,
1157 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1158 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1159 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1160 	PBF_REG_DBG_FORCE_FRAME,
1161 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1162 };
1163 
1164 static struct block_defs block_rdif_defs = {
1165 	"rdif",
1166 	{true, true, true}, false, 0,
1167 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1168 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1169 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1170 	RDIF_REG_DBG_FORCE_FRAME,
1171 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1172 };
1173 
1174 static struct block_defs block_tdif_defs = {
1175 	"tdif",
1176 	{true, true, true}, false, 0,
1177 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1178 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1179 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1180 	TDIF_REG_DBG_FORCE_FRAME,
1181 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1182 };
1183 
1184 static struct block_defs block_cdu_defs = {
1185 	"cdu",
1186 	{true, true, true}, false, 0,
1187 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1188 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1189 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1190 	CDU_REG_DBG_FORCE_FRAME,
1191 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1192 };
1193 
1194 static struct block_defs block_ccfc_defs = {
1195 	"ccfc",
1196 	{true, true, true}, false, 0,
1197 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1198 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1199 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1200 	CCFC_REG_DBG_FORCE_FRAME,
1201 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1202 };
1203 
1204 static struct block_defs block_tcfc_defs = {
1205 	"tcfc",
1206 	{true, true, true}, false, 0,
1207 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1208 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1209 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1210 	TCFC_REG_DBG_FORCE_FRAME,
1211 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1212 };
1213 
1214 static struct block_defs block_igu_defs = {
1215 	"igu",
1216 	{true, true, true}, false, 0,
1217 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1218 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1219 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1220 	IGU_REG_DBG_FORCE_FRAME,
1221 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1222 };
1223 
1224 static struct block_defs block_cau_defs = {
1225 	"cau",
1226 	{true, true, true}, false, 0,
1227 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1228 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1229 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1230 	CAU_REG_DBG_FORCE_FRAME,
1231 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1232 };
1233 
1234 static struct block_defs block_rgfs_defs = {
1235 	"rgfs", {false, false, true}, false, 0,
1236 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1237 	0, 0, 0, 0, 0,
1238 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1239 };
1240 
1241 static struct block_defs block_rgsrc_defs = {
1242 	"rgsrc",
1243 	{false, false, true}, false, 0,
1244 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1245 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1246 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1247 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1248 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1249 	30
1250 };
1251 
1252 static struct block_defs block_tgfs_defs = {
1253 	"tgfs", {false, false, true}, false, 0,
1254 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1255 	0, 0, 0, 0, 0,
1256 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1257 };
1258 
1259 static struct block_defs block_tgsrc_defs = {
1260 	"tgsrc",
1261 	{false, false, true}, false, 0,
1262 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1263 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1264 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1265 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1266 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1267 	31
1268 };
1269 
1270 static struct block_defs block_umac_defs = {
1271 	"umac",
1272 	{true, true, true}, false, 0,
1273 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1274 	 DBG_BUS_CLIENT_RBCZ},
1275 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1276 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1277 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1278 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1279 };
1280 
1281 static struct block_defs block_xmac_defs = {
1282 	"xmac", {true, false, false}, false, 0,
1283 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1284 	0, 0, 0, 0, 0,
1285 	false, false, MAX_DBG_RESET_REGS, 0
1286 };
1287 
1288 static struct block_defs block_dbg_defs = {
1289 	"dbg", {true, true, true}, false, 0,
1290 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1291 	0, 0, 0, 0, 0,
1292 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1293 };
1294 
1295 static struct block_defs block_nig_defs = {
1296 	"nig",
1297 	{true, true, true}, false, 0,
1298 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1299 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1300 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1301 	NIG_REG_DBG_FORCE_FRAME,
1302 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1303 };
1304 
1305 static struct block_defs block_wol_defs = {
1306 	"wol",
1307 	{false, true, true}, false, 0,
1308 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1309 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1310 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1311 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1312 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1313 };
1314 
1315 static struct block_defs block_bmbn_defs = {
1316 	"bmbn",
1317 	{false, true, true}, false, 0,
1318 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1319 	 DBG_BUS_CLIENT_RBCB},
1320 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1321 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1322 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1323 	false, false, MAX_DBG_RESET_REGS, 0
1324 };
1325 
1326 static struct block_defs block_ipc_defs = {
1327 	"ipc", {true, true, true}, false, 0,
1328 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1329 	0, 0, 0, 0, 0,
1330 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1331 };
1332 
1333 static struct block_defs block_nwm_defs = {
1334 	"nwm",
1335 	{false, true, true}, false, 0,
1336 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1337 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1338 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1339 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1340 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1341 };
1342 
1343 static struct block_defs block_nws_defs = {
1344 	"nws",
1345 	{false, true, true}, false, 0,
1346 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1347 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1348 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1349 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1350 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1351 };
1352 
1353 static struct block_defs block_ms_defs = {
1354 	"ms",
1355 	{false, true, true}, false, 0,
1356 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1357 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1358 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1359 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1360 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1361 };
1362 
1363 static struct block_defs block_phy_pcie_defs = {
1364 	"phy_pcie",
1365 	{false, true, true}, false, 0,
1366 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1367 	 DBG_BUS_CLIENT_RBCH},
1368 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1369 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1370 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1371 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1372 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1373 	false, false, MAX_DBG_RESET_REGS, 0
1374 };
1375 
1376 static struct block_defs block_led_defs = {
1377 	"led", {false, true, true}, false, 0,
1378 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1379 	0, 0, 0, 0, 0,
1380 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1381 };
1382 
1383 static struct block_defs block_avs_wrap_defs = {
1384 	"avs_wrap", {false, true, false}, false, 0,
1385 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1386 	0, 0, 0, 0, 0,
1387 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1388 };
1389 
1390 static struct block_defs block_pxpreqbus_defs = {
1391 	"pxpreqbus", {false, false, false}, false, 0,
1392 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1393 	0, 0, 0, 0, 0,
1394 	false, false, MAX_DBG_RESET_REGS, 0
1395 };
1396 
1397 static struct block_defs block_misc_aeu_defs = {
1398 	"misc_aeu", {true, true, true}, false, 0,
1399 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1400 	0, 0, 0, 0, 0,
1401 	false, false, MAX_DBG_RESET_REGS, 0
1402 };
1403 
1404 static struct block_defs block_bar0_map_defs = {
1405 	"bar0_map", {true, true, true}, false, 0,
1406 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1407 	0, 0, 0, 0, 0,
1408 	false, false, MAX_DBG_RESET_REGS, 0
1409 };
1410 
1411 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1412 	&block_grc_defs,
1413 	&block_miscs_defs,
1414 	&block_misc_defs,
1415 	&block_dbu_defs,
1416 	&block_pglue_b_defs,
1417 	&block_cnig_defs,
1418 	&block_cpmu_defs,
1419 	&block_ncsi_defs,
1420 	&block_opte_defs,
1421 	&block_bmb_defs,
1422 	&block_pcie_defs,
1423 	&block_mcp_defs,
1424 	&block_mcp2_defs,
1425 	&block_pswhst_defs,
1426 	&block_pswhst2_defs,
1427 	&block_pswrd_defs,
1428 	&block_pswrd2_defs,
1429 	&block_pswwr_defs,
1430 	&block_pswwr2_defs,
1431 	&block_pswrq_defs,
1432 	&block_pswrq2_defs,
1433 	&block_pglcs_defs,
1434 	&block_dmae_defs,
1435 	&block_ptu_defs,
1436 	&block_tcm_defs,
1437 	&block_mcm_defs,
1438 	&block_ucm_defs,
1439 	&block_xcm_defs,
1440 	&block_ycm_defs,
1441 	&block_pcm_defs,
1442 	&block_qm_defs,
1443 	&block_tm_defs,
1444 	&block_dorq_defs,
1445 	&block_brb_defs,
1446 	&block_src_defs,
1447 	&block_prs_defs,
1448 	&block_tsdm_defs,
1449 	&block_msdm_defs,
1450 	&block_usdm_defs,
1451 	&block_xsdm_defs,
1452 	&block_ysdm_defs,
1453 	&block_psdm_defs,
1454 	&block_tsem_defs,
1455 	&block_msem_defs,
1456 	&block_usem_defs,
1457 	&block_xsem_defs,
1458 	&block_ysem_defs,
1459 	&block_psem_defs,
1460 	&block_rss_defs,
1461 	&block_tmld_defs,
1462 	&block_muld_defs,
1463 	&block_yuld_defs,
1464 	&block_xyld_defs,
1465 	&block_ptld_defs,
1466 	&block_ypld_defs,
1467 	&block_prm_defs,
1468 	&block_pbf_pb1_defs,
1469 	&block_pbf_pb2_defs,
1470 	&block_rpb_defs,
1471 	&block_btb_defs,
1472 	&block_pbf_defs,
1473 	&block_rdif_defs,
1474 	&block_tdif_defs,
1475 	&block_cdu_defs,
1476 	&block_ccfc_defs,
1477 	&block_tcfc_defs,
1478 	&block_igu_defs,
1479 	&block_cau_defs,
1480 	&block_rgfs_defs,
1481 	&block_rgsrc_defs,
1482 	&block_tgfs_defs,
1483 	&block_tgsrc_defs,
1484 	&block_umac_defs,
1485 	&block_xmac_defs,
1486 	&block_dbg_defs,
1487 	&block_nig_defs,
1488 	&block_wol_defs,
1489 	&block_bmbn_defs,
1490 	&block_ipc_defs,
1491 	&block_nwm_defs,
1492 	&block_nws_defs,
1493 	&block_ms_defs,
1494 	&block_phy_pcie_defs,
1495 	&block_led_defs,
1496 	&block_avs_wrap_defs,
1497 	&block_pxpreqbus_defs,
1498 	&block_misc_aeu_defs,
1499 	&block_bar0_map_defs,
1500 };
1501 
1502 static struct platform_defs s_platform_defs[] = {
1503 	{"asic", 1, 256, 32768},
1504 	{"reserved", 0, 0, 0},
1505 	{"reserved2", 0, 0, 0},
1506 	{"reserved3", 0, 0, 0}
1507 };
1508 
1509 static struct grc_param_defs s_grc_param_defs[] = {
1510 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1511 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1512 
1513 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1514 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1515 
1516 	/* DBG_GRC_PARAM_DUMP_USTORM */
1517 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1518 
1519 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1520 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1521 
1522 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1523 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1524 
1525 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1526 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1527 
1528 	/* DBG_GRC_PARAM_DUMP_REGS */
1529 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1530 
1531 	/* DBG_GRC_PARAM_DUMP_RAM */
1532 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1533 
1534 	/* DBG_GRC_PARAM_DUMP_PBUF */
1535 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1536 
1537 	/* DBG_GRC_PARAM_DUMP_IOR */
1538 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1539 
1540 	/* DBG_GRC_PARAM_DUMP_VFC */
1541 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1542 
1543 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1544 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1545 
1546 	/* DBG_GRC_PARAM_DUMP_ILT */
1547 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1548 
1549 	/* DBG_GRC_PARAM_DUMP_RSS */
1550 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1551 
1552 	/* DBG_GRC_PARAM_DUMP_CAU */
1553 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1554 
1555 	/* DBG_GRC_PARAM_DUMP_QM */
1556 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1557 
1558 	/* DBG_GRC_PARAM_DUMP_MCP */
1559 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1560 
1561 	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1562 	{{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1563 
1564 	/* DBG_GRC_PARAM_DUMP_CFC */
1565 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1566 
1567 	/* DBG_GRC_PARAM_DUMP_IGU */
1568 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1569 
1570 	/* DBG_GRC_PARAM_DUMP_BRB */
1571 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1572 
1573 	/* DBG_GRC_PARAM_DUMP_BTB */
1574 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1575 
1576 	/* DBG_GRC_PARAM_DUMP_BMB */
1577 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1578 
1579 	/* DBG_GRC_PARAM_DUMP_NIG */
1580 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1581 
1582 	/* DBG_GRC_PARAM_DUMP_MULD */
1583 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1584 
1585 	/* DBG_GRC_PARAM_DUMP_PRS */
1586 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1587 
1588 	/* DBG_GRC_PARAM_DUMP_DMAE */
1589 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1590 
1591 	/* DBG_GRC_PARAM_DUMP_TM */
1592 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1593 
1594 	/* DBG_GRC_PARAM_DUMP_SDM */
1595 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1596 
1597 	/* DBG_GRC_PARAM_DUMP_DIF */
1598 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1599 
1600 	/* DBG_GRC_PARAM_DUMP_STATIC */
1601 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1602 
1603 	/* DBG_GRC_PARAM_UNSTALL */
1604 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1605 
1606 	/* DBG_GRC_PARAM_NUM_LCIDS */
1607 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1608 	 MAX_LCIDS, MAX_LCIDS},
1609 
1610 	/* DBG_GRC_PARAM_NUM_LTIDS */
1611 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1612 	 MAX_LTIDS, MAX_LTIDS},
1613 
1614 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1615 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1616 
1617 	/* DBG_GRC_PARAM_CRASH */
1618 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1619 
1620 	/* DBG_GRC_PARAM_PARITY_SAFE */
1621 	{{0, 0, 0}, 0, 1, false, false, 1, 0},
1622 
1623 	/* DBG_GRC_PARAM_DUMP_CM */
1624 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1625 
1626 	/* DBG_GRC_PARAM_DUMP_PHY */
1627 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1628 
1629 	/* DBG_GRC_PARAM_NO_MCP */
1630 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1631 
1632 	/* DBG_GRC_PARAM_NO_FW_VER */
1633 	{{0, 0, 0}, 0, 1, false, false, 0, 0}
1634 };
1635 
1636 static struct rss_mem_defs s_rss_mem_defs[] = {
1637 	{ "rss_mem_cid", "rss_cid", 0, 32,
1638 	  {256, 320, 512} },
1639 
1640 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1641 	  {128, 208, 257} },
1642 
1643 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1644 	  {128, 208, 257} },
1645 
1646 	{ "rss_mem_info", "rss_info", 3072, 16,
1647 	  {128, 208, 256} },
1648 
1649 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1650 	  {16384, 26624, 32768} }
1651 };
1652 
1653 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1654 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1655 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1656 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1657 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1658 };
1659 
1660 static struct big_ram_defs s_big_ram_defs[] = {
1661 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1662 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1663 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1664 	  {153600, 180224, 282624} },
1665 
1666 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1667 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1668 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1669 	  {92160, 117760, 168960} },
1670 
1671 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1672 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1673 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1674 	  {36864, 36864, 36864} }
1675 };
1676 
1677 static struct reset_reg_defs s_reset_regs_defs[] = {
1678 	/* DBG_RESET_REG_MISCS_PL_UA */
1679 	{ MISCS_REG_RESET_PL_UA,
1680 	  {true, true, true}, {0x0, 0x0, 0x0} },
1681 
1682 	/* DBG_RESET_REG_MISCS_PL_HV */
1683 	{ MISCS_REG_RESET_PL_HV,
1684 	  {true, true, true}, {0x0, 0x400, 0x600} },
1685 
1686 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1687 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1688 	  {false, true, true}, {0x0, 0x0, 0x0} },
1689 
1690 	/* DBG_RESET_REG_MISC_PL_UA */
1691 	{ MISC_REG_RESET_PL_UA,
1692 	  {true, true, true}, {0x0, 0x0, 0x0} },
1693 
1694 	/* DBG_RESET_REG_MISC_PL_HV */
1695 	{ MISC_REG_RESET_PL_HV,
1696 	  {true, true, true}, {0x0, 0x0, 0x0} },
1697 
1698 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1699 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1700 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1701 
1702 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1703 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1704 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1705 
1706 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1707 	{ MISC_REG_RESET_PL_PDA_VAUX,
1708 	  {true, true, true}, {0x2, 0x2, 0x2} },
1709 };
1710 
1711 static struct phy_defs s_phy_defs[] = {
1712 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1713 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1714 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1715 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1716 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1717 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1718 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1719 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1720 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1721 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1722 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1723 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1724 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1725 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1726 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1727 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1728 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1729 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1730 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1731 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1732 };
1733 
1734 static struct split_type_defs s_split_type_defs[] = {
1735 	/* SPLIT_TYPE_NONE */
1736 	{"eng"},
1737 
1738 	/* SPLIT_TYPE_PORT */
1739 	{"port"},
1740 
1741 	/* SPLIT_TYPE_PF */
1742 	{"pf"},
1743 
1744 	/* SPLIT_TYPE_PORT_PF */
1745 	{"port"},
1746 
1747 	/* SPLIT_TYPE_VF */
1748 	{"vf"}
1749 };
1750 
1751 /**************************** Private Functions ******************************/
1752 
1753 /* Reads and returns a single dword from the specified unaligned buffer */
1754 static u32 qed_read_unaligned_dword(u8 *buf)
1755 {
1756 	u32 dword;
1757 
1758 	memcpy((u8 *)&dword, buf, sizeof(dword));
1759 	return dword;
1760 }
1761 
1762 /* Returns the value of the specified GRC param */
1763 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1764 			     enum dbg_grc_params grc_param)
1765 {
1766 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1767 
1768 	return dev_data->grc.param_val[grc_param];
1769 }
1770 
1771 /* Initializes the GRC parameters */
1772 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1773 {
1774 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1775 
1776 	if (!dev_data->grc.params_initialized) {
1777 		qed_dbg_grc_set_params_default(p_hwfn);
1778 		dev_data->grc.params_initialized = 1;
1779 	}
1780 }
1781 
1782 /* Initializes debug data for the specified device */
1783 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1784 					struct qed_ptt *p_ptt)
1785 {
1786 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1787 	u8 num_pfs = 0, max_pfs_per_port = 0;
1788 
1789 	if (dev_data->initialized)
1790 		return DBG_STATUS_OK;
1791 
1792 	/* Set chip */
1793 	if (QED_IS_K2(p_hwfn->cdev)) {
1794 		dev_data->chip_id = CHIP_K2;
1795 		dev_data->mode_enable[MODE_K2] = 1;
1796 		dev_data->num_vfs = MAX_NUM_VFS_K2;
1797 		num_pfs = MAX_NUM_PFS_K2;
1798 		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
1799 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1800 		dev_data->chip_id = CHIP_BB;
1801 		dev_data->mode_enable[MODE_BB] = 1;
1802 		dev_data->num_vfs = MAX_NUM_VFS_BB;
1803 		num_pfs = MAX_NUM_PFS_BB;
1804 		max_pfs_per_port = MAX_NUM_PFS_BB;
1805 	} else {
1806 		return DBG_STATUS_UNKNOWN_CHIP;
1807 	}
1808 
1809 	/* Set platform */
1810 	dev_data->platform_id = PLATFORM_ASIC;
1811 	dev_data->mode_enable[MODE_ASIC] = 1;
1812 
1813 	/* Set port mode */
1814 	switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
1815 	case 0:
1816 		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
1817 		break;
1818 	case 1:
1819 		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
1820 		break;
1821 	case 2:
1822 		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
1823 		break;
1824 	}
1825 
1826 	/* Set 100G mode */
1827 	if (dev_data->chip_id == CHIP_BB &&
1828 	    qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
1829 		dev_data->mode_enable[MODE_100G] = 1;
1830 
1831 	/* Set number of ports */
1832 	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1833 	    dev_data->mode_enable[MODE_100G])
1834 		dev_data->num_ports = 1;
1835 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1836 		dev_data->num_ports = 2;
1837 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1838 		dev_data->num_ports = 4;
1839 
1840 	/* Set number of PFs per port */
1841 	dev_data->num_pfs_per_port = min_t(u32,
1842 					   num_pfs / dev_data->num_ports,
1843 					   max_pfs_per_port);
1844 
1845 	/* Initializes the GRC parameters */
1846 	qed_dbg_grc_init_params(p_hwfn);
1847 
1848 	dev_data->use_dmae = true;
1849 	dev_data->initialized = 1;
1850 
1851 	return DBG_STATUS_OK;
1852 }
1853 
1854 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1855 						    enum block_id block_id)
1856 {
1857 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1858 
1859 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1860 						       MAX_CHIP_IDS +
1861 						       dev_data->chip_id];
1862 }
1863 
1864 /* Reads the FW info structure for the specified Storm from the chip,
1865  * and writes it to the specified fw_info pointer.
1866  */
1867 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1868 				   struct qed_ptt *p_ptt,
1869 				   u8 storm_id, struct fw_info *fw_info)
1870 {
1871 	struct storm_defs *storm = &s_storm_defs[storm_id];
1872 	struct fw_info_location fw_info_location;
1873 	u32 addr, i, *dest;
1874 
1875 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1876 	memset(fw_info, 0, sizeof(*fw_info));
1877 
1878 	/* First read the address that points to the fw_info location.
1879 	 * The address is located in the last line of the Storm RAM.
1880 	 */
1881 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1882 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1883 	       sizeof(fw_info_location);
1884 	dest = (u32 *)&fw_info_location;
1885 
1886 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1887 	     i++, addr += BYTES_IN_DWORD)
1888 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1889 
1890 	/* Read FW version info from Storm RAM */
1891 	if (fw_info_location.size > 0 && fw_info_location.size <=
1892 	    sizeof(*fw_info)) {
1893 		addr = fw_info_location.grc_addr;
1894 		dest = (u32 *)fw_info;
1895 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1896 		     i++, addr += BYTES_IN_DWORD)
1897 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1898 	}
1899 }
1900 
1901 /* Dumps the specified string to the specified buffer.
1902  * Returns the dumped size in bytes.
1903  */
1904 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1905 {
1906 	if (dump)
1907 		strcpy(dump_buf, str);
1908 
1909 	return (u32)strlen(str) + 1;
1910 }
1911 
1912 /* Dumps zeros to align the specified buffer to dwords.
1913  * Returns the dumped size in bytes.
1914  */
1915 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1916 {
1917 	u8 offset_in_dword, align_size;
1918 
1919 	offset_in_dword = (u8)(byte_offset & 0x3);
1920 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1921 
1922 	if (dump && align_size)
1923 		memset(dump_buf, 0, align_size);
1924 
1925 	return align_size;
1926 }
1927 
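/* A dump param is encoded as: the NUL-terminated param name, a one-byte type
 * flag (1 = string, 0 = numeric), and the value - a NUL-terminated string
 * padded with zeros to the next dword boundary, or (after zero padding to a
 * dword boundary) a single dword for numeric params.
 * E.g. the numeric param "count" = 3 is encoded as 'c' 'o' 'u' 'n' 't' '\0',
 * a 0x00 type flag, one 0x00 pad byte, and the dword 0x00000003.
 * When the 'dump' argument is false, the qed_dump_* helpers only calculate
 * and return the size, without writing to the buffer.
 */
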
1928 /* Writes the specified string param to the specified buffer.
1929  * Returns the dumped size in dwords.
1930  */
1931 static u32 qed_dump_str_param(u32 *dump_buf,
1932 			      bool dump,
1933 			      const char *param_name, const char *param_val)
1934 {
1935 	char *char_buf = (char *)dump_buf;
1936 	u32 offset = 0;
1937 
1938 	/* Dump param name */
1939 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1940 
1941 	/* Indicate a string param value */
1942 	if (dump)
1943 		*(char_buf + offset) = 1;
1944 	offset++;
1945 
1946 	/* Dump param value */
1947 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1948 
1949 	/* Align buffer to next dword */
1950 	offset += qed_dump_align(char_buf + offset, dump, offset);
1951 
1952 	return BYTES_TO_DWORDS(offset);
1953 }
1954 
1955 /* Writes the specified numeric param to the specified buffer.
1956  * Returns the dumped size in dwords.
1957  */
1958 static u32 qed_dump_num_param(u32 *dump_buf,
1959 			      bool dump, const char *param_name, u32 param_val)
1960 {
1961 	char *char_buf = (char *)dump_buf;
1962 	u32 offset = 0;
1963 
1964 	/* Dump param name */
1965 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1966 
1967 	/* Indicate a numeric param value */
1968 	if (dump)
1969 		*(char_buf + offset) = 0;
1970 	offset++;
1971 
1972 	/* Align buffer to next dword */
1973 	offset += qed_dump_align(char_buf + offset, dump, offset);
1974 
1975 	/* Dump param value (and change offset from bytes to dwords) */
1976 	offset = BYTES_TO_DWORDS(offset);
1977 	if (dump)
1978 		*(dump_buf + offset) = param_val;
1979 	offset++;
1980 
1981 	return offset;
1982 }
1983 
1984 /* Reads the FW version and writes it as a param to the specified buffer.
1985  * Returns the dumped size in dwords.
1986  */
1987 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1988 				 struct qed_ptt *p_ptt,
1989 				 u32 *dump_buf, bool dump)
1990 {
1991 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1992 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1993 	struct fw_info fw_info = { {0}, {0} };
1994 	u32 offset = 0;
1995 
1996 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1997 		/* Read FW info from chip */
1998 		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1999 
2000 		/* Create FW version/image strings */
2001 		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
2002 			     "%d_%d_%d_%d", fw_info.ver.num.major,
2003 			     fw_info.ver.num.minor, fw_info.ver.num.rev,
2004 			     fw_info.ver.num.eng) < 0)
2005 			DP_NOTICE(p_hwfn,
2006 				  "Unexpected debug error: invalid FW version string\n");
2007 		switch (fw_info.ver.image_id) {
2008 		case FW_IMG_MAIN:
2009 			strcpy(fw_img_str, "main");
2010 			break;
2011 		default:
2012 			strcpy(fw_img_str, "unknown");
2013 			break;
2014 		}
2015 	}
2016 
2017 	/* Dump FW version, image and timestamp */
2018 	offset += qed_dump_str_param(dump_buf + offset,
2019 				     dump, "fw-version", fw_ver_str);
2020 	offset += qed_dump_str_param(dump_buf + offset,
2021 				     dump, "fw-image", fw_img_str);
2022 	offset += qed_dump_num_param(dump_buf + offset,
2023 				     dump,
2024 				     "fw-timestamp", fw_info.ver.timestamp);
2025 
2026 	return offset;
2027 }
2028 
2029 /* Reads the MFW version and writes it as a param to the specified buffer.
2030  * Returns the dumped size in dwords.
2031  */
2032 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2033 				  struct qed_ptt *p_ptt,
2034 				  u32 *dump_buf, bool dump)
2035 {
2036 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2037 
2038 	if (dump &&
2039 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2040 		u32 global_section_offsize, global_section_addr, mfw_ver;
2041 		u32 public_data_addr, global_section_offsize_addr;
2042 
2043 		/* Find MCP public data GRC address. Needs to be ORed with
2044 		 * MCP_REG_SCRATCH due to a HW bug.
2045 		 */
2046 		public_data_addr = qed_rd(p_hwfn,
2047 					  p_ptt,
2048 					  MISC_REG_SHARED_MEM_ADDR) |
2049 				   MCP_REG_SCRATCH;
2050 
2051 		/* Find MCP public global section offset */
2052 		global_section_offsize_addr = public_data_addr +
2053 					      offsetof(struct mcp_public_data,
2054 						       sections) +
2055 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2056 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2057 						global_section_offsize_addr);
2058 		global_section_addr =
2059 			MCP_REG_SCRATCH +
2060 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2061 
2062 		/* Read MFW version from MCP public global section */
2063 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2064 				 global_section_addr +
2065 				 offsetof(struct public_global, mfw_ver));
2066 
2067 		/* Dump MFW version param */
2068 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2069 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2070 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2071 			DP_NOTICE(p_hwfn,
2072 				  "Unexpected debug error: invalid MFW version string\n");
2073 	}
2074 
2075 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2076 }
2077 
2078 /* Writes a section header to the specified buffer.
2079  * Returns the dumped size in dwords.
2080  */
2081 static u32 qed_dump_section_hdr(u32 *dump_buf,
2082 				bool dump, const char *name, u32 num_params)
2083 {
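	/* A section header is a numeric param: the section name, with the
	 * number of section params as its value.
	 */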
2084 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2085 }
2086 
2087 /* Writes the common global params to the specified buffer.
2088  * Returns the dumped size in dwords.
2089  */
2090 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2091 					 struct qed_ptt *p_ptt,
2092 					 u32 *dump_buf,
2093 					 bool dump,
2094 					 u8 num_specific_global_params)
2095 {
2096 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2097 	u32 offset = 0;
2098 	u8 num_params;
2099 
2100 	/* Dump global params section header */
2101 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2102 	offset += qed_dump_section_hdr(dump_buf + offset,
2103 				       dump, "global_params", num_params);
2104 
2105 	/* Store params */
2106 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2107 	offset += qed_dump_mfw_ver_param(p_hwfn,
2108 					 p_ptt, dump_buf + offset, dump);
2109 	offset += qed_dump_num_param(dump_buf + offset,
2110 				     dump, "tools-version", TOOLS_VERSION);
2111 	offset += qed_dump_str_param(dump_buf + offset,
2112 				     dump,
2113 				     "chip",
2114 				     s_chip_defs[dev_data->chip_id].name);
2115 	offset += qed_dump_str_param(dump_buf + offset,
2116 				     dump,
2117 				     "platform",
2118 				     s_platform_defs[dev_data->platform_id].
2119 				     name);
2120 	offset +=
2121 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2122 			       p_hwfn->abs_pf_id);
2123 
2124 	return offset;
2125 }
2126 
2127 /* Writes the "last" section (including CRC) to the specified buffer at the
2128  * given offset. Returns the dumped size in dwords.
2129  */
2130 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2131 {
2132 	u32 start_offset = offset;
2133 
2134 	/* Dump CRC section header */
2135 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2136 
2137 	/* Calculate CRC32 and add it to the dword after the "last" section */
2138 	if (dump)
2139 		*(dump_buf + offset) = ~crc32(0xffffffff,
2140 					      (u8 *)dump_buf,
2141 					      DWORDS_TO_BYTES(offset));
2142 
2143 	offset++;
2144 
2145 	return offset - start_offset;
2146 }
2147 
2148 /* Updates the reset state of all blocks */
2149 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2150 					  struct qed_ptt *p_ptt)
2151 {
2152 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2153 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2154 	u32 i;
2155 
2156 	/* Read reset registers */
2157 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2158 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2159 			reg_val[i] = qed_rd(p_hwfn,
2160 					    p_ptt, s_reset_regs_defs[i].addr);
2161 
2162 	/* Check if blocks are in reset */
2163 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2164 		struct block_defs *block = s_block_defs[i];
2165 
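		/* A block is in reset when its bit in the corresponding reset
		 * register is cleared.
		 */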
2166 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2167 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2168 	}
2169 }
2170 
2171 /* Enable / disable the Debug block */
2172 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2173 				     struct qed_ptt *p_ptt, bool enable)
2174 {
2175 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2176 }
2177 
2178 /* Resets the Debug block */
2179 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2180 				    struct qed_ptt *p_ptt)
2181 {
2182 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2183 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2184 
2185 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2186 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2187 	new_reset_reg_val =
2188 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2189 
2190 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2191 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2192 }
2193 
2194 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2195 				     struct qed_ptt *p_ptt,
2196 				     enum dbg_bus_frame_modes mode)
2197 {
2198 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2199 }
2200 
2201 /* Enable / disable Debug Bus clients according to the specified mask
2202  * (1 = enable, 0 = disable).
2203  */
2204 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2205 				   struct qed_ptt *p_ptt, u32 client_mask)
2206 {
2207 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2208 }
2209 
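/* Recursively evaluates the mode tree expression at *modes_buf_offset against
 * the modes enabled in dev_data. The expression is stored in prefix notation:
 * each byte is either a NOT/OR/AND operator or a mode index offset by
 * MAX_INIT_MODE_OPS. *modes_buf_offset is advanced past the evaluated
 * sub-expression.
 */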
2210 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2211 {
2212 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2213 	bool arg1, arg2;
2214 	const u32 *ptr;
2215 	u8 tree_val;
2216 
2217 	/* Get next element from modes tree buffer */
2218 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2219 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2220 
2221 	switch (tree_val) {
2222 	case INIT_MODE_OP_NOT:
2223 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2224 	case INIT_MODE_OP_OR:
2225 	case INIT_MODE_OP_AND:
2226 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2227 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2228 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2229 							arg2) : (arg1 && arg2);
2230 	default:
2231 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2232 	}
2233 }
2234 
2235 /* Returns true if the specified entity (indicated by GRC param) should be
2236  * included in the dump, false otherwise.
2237  */
2238 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2239 				enum dbg_grc_params grc_param)
2240 {
2241 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2242 }
2243 
2244 /* Returns true if the specified Storm should be included in the dump, false
2245  * otherwise.
2246  */
2247 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2248 				      enum dbg_storms storm)
2249 {
2250 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2251 }
2252 
2253 /* Returns true if the specified memory should be included in the dump, false
2254  * otherwise.
2255  */
2256 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2257 				    enum block_id block_id, u8 mem_group_id)
2258 {
2259 	struct block_defs *block = s_block_defs[block_id];
2260 	u8 i;
2261 
2262 	/* Check Storm match */
2263 	if (block->associated_to_storm &&
2264 	    !qed_grc_is_storm_included(p_hwfn,
2265 				       (enum dbg_storms)block->storm_id))
2266 		return false;
2267 
2268 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2269 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2270 
2271 		if (mem_group_id == big_ram->mem_group_id ||
2272 		    mem_group_id == big_ram->ram_mem_group_id)
2273 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2274 	}
2275 
2276 	switch (mem_group_id) {
2277 	case MEM_GROUP_PXP_ILT:
2278 	case MEM_GROUP_PXP_MEM:
2279 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2280 	case MEM_GROUP_RAM:
2281 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2282 	case MEM_GROUP_PBUF:
2283 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2284 	case MEM_GROUP_CAU_MEM:
2285 	case MEM_GROUP_CAU_SB:
2286 	case MEM_GROUP_CAU_PI:
2287 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2288 	case MEM_GROUP_QM_MEM:
2289 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2290 	case MEM_GROUP_CFC_MEM:
2291 	case MEM_GROUP_CONN_CFC_MEM:
2292 	case MEM_GROUP_TASK_CFC_MEM:
2293 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2294 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2295 	case MEM_GROUP_IGU_MEM:
2296 	case MEM_GROUP_IGU_MSIX:
2297 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2298 	case MEM_GROUP_MULD_MEM:
2299 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2300 	case MEM_GROUP_PRS_MEM:
2301 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2302 	case MEM_GROUP_DMAE_MEM:
2303 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2304 	case MEM_GROUP_TM_MEM:
2305 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2306 	case MEM_GROUP_SDM_MEM:
2307 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2308 	case MEM_GROUP_TDIF_CTX:
2309 	case MEM_GROUP_RDIF_CTX:
2310 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2311 	case MEM_GROUP_CM_MEM:
2312 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2313 	case MEM_GROUP_IOR:
2314 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2315 	default:
2316 		return true;
2317 	}
2318 }
2319 
2320 /* Stalls or un-stalls all Storms, based on the 'stall' argument */
2321 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2322 				 struct qed_ptt *p_ptt, bool stall)
2323 {
2324 	u32 reg_addr;
2325 	u8 storm_id;
2326 
2327 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2328 		if (!qed_grc_is_storm_included(p_hwfn,
2329 					       (enum dbg_storms)storm_id))
2330 			continue;
2331 
2332 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2333 		    SEM_FAST_REG_STALL_0_BB_K2;
2334 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2335 	}
2336 
2337 	msleep(STALL_DELAY_MS);
2338 }
2339 
2340 /* Takes all blocks out of reset */
2341 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2342 				   struct qed_ptt *p_ptt)
2343 {
2344 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2345 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2346 	u32 block_id, i;
2347 
2348 	/* Fill reset regs values */
2349 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2350 		struct block_defs *block = s_block_defs[block_id];
2351 
2352 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2353 		    block->unreset)
2354 			reg_val[block->reset_reg] |=
2355 			    BIT(block->reset_bit_offset);
2356 	}
2357 
2358 	/* Write reset registers */
2359 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2360 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2361 			continue;
2362 
2363 		reg_val[i] |=
2364 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2365 
2366 		if (reg_val[i])
2367 			qed_wr(p_hwfn,
2368 			       p_ptt,
2369 			       s_reset_regs_defs[i].addr +
2370 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2371 	}
2372 }
2373 
2374 /* Returns the attention block data of the specified block */
2375 static const struct dbg_attn_block_type_data *
2376 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2377 {
2378 	const struct dbg_attn_block *base_attn_block_arr =
2379 		(const struct dbg_attn_block *)
2380 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2381 
2382 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2383 }
2384 
2385 /* Returns the attention registers of the specified block */
2386 static const struct dbg_attn_reg *
2387 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2388 			u8 *num_attn_regs)
2389 {
2390 	const struct dbg_attn_block_type_data *block_type_data =
2391 		qed_get_block_attn_data(block_id, attn_type);
2392 
2393 	*num_attn_regs = block_type_data->num_regs;
2394 
2395 	return &((const struct dbg_attn_reg *)
2396 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2397 							  regs_offset];
2398 }
2399 
2400 /* For each block, clear the status of all parities */
2401 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2402 				   struct qed_ptt *p_ptt)
2403 {
2404 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2405 	const struct dbg_attn_reg *attn_reg_arr;
2406 	u8 reg_idx, num_attn_regs;
2407 	u32 block_id;
2408 
2409 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2410 		if (dev_data->block_in_reset[block_id])
2411 			continue;
2412 
2413 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2414 						       ATTN_TYPE_PARITY,
2415 						       &num_attn_regs);
2416 
2417 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2418 			const struct dbg_attn_reg *reg_data =
2419 				&attn_reg_arr[reg_idx];
2420 			u16 modes_buf_offset;
2421 			bool eval_mode;
2422 
2423 			/* Check mode */
2424 			eval_mode = GET_FIELD(reg_data->mode.data,
2425 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2426 			modes_buf_offset =
2427 				GET_FIELD(reg_data->mode.data,
2428 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2429 
2430 			/* If Mode match: clear parity status */
2431 			if (!eval_mode ||
2432 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2433 				qed_rd(p_hwfn, p_ptt,
2434 				       DWORDS_TO_BYTES(reg_data->
2435 						       sts_clr_address));
2436 		}
2437 	}
2438 }
2439 
2440 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2441  * The following parameters are dumped:
2442  * - count: no. of dumped entries
2443  * - split_type: split type
2444  * - split_id: split ID (dumped only if split_type != SPLIT_TYPE_NONE)
2445  * - param_name/param_val: user parameter name and value (dumped only if
2446  *			    both are not NULL).
2447  */
2448 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2449 				 bool dump,
2450 				 u32 num_reg_entries,
2451 				 enum init_split_types split_type,
2452 				 u8 split_id,
2453 				 const char *param_name, const char *param_val)
2454 {
2455 	u8 num_params = 2 +
2456 	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
2457 	u32 offset = 0;
2458 
2459 	offset += qed_dump_section_hdr(dump_buf + offset,
2460 				       dump, "grc_regs", num_params);
2461 	offset += qed_dump_num_param(dump_buf + offset,
2462 				     dump, "count", num_reg_entries);
2463 	offset += qed_dump_str_param(dump_buf + offset,
2464 				     dump, "split",
2465 				     s_split_type_defs[split_type].name);
2466 	if (split_type != SPLIT_TYPE_NONE)
2467 		offset += qed_dump_num_param(dump_buf + offset,
2468 					     dump, "id", split_id);
2469 	if (param_name && param_val)
2470 		offset += qed_dump_str_param(dump_buf + offset,
2471 					     dump, param_name, param_val);
2472 
2473 	return offset;
2474 }
2475 
2476 /* Reads the specified registers into the specified buffer.
2477  * The addr and len arguments are specified in dwords.
2478  */
2479 void qed_read_regs(struct qed_hwfn *p_hwfn,
2480 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2481 {
2482 	u32 i;
2483 
2484 	for (i = 0; i < len; i++)
2485 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2486 }
2487 
2488 /* Dumps the GRC registers in the specified address range.
2489  * Returns the dumped size in dwords.
2490  * The addr and len arguments are specified in dwords.
2491  */
2492 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2493 				   struct qed_ptt *p_ptt,
2494 				   u32 *dump_buf,
2495 				   bool dump, u32 addr, u32 len, bool wide_bus,
2496 				   enum init_split_types split_type,
2497 				   u8 split_id)
2498 {
2499 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2500 	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
2501 
2502 	if (!dump)
2503 		return len;
2504 
2505 	/* Print log if needed */
2506 	dev_data->num_regs_read += len;
2507 	if (dev_data->num_regs_read >=
2508 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2509 		DP_VERBOSE(p_hwfn,
2510 			   QED_MSG_DEBUG,
2511 			   "Dumping %d registers...\n",
2512 			   dev_data->num_regs_read);
2513 		dev_data->num_regs_read = 0;
2514 	}
2515 
2516 	switch (split_type) {
2517 	case SPLIT_TYPE_PORT:
2518 		port_id = split_id;
2519 		break;
2520 	case SPLIT_TYPE_PF:
2521 		pf_id = split_id;
2522 		break;
2523 	case SPLIT_TYPE_PORT_PF:
2524 		port_id = split_id / dev_data->num_pfs_per_port;
2525 		pf_id = port_id + dev_data->num_ports *
2526 		    (split_id % dev_data->num_pfs_per_port);
2527 		break;
2528 	case SPLIT_TYPE_VF:
2529 		vf_id = split_id;
2530 		break;
2531 	default:
2532 		break;
2533 	}
2534 
2535 	/* Try reading using DMAE */
2536 	if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
2537 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2538 	     wide_bus)) {
2539 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2540 				       (u64)(uintptr_t)(dump_buf), len, 0))
2541 			return len;
2542 		dev_data->use_dmae = 0;
2543 		DP_VERBOSE(p_hwfn,
2544 			   QED_MSG_DEBUG,
2545 			   "Failed reading from chip using DMAE, using GRC instead\n");
2546 	}
2547 
2548 	/* If not read using DMAE, read using GRC */
2549 
2550 	/* Set pretend */
2551 	if (split_type != dev_data->pretend.split_type || split_id !=
2552 	    dev_data->pretend.split_id) {
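		/* The pretend makes subsequent GRC accesses execute on behalf
		 * of the selected port/PF/VF, so it only needs to be
		 * re-programmed when the split type or ID changes.
		 */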
2553 		switch (split_type) {
2554 		case SPLIT_TYPE_PORT:
2555 			qed_port_pretend(p_hwfn, p_ptt, port_id);
2556 			break;
2557 		case SPLIT_TYPE_PF:
2558 			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2559 			qed_fid_pretend(p_hwfn, p_ptt, fid);
2560 			break;
2561 		case SPLIT_TYPE_PORT_PF:
2562 			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2563 			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2564 			break;
2565 		case SPLIT_TYPE_VF:
2566 			fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
2567 			      (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
2568 			qed_fid_pretend(p_hwfn, p_ptt, fid);
2569 			break;
2570 		default:
2571 			break;
2572 		}
2573 
2574 		dev_data->pretend.split_type = (u8)split_type;
2575 		dev_data->pretend.split_id = split_id;
2576 	}
2577 
2578 	/* Read registers using GRC */
2579 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2580 
2581 	return len;
2582 }
2583 
2584 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2585  * The addr and len arguments are specified in dwords.
2586  */
2587 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2588 				      bool dump, u32 addr, u32 len)
2589 {
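	/* The sequence header is a single dword: the GRC address (in dwords)
	 * in the low bits and the entry length in the bits from
	 * REG_DUMP_LEN_SHIFT upward.
	 */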
2590 	if (dump)
2591 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2592 
2593 	return 1;
2594 }
2595 
2596 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2597  * The addr and len arguments are specified in dwords.
2598  */
2599 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2600 				  struct qed_ptt *p_ptt,
2601 				  u32 *dump_buf,
2602 				  bool dump, u32 addr, u32 len, bool wide_bus,
2603 				  enum init_split_types split_type, u8 split_id)
2604 {
2605 	u32 offset = 0;
2606 
2607 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2608 	offset += qed_grc_dump_addr_range(p_hwfn,
2609 					  p_ptt,
2610 					  dump_buf + offset,
2611 					  dump, addr, len, wide_bus,
2612 					  split_type, split_id);
2613 
2614 	return offset;
2615 }
2616 
2617 /* Dumps GRC registers sequence with skip cycle.
2618  * Returns the dumped size in dwords.
2619  * - addr:	start GRC address in dwords
2620  * - total_len:	total no. of dwords to dump
2621  * - read_len:	no. consecutive dwords to read
2622  * - skip_len:	no. of dwords to skip (and fill with zeros)
2623  */
2624 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2625 				       struct qed_ptt *p_ptt,
2626 				       u32 *dump_buf,
2627 				       bool dump,
2628 				       u32 addr,
2629 				       u32 total_len,
2630 				       u32 read_len, u32 skip_len)
2631 {
2632 	u32 offset = 0, reg_offset = 0;
2633 
2634 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2635 
2636 	if (!dump)
2637 		return offset + total_len;
2638 
2639 	while (reg_offset < total_len) {
2640 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2641 
2642 		offset += qed_grc_dump_addr_range(p_hwfn,
2643 						  p_ptt,
2644 						  dump_buf + offset,
2645 						  dump,  addr, curr_len, false,
2646 						  SPLIT_TYPE_NONE, 0);
2647 		reg_offset += curr_len;
2648 		addr += curr_len;
2649 
2650 		if (reg_offset < total_len) {
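			/* Zero-fill the skipped dwords */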
2651 			curr_len = min_t(u32, skip_len, total_len - reg_offset);
2652 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2653 			offset += curr_len;
2654 			reg_offset += curr_len;
2655 			addr += curr_len;
2656 		}
2657 	}
2658 
2659 	return offset;
2660 }
2661 
2662 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2663 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2664 				     struct qed_ptt *p_ptt,
2665 				     struct dbg_array input_regs_arr,
2666 				     u32 *dump_buf,
2667 				     bool dump,
2668 				     enum init_split_types split_type,
2669 				     u8 split_id,
2670 				     bool block_enable[MAX_BLOCK_ID],
2671 				     u32 *num_dumped_reg_entries)
2672 {
2673 	u32 i, offset = 0, input_offset = 0;
2674 	bool mode_match = true;
2675 
2676 	*num_dumped_reg_entries = 0;
2677 
2678 	while (input_offset < input_regs_arr.size_in_dwords) {
2679 		const struct dbg_dump_cond_hdr *cond_hdr =
2680 		    (const struct dbg_dump_cond_hdr *)
2681 		    &input_regs_arr.ptr[input_offset++];
2682 		u16 modes_buf_offset;
2683 		bool eval_mode;
2684 
2685 		/* Check mode/block */
2686 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2687 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2688 		if (eval_mode) {
2689 			modes_buf_offset =
2690 				GET_FIELD(cond_hdr->mode.data,
2691 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2692 			mode_match = qed_is_mode_match(p_hwfn,
2693 						       &modes_buf_offset);
2694 		}
2695 
2696 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2697 			input_offset += cond_hdr->data_size;
2698 			continue;
2699 		}
2700 
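		/* Each register entry packs the GRC address, the length (both
		 * in dwords) and a wide-bus flag into a single dword.
		 */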
2701 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2702 			const struct dbg_dump_reg *reg =
2703 			    (const struct dbg_dump_reg *)
2704 			    &input_regs_arr.ptr[input_offset];
2705 			u32 addr, len;
2706 			bool wide_bus;
2707 
2708 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2709 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2710 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2711 			offset += qed_grc_dump_reg_entry(p_hwfn,
2712 							 p_ptt,
2713 							 dump_buf + offset,
2714 							 dump,
2715 							 addr,
2716 							 len,
2717 							 wide_bus,
2718 							 split_type, split_id);
2719 			(*num_dumped_reg_entries)++;
2720 		}
2721 	}
2722 
2723 	return offset;
2724 }
2725 
2726 /* Dumps one split's GRC registers. Returns the dumped size in dwords. */
2727 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2728 				   struct qed_ptt *p_ptt,
2729 				   struct dbg_array input_regs_arr,
2730 				   u32 *dump_buf,
2731 				   bool dump,
2732 				   bool block_enable[MAX_BLOCK_ID],
2733 				   enum init_split_types split_type,
2734 				   u8 split_id,
2735 				   const char *param_name,
2736 				   const char *param_val)
2737 {
2738 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2739 	enum init_split_types hdr_split_type = split_type;
2740 	u32 num_dumped_reg_entries, offset;
2741 	u8 hdr_split_id = split_id;
2742 
2743 	/* In PORT_PF split type, print a port split header */
2744 	if (split_type == SPLIT_TYPE_PORT_PF) {
2745 		hdr_split_type = SPLIT_TYPE_PORT;
2746 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2747 	}
2748 
2749 	/* Calculate register dump header size (and skip it for now) */
2750 	offset = qed_grc_dump_regs_hdr(dump_buf,
2751 				       false,
2752 				       0,
2753 				       hdr_split_type,
2754 				       hdr_split_id, param_name, param_val);
2755 
2756 	/* Dump registers */
2757 	offset += qed_grc_dump_regs_entries(p_hwfn,
2758 					    p_ptt,
2759 					    input_regs_arr,
2760 					    dump_buf + offset,
2761 					    dump,
2762 					    split_type,
2763 					    split_id,
2764 					    block_enable,
2765 					    &num_dumped_reg_entries);
2766 
2767 	/* Write register dump header */
2768 	if (dump && num_dumped_reg_entries > 0)
2769 		qed_grc_dump_regs_hdr(dump_buf,
2770 				      dump,
2771 				      num_dumped_reg_entries,
2772 				      hdr_split_type,
2773 				      hdr_split_id, param_name, param_val);
2774 
2775 	return num_dumped_reg_entries > 0 ? offset : 0;
2776 }
2777 
2778 /* Dumps registers according to the input registers array. Returns the dumped
2779  * size in dwords.
2780  */
2781 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2782 				  struct qed_ptt *p_ptt,
2783 				  u32 *dump_buf,
2784 				  bool dump,
2785 				  bool block_enable[MAX_BLOCK_ID],
2786 				  const char *param_name, const char *param_val)
2787 {
2788 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2789 	u32 offset = 0, input_offset = 0;
2790 	u16 fid;
2791 	while (input_offset <
2792 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2793 		const struct dbg_dump_split_hdr *split_hdr;
2794 		struct dbg_array curr_input_regs_arr;
2795 		enum init_split_types split_type;
2796 		u16 split_count = 0;
2797 		u32 split_data_size;
2798 		u8 split_id;
2799 
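		/* The input array is a sequence of split headers, each
		 * followed by split_data_size dwords of register entries.
		 */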
2800 		split_hdr =
2801 			(const struct dbg_dump_split_hdr *)
2802 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2803 		split_type =
2804 			GET_FIELD(split_hdr->hdr,
2805 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2806 		split_data_size =
2807 			GET_FIELD(split_hdr->hdr,
2808 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2809 		curr_input_regs_arr.ptr =
2810 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2811 		curr_input_regs_arr.size_in_dwords = split_data_size;
2812 
2813 		switch (split_type) {
2814 		case SPLIT_TYPE_NONE:
2815 			split_count = 1;
2816 			break;
2817 		case SPLIT_TYPE_PORT:
2818 			split_count = dev_data->num_ports;
2819 			break;
2820 		case SPLIT_TYPE_PF:
2821 		case SPLIT_TYPE_PORT_PF:
2822 			split_count = dev_data->num_ports *
2823 			    dev_data->num_pfs_per_port;
2824 			break;
2825 		case SPLIT_TYPE_VF:
2826 			split_count = dev_data->num_vfs;
2827 			break;
2828 		default:
2829 			return 0;
2830 		}
2831 
2832 		for (split_id = 0; split_id < split_count; split_id++)
2833 			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2834 							  curr_input_regs_arr,
2835 							  dump_buf + offset,
2836 							  dump, block_enable,
2837 							  split_type,
2838 							  split_id,
2839 							  param_name,
2840 							  param_val);
2841 
2842 		input_offset += split_data_size;
2843 	}
2844 
2845 	/* Cancel pretends (pretend to original PF) */
2846 	if (dump) {
2847 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2848 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2849 		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2850 		dev_data->pretend.split_id = 0;
2851 	}
2852 
2853 	return offset;
2854 }
2855 
2856 /* Dump reset registers. Returns the dumped size in dwords. */
2857 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2858 				   struct qed_ptt *p_ptt,
2859 				   u32 *dump_buf, bool dump)
2860 {
2861 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2862 	u32 i, offset = 0, num_regs = 0;
2863 
2864 	/* Calculate header size */
2865 	offset += qed_grc_dump_regs_hdr(dump_buf,
2866 					false, 0,
2867 					SPLIT_TYPE_NONE, 0, NULL, NULL);
2868 
2869 	/* Write reset registers */
2870 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2871 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2872 			continue;
2873 
2874 		offset += qed_grc_dump_reg_entry(p_hwfn,
2875 						 p_ptt,
2876 						 dump_buf + offset,
2877 						 dump,
2878 						 BYTES_TO_DWORDS
2879 						 (s_reset_regs_defs[i].addr), 1,
2880 						 false, SPLIT_TYPE_NONE, 0);
2881 		num_regs++;
2882 	}
2883 
2884 	/* Write header */
2885 	if (dump)
2886 		qed_grc_dump_regs_hdr(dump_buf,
2887 				      true, num_regs, SPLIT_TYPE_NONE,
2888 				      0, NULL, NULL);
2889 
2890 	return offset;
2891 }
2892 
2893 /* Dump registers that are modified during GRC Dump and therefore must be
2894  * dumped first. Returns the dumped size in dwords.
2895  */
2896 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2897 				      struct qed_ptt *p_ptt,
2898 				      u32 *dump_buf, bool dump)
2899 {
2900 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2901 	u32 block_id, offset = 0, num_reg_entries = 0;
2902 	const struct dbg_attn_reg *attn_reg_arr;
2903 	u8 storm_id, reg_idx, num_attn_regs;
2904 
2905 	/* Calculate header size */
2906 	offset += qed_grc_dump_regs_hdr(dump_buf,
2907 					false, 0, SPLIT_TYPE_NONE,
2908 					0, NULL, NULL);
2909 
2910 	/* Write parity registers */
2911 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2912 		if (dev_data->block_in_reset[block_id] && dump)
2913 			continue;
2914 
2915 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2916 						       ATTN_TYPE_PARITY,
2917 						       &num_attn_regs);
2918 
2919 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2920 			const struct dbg_attn_reg *reg_data =
2921 				&attn_reg_arr[reg_idx];
2922 			u16 modes_buf_offset;
2923 			bool eval_mode;
2924 			u32 addr;
2925 
2926 			/* Check mode */
2927 			eval_mode = GET_FIELD(reg_data->mode.data,
2928 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2929 			modes_buf_offset =
2930 				GET_FIELD(reg_data->mode.data,
2931 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2932 			if (eval_mode &&
2933 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2934 				continue;
2935 
2936 			/* Mode match: read & dump registers */
2937 			addr = reg_data->mask_address;
2938 			offset += qed_grc_dump_reg_entry(p_hwfn,
2939 							 p_ptt,
2940 							 dump_buf + offset,
2941 							 dump,
2942 							 addr,
2943 							 1, false,
2944 							 SPLIT_TYPE_NONE, 0);
2945 			addr = GET_FIELD(reg_data->data,
2946 					 DBG_ATTN_REG_STS_ADDRESS);
2947 			offset += qed_grc_dump_reg_entry(p_hwfn,
2948 							 p_ptt,
2949 							 dump_buf + offset,
2950 							 dump,
2951 							 addr,
2952 							 1, false,
2953 							 SPLIT_TYPE_NONE, 0);
2954 			num_reg_entries += 2;
2955 		}
2956 	}
2957 
2958 	/* Write Storm stall status registers */
2959 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2960 		struct storm_defs *storm = &s_storm_defs[storm_id];
2961 		u32 addr;
2962 
2963 		if (dev_data->block_in_reset[storm->block_id] && dump)
2964 			continue;
2965 
2966 		addr =
2967 		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
2968 				    SEM_FAST_REG_STALLED);
2969 		offset += qed_grc_dump_reg_entry(p_hwfn,
2970 						 p_ptt,
2971 						 dump_buf + offset,
2972 						 dump,
2973 						 addr,
2974 						 1,
2975 						 false, SPLIT_TYPE_NONE, 0);
2976 		num_reg_entries++;
2977 	}
2978 
2979 	/* Write header */
2980 	if (dump)
2981 		qed_grc_dump_regs_hdr(dump_buf,
2982 				      true,
2983 				      num_reg_entries, SPLIT_TYPE_NONE,
2984 				      0, NULL, NULL);
2985 
2986 	return offset;
2987 }
2988 
2989 /* Dumps registers that can't be represented in the debug arrays */
2990 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2991 				     struct qed_ptt *p_ptt,
2992 				     u32 *dump_buf, bool dump)
2993 {
2994 	u32 offset = 0, addr;
2995 
2996 	offset += qed_grc_dump_regs_hdr(dump_buf,
2997 					dump, 2, SPLIT_TYPE_NONE, 0,
2998 					NULL, NULL);
2999 
3000 	/* Dump RDIF/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
3001 	 * skipped).
3002 	 */
3003 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
3004 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
3005 					      p_ptt,
3006 					      dump_buf + offset,
3007 					      dump,
3008 					      addr,
3009 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
3010 					      7,
3011 					      1);
3012 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
3013 	offset +=
3014 	    qed_grc_dump_reg_entry_skip(p_hwfn,
3015 					p_ptt,
3016 					dump_buf + offset,
3017 					dump,
3018 					addr,
3019 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
3020 					7,
3021 					1);
3022 
3023 	return offset;
3024 }
3025 
3026 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3027  * dwords. The following parameters are dumped:
3028  * - name:	   dumped only if it's not NULL.
3029  * - addr:	   in dwords, dumped only if name is NULL.
3030  * - len:	   in dwords, always dumped.
3031  * - width:	   dumped if it's not zero.
3032  * - packed:	   dumped only if it's not false.
3033  * - mem_group:	   always dumped.
3034  * - is_storm:	   true only if the memory is related to a Storm.
3035  * - storm_letter: valid only if is_storm is true.
3036  *
3037  */
3038 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3039 				u32 *dump_buf,
3040 				bool dump,
3041 				const char *name,
3042 				u32 addr,
3043 				u32 len,
3044 				u32 bit_width,
3045 				bool packed,
3046 				const char *mem_group,
3047 				bool is_storm, char storm_letter)
3048 {
3049 	u8 num_params = 3;
3050 	u32 offset = 0;
3051 	char buf[64];
3052 
3053 	if (!len)
3054 		DP_NOTICE(p_hwfn,
3055 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3056 
3057 	if (bit_width)
3058 		num_params++;
3059 	if (packed)
3060 		num_params++;
3061 
3062 	/* Dump section header */
3063 	offset += qed_dump_section_hdr(dump_buf + offset,
3064 				       dump, "grc_mem", num_params);
3065 
3066 	if (name) {
3067 		/* Dump name */
3068 		if (is_storm) {
3069 			strcpy(buf, "?STORM_");
3070 			buf[0] = storm_letter;
3071 			strcpy(buf + strlen(buf), name);
3072 		} else {
3073 			strcpy(buf, name);
3074 		}
3075 
3076 		offset += qed_dump_str_param(dump_buf + offset,
3077 					     dump, "name", buf);
3078 	} else {
3079 		/* Dump address */
3080 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3081 
3082 		offset += qed_dump_num_param(dump_buf + offset,
3083 					     dump, "addr", addr_in_bytes);
3084 	}
3085 
3086 	/* Dump len */
3087 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3088 
3089 	/* Dump bit width */
3090 	if (bit_width)
3091 		offset += qed_dump_num_param(dump_buf + offset,
3092 					     dump, "width", bit_width);
3093 
3094 	/* Dump packed */
3095 	if (packed)
3096 		offset += qed_dump_num_param(dump_buf + offset,
3097 					     dump, "packed", 1);
3098 
3099 	/* Dump reg type */
3100 	if (is_storm) {
3101 		strcpy(buf, "?STORM_");
3102 		buf[0] = storm_letter;
3103 		strcpy(buf + strlen(buf), mem_group);
3104 	} else {
3105 		strcpy(buf, mem_group);
3106 	}
3107 
3108 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3109 
3110 	return offset;
3111 }
3112 
3113 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3114  * Returns the dumped size in dwords.
3115  * The addr and len arguments are specified in dwords.
3116  */
3117 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3118 			    struct qed_ptt *p_ptt,
3119 			    u32 *dump_buf,
3120 			    bool dump,
3121 			    const char *name,
3122 			    u32 addr,
3123 			    u32 len,
3124 			    bool wide_bus,
3125 			    u32 bit_width,
3126 			    bool packed,
3127 			    const char *mem_group,
3128 			    bool is_storm, char storm_letter)
3129 {
3130 	u32 offset = 0;
3131 
3132 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3133 				       dump_buf + offset,
3134 				       dump,
3135 				       name,
3136 				       addr,
3137 				       len,
3138 				       bit_width,
3139 				       packed,
3140 				       mem_group, is_storm, storm_letter);
3141 	offset += qed_grc_dump_addr_range(p_hwfn,
3142 					  p_ptt,
3143 					  dump_buf + offset,
3144 					  dump, addr, len, wide_bus,
3145 					  SPLIT_TYPE_NONE, 0);
3146 
3147 	return offset;
3148 }
3149 
3150 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3151 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3152 				    struct qed_ptt *p_ptt,
3153 				    struct dbg_array input_mems_arr,
3154 				    u32 *dump_buf, bool dump)
3155 {
3156 	u32 i, offset = 0, input_offset = 0;
3157 	bool mode_match = true;
3158 
3159 	while (input_offset < input_mems_arr.size_in_dwords) {
3160 		const struct dbg_dump_cond_hdr *cond_hdr;
3161 		u16 modes_buf_offset;
3162 		u32 num_entries;
3163 		bool eval_mode;
3164 
3165 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3166 			   &input_mems_arr.ptr[input_offset++];
3167 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3168 
3169 		/* Check required mode */
3170 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3171 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3172 		if (eval_mode) {
3173 			modes_buf_offset =
3174 				GET_FIELD(cond_hdr->mode.data,
3175 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3176 			mode_match = qed_is_mode_match(p_hwfn,
3177 						       &modes_buf_offset);
3178 		}
3179 
3180 		if (!mode_match) {
3181 			input_offset += cond_hdr->data_size;
3182 			continue;
3183 		}
3184 
3185 		for (i = 0; i < num_entries;
3186 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3187 			const struct dbg_dump_mem *mem =
3188 				(const struct dbg_dump_mem *)
3189 				&input_mems_arr.ptr[input_offset];
3190 			u8 mem_group_id = GET_FIELD(mem->dword0,
3191 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3192 			bool is_storm = false, mem_wide_bus;
3193 			enum dbg_grc_params grc_param;
3194 			char storm_letter = 'a';
3195 			enum block_id block_id;
3196 			u32 mem_addr, mem_len;
3197 
3198 			if (mem_group_id >= MEM_GROUPS_NUM) {
3199 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3200 				return 0;
3201 			}
3202 
3203 			block_id = (enum block_id)cond_hdr->block_id;
3204 			if (!qed_grc_is_mem_included(p_hwfn,
3205 						     block_id,
3206 						     mem_group_id))
3207 				continue;
3208 
3209 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3210 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3211 			mem_wide_bus = GET_FIELD(mem->dword1,
3212 						 DBG_DUMP_MEM_WIDE_BUS);
3213 
3214 			/* Update memory length for CCFC/TCFC memories
3215 			 * according to number of LCIDs/LTIDs.
3216 			 */
3217 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3218 				if (mem_len % MAX_LCIDS) {
3219 					DP_NOTICE(p_hwfn,
3220 						  "Invalid CCFC connection memory size\n");
3221 					return 0;
3222 				}
3223 
3224 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3225 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3226 					  (mem_len / MAX_LCIDS);
3227 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3228 				if (mem_len % MAX_LTIDS) {
3229 					DP_NOTICE(p_hwfn,
3230 						  "Invalid TCFC task memory size\n");
3231 					return 0;
3232 				}
3233 
3234 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3235 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3236 					  (mem_len / MAX_LTIDS);
3237 			}
3238 
3239 			/* If memory is associated with Storm, update Storm
3240 			 * details.
3241 			 */
3242 			if (s_block_defs
3243 			    [cond_hdr->block_id]->associated_to_storm) {
3244 				is_storm = true;
3245 				storm_letter =
3246 				    s_storm_defs[s_block_defs
3247 						 [cond_hdr->block_id]->
3248 						 storm_id].letter;
3249 			}
3250 
3251 			/* Dump memory */
3252 			offset += qed_grc_dump_mem(p_hwfn,
3253 						p_ptt,
3254 						dump_buf + offset,
3255 						dump,
3256 						NULL,
3257 						mem_addr,
3258 						mem_len,
3259 						mem_wide_bus,
3260 						0,
3261 						false,
3262 						s_mem_group_names[mem_group_id],
3263 						is_storm,
3264 						storm_letter);
3265 		}
3266 	}
3267 
3268 	return offset;
3269 }
3270 
3271 /* Dumps GRC memories according to the input array dump_mem.
3272  * Returns the dumped size in dwords.
3273  */
3274 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3275 				 struct qed_ptt *p_ptt,
3276 				 u32 *dump_buf, bool dump)
3277 {
3278 	u32 offset = 0, input_offset = 0;
3279 
3280 	while (input_offset <
3281 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3282 		const struct dbg_dump_split_hdr *split_hdr;
3283 		struct dbg_array curr_input_mems_arr;
3284 		enum init_split_types split_type;
3285 		u32 split_data_size;
3286 
3287 		split_hdr = (const struct dbg_dump_split_hdr *)
3288 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3289 		split_type =
3290 			GET_FIELD(split_hdr->hdr,
3291 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3292 		split_data_size =
3293 			GET_FIELD(split_hdr->hdr,
3294 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3295 		curr_input_mems_arr.ptr =
3296 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3297 		curr_input_mems_arr.size_in_dwords = split_data_size;
3298 
3299 		if (split_type == SPLIT_TYPE_NONE)
3300 			offset += qed_grc_dump_mem_entries(p_hwfn,
3301 							   p_ptt,
3302 							   curr_input_mems_arr,
3303 							   dump_buf + offset,
3304 							   dump);
3305 		else
3306 			DP_NOTICE(p_hwfn,
3307 				  "Dumping split memories is currently not supported\n");
3308 
3309 		input_offset += split_data_size;
3310 	}
3311 
3312 	return offset;
3313 }
3314 
3315 /* Dumps GRC context data for the specified Storm.
3316  * Returns the dumped size in dwords.
3317  * The lid_size argument is specified in quad-regs.
3318  */
3319 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3320 				 struct qed_ptt *p_ptt,
3321 				 u32 *dump_buf,
3322 				 bool dump,
3323 				 const char *name,
3324 				 u32 num_lids,
3325 				 u32 lid_size,
3326 				 u32 rd_reg_addr,
3327 				 u8 storm_id)
3328 {
3329 	struct storm_defs *storm = &s_storm_defs[storm_id];
3330 	u32 i, lid, total_size, offset = 0;
3331 
3332 	if (!lid_size)
3333 		return 0;
3334 
3335 	lid_size *= BYTES_IN_DWORD;
3336 	total_size = num_lids * lid_size;
3337 
3338 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3339 				       dump_buf + offset,
3340 				       dump,
3341 				       name,
3342 				       0,
3343 				       total_size,
3344 				       lid_size * 32,
3345 				       false, name, true, storm->letter);
3346 
3347 	if (!dump)
3348 		return offset + total_size;
3349 
3350 	/* Dump context data */
3351 	for (lid = 0; lid < num_lids; lid++) {
3352 		for (i = 0; i < lid_size; i++, offset++) {
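			/* Select dword i of local ID lid through the Storm's
			 * CM context write address, then read it back via
			 * rd_reg_addr.
			 */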
3353 			qed_wr(p_hwfn,
3354 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3355 			*(dump_buf + offset) = qed_rd(p_hwfn,
3356 						      p_ptt, rd_reg_addr);
3357 		}
3358 	}
3359 
3360 	return offset;
3361 }
3362 
3363 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3364 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3365 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3366 {
3367 	enum dbg_grc_params grc_param;
3368 	u32 offset = 0;
3369 	u8 storm_id;
3370 
3371 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3372 		struct storm_defs *storm = &s_storm_defs[storm_id];
3373 
3374 		if (!qed_grc_is_storm_included(p_hwfn,
3375 					       (enum dbg_storms)storm_id))
3376 			continue;
3377 
3378 		/* Dump Conn AG context size */
3379 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3380 		offset +=
3381 			qed_grc_dump_ctx_data(p_hwfn,
3382 					      p_ptt,
3383 					      dump_buf + offset,
3384 					      dump,
3385 					      "CONN_AG_CTX",
3386 					      qed_grc_get_param(p_hwfn,
3387 								grc_param),
3388 					      storm->cm_conn_ag_ctx_lid_size,
3389 					      storm->cm_conn_ag_ctx_rd_addr,
3390 					      storm_id);
3391 
3392 		/* Dump Conn ST context size */
3393 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3394 		offset +=
3395 			qed_grc_dump_ctx_data(p_hwfn,
3396 					      p_ptt,
3397 					      dump_buf + offset,
3398 					      dump,
3399 					      "CONN_ST_CTX",
3400 					      qed_grc_get_param(p_hwfn,
3401 								grc_param),
3402 					      storm->cm_conn_st_ctx_lid_size,
3403 					      storm->cm_conn_st_ctx_rd_addr,
3404 					      storm_id);
3405 
3406 		/* Dump Task AG context size */
3407 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3408 		offset +=
3409 			qed_grc_dump_ctx_data(p_hwfn,
3410 					      p_ptt,
3411 					      dump_buf + offset,
3412 					      dump,
3413 					      "TASK_AG_CTX",
3414 					      qed_grc_get_param(p_hwfn,
3415 								grc_param),
3416 					      storm->cm_task_ag_ctx_lid_size,
3417 					      storm->cm_task_ag_ctx_rd_addr,
3418 					      storm_id);
3419 
3420 		/* Dump Task ST context size */
3421 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3422 		offset +=
3423 			qed_grc_dump_ctx_data(p_hwfn,
3424 					      p_ptt,
3425 					      dump_buf + offset,
3426 					      dump,
3427 					      "TASK_ST_CTX",
3428 					      qed_grc_get_param(p_hwfn,
3429 								grc_param),
3430 					      storm->cm_task_st_ctx_lid_size,
3431 					      storm->cm_task_st_ctx_rd_addr,
3432 					      storm_id);
3433 	}
3434 
3435 	return offset;
3436 }
3437 
3438 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3439 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3440 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3441 {
3442 	char buf[10] = "IOR_SET_?";
3443 	u32 addr, offset = 0;
3444 	u8 storm_id, set_id;
3445 
3446 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3447 		struct storm_defs *storm = &s_storm_defs[storm_id];
3448 
3449 		if (!qed_grc_is_storm_included(p_hwfn,
3450 					       (enum dbg_storms)storm_id))
3451 			continue;
3452 
3453 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3454 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3455 					       SEM_FAST_REG_STORM_REG_FILE) +
3456 			       IOR_SET_OFFSET(set_id);
3457 			if (strlen(buf) > 0)
3458 				buf[strlen(buf) - 1] = '0' + set_id;
3459 			offset += qed_grc_dump_mem(p_hwfn,
3460 						   p_ptt,
3461 						   dump_buf + offset,
3462 						   dump,
3463 						   buf,
3464 						   addr,
3465 						   IORS_PER_SET,
3466 						   false,
3467 						   32,
3468 						   false,
3469 						   "ior",
3470 						   true,
3471 						   storm->letter);
3472 		}
3473 	}
3474 
3475 	return offset;
3476 }
3477 
3478 /* Dump VFC CAM. Returns the dumped size in dwords. */
3479 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3480 				struct qed_ptt *p_ptt,
3481 				u32 *dump_buf, bool dump, u8 storm_id)
3482 {
3483 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3484 	struct storm_defs *storm = &s_storm_defs[storm_id];
3485 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3486 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3487 	u32 row, i, offset = 0;
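	/* Note: 'i' is not referenced directly here; the ARR_REG_WR and
	 * ARR_REG_RD macros used below expand to loops over 'i'.
	 */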
3488 
3489 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3490 				       dump_buf + offset,
3491 				       dump,
3492 				       "vfc_cam",
3493 				       0,
3494 				       total_size,
3495 				       256,
3496 				       false, "vfc_cam", true, storm->letter);
3497 
3498 	if (!dump)
3499 		return offset + total_size;
3500 
3501 	/* Prepare CAM address */
3502 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3503 
3504 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3505 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3506 		/* Write VFC CAM command */
3507 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3508 		ARR_REG_WR(p_hwfn,
3509 			   p_ptt,
3510 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3511 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3512 
3513 		/* Write VFC CAM address */
3514 		ARR_REG_WR(p_hwfn,
3515 			   p_ptt,
3516 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3517 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3518 
3519 		/* Read VFC CAM read response */
3520 		ARR_REG_RD(p_hwfn,
3521 			   p_ptt,
3522 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3523 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3524 	}
3525 
3526 	return offset;
3527 }
3528 
3529 /* Dump VFC RAM. Returns the dumped size in dwords. */
3530 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3531 				struct qed_ptt *p_ptt,
3532 				u32 *dump_buf,
3533 				bool dump,
3534 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3535 {
3536 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3537 	struct storm_defs *storm = &s_storm_defs[storm_id];
3538 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3539 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3540 	u32 row, i, offset = 0;
3541 
3542 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3543 				       dump_buf + offset,
3544 				       dump,
3545 				       ram_defs->mem_name,
3546 				       0,
3547 				       total_size,
3548 				       256,
3549 				       false,
3550 				       ram_defs->type_name,
3551 				       true, storm->letter);
3552 
3553 	/* Prepare RAM address */
3554 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3555 
3556 	if (!dump)
3557 		return offset + total_size;
3558 
3559 	for (row = ram_defs->base_row;
3560 	     row < ram_defs->base_row + ram_defs->num_rows;
3561 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3562 		/* Write VFC RAM command */
3563 		ARR_REG_WR(p_hwfn,
3564 			   p_ptt,
3565 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3566 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3567 
3568 		/* Write VFC RAM address */
3569 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3570 		ARR_REG_WR(p_hwfn,
3571 			   p_ptt,
3572 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3573 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3574 
3575 		/* Read VFC RAM read response */
3576 		ARR_REG_RD(p_hwfn,
3577 			   p_ptt,
3578 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3579 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3580 	}
3581 
3582 	return offset;
3583 }
3584 
3585 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3586 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3587 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3588 {
3589 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3590 	u8 storm_id, i;
3591 	u32 offset = 0;
3592 
3593 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3594 		if (!qed_grc_is_storm_included(p_hwfn,
3595 					       (enum dbg_storms)storm_id) ||
3596 		    !s_storm_defs[storm_id].has_vfc ||
3597 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3598 		     PLATFORM_ASIC))
3599 			continue;
3600 
3601 		/* Read CAM */
3602 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3603 					       p_ptt,
3604 					       dump_buf + offset,
3605 					       dump, storm_id);
3606 
3607 		/* Read RAM */
3608 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3609 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3610 						       p_ptt,
3611 						       dump_buf + offset,
3612 						       dump,
3613 						       storm_id,
3614 						       &s_vfc_ram_defs[i]);
3615 	}
3616 
3617 	return offset;
3618 }
3619 
3620 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3621 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3622 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3623 {
3624 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3625 	u32 offset = 0;
3626 	u8 rss_mem_id;
3627 
3628 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3629 		u32 rss_addr, num_entries, total_dwords;
3630 		struct rss_mem_defs *rss_defs;
3631 		u32 addr, num_dwords_to_read;
3632 		bool packed;
3633 
3634 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3635 		rss_addr = rss_defs->addr;
3636 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3637 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3638 		packed = (rss_defs->entry_width == 16);
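		/* 16-bit entries are marked as packed in the dump header */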
3639 
3640 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3641 					       dump_buf + offset,
3642 					       dump,
3643 					       rss_defs->mem_name,
3644 					       0,
3645 					       total_dwords,
3646 					       rss_defs->entry_width,
3647 					       packed,
3648 					       rss_defs->type_name, false, 0);
3649 
3650 		/* Dump RSS data */
3651 		if (!dump) {
3652 			offset += total_dwords;
3653 			continue;
3654 		}
3655 
3656 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
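		/* The RSS RAM is read through an address/data window: write
		 * the row to RSS_REG_RSS_RAM_ADDR, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords from RSS_REG_RSS_RAM_DATA.
		 */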
3657 		while (total_dwords) {
3658 			num_dwords_to_read = min_t(u32,
3659 						   RSS_REG_RSS_RAM_DATA_SIZE,
3660 						   total_dwords);
3661 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3662 			offset += qed_grc_dump_addr_range(p_hwfn,
3663 							  p_ptt,
3664 							  dump_buf + offset,
3665 							  dump,
3666 							  addr,
3667 							  num_dwords_to_read,
3668 							  false,
3669 							  SPLIT_TYPE_NONE, 0);
3670 			total_dwords -= num_dwords_to_read;
3671 			rss_addr++;
3672 		}
3673 	}
3674 
3675 	return offset;
3676 }
3677 
3678 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3679 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3680 				struct qed_ptt *p_ptt,
3681 				u32 *dump_buf, bool dump, u8 big_ram_id)
3682 {
3683 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3684 	u32 block_size, ram_size, offset = 0, reg_val, i;
3685 	char mem_name[12] = "???_BIG_RAM";
3686 	char type_name[8] = "???_RAM";
3687 	struct big_ram_defs *big_ram;
3688 
3689 	big_ram = &s_big_ram_defs[big_ram_id];
3690 	ram_size = big_ram->ram_size[dev_data->chip_id];
3691 
3692 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3693 	block_size = reg_val &
3694 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3695 									 : 128;
3696 
3697 	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3698 	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3699 
3700 	/* Dump memory header */
3701 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3702 				       dump_buf + offset,
3703 				       dump,
3704 				       mem_name,
3705 				       0,
3706 				       ram_size,
3707 				       block_size * 8,
3708 				       false, type_name, false, 0);
3709 
3710 	/* Read and dump Big RAM data */
3711 	if (!dump)
3712 		return offset + ram_size;
3713 
3714 	/* Dump Big RAM */
3715 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3716 	     i++) {
3717 		u32 addr, len;
3718 
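		/* Select Big RAM block i via the address register, then read
		 * its data through the data register window.
		 */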
3719 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3720 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3721 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3722 		offset += qed_grc_dump_addr_range(p_hwfn,
3723 						  p_ptt,
3724 						  dump_buf + offset,
3725 						  dump,
3726 						  addr,
3727 						  len,
3728 						  false, SPLIT_TYPE_NONE, 0);
3729 	}
3730 
3731 	return offset;
3732 }
3733 
3734 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3735 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3736 {
3737 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3738 	u32 offset = 0, addr;
3739 	bool halted = false;
3740 
3741 	/* Halt MCP */
3742 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3743 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3744 		if (!halted)
3745 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3746 	}
3747 
3748 	/* Dump MCP scratchpad */
3749 	offset += qed_grc_dump_mem(p_hwfn,
3750 				   p_ptt,
3751 				   dump_buf + offset,
3752 				   dump,
3753 				   NULL,
3754 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3755 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3756 				   false, 0, false, "MCP", false, 0);
3757 
3758 	/* Dump MCP cpu_reg_file */
3759 	offset += qed_grc_dump_mem(p_hwfn,
3760 				   p_ptt,
3761 				   dump_buf + offset,
3762 				   dump,
3763 				   NULL,
3764 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3765 				   MCP_REG_CPU_REG_FILE_SIZE,
3766 				   false, 0, false, "MCP", false, 0);
3767 
3768 	/* Dump MCP registers */
3769 	block_enable[BLOCK_MCP] = true;
3770 	offset += qed_grc_dump_registers(p_hwfn,
3771 					 p_ptt,
3772 					 dump_buf + offset,
3773 					 dump, block_enable, "block", "MCP");
3774 
3775 	/* Dump required non-MCP registers */
3776 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3777 					dump, 1, SPLIT_TYPE_NONE, 0,
3778 					"block", "MCP");
3779 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3780 	offset += qed_grc_dump_reg_entry(p_hwfn,
3781 					 p_ptt,
3782 					 dump_buf + offset,
3783 					 dump,
3784 					 addr,
3785 					 1,
3786 					 false, SPLIT_TYPE_NONE, 0);
3787 
3788 	/* Release MCP */
3789 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3790 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3791 
3792 	return offset;
3793 }
3794 
3795 /* Dumps the tbus indirect memory for all PHYs. */
3796 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3797 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3798 {
3799 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3800 	char mem_name[32];
3801 	u8 phy_id;
3802 
3803 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3804 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3805 		struct phy_defs *phy_defs;
3806 		u8 *bytes_buf;
3807 
3808 		phy_defs = &s_phy_defs[phy_id];
3809 		addr_lo_addr = phy_defs->base_addr +
3810 			       phy_defs->tbus_addr_lo_addr;
3811 		addr_hi_addr = phy_defs->base_addr +
3812 			       phy_defs->tbus_addr_hi_addr;
3813 		data_lo_addr = phy_defs->base_addr +
3814 			       phy_defs->tbus_data_lo_addr;
3815 		data_hi_addr = phy_defs->base_addr +
3816 			       phy_defs->tbus_data_hi_addr;
3817 
3818 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3819 			     phy_defs->phy_name) < 0)
3820 			DP_NOTICE(p_hwfn,
3821 				  "Unexpected debug error: invalid PHY memory name\n");
3822 
3823 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3824 					       dump_buf + offset,
3825 					       dump,
3826 					       mem_name,
3827 					       0,
3828 					       PHY_DUMP_SIZE_DWORDS,
3829 					       16, true, mem_name, false, 0);
3830 
3831 		if (!dump) {
3832 			offset += PHY_DUMP_SIZE_DWORDS;
3833 			continue;
3834 		}
3835 
3836 		bytes_buf = (u8 *)(dump_buf + offset);
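		/* The tbus address is split into high and low bytes. For each
		 * address, one byte is read from the low data register and one
		 * from the high data register.
		 */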
3837 		for (tbus_hi_offset = 0;
3838 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3839 		     tbus_hi_offset++) {
3840 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3841 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3842 			     tbus_lo_offset++) {
3843 				qed_wr(p_hwfn,
3844 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3845 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3846 							    p_ptt,
3847 							    data_lo_addr);
3848 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3849 							    p_ptt,
3850 							    data_hi_addr);
3851 			}
3852 		}
3853 
3854 		offset += PHY_DUMP_SIZE_DWORDS;
3855 	}
3856 
3857 	return offset;
3858 }
3859 
3860 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3861 				struct qed_ptt *p_ptt,
3862 				enum block_id block_id,
3863 				u8 line_id,
3864 				u8 enable_mask,
3865 				u8 right_shift,
3866 				u8 force_valid_mask, u8 force_frame_mask)
3867 {
3868 	struct block_defs *block = s_block_defs[block_id];
3869 
3870 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3871 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3872 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3873 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3874 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3875 }
3876 
3877 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3878 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3879 				     struct qed_ptt *p_ptt,
3880 				     u32 *dump_buf, bool dump)
3881 {
3882 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3883 	u32 block_id, line_id, offset = 0;
3884 
3885 	/* Don't dump static debug if a debug bus recording is in progress */
3886 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3887 		return 0;
3888 
3889 	if (dump) {
3890 		/* Disable all blocks debug output */
3891 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3892 			struct block_defs *block = s_block_defs[block_id];
3893 
3894 			if (block->dbg_client_id[dev_data->chip_id] !=
3895 			    MAX_DBG_BUS_CLIENTS)
3896 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3897 				       0);
3898 		}
3899 
3900 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3901 		qed_bus_set_framing_mode(p_hwfn,
3902 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3903 		qed_wr(p_hwfn,
3904 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3905 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3906 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3907 	}
3908 
3909 	/* Dump all static debug lines for each relevant block */
3910 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3911 		struct block_defs *block = s_block_defs[block_id];
3912 		struct dbg_bus_block *block_desc;
3913 		u32 block_dwords, addr, len;
3914 		u8 dbg_client_id;
3915 
3916 		if (block->dbg_client_id[dev_data->chip_id] ==
3917 		    MAX_DBG_BUS_CLIENTS)
3918 			continue;
3919 
3920 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3921 						    (enum block_id)block_id);
3922 		block_dwords = NUM_DBG_LINES(block_desc) *
3923 			       STATIC_DEBUG_LINE_DWORDS;
3924 
3925 		/* Dump static section params */
3926 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3927 					       dump_buf + offset,
3928 					       dump,
3929 					       block->name,
3930 					       0,
3931 					       block_dwords,
3932 					       32, false, "STATIC", false, 0);
3933 
3934 		if (!dump) {
3935 			offset += block_dwords;
3936 			continue;
3937 		}
3938 
3939 		/* If all lines are invalid - dump zeros */
3940 		if (dev_data->block_in_reset[block_id]) {
3941 			memset(dump_buf + offset, 0,
3942 			       DWORDS_TO_BYTES(block_dwords));
3943 			offset += block_dwords;
3944 			continue;
3945 		}
3946 
3947 		/* Enable block's client */
3948 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3949 		qed_bus_enable_clients(p_hwfn,
3950 				       p_ptt,
3951 				       BIT(dbg_client_id));
3952 
3953 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3954 		len = STATIC_DEBUG_LINE_DWORDS;
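		/* For each line: select it with enable mask 0xf, then read the
		 * line's dwords from the calendar output register.
		 */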
3955 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3956 		     line_id++) {
3957 			/* Configure debug line ID */
3958 			qed_config_dbg_line(p_hwfn,
3959 					    p_ptt,
3960 					    (enum block_id)block_id,
3961 					    (u8)line_id, 0xf, 0, 0, 0);
3962 
3963 			/* Read debug line info */
3964 			offset += qed_grc_dump_addr_range(p_hwfn,
3965 							  p_ptt,
3966 							  dump_buf + offset,
3967 							  dump,
3968 							  addr,
3969 							  len,
3970 							  true, SPLIT_TYPE_NONE,
3971 							  0);
3972 		}
3973 
3974 		/* Disable block's client and debug output */
3975 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3976 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3977 	}
3978 
3979 	if (dump) {
3980 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3981 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3982 	}
3983 
3984 	return offset;
3985 }
3986 
3987 /* Performs GRC Dump to the specified buffer.
3988  * Returns the dumped size in dwords.
3989  */
3990 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3991 				    struct qed_ptt *p_ptt,
3992 				    u32 *dump_buf,
3993 				    bool dump, u32 *num_dumped_dwords)
3994 {
3995 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3996 	bool parities_masked = false;
3997 	u32 offset = 0;
3998 	u8 i;
3999 
4000 	*num_dumped_dwords = 0;
4001 	dev_data->num_regs_read = 0;
4002 
4003 	/* Update reset state */
4004 	if (dump)
4005 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4006 
4007 	/* Dump global params */
4008 	offset += qed_dump_common_global_params(p_hwfn,
4009 						p_ptt,
4010 						dump_buf + offset, dump, 4);
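	/* The 4 specific global params declared in the call above follow:
	 * dump-type, num-lcids, num-ltids and num-ports.
	 */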
4011 	offset += qed_dump_str_param(dump_buf + offset,
4012 				     dump, "dump-type", "grc-dump");
4013 	offset += qed_dump_num_param(dump_buf + offset,
4014 				     dump,
4015 				     "num-lcids",
4016 				     qed_grc_get_param(p_hwfn,
4017 						DBG_GRC_PARAM_NUM_LCIDS));
4018 	offset += qed_dump_num_param(dump_buf + offset,
4019 				     dump,
4020 				     "num-ltids",
4021 				     qed_grc_get_param(p_hwfn,
4022 						DBG_GRC_PARAM_NUM_LTIDS));
4023 	offset += qed_dump_num_param(dump_buf + offset,
4024 				     dump, "num-ports", dev_data->num_ports);
4025 
4026 	/* Dump reset registers (dumped before taking blocks out of reset) */
4027 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4028 		offset += qed_grc_dump_reset_regs(p_hwfn,
4029 						  p_ptt,
4030 						  dump_buf + offset, dump);
4031 
4032 	/* Take all blocks out of reset (using reset registers) */
4033 	if (dump) {
4034 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4035 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4036 	}
4037 
4038 	/* Disable all parities using MFW command */
4039 	if (dump &&
4040 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4041 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4042 		if (!parities_masked) {
4043 			DP_NOTICE(p_hwfn,
4044 				  "Failed to mask parities using MFW\n");
4045 			if (qed_grc_get_param
4046 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
4047 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4048 		}
4049 	}
4050 
4051 	/* Dump modified registers (dumped before modifying them) */
4052 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4053 		offset += qed_grc_dump_modified_regs(p_hwfn,
4054 						     p_ptt,
4055 						     dump_buf + offset, dump);
4056 
4057 	/* Stall storms */
4058 	if (dump &&
4059 	    (qed_grc_is_included(p_hwfn,
4060 				 DBG_GRC_PARAM_DUMP_IOR) ||
4061 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4062 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4063 
4064 	/* Dump all regs */
4065 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4066 		bool block_enable[MAX_BLOCK_ID];
4067 
4068 		/* Dump all blocks except MCP */
4069 		for (i = 0; i < MAX_BLOCK_ID; i++)
4070 			block_enable[i] = true;
4071 		block_enable[BLOCK_MCP] = false;
4072 		offset += qed_grc_dump_registers(p_hwfn,
4073 						 p_ptt,
4074 						 dump_buf +
4075 						 offset,
4076 						 dump,
4077 						 block_enable, NULL, NULL);
4078 
4079 		/* Dump special registers */
4080 		offset += qed_grc_dump_special_regs(p_hwfn,
4081 						    p_ptt,
4082 						    dump_buf + offset, dump);
4083 	}
4084 
4085 	/* Dump memories */
4086 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4087 
4088 	/* Dump MCP */
4089 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4090 		offset += qed_grc_dump_mcp(p_hwfn,
4091 					   p_ptt, dump_buf + offset, dump);
4092 
4093 	/* Dump context */
4094 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4095 		offset += qed_grc_dump_ctx(p_hwfn,
4096 					   p_ptt, dump_buf + offset, dump);
4097 
4098 	/* Dump RSS memories */
4099 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4100 		offset += qed_grc_dump_rss(p_hwfn,
4101 					   p_ptt, dump_buf + offset, dump);
4102 
4103 	/* Dump Big RAM */
4104 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4105 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4106 			offset += qed_grc_dump_big_ram(p_hwfn,
4107 						       p_ptt,
4108 						       dump_buf + offset,
4109 						       dump, i);
4110 
4111 	/* Dump IORs */
4112 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4113 		offset += qed_grc_dump_iors(p_hwfn,
4114 					    p_ptt, dump_buf + offset, dump);
4115 
4116 	/* Dump VFC */
4117 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4118 		offset += qed_grc_dump_vfc(p_hwfn,
4119 					   p_ptt, dump_buf + offset, dump);
4120 
4121 	/* Dump PHY tbus */
4122 	if (qed_grc_is_included(p_hwfn,
4123 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4124 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4125 		offset += qed_grc_dump_phy(p_hwfn,
4126 					   p_ptt, dump_buf + offset, dump);
4127 
4128 	/* Dump static debug data (only if not during debug bus recording) */
4129 	if (qed_grc_is_included(p_hwfn,
4130 				DBG_GRC_PARAM_DUMP_STATIC) &&
4131 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
4132 		offset += qed_grc_dump_static_debug(p_hwfn,
4133 						    p_ptt,
4134 						    dump_buf + offset, dump);
4135 
4136 	/* Dump last section */
4137 	offset += qed_dump_last_section(dump_buf, offset, dump);
4138 
4139 	if (dump) {
4140 		/* Unstall storms */
4141 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4142 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4143 
4144 		/* Clear parity status */
4145 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4146 
4147 		/* Enable all parities using MFW command */
4148 		if (parities_masked)
4149 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4150 	}
4151 
4152 	*num_dumped_dwords = offset;
4153 
4154 	return DBG_STATUS_OK;
4155 }
4156 
4157 /* Writes the specified failing Idle Check rule to the specified buffer.
4158  * Returns the dumped size in dwords.
4159  */
4160 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4161 				     struct qed_ptt *p_ptt,
4162 				     u32 *
4163 				     dump_buf,
4164 				     bool dump,
4165 				     u16 rule_id,
4166 				     const struct dbg_idle_chk_rule *rule,
4167 				     u16 fail_entry_id, u32 *cond_reg_values)
4168 {
4169 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4170 	const struct dbg_idle_chk_cond_reg *cond_regs;
4171 	const struct dbg_idle_chk_info_reg *info_regs;
4172 	u32 i, next_reg_offset = 0, offset = 0;
4173 	struct dbg_idle_chk_result_hdr *hdr;
4174 	const union dbg_idle_chk_reg *regs;
4175 	u8 reg_id;
4176 
4177 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4178 	regs = &((const union dbg_idle_chk_reg *)
4179 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4180 	cond_regs = &regs[0].cond_reg;
4181 	info_regs = &regs[rule->num_cond_regs].info_reg;
4182 
4183 	/* Dump rule data */
4184 	if (dump) {
4185 		memset(hdr, 0, sizeof(*hdr));
4186 		hdr->rule_id = rule_id;
4187 		hdr->mem_entry_id = fail_entry_id;
4188 		hdr->severity = rule->severity;
4189 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4190 	}
4191 
4192 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4193 
4194 	/* Dump condition register values */
4195 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4196 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4197 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4198 
4199 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4200 			  (dump_buf + offset);
4201 
4202 		/* Write register header */
4203 		if (!dump) {
4204 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4205 			    reg->entry_size;
4206 			continue;
4207 		}
4208 
4209 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4210 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4211 		reg_hdr->start_entry = reg->start_entry;
4212 		reg_hdr->size = reg->entry_size;
4213 		SET_FIELD(reg_hdr->data,
4214 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4215 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4216 		SET_FIELD(reg_hdr->data,
4217 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4218 
4219 		/* Write register values */
4220 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4221 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4222 	}
4223 
4224 	/* Dump info register values */
4225 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4226 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4227 		u32 block_id;
4228 
4229 		/* Check if register's block is in reset */
4230 		if (!dump) {
4231 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4232 			continue;
4233 		}
4234 
4235 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4236 		if (block_id >= MAX_BLOCK_ID) {
4237 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4238 			return 0;
4239 		}
4240 
4241 		if (!dev_data->block_in_reset[block_id]) {
4242 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4243 			bool wide_bus, eval_mode, mode_match = true;
4244 			u16 modes_buf_offset;
4245 			u32 addr;
4246 
4247 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4248 				  (dump_buf + offset);
4249 
4250 			/* Check mode */
4251 			eval_mode = GET_FIELD(reg->mode.data,
4252 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4253 			if (eval_mode) {
4254 				modes_buf_offset =
4255 				    GET_FIELD(reg->mode.data,
4256 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4257 				mode_match =
4258 					qed_is_mode_match(p_hwfn,
4259 							  &modes_buf_offset);
4260 			}
4261 
4262 			if (!mode_match)
4263 				continue;
4264 
4265 			addr = GET_FIELD(reg->data,
4266 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4267 			wide_bus = GET_FIELD(reg->data,
4268 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4269 
4270 			/* Write register header */
4271 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4272 			hdr->num_dumped_info_regs++;
4273 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4274 			reg_hdr->size = reg->size;
4275 			SET_FIELD(reg_hdr->data,
4276 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4277 				  rule->num_cond_regs + reg_id);
4278 
4279 			/* Write register values */
4280 			offset += qed_grc_dump_addr_range(p_hwfn,
4281 							  p_ptt,
4282 							  dump_buf + offset,
4283 							  dump,
4284 							  addr,
4285 							  reg->size, wide_bus,
4286 							  SPLIT_TYPE_NONE, 0);
4287 		}
4288 	}
4289 
4290 	return offset;
4291 }
4292 
4293 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4294 static u32
4295 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4296 			       u32 *dump_buf, bool dump,
4297 			       const struct dbg_idle_chk_rule *input_rules,
4298 			       u32 num_input_rules, u32 *num_failing_rules)
4299 {
4300 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4301 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4302 	u32 i, offset = 0;
4303 	u16 entry_id;
4304 	u8 reg_id;
4305 
4306 	*num_failing_rules = 0;
4307 
4308 	for (i = 0; i < num_input_rules; i++) {
4309 		const struct dbg_idle_chk_cond_reg *cond_regs;
4310 		const struct dbg_idle_chk_rule *rule;
4311 		const union dbg_idle_chk_reg *regs;
4312 		u16 num_reg_entries = 1;
4313 		bool check_rule = true;
4314 		const u32 *imm_values;
4315 
4316 		rule = &input_rules[i];
4317 		regs = &((const union dbg_idle_chk_reg *)
4318 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4319 			[rule->reg_offset];
4320 		cond_regs = &regs[0].cond_reg;
4321 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4322 			     [rule->imm_offset];
4323 
4324 		/* Check if all condition register blocks are out of reset, and
4325 		 * find maximal number of entries (all condition registers that
4326 		 * are memories must have the same size, which is > 1).
4327 		 */
4328 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4329 		     reg_id++) {
4330 			u32 block_id =
4331 				GET_FIELD(cond_regs[reg_id].data,
4332 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4333 
4334 			if (block_id >= MAX_BLOCK_ID) {
4335 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4336 				return 0;
4337 			}
4338 
4339 			check_rule = !dev_data->block_in_reset[block_id];
4340 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4341 				num_reg_entries = cond_regs[reg_id].num_entries;
4342 		}
4343 
4344 		if (!check_rule && dump)
4345 			continue;
4346 
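		/* When only computing the required dump size, assume the worst
		 * case: every entry of every rule fails.
		 */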
4347 		if (!dump) {
4348 			u32 entry_dump_size =
4349 				qed_idle_chk_dump_failure(p_hwfn,
4350 							  p_ptt,
4351 							  dump_buf + offset,
4352 							  false,
4353 							  rule->rule_id,
4354 							  rule,
4355 							  0,
4356 							  NULL);
4357 
4358 			offset += num_reg_entries * entry_dump_size;
4359 			(*num_failing_rules) += num_reg_entries;
4360 			continue;
4361 		}
4362 
4363 		/* Go over all register entries (number of entries is the same
4364 		 * for all condition registers).
4365 		 */
4366 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4367 			u32 next_reg_offset = 0;
4368 
4369 			/* Read current entry of all condition registers */
4370 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4371 			     reg_id++) {
4372 				const struct dbg_idle_chk_cond_reg *reg =
4373 					&cond_regs[reg_id];
4374 				u32 padded_entry_size, addr;
4375 				bool wide_bus;
4376 
4377 				/* Find GRC address (if it's a memory, the
4378 				 * address of the specific entry is calculated).
4379 				 */
4380 				addr = GET_FIELD(reg->data,
4381 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4382 				wide_bus =
4383 				    GET_FIELD(reg->data,
4384 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4385 				if (reg->num_entries > 1 ||
4386 				    reg->start_entry > 0) {
4387 					padded_entry_size =
4388 					   reg->entry_size > 1 ?
4389 					   roundup_pow_of_two(reg->entry_size) :
4390 					   1;
4391 					addr += (reg->start_entry + entry_id) *
4392 						padded_entry_size;
4393 				}
4394 
4395 				/* Read registers */
4396 				if (next_reg_offset + reg->entry_size >=
4397 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4398 					DP_NOTICE(p_hwfn,
4399 						  "idle check registers entry is too large\n");
4400 					return 0;
4401 				}
4402 
4403 				next_reg_offset +=
4404 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4405 							    cond_reg_values +
4406 							    next_reg_offset,
4407 							    dump, addr,
4408 							    reg->entry_size,
4409 							    wide_bus,
4410 							    SPLIT_TYPE_NONE, 0);
4411 			}
4412 
4413 			/* Call rule condition function.
4414 			 * If returns true, it's a failure.
4415 			 */
4416 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4417 							imm_values)) {
4418 				offset += qed_idle_chk_dump_failure(p_hwfn,
4419 							p_ptt,
4420 							dump_buf + offset,
4421 							dump,
4422 							rule->rule_id,
4423 							rule,
4424 							entry_id,
4425 							cond_reg_values);
4426 				(*num_failing_rules)++;
4427 			}
4428 		}
4429 	}
4430 
4431 	return offset;
4432 }
4433 
4434 /* Performs Idle Check Dump to the specified buffer.
4435  * Returns the dumped size in dwords.
4436  */
4437 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4438 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4439 {
4440 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4441 	u32 num_failing_rules = 0;
4442 
4443 	/* Dump global params */
4444 	offset += qed_dump_common_global_params(p_hwfn,
4445 						p_ptt,
4446 						dump_buf + offset, dump, 1);
4447 	offset += qed_dump_str_param(dump_buf + offset,
4448 				     dump, "dump-type", "idle-chk");
4449 
4450 	/* Dump idle check section header with a single parameter */
4451 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4452 	num_failing_rules_offset = offset;
4453 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4454 
4455 	while (input_offset <
4456 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4457 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4458 			(const struct dbg_idle_chk_cond_hdr *)
4459 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4460 			[input_offset++];
4461 		bool eval_mode, mode_match = true;
4462 		u32 curr_failing_rules;
4463 		u16 modes_buf_offset;
4464 
4465 		/* Check mode */
4466 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4467 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4468 		if (eval_mode) {
4469 			modes_buf_offset =
4470 				GET_FIELD(cond_hdr->mode.data,
4471 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4472 			mode_match = qed_is_mode_match(p_hwfn,
4473 						       &modes_buf_offset);
4474 		}
4475 
4476 		if (mode_match) {
4477 			offset +=
4478 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4479 				p_ptt,
4480 				dump_buf + offset,
4481 				dump,
4482 				(const struct dbg_idle_chk_rule *)
4483 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4484 				ptr[input_offset],
4485 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4486 				&curr_failing_rules);
4487 			num_failing_rules += curr_failing_rules;
4488 		}
4489 
4490 		input_offset += cond_hdr->data_size;
4491 	}
4492 
4493 	/* Overwrite num_rules parameter */
4494 	if (dump)
4495 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4496 				   dump, "num_rules", num_failing_rules);
4497 
4498 	/* Dump last section */
4499 	offset += qed_dump_last_section(dump_buf, offset, dump);
4500 
4501 	return offset;
4502 }
4503 
4504 /* Finds the meta data image in NVRAM */
4505 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4506 					    struct qed_ptt *p_ptt,
4507 					    u32 image_type,
4508 					    u32 *nvram_offset_bytes,
4509 					    u32 *nvram_size_bytes)
4510 {
4511 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4512 	struct mcp_file_att file_att;
4513 	int nvm_result;
4514 
4515 	/* Call NVRAM get file command */
4516 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4517 					p_ptt,
4518 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4519 					image_type,
4520 					&ret_mcp_resp,
4521 					&ret_mcp_param,
4522 					&ret_txn_size, (u32 *)&file_att);
4523 
4524 	/* Check response */
4525 	if (nvm_result ||
4526 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4527 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4528 
4529 	/* Update return values */
4530 	*nvram_offset_bytes = file_att.nvm_start_addr;
4531 	*nvram_size_bytes = file_att.len;
4532 
4533 	DP_VERBOSE(p_hwfn,
4534 		   QED_MSG_DEBUG,
4535 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4536 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4537 
4538 	/* Check alignment */
4539 	if (*nvram_size_bytes & 0x3)
4540 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4541 
4542 	return DBG_STATUS_OK;
4543 }
4544 
4545 /* Reads data from NVRAM */
4546 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4547 				      struct qed_ptt *p_ptt,
4548 				      u32 nvram_offset_bytes,
4549 				      u32 nvram_size_bytes, u32 *ret_buf)
4550 {
4551 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4552 	s32 bytes_left = nvram_size_bytes;
4553 	u32 read_offset = 0;
4554 
4555 	DP_VERBOSE(p_hwfn,
4556 		   QED_MSG_DEBUG,
4557 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4558 		   nvram_size_bytes);
4559 
4560 	do {
4561 		bytes_to_copy =
4562 		    (bytes_left >
4563 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4564 
4565 		/* Call NVRAM read command */
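		/* The command param packs the byte offset (low bits) together
		 * with the chunk length in the DRV_MB_PARAM_NVM_LEN_OFFSET
		 * field.
		 */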
4566 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4567 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4568 				       (nvram_offset_bytes +
4569 					read_offset) |
4570 				       (bytes_to_copy <<
4571 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4572 				       &ret_mcp_resp, &ret_mcp_param,
4573 				       &ret_read_size,
4574 				       (u32 *)((u8 *)ret_buf + read_offset)))
4575 			return DBG_STATUS_NVRAM_READ_FAILED;
4576 
4577 		/* Check response */
4578 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4579 			return DBG_STATUS_NVRAM_READ_FAILED;
4580 
4581 		/* Update read offset */
4582 		read_offset += ret_read_size;
4583 		bytes_left -= ret_read_size;
4584 	} while (bytes_left > 0);
4585 
4586 	return DBG_STATUS_OK;
4587 }
4588 
4589 /* Get info on the MCP Trace data in the scratchpad:
4590  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4591  * - trace_data_size (OUT): trace data size in bytes (without the header)
4592  */
4593 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4594 						   struct qed_ptt *p_ptt,
4595 						   u32 *trace_data_grc_addr,
4596 						   u32 *trace_data_size)
4597 {
4598 	u32 spad_trace_offsize, signature;
4599 
4600 	/* Read trace section offsize structure from MCP scratchpad */
4601 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4602 
4603 	/* Extract trace section address from offsize (in scratchpad) */
4604 	*trace_data_grc_addr =
4605 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4606 
4607 	/* Read signature from MCP trace section */
4608 	signature = qed_rd(p_hwfn, p_ptt,
4609 			   *trace_data_grc_addr +
4610 			   offsetof(struct mcp_trace, signature));
4611 
4612 	if (signature != MFW_TRACE_SIGNATURE)
4613 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4614 
4615 	/* Read trace size from MCP trace section */
4616 	*trace_data_size = qed_rd(p_hwfn,
4617 				  p_ptt,
4618 				  *trace_data_grc_addr +
4619 				  offsetof(struct mcp_trace, size));
4620 
4621 	return DBG_STATUS_OK;
4622 }
4623 
4624 /* Reads MCP trace meta data image from NVRAM
4625  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4626  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4627  *			      loaded from file).
4628  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4629  */
4630 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4631 						   struct qed_ptt *p_ptt,
4632 						   u32 trace_data_size_bytes,
4633 						   u32 *running_bundle_id,
4634 						   u32 *trace_meta_offset,
4635 						   u32 *trace_meta_size)
4636 {
4637 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4638 
4639 	/* Read MCP trace section offsize structure from MCP scratchpad */
4640 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4641 
4642 	/* Find running bundle ID */
4643 	running_mfw_addr =
4644 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4645 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4646 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4647 	if (*running_bundle_id > 1)
4648 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4649 
4650 	/* Find image in NVRAM */
4651 	nvram_image_type =
4652 	    (*running_bundle_id ==
4653 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4654 	return qed_find_nvram_image(p_hwfn,
4655 				    p_ptt,
4656 				    nvram_image_type,
4657 				    trace_meta_offset, trace_meta_size);
4658 }
4659 
4660 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4661 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4662 					       struct qed_ptt *p_ptt,
4663 					       u32 nvram_offset_in_bytes,
4664 					       u32 size_in_bytes, u32 *buf)
4665 {
4666 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4667 	enum dbg_status status;
4668 	u32 signature;
4669 
4670 	/* Read meta data from NVRAM */
4671 	status = qed_nvram_read(p_hwfn,
4672 				p_ptt,
4673 				nvram_offset_in_bytes, size_in_bytes, buf);
4674 	if (status != DBG_STATUS_OK)
4675 		return status;
4676 
4677 	/* Extract and check first signature */
4678 	signature = qed_read_unaligned_dword(byte_buf);
4679 	byte_buf += sizeof(signature);
4680 	if (signature != NVM_MAGIC_VALUE)
4681 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4682 
4683 	/* Extract number of modules */
4684 	modules_num = *(byte_buf++);
4685 
4686 	/* Skip all modules */
4687 	for (i = 0; i < modules_num; i++) {
4688 		module_len = *(byte_buf++);
4689 		byte_buf += module_len;
4690 	}
4691 
4692 	/* Extract and check second signature */
4693 	signature = qed_read_unaligned_dword(byte_buf);
4694 	byte_buf += sizeof(signature);
4695 	if (signature != NVM_MAGIC_VALUE)
4696 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4697 
4698 	return DBG_STATUS_OK;
4699 }
4700 
4701 /* Dump MCP Trace */
4702 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4703 					  struct qed_ptt *p_ptt,
4704 					  u32 *dump_buf,
4705 					  bool dump, u32 *num_dumped_dwords)
4706 {
4707 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4708 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4709 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4710 	enum dbg_status status;
4711 	bool mcp_access;
4712 	int halted = 0;
4713 
4714 	*num_dumped_dwords = 0;
4715 
4716 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
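	/* MCP halt and NVRAM (trace meta) access are skipped when the NO_MCP
	 * GRC param is set.
	 */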
4717 
4718 	/* Get trace data info */
4719 	status = qed_mcp_trace_get_data_info(p_hwfn,
4720 					     p_ptt,
4721 					     &trace_data_grc_addr,
4722 					     &trace_data_size_bytes);
4723 	if (status != DBG_STATUS_OK)
4724 		return status;
4725 
4726 	/* Dump global params */
4727 	offset += qed_dump_common_global_params(p_hwfn,
4728 						p_ptt,
4729 						dump_buf + offset, dump, 1);
4730 	offset += qed_dump_str_param(dump_buf + offset,
4731 				     dump, "dump-type", "mcp-trace");
4732 
4733 	/* Halt MCP while reading from scratchpad so the read data will be
4734 	 * consistent. If the halt fails, the MCP trace is taken anyway, with a
4735 	 * small risk that it may be corrupt.
4736 	 */
4737 	if (dump && mcp_access) {
4738 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4739 		if (!halted)
4740 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4741 	}
4742 
4743 	/* Find trace data size */
4744 	trace_data_size_dwords =
4745 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4746 			 BYTES_IN_DWORD);
4747 
4748 	/* Dump trace data section header and param */
4749 	offset += qed_dump_section_hdr(dump_buf + offset,
4750 				       dump, "mcp_trace_data", 1);
4751 	offset += qed_dump_num_param(dump_buf + offset,
4752 				     dump, "size", trace_data_size_dwords);
4753 
4754 	/* Read trace data from scratchpad into dump buffer */
4755 	offset += qed_grc_dump_addr_range(p_hwfn,
4756 					  p_ptt,
4757 					  dump_buf + offset,
4758 					  dump,
4759 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4760 					  trace_data_size_dwords, false,
4761 					  SPLIT_TYPE_NONE, 0);
4762 
4763 	/* Resume MCP (only if halt succeeded) */
4764 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4765 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4766 
4767 	/* Dump trace meta section header */
4768 	offset += qed_dump_section_hdr(dump_buf + offset,
4769 				       dump, "mcp_trace_meta", 1);
4770 
4771 	/* If MCP Trace meta size parameter was set, use it.
4772 	 * Otherwise, read trace meta.
4773 	 * trace_meta_size_bytes is dword-aligned.
4774 	 */
4775 	trace_meta_size_bytes =
4776 		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4777 	if ((!trace_meta_size_bytes || dump) && mcp_access) {
4778 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4779 						     p_ptt,
4780 						     trace_data_size_bytes,
4781 						     &running_bundle_id,
4782 						     &trace_meta_offset_bytes,
4783 						     &trace_meta_size_bytes);
4784 		if (status == DBG_STATUS_OK)
4785 			trace_meta_size_dwords =
4786 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4787 	}
4788 
4789 	/* Dump trace meta size param */
4790 	offset += qed_dump_num_param(dump_buf + offset,
4791 				     dump, "size", trace_meta_size_dwords);
4792 
4793 	/* Read trace meta image into dump buffer */
4794 	if (dump && trace_meta_size_dwords)
4795 		status = qed_mcp_trace_read_meta(p_hwfn,
4796 						 p_ptt,
4797 						 trace_meta_offset_bytes,
4798 						 trace_meta_size_bytes,
4799 						 dump_buf + offset);
4800 	if (status == DBG_STATUS_OK)
4801 		offset += trace_meta_size_dwords;
4802 
4803 	/* Dump last section */
4804 	offset += qed_dump_last_section(dump_buf, offset, dump);
4805 
4806 	*num_dumped_dwords = offset;
4807 
4808 	/* If no mcp access, indicate that the dump doesn't contain the meta
4809 	/* If no MCP access, indicate that the dump doesn't contain the meta
4810 	 */
4811 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4812 }
4813 
4814 /* Dump GRC FIFO */
4815 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4816 					 struct qed_ptt *p_ptt,
4817 					 u32 *dump_buf,
4818 					 bool dump, u32 *num_dumped_dwords)
4819 {
4820 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4821 	bool fifo_has_data;
4822 
4823 	*num_dumped_dwords = 0;
4824 
4825 	/* Dump global params */
4826 	offset += qed_dump_common_global_params(p_hwfn,
4827 						p_ptt,
4828 						dump_buf + offset, dump, 1);
4829 	offset += qed_dump_str_param(dump_buf + offset,
4830 				     dump, "dump-type", "reg-fifo");
4831 
4832 	/* Dump fifo data section header and param. The size param is 0 for
4833 	 * now, and is overwritten after reading the FIFO.
4834 	 */
4835 	offset += qed_dump_section_hdr(dump_buf + offset,
4836 				       dump, "reg_fifo_data", 1);
4837 	size_param_offset = offset;
4838 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4839 
4840 	if (!dump) {
4841 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4842 		 * test how much data is available, except for reading it.
4843 		 */
4844 		offset += REG_FIFO_DEPTH_DWORDS;
4845 		goto out;
4846 	}
4847 
4848 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4849 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4850 
4851 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4852 	 * and must be accessed atomically. Stop once dwords_read reaches the
4853 	 * buffer size, since more entries could be added to the FIFO while we
4854 	 * are emptying it.
4855 	 */
4856 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4857 	len = REG_FIFO_ELEMENT_DWORDS;
4858 	for (dwords_read = 0;
4859 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4860 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4861 		offset += qed_grc_dump_addr_range(p_hwfn,
4862 						  p_ptt,
4863 						  dump_buf + offset,
4864 						  true,
4865 						  addr,
4866 						  len,
4867 						  true, SPLIT_TYPE_NONE,
4868 						  0);
4869 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4870 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4871 	}
4872 
4873 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4874 			   dwords_read);
4875 out:
4876 	/* Dump last section */
4877 	offset += qed_dump_last_section(dump_buf, offset, dump);
4878 
4879 	*num_dumped_dwords = offset;
4880 
4881 	return DBG_STATUS_OK;
4882 }
4883 
4884 /* Dump IGU FIFO */
4885 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4886 					 struct qed_ptt *p_ptt,
4887 					 u32 *dump_buf,
4888 					 bool dump, u32 *num_dumped_dwords)
4889 {
4890 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4891 	bool fifo_has_data;
4892 
4893 	*num_dumped_dwords = 0;
4894 
4895 	/* Dump global params */
4896 	offset += qed_dump_common_global_params(p_hwfn,
4897 						p_ptt,
4898 						dump_buf + offset, dump, 1);
4899 	offset += qed_dump_str_param(dump_buf + offset,
4900 				     dump, "dump-type", "igu-fifo");
4901 
4902 	/* Dump fifo data section header and param. The size param is 0 for
4903 	 * now, and is overwritten after reading the FIFO.
4904 	 */
4905 	offset += qed_dump_section_hdr(dump_buf + offset,
4906 				       dump, "igu_fifo_data", 1);
4907 	size_param_offset = offset;
4908 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4909 
4910 	if (!dump) {
4911 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4912 		 * test how much data is available, except for reading it.
4913 		 */
4914 		offset += IGU_FIFO_DEPTH_DWORDS;
4915 		goto out;
4916 	}
4917 
4918 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4919 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4920 
4921 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4922 	 * and must be accessed atomically. Stop once dwords_read reaches the
4923 	 * buffer size, since more entries could be added to the FIFO while we
4924 	 * are emptying it.
4925 	 */
4926 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4927 	len = IGU_FIFO_ELEMENT_DWORDS;
4928 	for (dwords_read = 0;
4929 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4930 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4931 		offset += qed_grc_dump_addr_range(p_hwfn,
4932 						  p_ptt,
4933 						  dump_buf + offset,
4934 						  true,
4935 						  addr,
4936 						  len,
4937 						  true, SPLIT_TYPE_NONE,
4938 						  0);
4939 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4940 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4941 	}
4942 
4943 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4944 			   dwords_read);
4945 out:
4946 	/* Dump last section */
4947 	offset += qed_dump_last_section(dump_buf, offset, dump);
4948 
4949 	*num_dumped_dwords = offset;
4950 
4951 	return DBG_STATUS_OK;
4952 }
4953 
4954 /* Protection Override dump */
4955 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4956 						    struct qed_ptt *p_ptt,
4957 						    u32 *dump_buf,
4958 						    bool dump,
4959 						    u32 *num_dumped_dwords)
4960 {
4961 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4962 
4963 	*num_dumped_dwords = 0;
4964 
4965 	/* Dump global params */
4966 	offset += qed_dump_common_global_params(p_hwfn,
4967 						p_ptt,
4968 						dump_buf + offset, dump, 1);
4969 	offset += qed_dump_str_param(dump_buf + offset,
4970 				     dump, "dump-type", "protection-override");
4971 
4972 	/* Dump data section header and param. The size param is 0 for now,
4973 	 * and is overwritten after reading the data.
4974 	 */
4975 	offset += qed_dump_section_hdr(dump_buf + offset,
4976 				       dump, "protection_override_data", 1);
4977 	size_param_offset = offset;
4978 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4979 
4980 	if (!dump) {
4981 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4982 		goto out;
4983 	}
4984 
4985 	/* Add override window info to buffer */
4986 	override_window_dwords =
4987 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4988 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4989 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4990 	offset += qed_grc_dump_addr_range(p_hwfn,
4991 					  p_ptt,
4992 					  dump_buf + offset,
4993 					  true,
4994 					  addr,
4995 					  override_window_dwords,
4996 					  true, SPLIT_TYPE_NONE, 0);
4997 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4998 			   override_window_dwords);
4999 out:
5000 	/* Dump last section */
5001 	offset += qed_dump_last_section(dump_buf, offset, dump);
5002 
5003 	*num_dumped_dwords = offset;
5004 
5005 	return DBG_STATUS_OK;
5006 }
5007 
5008 /* Performs FW Asserts Dump to the specified buffer.
5009  * Returns the dumped size in dwords.
5010  */
5011 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5012 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5013 {
5014 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5015 	struct fw_asserts_ram_section *asserts;
5016 	char storm_letter_str[2] = "?";
5017 	struct fw_info fw_info;
5018 	u32 offset = 0;
5019 	u8 storm_id;
5020 
5021 	/* Dump global params */
5022 	offset += qed_dump_common_global_params(p_hwfn,
5023 						p_ptt,
5024 						dump_buf + offset, dump, 1);
5025 	offset += qed_dump_str_param(dump_buf + offset,
5026 				     dump, "dump-type", "fw-asserts");
5027 
5028 	/* Find Storm dump size */
5029 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5030 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
5031 		struct storm_defs *storm = &s_storm_defs[storm_id];
5032 		u32 last_list_idx, addr;
5033 
5034 		if (dev_data->block_in_reset[storm->block_id])
5035 			continue;
5036 
5037 		/* Read FW info for the current Storm */
5038 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5039 
5040 		asserts = &fw_info.fw_asserts_section;
5041 
5042 		/* Dump FW Asserts section header and params */
5043 		storm_letter_str[0] = storm->letter;
5044 		offset += qed_dump_section_hdr(dump_buf + offset,
5045 					       dump, "fw_asserts", 2);
5046 		offset += qed_dump_str_param(dump_buf + offset,
5047 					     dump, "storm", storm_letter_str);
5048 		offset += qed_dump_num_param(dump_buf + offset,
5049 					     dump,
5050 					     "size",
5051 					     asserts->list_element_dword_size);
5052 
5053 		/* Read and dump FW Asserts data */
5054 		if (!dump) {
5055 			offset += asserts->list_element_dword_size;
5056 			continue;
5057 		}
5058 
5059 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5060 			SEM_FAST_REG_INT_RAM +
5061 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5062 		next_list_idx_addr = fw_asserts_section_addr +
5063 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5064 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
5065 		last_list_idx = (next_list_idx > 0 ?
5066 				 next_list_idx :
5067 				 asserts->list_num_elements) - 1;
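		/* The last written element precedes next_list_idx; when
		 * next_list_idx is 0, wrap around to the last element of the
		 * list.
		 */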
5068 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5069 		       asserts->list_dword_offset +
5070 		       last_list_idx * asserts->list_element_dword_size;
5071 		offset +=
5072 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5073 					    dump_buf + offset,
5074 					    dump, addr,
5075 					    asserts->list_element_dword_size,
5076 					    false, SPLIT_TYPE_NONE, 0);
5077 	}
5078 
5079 	/* Dump last section */
5080 	offset += qed_dump_last_section(dump_buf, offset, dump);
5081 
5082 	return offset;
5083 }
5084 
5085 /***************************** Public Functions *******************************/
5086 
5087 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5088 {
5089 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5090 	u8 buf_id;
5091 
5092 	/* convert binary data to debug arrays */
5093 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5094 		s_dbg_arrays[buf_id].ptr =
5095 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5096 		s_dbg_arrays[buf_id].size_in_dwords =
5097 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5098 	}
5099 
5100 	return DBG_STATUS_OK;
5101 }
5102 
5103 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5104 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5105 {
5106 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5107 	u8 storm_id;
5108 
5109 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5110 		struct storm_defs *storm = &s_storm_defs[storm_id];
5111 
5112 		/* Skip Storm if it's in reset */
5113 		if (dev_data->block_in_reset[storm->block_id])
5114 			continue;
5115 
5116 		/* Read FW info for the current Storm */
5117 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5118 
5119 		return true;
5120 	}
5121 
5122 	return false;
5123 }
5124 
5125 /* Assign default GRC param values */
5126 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5127 {
5128 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5129 	u32 i;
5130 
5131 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5132 		if (!s_grc_param_defs[i].is_persistent)
5133 			dev_data->grc.param_val[i] =
5134 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5135 }
5136 
5137 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5138 					      struct qed_ptt *p_ptt,
5139 					      u32 *buf_size)
5140 {
5141 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5142 
5143 	*buf_size = 0;
5144 
5145 	if (status != DBG_STATUS_OK)
5146 		return status;
5147 
5148 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5149 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5150 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5151 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5152 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5153 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5154 
5155 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5156 }
5157 
5158 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5159 				 struct qed_ptt *p_ptt,
5160 				 u32 *dump_buf,
5161 				 u32 buf_size_in_dwords,
5162 				 u32 *num_dumped_dwords)
5163 {
5164 	u32 needed_buf_size_in_dwords;
5165 	enum dbg_status status;
5166 
5167 	*num_dumped_dwords = 0;
5168 
5169 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5170 					       p_ptt,
5171 					       &needed_buf_size_in_dwords);
5172 	if (status != DBG_STATUS_OK)
5173 		return status;
5174 
5175 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5176 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5177 
5178 	/* GRC Dump */
5179 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5180 
5181 	/* Revert GRC params to their default */
5182 	qed_dbg_grc_set_params_default(p_hwfn);
5183 
5184 	return status;
5185 }
5186 
5187 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5188 						   struct qed_ptt *p_ptt,
5189 						   u32 *buf_size)
5190 {
5191 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5192 	struct idle_chk_data *idle_chk;
5193 	enum dbg_status status;
5194 
5195 	idle_chk = &dev_data->idle_chk;
5196 	*buf_size = 0;
5197 
5198 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5199 	if (status != DBG_STATUS_OK)
5200 		return status;
5201 
5202 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5203 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5204 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5205 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5206 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5207 
5208 	if (!idle_chk->buf_size_set) {
5209 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5210 						       p_ptt, NULL, false);
5211 		idle_chk->buf_size_set = true;
5212 	}
5213 
5214 	*buf_size = idle_chk->buf_size;
5215 
5216 	return DBG_STATUS_OK;
5217 }
5218 
5219 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5220 				      struct qed_ptt *p_ptt,
5221 				      u32 *dump_buf,
5222 				      u32 buf_size_in_dwords,
5223 				      u32 *num_dumped_dwords)
5224 {
5225 	u32 needed_buf_size_in_dwords;
5226 	enum dbg_status status;
5227 
5228 	*num_dumped_dwords = 0;
5229 
5230 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5231 						    p_ptt,
5232 						    &needed_buf_size_in_dwords);
5233 	if (status != DBG_STATUS_OK)
5234 		return status;
5235 
5236 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5237 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5238 
5239 	/* Update reset state */
5240 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5241 
5242 	/* Idle Check Dump */
5243 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5244 
5245 	/* Revert GRC params to their default */
5246 	qed_dbg_grc_set_params_default(p_hwfn);
5247 
5248 	return DBG_STATUS_OK;
5249 }
5250 
5251 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5252 						    struct qed_ptt *p_ptt,
5253 						    u32 *buf_size)
5254 {
5255 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5256 
5257 	*buf_size = 0;
5258 
5259 	if (status != DBG_STATUS_OK)
5260 		return status;
5261 
5262 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5263 }
5264 
5265 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5266 				       struct qed_ptt *p_ptt,
5267 				       u32 *dump_buf,
5268 				       u32 buf_size_in_dwords,
5269 				       u32 *num_dumped_dwords)
5270 {
5271 	u32 needed_buf_size_in_dwords;
5272 	enum dbg_status status;
5273 
5274 	status =
5275 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5276 						    p_ptt,
5277 						    &needed_buf_size_in_dwords);
5278 	if (status != DBG_STATUS_OK &&
5279 	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5280 		return status;
5281 
5282 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5283 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5284 
5285 	/* Update reset state */
5286 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5287 
5288 	/* Perform dump */
5289 	status = qed_mcp_trace_dump(p_hwfn,
5290 				    p_ptt, dump_buf, true, num_dumped_dwords);
5291 
5292 	/* Revert GRC params to their default */
5293 	qed_dbg_grc_set_params_default(p_hwfn);
5294 
5295 	return status;
5296 }
5297 
5298 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5299 						   struct qed_ptt *p_ptt,
5300 						   u32 *buf_size)
5301 {
5302 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5303 
5304 	*buf_size = 0;
5305 
5306 	if (status != DBG_STATUS_OK)
5307 		return status;
5308 
5309 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5310 }
5311 
5312 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5313 				      struct qed_ptt *p_ptt,
5314 				      u32 *dump_buf,
5315 				      u32 buf_size_in_dwords,
5316 				      u32 *num_dumped_dwords)
5317 {
5318 	u32 needed_buf_size_in_dwords;
5319 	enum dbg_status status;
5320 
5321 	*num_dumped_dwords = 0;
5322 
5323 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5324 						    p_ptt,
5325 						    &needed_buf_size_in_dwords);
5326 	if (status != DBG_STATUS_OK)
5327 		return status;
5328 
5329 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5330 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5331 
5332 	/* Update reset state */
5333 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5334 
5335 	status = qed_reg_fifo_dump(p_hwfn,
5336 				   p_ptt, dump_buf, true, num_dumped_dwords);
5337 
5338 	/* Revert GRC params to their default */
5339 	qed_dbg_grc_set_params_default(p_hwfn);
5340 
5341 	return status;
5342 }
5343 
5344 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5345 						   struct qed_ptt *p_ptt,
5346 						   u32 *buf_size)
5347 {
5348 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5349 
5350 	*buf_size = 0;
5351 
5352 	if (status != DBG_STATUS_OK)
5353 		return status;
5354 
5355 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5356 }
5357 
5358 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5359 				      struct qed_ptt *p_ptt,
5360 				      u32 *dump_buf,
5361 				      u32 buf_size_in_dwords,
5362 				      u32 *num_dumped_dwords)
5363 {
5364 	u32 needed_buf_size_in_dwords;
5365 	enum dbg_status status;
5366 
5367 	*num_dumped_dwords = 0;
5368 
5369 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5370 						    p_ptt,
5371 						    &needed_buf_size_in_dwords);
5372 	if (status != DBG_STATUS_OK)
5373 		return status;
5374 
5375 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5376 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5377 
5378 	/* Update reset state */
5379 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5380 
5381 	status = qed_igu_fifo_dump(p_hwfn,
5382 				   p_ptt, dump_buf, true, num_dumped_dwords);
5383 	/* Revert GRC params to their default */
5384 	qed_dbg_grc_set_params_default(p_hwfn);
5385 
5386 	return status;
5387 }
5388 
5389 enum dbg_status
5390 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5391 					      struct qed_ptt *p_ptt,
5392 					      u32 *buf_size)
5393 {
5394 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5395 
5396 	*buf_size = 0;
5397 
5398 	if (status != DBG_STATUS_OK)
5399 		return status;
5400 
5401 	return qed_protection_override_dump(p_hwfn,
5402 					    p_ptt, NULL, false, buf_size);
5403 }
5404 
5405 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5406 						 struct qed_ptt *p_ptt,
5407 						 u32 *dump_buf,
5408 						 u32 buf_size_in_dwords,
5409 						 u32 *num_dumped_dwords)
5410 {
5411 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5412 	enum dbg_status status;
5413 
5414 	*num_dumped_dwords = 0;
5415 
5416 	status =
5417 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5418 							      p_ptt,
5419 							      p_size);
5420 	if (status != DBG_STATUS_OK)
5421 		return status;
5422 
5423 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5424 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5425 
5426 	/* Update reset state */
5427 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5428 
5429 	status = qed_protection_override_dump(p_hwfn,
5430 					      p_ptt,
5431 					      dump_buf,
5432 					      true, num_dumped_dwords);
5433 
5434 	/* Revert GRC params to their default */
5435 	qed_dbg_grc_set_params_default(p_hwfn);
5436 
5437 	return status;
5438 }
5439 
5440 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5441 						     struct qed_ptt *p_ptt,
5442 						     u32 *buf_size)
5443 {
5444 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5445 
5446 	*buf_size = 0;
5447 
5448 	if (status != DBG_STATUS_OK)
5449 		return status;
5450 
5451 	/* Update reset state */
5452 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5453 
5454 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5455 
5456 	return DBG_STATUS_OK;
5457 }
5458 
5459 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5460 					struct qed_ptt *p_ptt,
5461 					u32 *dump_buf,
5462 					u32 buf_size_in_dwords,
5463 					u32 *num_dumped_dwords)
5464 {
5465 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5466 	enum dbg_status status;
5467 
5468 	*num_dumped_dwords = 0;
5469 
5470 	status =
5471 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5472 						     p_ptt,
5473 						     p_size);
5474 	if (status != DBG_STATUS_OK)
5475 		return status;
5476 
5477 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5478 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5479 
5480 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5481 
5482 	/* Revert GRC params to their default */
5483 	qed_dbg_grc_set_params_default(p_hwfn);
5484 
5485 	return DBG_STATUS_OK;
5486 }
5487 
5488 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5489 				  struct qed_ptt *p_ptt,
5490 				  enum block_id block_id,
5491 				  enum dbg_attn_type attn_type,
5492 				  bool clear_status,
5493 				  struct dbg_attn_block_result *results)
5494 {
5495 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5496 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5497 	const struct dbg_attn_reg *attn_reg_arr;
5498 
5499 	if (status != DBG_STATUS_OK)
5500 		return status;
5501 
5502 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5503 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5504 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5505 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5506 
5507 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5508 					       attn_type, &num_attn_regs);
5509 
5510 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5511 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5512 		struct dbg_attn_reg_result *reg_result;
5513 		u32 sts_addr, sts_val;
5514 		u16 modes_buf_offset;
5515 		bool eval_mode;
5516 
5517 		/* Check mode */
5518 		eval_mode = GET_FIELD(reg_data->mode.data,
5519 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5520 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5521 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5522 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5523 			continue;
5524 
5525 		/* Mode match - read attention status register */
5526 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5527 					   reg_data->sts_clr_address :
5528 					   GET_FIELD(reg_data->data,
5529 						     DBG_ATTN_REG_STS_ADDRESS));
5530 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5531 		if (!sts_val)
5532 			continue;
5533 
5534 		/* Non-zero attention status - add to results */
5535 		reg_result = &results->reg_results[num_result_regs];
5536 		SET_FIELD(reg_result->data,
5537 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5538 		SET_FIELD(reg_result->data,
5539 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5540 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5541 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5542 		reg_result->sts_val = sts_val;
5543 		reg_result->mask_val = qed_rd(p_hwfn,
5544 					      p_ptt,
5545 					      DWORDS_TO_BYTES
5546 					      (reg_data->mask_address));
5547 		num_result_regs++;
5548 	}
5549 
5550 	results->block_id = (u8)block_id;
5551 	results->names_offset =
5552 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5553 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5554 	SET_FIELD(results->data,
5555 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5556 
5557 	return DBG_STATUS_OK;
5558 }
5559 
5560 /******************************* Data Types **********************************/
5561 
5562 struct block_info {
5563 	const char *name;
5564 	enum block_id id;
5565 };
5566 
5567 /* REG fifo element */
5568 struct reg_fifo_element {
5569 	u64 data;
5570 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5571 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5572 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5573 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5574 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5575 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5576 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5577 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5578 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5579 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5580 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5581 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5582 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5583 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5584 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5585 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5586 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5587 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5588 };
5589 
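/* Example of decoding an element (illustrative): given element.data read from
 * the GRC FIFO,
 *
 *	addr = GET_FIELD(element.data, REG_FIFO_ELEMENT_ADDRESS) *
 *	       REG_FIFO_ELEMENT_ADDR_FACTOR;
 *	vf   = GET_FIELD(element.data, REG_FIFO_ELEMENT_VF);
 *
 * where a VF value of REG_FIFO_ELEMENT_IS_PF_VF_VAL (127) indicates that the
 * access was made by a PF (see qed_parse_reg_fifo_dump() below).
 */
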
5590 /* IGU fifo element */
5591 struct igu_fifo_element {
5592 	u32 dword0;
5593 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5594 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5595 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5596 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5597 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5598 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5599 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5600 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5601 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5602 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5603 	u32 dword1;
5604 	u32 dword2;
5605 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5606 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5607 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5608 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5609 	u32 reserved;
5610 };
5611 
5612 struct igu_fifo_wr_data {
5613 	u32 data;
5614 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5615 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5616 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5617 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5618 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5619 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5620 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5621 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5622 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5623 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5624 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5625 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5626 };
5627 
5628 struct igu_fifo_cleanup_wr_data {
5629 	u32 data;
5630 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5631 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5632 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5633 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5634 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5635 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5636 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5637 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5638 };
5639 
5640 /* Protection override element */
5641 struct protection_override_element {
5642 	u64 data;
5643 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5644 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5645 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5646 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5647 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5648 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5649 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5650 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5651 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5652 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5653 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5654 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5655 };
5656 
5657 enum igu_fifo_sources {
5658 	IGU_SRC_PXP0,
5659 	IGU_SRC_PXP1,
5660 	IGU_SRC_PXP2,
5661 	IGU_SRC_PXP3,
5662 	IGU_SRC_PXP4,
5663 	IGU_SRC_PXP5,
5664 	IGU_SRC_PXP6,
5665 	IGU_SRC_PXP7,
5666 	IGU_SRC_CAU,
5667 	IGU_SRC_ATTN,
5668 	IGU_SRC_GRC
5669 };
5670 
5671 enum igu_fifo_addr_types {
5672 	IGU_ADDR_TYPE_MSIX_MEM,
5673 	IGU_ADDR_TYPE_WRITE_PBA,
5674 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5675 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5676 	IGU_ADDR_TYPE_READ_INT,
5677 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5678 	IGU_ADDR_TYPE_RESERVED
5679 };
5680 
5681 struct igu_fifo_addr_data {
5682 	u16 start_addr;
5683 	u16 end_addr;
5684 	char *desc;
5685 	char *vf_desc;
5686 	enum igu_fifo_addr_types type;
5687 };
5688 
5689 struct mcp_trace_meta {
5690 	u32 modules_num;
5691 	char **modules;
5692 	u32 formats_num;
5693 	struct mcp_trace_format *formats;
5694 	bool is_allocated;
5695 };
5696 
5697 /* Debug Tools user data */
5698 struct dbg_tools_user_data {
5699 	struct mcp_trace_meta mcp_trace_meta;
5700 	const u32 *mcp_trace_user_meta_buf;
5701 };
5702 
5703 /******************************** Constants **********************************/
5704 
5705 #define MAX_MSG_LEN				1024
5706 
5707 #define MCP_TRACE_MAX_MODULE_LEN		8
5708 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5709 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5710 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5711 
5712 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5713 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5714 
5715 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5716 
5717 /***************************** Constant Arrays *******************************/
5718 
5719 struct user_dbg_array {
5720 	const u32 *ptr;
5721 	u32 size_in_dwords;
5722 };
5723 
5724 /* Debug arrays */
5725 static struct user_dbg_array
5726 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5727 
5728 /* Block names array */
5729 static struct block_info s_block_info_arr[] = {
5730 	{"grc", BLOCK_GRC},
5731 	{"miscs", BLOCK_MISCS},
5732 	{"misc", BLOCK_MISC},
5733 	{"dbu", BLOCK_DBU},
5734 	{"pglue_b", BLOCK_PGLUE_B},
5735 	{"cnig", BLOCK_CNIG},
5736 	{"cpmu", BLOCK_CPMU},
5737 	{"ncsi", BLOCK_NCSI},
5738 	{"opte", BLOCK_OPTE},
5739 	{"bmb", BLOCK_BMB},
5740 	{"pcie", BLOCK_PCIE},
5741 	{"mcp", BLOCK_MCP},
5742 	{"mcp2", BLOCK_MCP2},
5743 	{"pswhst", BLOCK_PSWHST},
5744 	{"pswhst2", BLOCK_PSWHST2},
5745 	{"pswrd", BLOCK_PSWRD},
5746 	{"pswrd2", BLOCK_PSWRD2},
5747 	{"pswwr", BLOCK_PSWWR},
5748 	{"pswwr2", BLOCK_PSWWR2},
5749 	{"pswrq", BLOCK_PSWRQ},
5750 	{"pswrq2", BLOCK_PSWRQ2},
5751 	{"pglcs", BLOCK_PGLCS},
5752 	{"ptu", BLOCK_PTU},
5753 	{"dmae", BLOCK_DMAE},
5754 	{"tcm", BLOCK_TCM},
5755 	{"mcm", BLOCK_MCM},
5756 	{"ucm", BLOCK_UCM},
5757 	{"xcm", BLOCK_XCM},
5758 	{"ycm", BLOCK_YCM},
5759 	{"pcm", BLOCK_PCM},
5760 	{"qm", BLOCK_QM},
5761 	{"tm", BLOCK_TM},
5762 	{"dorq", BLOCK_DORQ},
5763 	{"brb", BLOCK_BRB},
5764 	{"src", BLOCK_SRC},
5765 	{"prs", BLOCK_PRS},
5766 	{"tsdm", BLOCK_TSDM},
5767 	{"msdm", BLOCK_MSDM},
5768 	{"usdm", BLOCK_USDM},
5769 	{"xsdm", BLOCK_XSDM},
5770 	{"ysdm", BLOCK_YSDM},
5771 	{"psdm", BLOCK_PSDM},
5772 	{"tsem", BLOCK_TSEM},
5773 	{"msem", BLOCK_MSEM},
5774 	{"usem", BLOCK_USEM},
5775 	{"xsem", BLOCK_XSEM},
5776 	{"ysem", BLOCK_YSEM},
5777 	{"psem", BLOCK_PSEM},
5778 	{"rss", BLOCK_RSS},
5779 	{"tmld", BLOCK_TMLD},
5780 	{"muld", BLOCK_MULD},
5781 	{"yuld", BLOCK_YULD},
5782 	{"xyld", BLOCK_XYLD},
5783 	{"ptld", BLOCK_PTLD},
5784 	{"ypld", BLOCK_YPLD},
5785 	{"prm", BLOCK_PRM},
5786 	{"pbf_pb1", BLOCK_PBF_PB1},
5787 	{"pbf_pb2", BLOCK_PBF_PB2},
5788 	{"rpb", BLOCK_RPB},
5789 	{"btb", BLOCK_BTB},
5790 	{"pbf", BLOCK_PBF},
5791 	{"rdif", BLOCK_RDIF},
5792 	{"tdif", BLOCK_TDIF},
5793 	{"cdu", BLOCK_CDU},
5794 	{"ccfc", BLOCK_CCFC},
5795 	{"tcfc", BLOCK_TCFC},
5796 	{"igu", BLOCK_IGU},
5797 	{"cau", BLOCK_CAU},
5798 	{"rgfs", BLOCK_RGFS},
5799 	{"rgsrc", BLOCK_RGSRC},
5800 	{"tgfs", BLOCK_TGFS},
5801 	{"tgsrc", BLOCK_TGSRC},
5802 	{"umac", BLOCK_UMAC},
5803 	{"xmac", BLOCK_XMAC},
5804 	{"dbg", BLOCK_DBG},
5805 	{"nig", BLOCK_NIG},
5806 	{"wol", BLOCK_WOL},
5807 	{"bmbn", BLOCK_BMBN},
5808 	{"ipc", BLOCK_IPC},
5809 	{"nwm", BLOCK_NWM},
5810 	{"nws", BLOCK_NWS},
5811 	{"ms", BLOCK_MS},
5812 	{"phy_pcie", BLOCK_PHY_PCIE},
5813 	{"led", BLOCK_LED},
5814 	{"avs_wrap", BLOCK_AVS_WRAP},
5815 	{"pxpreqbus", BLOCK_PXPREQBUS},
5816 	{"misc_aeu", BLOCK_MISC_AEU},
5817 	{"bar0_map", BLOCK_BAR0_MAP}
5818 };
5819 
5820 /* Status string array */
5821 static const char * const s_status_str[] = {
5822 	/* DBG_STATUS_OK */
5823 	"Operation completed successfully",
5824 
5825 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5826 	"Debug application version wasn't set",
5827 
5828 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5829 	"Unsupported debug application version",
5830 
5831 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5832 	"The debug block wasn't reset since the last recording",
5833 
5834 	/* DBG_STATUS_INVALID_ARGS */
5835 	"Invalid arguments",
5836 
5837 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5838 	"The debug output was already set",
5839 
5840 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5841 	"Invalid PCI buffer size",
5842 
5843 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5844 	"PCI buffer allocation failed",
5845 
5846 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5847 	"A PCI buffer wasn't allocated",
5848 
5849 	/* DBG_STATUS_TOO_MANY_INPUTS */
5850 	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5851 
5852 	/* DBG_STATUS_INPUT_OVERLAP */
5853 	"Overlapping debug bus inputs",
5854 
5855 	/* DBG_STATUS_HW_ONLY_RECORDING */
5856 	"Cannot record Storm data since the entire recording cycle is used by HW",
5857 
5858 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5859 	"The Storm was already enabled",
5860 
5861 	/* DBG_STATUS_STORM_NOT_ENABLED */
5862 	"The specified Storm wasn't enabled",
5863 
5864 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5865 	"The block was already enabled",
5866 
5867 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5868 	"The specified block wasn't enabled",
5869 
5870 	/* DBG_STATUS_NO_INPUT_ENABLED */
5871 	"No input was enabled for recording",
5872 
5873 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5874 	"Filters and triggers are not allowed when recording in 64b units",
5875 
5876 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5877 	"The filter was already enabled",
5878 
5879 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5880 	"The trigger was already enabled",
5881 
5882 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5883 	"The trigger wasn't enabled",
5884 
5885 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5886 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5887 
5888 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5889 	"Cannot add more than 3 trigger states",
5890 
5891 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5892 	"Cannot add more than 4 constraints per filter or trigger state",
5893 
5894 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5895 	"The recording wasn't started",
5896 
5897 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5898 	"A trigger was configured, but it didn't trigger",
5899 
5900 	/* DBG_STATUS_NO_DATA_RECORDED */
5901 	"No data was recorded",
5902 
5903 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5904 	"Dump buffer is too small",
5905 
5906 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5907 	"Dumped data is not aligned to chunks",
5908 
5909 	/* DBG_STATUS_UNKNOWN_CHIP */
5910 	"Unknown chip",
5911 
5912 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5913 	"Failed allocating virtual memory",
5914 
5915 	/* DBG_STATUS_BLOCK_IN_RESET */
5916 	"The input block is in reset",
5917 
5918 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5919 	"Invalid MCP trace signature found in NVRAM",
5920 
5921 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5922 	"Invalid bundle ID found in NVRAM",
5923 
5924 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5925 	"Failed getting NVRAM image",
5926 
5927 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5928 	"NVRAM image is not dword-aligned",
5929 
5930 	/* DBG_STATUS_NVRAM_READ_FAILED */
5931 	"Failed reading from NVRAM",
5932 
5933 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5934 	"Idle check parsing failed",
5935 
5936 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5937 	"MCP Trace data is corrupt",
5938 
5939 	/* DBG_STATUS_MCP_TRACE_NO_META */
5940 	"Dump doesn't contain meta data - it must be provided in image file",
5941 
5942 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5943 	"Failed to halt MCP",
5944 
5945 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5946 	"Failed to resume MCP after halt",
5947 
5948 	/* DBG_STATUS_RESERVED2 */
5949 	"Reserved debug status - shouldn't be returned",
5950 
5951 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5952 	"Failed to empty SEMI sync FIFO",
5953 
5954 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5955 	"IGU FIFO data is corrupt",
5956 
5957 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5958 	"MCP failed to mask parities",
5959 
5960 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5961 	"FW Asserts parsing failed",
5962 
5963 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5964 	"GRC FIFO data is corrupt",
5965 
5966 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5967 	"Protection Override data is corrupt",
5968 
5969 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5970 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5971 
5972 	/* DBG_STATUS_FILTER_BUG */
5973 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5974 
5975 	/* DBG_STATUS_NON_MATCHING_LINES */
5976 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5977 
5978 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5979 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5980 
5981 	/* DBG_STATUS_DBG_BUS_IN_USE */
5982 	"The debug bus is in use"
5983 };
5984 
5985 /* Idle check severity names array */
5986 static const char * const s_idle_chk_severity_str[] = {
5987 	"Error",
5988 	"Error if no traffic",
5989 	"Warning"
5990 };
5991 
5992 /* MCP Trace level names array */
5993 static const char * const s_mcp_trace_level_str[] = {
5994 	"ERROR",
5995 	"TRACE",
5996 	"DEBUG"
5997 };
5998 
5999 /* Access type names array */
6000 static const char * const s_access_strs[] = {
6001 	"read",
6002 	"write"
6003 };
6004 
6005 /* Privilege type names array */
6006 static const char * const s_privilege_strs[] = {
6007 	"VF",
6008 	"PDA",
6009 	"HV",
6010 	"UA"
6011 };
6012 
6013 /* Protection type names array */
6014 static const char * const s_protection_strs[] = {
6015 	"(default)",
6016 	"(default)",
6017 	"(default)",
6018 	"(default)",
6019 	"override VF",
6020 	"override PDA",
6021 	"override HV",
6022 	"override UA"
6023 };
6024 
6025 /* Master type names array */
6026 static const char * const s_master_strs[] = {
6027 	"???",
6028 	"pxp",
6029 	"mcp",
6030 	"msdm",
6031 	"psdm",
6032 	"ysdm",
6033 	"usdm",
6034 	"tsdm",
6035 	"xsdm",
6036 	"dbu",
6037 	"dmae",
6038 	"???",
6039 	"???",
6040 	"???",
6041 	"???",
6042 	"???"
6043 };
6044 
6045 /* REG FIFO error messages array */
6046 static const char * const s_reg_fifo_error_strs[] = {
6047 	"grc timeout",
6048 	"address doesn't belong to any block",
6049 	"reserved address in block or write to read-only address",
6050 	"privilege/protection mismatch",
6051 	"path isolation error"
6052 };
6053 
6054 /* IGU FIFO sources array */
6055 static const char * const s_igu_fifo_source_strs[] = {
6056 	"TSTORM",
6057 	"MSTORM",
6058 	"USTORM",
6059 	"XSTORM",
6060 	"YSTORM",
6061 	"PSTORM",
6062 	"PCIE",
6063 	"NIG_QM_PBF",
6064 	"CAU",
6065 	"ATTN",
6066 	"GRC",
6067 };
6068 
6069 /* IGU FIFO error messages */
6070 static const char * const s_igu_fifo_error_strs[] = {
6071 	"no error",
6072 	"length error",
6073 	"function disabled",
6074 	"VF sent command to attention address",
6075 	"host sent prod update command",
6076 	"read of during interrupt register while in MIMD mode",
6077 	"access to PXP BAR reserved address",
6078 	"producer update command to attention index",
6079 	"unknown error",
6080 	"SB index not valid",
6081 	"SB relative index and FID not found",
6082 	"FID not match",
6083 	"command with error flag asserted (PCI error or CAU discard)",
6084 	"VF sent cleanup and RF cleanup is disabled",
6085 	"cleanup command on type bigger than 4"
6086 };
6087 
6088 /* IGU FIFO address data */
6089 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6090 	{0x0, 0x101, "MSI-X Memory", NULL,
6091 	 IGU_ADDR_TYPE_MSIX_MEM},
6092 	{0x102, 0x1ff, "reserved", NULL,
6093 	 IGU_ADDR_TYPE_RESERVED},
6094 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6095 	 IGU_ADDR_TYPE_WRITE_PBA},
6096 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6097 	 IGU_ADDR_TYPE_WRITE_PBA},
6098 	{0x202, 0x202, "Write PBA[128]", "reserved",
6099 	 IGU_ADDR_TYPE_WRITE_PBA},
6100 	{0x203, 0x3ff, "reserved", NULL,
6101 	 IGU_ADDR_TYPE_RESERVED},
6102 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6103 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6104 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6105 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6106 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6107 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6108 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6109 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6110 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6111 	 IGU_ADDR_TYPE_READ_INT},
6112 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6113 	 IGU_ADDR_TYPE_READ_INT},
6114 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6115 	 IGU_ADDR_TYPE_READ_INT},
6116 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6117 	 IGU_ADDR_TYPE_READ_INT},
6118 	{0x5f7, 0x5ff, "reserved", NULL,
6119 	 IGU_ADDR_TYPE_RESERVED},
6120 	{0x600, 0x7ff, "Producer update", NULL,
6121 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6122 };
6123 
6124 /******************************** Variables **********************************/
6125 
6126 /* Temporary buffer, used for print size calculations */
6127 static char s_temp_buf[MAX_MSG_LEN];
6128 
6129 /**************************** Private Functions ******************************/
6130 
6131 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6132 {
6133 	return (a + b) % size;
6134 }
6135 
6136 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6137 {
6138 	return (size + a - b) % size;
6139 }
6140 
6141 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6142  * bytes) and returns them as a dword value. the specified buffer offset is
6143  * updated.
6144  */
6145 static u32 qed_read_from_cyclic_buf(void *buf,
6146 				    u32 *offset,
6147 				    u32 buf_size, u8 num_bytes_to_read)
6148 {
6149 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6150 	u32 val = 0;
6151 
6152 	val_ptr = (u8 *)&val;
6153 
6154 	/* Assume running on a LITTLE ENDIAN host with a BIG ENDIAN (network
6155 	 * order) buffer, as high-order bytes are placed at lower memory addresses.
6156 	 */
6157 	for (i = 0; i < num_bytes_to_read; i++) {
6158 		val_ptr[i] = bytes_buf[*offset];
6159 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6160 	}
6161 
6162 	return val;
6163 }
6164 
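/* Worked example for the helpers above (illustrative values): with
 * buf_size = 8, *offset = 6 and num_bytes_to_read = 4, the bytes at offsets
 * 6, 7, 0 and 1 are consumed in that order and *offset ends up at 2, since
 * qed_cyclic_add(7, 1, 8) wraps around to 0.
 */
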
6165 /* Reads and returns the next byte from the specified buffer.
6166  * The specified buffer offset is updated.
6167  */
6168 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6169 {
6170 	return ((u8 *)buf)[(*offset)++];
6171 }
6172 
6173 /* Reads and returns the next dword from the specified buffer.
6174  * The specified buffer offset is updated.
6175  */
6176 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6177 {
6178 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6179 
6180 	*offset += 4;
6181 
6182 	return dword_val;
6183 }
6184 
6185 /* Reads the next string from the specified buffer, and copies it to the
6186  * specified pointer. The specified buffer offset is updated.
6187  */
6188 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6189 {
6190 	const char *source_str = &((const char *)buf)[*offset];
6191 
6192 	strncpy(dest, source_str, size);
6193 	dest[size - 1] = '\0';
6194 	*offset += size;
6195 }
6196 
6197 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6198  * If the specified buffer is NULL, a temporary buffer pointer is returned.
6199  */
6200 static char *qed_get_buf_ptr(void *buf, u32 offset)
6201 {
6202 	return buf ? (char *)buf + offset : s_temp_buf;
6203 }
6204 
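/* Note: when the parsing functions below are called with a NULL results
 * buffer, qed_get_buf_ptr() redirects all sprintf() calls to s_temp_buf, so
 * only the required results buffer size is computed.
 */
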
6205 /* Reads a param from the specified buffer. Returns the number of dwords read.
6206  * If the returned param_str_val is NULL, the param is numeric and its value
6207  * is returned in param_num_val.
6208  * Otherwise, the param is a string and its pointer is returned in param_str_val.
6209  */
6210 static u32 qed_read_param(u32 *dump_buf,
6211 			  const char **param_name,
6212 			  const char **param_str_val, u32 *param_num_val)
6213 {
6214 	char *char_buf = (char *)dump_buf;
6215 	size_t offset = 0;
6216 
6217 	/* Extract param name */
6218 	*param_name = char_buf;
6219 	offset += strlen(*param_name) + 1;
6220 
6221 	/* Check param type */
6222 	if (*(char_buf + offset++)) {
6223 		/* String param */
6224 		*param_str_val = char_buf + offset;
6225 		*param_num_val = 0;
6226 		offset += strlen(*param_str_val) + 1;
6227 		if (offset & 0x3)
6228 			offset += (4 - (offset & 0x3));
6229 	} else {
6230 		/* Numeric param */
6231 		*param_str_val = NULL;
6232 		if (offset & 0x3)
6233 			offset += (4 - (offset & 0x3));
6234 		*param_num_val = *(u32 *)(char_buf + offset);
6235 		offset += 4;
6236 	}
6237 
6238 	return (u32)offset / 4;
6239 }
6240 
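/* Example of the encoding parsed above (illustrative): a numeric param
 * "size" with value 0x20 occupies three dwords -
 *
 *	bytes 0..4  : 's' 'i' 'z' 'e' '\0'	(param name)
 *	byte  5     : 0				(0 = numeric param)
 *	bytes 6..7  : padding to a dword boundary
 *	bytes 8..11 : u32 param value 0x20
 *
 * so qed_read_param() returns 3.
 */
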
6241 /* Reads a section header from the specified buffer.
6242  * Returns the number of dwords read.
6243  */
6244 static u32 qed_read_section_hdr(u32 *dump_buf,
6245 				const char **section_name,
6246 				u32 *num_section_params)
6247 {
6248 	const char *param_str_val;
6249 
6250 	return qed_read_param(dump_buf,
6251 			      section_name, &param_str_val, num_section_params);
6252 }
6253 
6254 /* Reads section params from the specified buffer and prints them to the results
6255  * buffer. Returns the number of dwords read.
6256  */
6257 static u32 qed_print_section_params(u32 *dump_buf,
6258 				    u32 num_section_params,
6259 				    char *results_buf, u32 *num_chars_printed)
6260 {
6261 	u32 i, dump_offset = 0, results_offset = 0;
6262 
6263 	for (i = 0; i < num_section_params; i++) {
6264 		const char *param_name, *param_str_val;
6265 		u32 param_num_val = 0;
6266 
6267 		dump_offset += qed_read_param(dump_buf + dump_offset,
6268 					      &param_name,
6269 					      &param_str_val, &param_num_val);
6270 
6271 		if (param_str_val)
6272 			results_offset +=
6273 				sprintf(qed_get_buf_ptr(results_buf,
6274 							results_offset),
6275 					"%s: %s\n", param_name, param_str_val);
6276 		else if (strcmp(param_name, "fw-timestamp"))
6277 			results_offset +=
6278 				sprintf(qed_get_buf_ptr(results_buf,
6279 							results_offset),
6280 					"%s: %d\n", param_name, param_num_val);
6281 	}
6282 
6283 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6284 				  "\n");
6285 
6286 	*num_chars_printed = results_offset;
6287 
6288 	return dump_offset;
6289 }
6290 
6291 static struct dbg_tools_user_data *
6292 qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
6293 {
6294 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6295 }
6296 
6297 /* Parses the idle check rules and returns the number of characters printed.
6298  * In case of parsing error, returns 0.
6299  */
6300 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6301 					 u32 *dump_buf_end,
6302 					 u32 num_rules,
6303 					 bool print_fw_idle_chk,
6304 					 char *results_buf,
6305 					 u32 *num_errors, u32 *num_warnings)
6306 {
6307 	/* Offset in results_buf in bytes */
6308 	u32 results_offset = 0;
6309 
6310 	u32 rule_idx;
6311 	u16 i, j;
6312 
6313 	*num_errors = 0;
6314 	*num_warnings = 0;
6315 
6316 	/* Go over dumped results */
6317 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6318 	     rule_idx++) {
6319 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6320 		struct dbg_idle_chk_result_hdr *hdr;
6321 		const char *parsing_str, *lsi_msg;
6322 		u32 parsing_str_offset;
6323 		bool has_fw_msg;
6324 		u8 curr_reg_id;
6325 
6326 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6327 		rule_parsing_data =
6328 			(const struct dbg_idle_chk_rule_parsing_data *)
6329 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6330 			ptr[hdr->rule_id];
6331 		parsing_str_offset =
6332 			GET_FIELD(rule_parsing_data->data,
6333 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6334 		has_fw_msg =
6335 			GET_FIELD(rule_parsing_data->data,
6336 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6337 		parsing_str =
6338 			&((const char *)
6339 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6340 			[parsing_str_offset];
6341 		lsi_msg = parsing_str;
6342 		curr_reg_id = 0;
6343 
6344 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6345 			return 0;
6346 
6347 		/* Skip rule header */
6348 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6349 
6350 		/* Update errors/warnings count */
6351 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6352 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6353 			(*num_errors)++;
6354 		else
6355 			(*num_warnings)++;
6356 
6357 		/* Print rule severity */
6358 		results_offset +=
6359 		    sprintf(qed_get_buf_ptr(results_buf,
6360 					    results_offset), "%s: ",
6361 			    s_idle_chk_severity_str[hdr->severity]);
6362 
6363 		/* Print rule message */
6364 		if (has_fw_msg)
6365 			parsing_str += strlen(parsing_str) + 1;
6366 		results_offset +=
6367 		    sprintf(qed_get_buf_ptr(results_buf,
6368 					    results_offset), "%s.",
6369 			    has_fw_msg &&
6370 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6371 		parsing_str += strlen(parsing_str) + 1;
6372 
6373 		/* Print register values */
6374 		results_offset +=
6375 		    sprintf(qed_get_buf_ptr(results_buf,
6376 					    results_offset), " Registers:");
6377 		for (i = 0;
6378 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6379 		     i++) {
6380 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6381 			bool is_mem;
6382 			u8 reg_id;
6383 
6384 			reg_hdr =
6385 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6386 			is_mem = GET_FIELD(reg_hdr->data,
6387 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6388 			reg_id = GET_FIELD(reg_hdr->data,
6389 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6390 
6391 			/* Skip reg header */
6392 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6393 
6394 			/* Skip register names until the required reg_id is
6395 			 * reached.
6396 			 */
6397 			for (; reg_id > curr_reg_id;
6398 			     curr_reg_id++,
6399 			     parsing_str += strlen(parsing_str) + 1);
6400 
6401 			results_offset +=
6402 			    sprintf(qed_get_buf_ptr(results_buf,
6403 						    results_offset), " %s",
6404 				    parsing_str);
6405 			if (i < hdr->num_dumped_cond_regs && is_mem)
6406 				results_offset +=
6407 				    sprintf(qed_get_buf_ptr(results_buf,
6408 							    results_offset),
6409 					    "[%d]", hdr->mem_entry_id +
6410 					    reg_hdr->start_entry);
6411 			results_offset +=
6412 			    sprintf(qed_get_buf_ptr(results_buf,
6413 						    results_offset), "=");
6414 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6415 				results_offset +=
6416 				    sprintf(qed_get_buf_ptr(results_buf,
6417 							    results_offset),
6418 					    "0x%x", *dump_buf);
6419 				if (j < reg_hdr->size - 1)
6420 					results_offset +=
6421 					    sprintf(qed_get_buf_ptr
6422 						    (results_buf,
6423 						     results_offset), ",");
6424 			}
6425 		}
6426 
6427 		results_offset +=
6428 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6429 	}
6430 
6431 	/* Check if end of dump buffer was exceeded */
6432 	if (dump_buf > dump_buf_end)
6433 		return 0;
6434 
6435 	return results_offset;
6436 }
6437 
6438 /* Parses an idle check dump buffer.
6439  * If results_buf is not NULL, the idle check results are printed to it.
6440  * In any case, the required results buffer size is assigned to
6441  * parsed_results_bytes.
6442  * The parsing status is returned.
6443  */
6444 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6445 					       u32 num_dumped_dwords,
6446 					       char *results_buf,
6447 					       u32 *parsed_results_bytes,
6448 					       u32 *num_errors,
6449 					       u32 *num_warnings)
6450 {
6451 	const char *section_name, *param_name, *param_str_val;
6452 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6453 	u32 num_section_params = 0, num_rules;
6454 
6455 	/* Offset in results_buf in bytes */
6456 	u32 results_offset = 0;
6457 
6458 	*parsed_results_bytes = 0;
6459 	*num_errors = 0;
6460 	*num_warnings = 0;
6461 
6462 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6463 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6464 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6465 
6466 	/* Read global_params section */
6467 	dump_buf += qed_read_section_hdr(dump_buf,
6468 					 &section_name, &num_section_params);
6469 	if (strcmp(section_name, "global_params"))
6470 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6471 
6472 	/* Print global params */
6473 	dump_buf += qed_print_section_params(dump_buf,
6474 					     num_section_params,
6475 					     results_buf, &results_offset);
6476 
6477 	/* Read idle_chk section */
6478 	dump_buf += qed_read_section_hdr(dump_buf,
6479 					 &section_name, &num_section_params);
6480 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6481 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6482 	dump_buf += qed_read_param(dump_buf,
6483 				   &param_name, &param_str_val, &num_rules);
6484 	if (strcmp(param_name, "num_rules"))
6485 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6486 
6487 	if (num_rules) {
6488 		u32 rules_print_size;
6489 
6490 		/* Print FW output */
6491 		results_offset +=
6492 		    sprintf(qed_get_buf_ptr(results_buf,
6493 					    results_offset),
6494 			    "FW_IDLE_CHECK:\n");
6495 		rules_print_size =
6496 			qed_parse_idle_chk_dump_rules(dump_buf,
6497 						      dump_buf_end,
6498 						      num_rules,
6499 						      true,
6500 						      results_buf ?
6501 						      results_buf +
6502 						      results_offset :
6503 						      NULL,
6504 						      num_errors,
6505 						      num_warnings);
6506 		results_offset += rules_print_size;
6507 		if (!rules_print_size)
6508 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6509 
6510 		/* Print LSI output */
6511 		results_offset +=
6512 		    sprintf(qed_get_buf_ptr(results_buf,
6513 					    results_offset),
6514 			    "\nLSI_IDLE_CHECK:\n");
6515 		rules_print_size =
6516 			qed_parse_idle_chk_dump_rules(dump_buf,
6517 						      dump_buf_end,
6518 						      num_rules,
6519 						      false,
6520 						      results_buf ?
6521 						      results_buf +
6522 						      results_offset :
6523 						      NULL,
6524 						      num_errors,
6525 						      num_warnings);
6526 		results_offset += rules_print_size;
6527 		if (!rules_print_size)
6528 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6529 	}
6530 
6531 	/* Print errors/warnings count */
6532 	if (*num_errors)
6533 		results_offset +=
6534 		    sprintf(qed_get_buf_ptr(results_buf,
6535 					    results_offset),
6536 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6537 			    *num_errors, *num_warnings);
6538 	else if (*num_warnings)
6539 		results_offset +=
6540 		    sprintf(qed_get_buf_ptr(results_buf,
6541 					    results_offset),
6542 			    "\nIdle Check completed successfully (with %d warnings)\n",
6543 			    *num_warnings);
6544 	else
6545 		results_offset +=
6546 		    sprintf(qed_get_buf_ptr(results_buf,
6547 					    results_offset),
6548 			    "\nIdle Check completed successfully\n");
6549 
6550 	/* Add 1 for string NULL termination */
6551 	*parsed_results_bytes = results_offset + 1;
6552 
6553 	return DBG_STATUS_OK;
6554 }
6555 
6556 /* Allocates and fills MCP Trace meta data based on the specified meta data
6557  * dump buffer.
6558  * Returns debug status code.
6559  */
6560 static enum dbg_status
6561 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6562 			      const u32 *meta_buf)
6563 {
6564 	struct dbg_tools_user_data *dev_user_data;
6565 	u32 offset = 0, signature, i;
6566 	struct mcp_trace_meta *meta;
6567 	u8 *meta_buf_bytes;
6568 
6569 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6570 	meta = &dev_user_data->mcp_trace_meta;
6571 	meta_buf_bytes = (u8 *)meta_buf;
6572 
6573 	/* Free the previous meta before loading a new one. */
6574 	if (meta->is_allocated)
6575 		qed_mcp_trace_free_meta_data(p_hwfn);
6576 
6577 	memset(meta, 0, sizeof(*meta));
6578 
6579 	/* Read first signature */
6580 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6581 	if (signature != NVM_MAGIC_VALUE)
6582 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6583 
6584 	/* Read no. of modules and allocate memory for their pointers */
6585 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6586 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6587 				GFP_KERNEL);
6588 	if (!meta->modules)
6589 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6590 
6591 	/* Allocate and read all module strings */
6592 	for (i = 0; i < meta->modules_num; i++) {
6593 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6594 
6595 		meta->modules[i] = kzalloc(module_len, GFP_KERNEL);
6596 		if (!meta->modules[i]) {
6597 			/* Update number of modules to be released */
6598 			meta->modules_num = i ? i - 1 : 0;
6599 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6600 		}
6601 
6602 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6603 				      meta->modules[i]);
6604 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6605 			meta->modules[i][MCP_TRACE_MAX_MODULE_LEN] = '\0';
6606 	}
6607 
6608 	/* Read second signature */
6609 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6610 	if (signature != NVM_MAGIC_VALUE)
6611 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6612 
6613 	/* Read number of formats and allocate memory for all formats */
6614 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6615 	meta->formats = kcalloc(meta->formats_num,
6616 				sizeof(struct mcp_trace_format),
6617 				GFP_KERNEL);
6618 	if (!meta->formats)
6619 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6620 
6621 	/* Allocate and read all strings */
6622 	for (i = 0; i < meta->formats_num; i++) {
6623 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6624 		u8 format_len;
6625 
6626 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6627 							   &offset);
6628 		format_len =
6629 		    (format_ptr->data &
6630 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6631 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6632 		if (!format_ptr->format_str) {
6633 			/* Update number of formats to be released */
6634 			meta->formats_num = i ? i - 1 : 0;
6635 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6636 		}
6637 
6638 		qed_read_str_from_buf(meta_buf_bytes,
6639 				      &offset,
6640 				      format_len, format_ptr->format_str);
6641 	}
6642 
6643 	meta->is_allocated = true;
6644 	return DBG_STATUS_OK;
6645 }
6646 
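/* Layout of the meta data image parsed above (sizes as read by the code):
 *
 *	u32 signature			(NVM_MAGIC_VALUE)
 *	u8  modules_num
 *	modules_num x { u8 len; char module_str[len]; }
 *	u32 signature			(NVM_MAGIC_VALUE)
 *	u32 formats_num
 *	formats_num x { u32 data; char format_str[format_len(data)]; }
 *
 * where format_len() stands for the MCP_TRACE_FORMAT_LEN field of "data".
 */
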
6647 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6648  * are printed to it. The parsing status is returned.
6649  * Arguments:
6650  * trace_buf - MCP trace cyclic buffer
6651  * trace_buf_size - MCP trace cyclic buffer size in bytes
6652  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6653  *               buffer.
6654  * data_size - size in bytes of data to parse.
6655  * parsed_buf - destination buffer for parsed data.
6656  * parsed_results_bytes - size of parsed data in bytes.
6657  */
6658 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6659 					       u8 *trace_buf,
6660 					       u32 trace_buf_size,
6661 					       u32 data_offset,
6662 					       u32 data_size,
6663 					       char *parsed_buf,
6664 					       u32 *parsed_results_bytes)
6665 {
6666 	struct dbg_tools_user_data *dev_user_data;
6667 	struct mcp_trace_meta *meta;
6668 	u32 param_mask, param_shift;
6669 	enum dbg_status status;
6670 
6671 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6672 	meta = &dev_user_data->mcp_trace_meta;
6673 	*parsed_results_bytes = 0;
6674 
6675 	if (!meta->is_allocated)
6676 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6677 
6678 	status = DBG_STATUS_OK;
6679 
6680 	while (data_size) {
6681 		struct mcp_trace_format *format_ptr;
6682 		u8 format_level, format_module;
6683 		u32 params[3] = { 0, 0, 0 };
6684 		u32 header, format_idx, i;
6685 
6686 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6687 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6688 
6689 		header = qed_read_from_cyclic_buf(trace_buf,
6690 						  &data_offset,
6691 						  trace_buf_size,
6692 						  MFW_TRACE_ENTRY_SIZE);
6693 		data_size -= MFW_TRACE_ENTRY_SIZE;
6694 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6695 
6696 		/* Skip message if its index doesn't exist in the meta data */
6697 		if (format_idx >= meta->formats_num) {
6698 			u8 format_size =
6699 				(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6700 				     MFW_TRACE_PRM_SIZE_SHIFT);
6701 
6702 			if (data_size < format_size)
6703 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6704 
6705 			data_offset = qed_cyclic_add(data_offset,
6706 						     format_size,
6707 						     trace_buf_size);
6708 			data_size -= format_size;
6709 			continue;
6710 		}
6711 
6712 		format_ptr = &meta->formats[format_idx];
6713 
6714 		for (i = 0,
6715 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6716 		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6717 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6718 		     i++,
6719 		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6720 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6721 			/* Extract param size (0..3) */
6722 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6723 					     param_shift);
6724 
6725 			/* If the param size is zero, there are no other
6726 			 * parameters.
6727 			 */
6728 			if (!param_size)
6729 				break;
6730 
6731 			/* Size is encoded using 2 bits, where 3 is used to
6732 			 * encode 4.
6733 			 */
6734 			if (param_size == 3)
6735 				param_size = 4;
6736 
6737 			if (data_size < param_size)
6738 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6739 
6740 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6741 							     &data_offset,
6742 							     trace_buf_size,
6743 							     param_size);
6744 			data_size -= param_size;
6745 		}
6746 
6747 		format_level = (u8)((format_ptr->data &
6748 				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
6749 				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
6750 		format_module = (u8)((format_ptr->data &
6751 				      MCP_TRACE_FORMAT_MODULE_MASK) >>
6752 				     MCP_TRACE_FORMAT_MODULE_SHIFT);
6753 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6754 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6755 
6756 		/* Print current message to results buffer */
6757 		*parsed_results_bytes +=
6758 			sprintf(qed_get_buf_ptr(parsed_buf,
6759 						*parsed_results_bytes),
6760 				"%s %-8s: ",
6761 				s_mcp_trace_level_str[format_level],
6762 				meta->modules[format_module]);
6763 		*parsed_results_bytes +=
6764 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6765 			    format_ptr->format_str,
6766 			    params[0], params[1], params[2]);
6767 	}
6768 
6769 	/* Add string NULL terminator */
6770 	(*parsed_results_bytes)++;
6771 
6772 	return status;
6773 }
6774 
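/* Example of the per-entry decoding above (illustrative): if a format's
 * P1/P2/P3 size fields are 1, 2 and 3, the entry carries parameters of 1, 2
 * and 4 bytes respectively (a size field of 3 encodes 4 bytes), so 7
 * parameter bytes follow the entry header in the cyclic buffer.
 */
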
6775 /* Parses an MCP Trace dump buffer.
6776  * If results_buf is not NULL, the MCP Trace results are printed to it.
6777  * In any case, the required results buffer size is assigned to
6778  * parsed_results_bytes.
6779  * The parsing status is returned.
6780  */
6781 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6782 						u32 *dump_buf,
6783 						char *results_buf,
6784 						u32 *parsed_results_bytes,
6785 						bool free_meta_data)
6786 {
6787 	const char *section_name, *param_name, *param_str_val;
6788 	u32 data_size, trace_data_dwords, trace_meta_dwords;
6789 	u32 offset, results_offset, results_buf_bytes;
6790 	u32 param_num_val, num_section_params;
6791 	struct mcp_trace *trace;
6792 	enum dbg_status status;
6793 	const u32 *meta_buf;
6794 	u8 *trace_buf;
6795 
6796 	*parsed_results_bytes = 0;
6797 
6798 	/* Read global_params section */
6799 	dump_buf += qed_read_section_hdr(dump_buf,
6800 					 &section_name, &num_section_params);
6801 	if (strcmp(section_name, "global_params"))
6802 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6803 
6804 	/* Print global params */
6805 	dump_buf += qed_print_section_params(dump_buf,
6806 					     num_section_params,
6807 					     results_buf, &results_offset);
6808 
6809 	/* Read trace_data section */
6810 	dump_buf += qed_read_section_hdr(dump_buf,
6811 					 &section_name, &num_section_params);
6812 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6813 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6814 	dump_buf += qed_read_param(dump_buf,
6815 				   &param_name, &param_str_val, &param_num_val);
6816 	if (strcmp(param_name, "size"))
6817 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6818 	trace_data_dwords = param_num_val;
6819 
6820 	/* Prepare trace info */
6821 	trace = (struct mcp_trace *)dump_buf;
6822 	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6823 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6824 
6825 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6826 	offset = trace->trace_oldest;
6827 	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6828 	dump_buf += trace_data_dwords;
6829 
6830 	/* Read meta_data section */
6831 	dump_buf += qed_read_section_hdr(dump_buf,
6832 					 &section_name, &num_section_params);
6833 	if (strcmp(section_name, "mcp_trace_meta"))
6834 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6835 	dump_buf += qed_read_param(dump_buf,
6836 				   &param_name, &param_str_val, &param_num_val);
6837 	if (strcmp(param_name, "size"))
6838 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6839 	trace_meta_dwords = param_num_val;
6840 
6841 	/* Choose meta data buffer */
6842 	if (!trace_meta_dwords) {
6843 		/* Dump doesn't include meta data */
6844 		struct dbg_tools_user_data *dev_user_data =
6845 			qed_dbg_get_user_data(p_hwfn);
6846 
6847 		if (!dev_user_data->mcp_trace_user_meta_buf)
6848 			return DBG_STATUS_MCP_TRACE_NO_META;
6849 
6850 		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6851 	} else {
6852 		/* Dump includes meta data */
6853 		meta_buf = dump_buf;
6854 	}
6855 
6856 	/* Allocate meta data memory */
6857 	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6858 	if (status != DBG_STATUS_OK)
6859 		return status;
6860 
6861 	status = qed_parse_mcp_trace_buf(p_hwfn,
6862 					 trace_buf,
6863 					 trace->size,
6864 					 offset,
6865 					 data_size,
6866 					 results_buf ?
6867 					 results_buf + results_offset :
6868 					 NULL,
6869 					 &results_buf_bytes);
6870 	if (status != DBG_STATUS_OK)
6871 		return status;
6872 
6873 	if (free_meta_data)
6874 		qed_mcp_trace_free_meta_data(p_hwfn);
6875 
6876 	*parsed_results_bytes = results_offset + results_buf_bytes;
6877 
6878 	return DBG_STATUS_OK;
6879 }
6880 
6881 /* Parses a Reg FIFO dump buffer.
6882  * If results_buf is not NULL, the Reg FIFO results are printed to it.
6883  * In any case, the required results buffer size is assigned to
6884  * parsed_results_bytes.
6885  * The parsing status is returned.
6886  */
6887 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6888 					       char *results_buf,
6889 					       u32 *parsed_results_bytes)
6890 {
6891 	const char *section_name, *param_name, *param_str_val;
6892 	u32 param_num_val, num_section_params, num_elements;
6893 	struct reg_fifo_element *elements;
6894 	u8 i, j, err_val, vf_val;
6895 	u32 results_offset = 0;
6896 	char vf_str[4];
6897 
6898 	/* Read global_params section */
6899 	dump_buf += qed_read_section_hdr(dump_buf,
6900 					 &section_name, &num_section_params);
6901 	if (strcmp(section_name, "global_params"))
6902 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6903 
6904 	/* Print global params */
6905 	dump_buf += qed_print_section_params(dump_buf,
6906 					     num_section_params,
6907 					     results_buf, &results_offset);
6908 
6909 	/* Read reg_fifo_data section */
6910 	dump_buf += qed_read_section_hdr(dump_buf,
6911 					 &section_name, &num_section_params);
6912 	if (strcmp(section_name, "reg_fifo_data"))
6913 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6914 	dump_buf += qed_read_param(dump_buf,
6915 				   &param_name, &param_str_val, &param_num_val);
6916 	if (strcmp(param_name, "size"))
6917 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6918 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6919 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6920 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6921 	elements = (struct reg_fifo_element *)dump_buf;
6922 
6923 	/* Decode elements */
6924 	for (i = 0; i < num_elements; i++) {
6925 		bool err_printed = false;
6926 
6927 		/* Discover if element belongs to a VF or a PF */
6928 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6929 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6930 			sprintf(vf_str, "%s", "N/A");
6931 		else
6932 			sprintf(vf_str, "%d", vf_val);
6933 
6934 		/* Add parsed element to parsed buffer */
6935 		results_offset +=
6936 		    sprintf(qed_get_buf_ptr(results_buf,
6937 					    results_offset),
6938 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6939 			    elements[i].data,
6940 			    (u32)GET_FIELD(elements[i].data,
6941 					   REG_FIFO_ELEMENT_ADDRESS) *
6942 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6943 			    s_access_strs[GET_FIELD(elements[i].data,
6944 						    REG_FIFO_ELEMENT_ACCESS)],
6945 			    (u32)GET_FIELD(elements[i].data,
6946 					   REG_FIFO_ELEMENT_PF),
6947 			    vf_str,
6948 			    (u32)GET_FIELD(elements[i].data,
6949 					   REG_FIFO_ELEMENT_PORT),
6950 			    s_privilege_strs[GET_FIELD(elements[i].data,
6951 						REG_FIFO_ELEMENT_PRIVILEGE)],
6952 			    s_protection_strs[GET_FIELD(elements[i].data,
6953 						REG_FIFO_ELEMENT_PROTECTION)],
6954 			    s_master_strs[GET_FIELD(elements[i].data,
6955 						REG_FIFO_ELEMENT_MASTER)]);
6956 
6957 		/* Print errors */
6958 		for (j = 0,
6959 		     err_val = GET_FIELD(elements[i].data,
6960 					 REG_FIFO_ELEMENT_ERROR);
6961 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6962 		     j++, err_val >>= 1) {
6963 			if (err_val & 0x1) {
6964 				if (err_printed)
6965 					results_offset +=
6966 					    sprintf(qed_get_buf_ptr
6967 						    (results_buf,
6968 						     results_offset), ", ");
6969 				results_offset +=
6970 				    sprintf(qed_get_buf_ptr
6971 					    (results_buf, results_offset), "%s",
6972 					    s_reg_fifo_error_strs[j]);
6973 				err_printed = true;
6974 			}
6975 		}
6976 
6977 		results_offset +=
6978 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6979 	}
6980 
6981 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6982 						  results_offset),
6983 				  "fifo contained %d elements", num_elements);
6984 
6985 	/* Add 1 for string NULL termination */
6986 	*parsed_results_bytes = results_offset + 1;
6987 
6988 	return DBG_STATUS_OK;
6989 }
6990 
6991 static enum dbg_status
6992 qed_parse_igu_fifo_element(struct igu_fifo_element *element,
6993 			   char *results_buf,
6994 			   u32 *results_offset)
6995 {
6996 	const struct igu_fifo_addr_data *found_addr = NULL;
6997 	u8 source, err_type, i, is_cleanup;
6998 	char parsed_addr_data[32];
6999 	char parsed_wr_data[256];
7000 	u32 wr_data, prod_cons;
7001 	bool is_wr_cmd, is_pf;
7002 	u16 cmd_addr;
7003 	u64 dword12;
7004 
7005 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7006 	 * FIFO element.
7007 	 */
7008 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
7009 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7010 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7011 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7012 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7013 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7014 
7015 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7016 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7017 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7018 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7019 
7020 	/* Find address data */
7021 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7022 		const struct igu_fifo_addr_data *curr_addr =
7023 			&s_igu_fifo_addr_data[i];
7024 
7025 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7026 		    curr_addr->end_addr)
7027 			found_addr = curr_addr;
7028 	}
7029 
7030 	if (!found_addr)
7031 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7032 
7033 	/* Prepare parsed address data */
7034 	switch (found_addr->type) {
7035 	case IGU_ADDR_TYPE_MSIX_MEM:
7036 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7037 		break;
7038 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
7039 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7040 		sprintf(parsed_addr_data,
7041 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
7042 		break;
7043 	default:
7044 		parsed_addr_data[0] = '\0';
7045 	}
7046 
7047 	if (!is_wr_cmd) {
7048 		parsed_wr_data[0] = '\0';
7049 		goto out;
7050 	}
7051 
7052 	/* Prepare parsed write data */
7053 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7054 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7055 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7056 
7057 	if (source == IGU_SRC_ATTN) {
7058 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7059 	} else {
7060 		if (is_cleanup) {
7061 			u8 cleanup_val, cleanup_type;
7062 
7063 			cleanup_val =
7064 				GET_FIELD(wr_data,
7065 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7066 			cleanup_type =
7067 			    GET_FIELD(wr_data,
7068 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7069 
7070 			sprintf(parsed_wr_data,
7071 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7072 				cleanup_val ? "set" : "clear",
7073 				cleanup_type);
7074 		} else {
7075 			u8 update_flag, en_dis_int_for_sb, segment;
7076 			u8 timer_mask;
7077 
7078 			update_flag = GET_FIELD(wr_data,
7079 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7080 			en_dis_int_for_sb =
7081 				GET_FIELD(wr_data,
7082 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7083 			segment = GET_FIELD(wr_data,
7084 					    IGU_FIFO_WR_DATA_SEGMENT);
7085 			timer_mask = GET_FIELD(wr_data,
7086 					       IGU_FIFO_WR_DATA_TIMER_MASK);
7087 
7088 			sprintf(parsed_wr_data,
7089 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7090 				prod_cons,
7091 				update_flag ? "update" : "nop",
7092 				en_dis_int_for_sb ?
7093 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7094 				"enable",
7095 				segment ? "attn" : "regular",
7096 				timer_mask);
7097 		}
7098 	}
7099 out:
7100 	/* Add parsed element to parsed buffer */
7101 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7102 						   *results_offset),
7103 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7104 				   element->dword2, element->dword1,
7105 				   element->dword0,
7106 				   is_pf ? "pf" : "vf",
7107 				   GET_FIELD(element->dword0,
7108 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7109 				   s_igu_fifo_source_strs[source],
7110 				   is_wr_cmd ? "wr" : "rd",
7111 				   cmd_addr,
7112 				   (!is_pf && found_addr->vf_desc)
7113 				   ? found_addr->vf_desc
7114 				   : found_addr->desc,
7115 				   parsed_addr_data,
7116 				   parsed_wr_data,
7117 				   s_igu_fifo_error_strs[err_type]);
7118 
7119 	return DBG_STATUS_OK;
7120 }
7121 
7122 /* Parses an IGU FIFO dump buffer.
7123  * If results_buf is not NULL, the IGU FIFO results are printed to it.
7124  * In any case, the required results buffer size is assigned to
7125  * parsed_results_bytes.
7126  * The parsing status is returned.
7127  */
7128 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7129 					       char *results_buf,
7130 					       u32 *parsed_results_bytes)
7131 {
7132 	const char *section_name, *param_name, *param_str_val;
7133 	u32 param_num_val, num_section_params, num_elements;
7134 	struct igu_fifo_element *elements;
7135 	enum dbg_status status;
7136 	u32 results_offset = 0;
7137 	u8 i;
7138 
7139 	/* Read global_params section */
7140 	dump_buf += qed_read_section_hdr(dump_buf,
7141 					 &section_name, &num_section_params);
7142 	if (strcmp(section_name, "global_params"))
7143 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7144 
7145 	/* Print global params */
7146 	dump_buf += qed_print_section_params(dump_buf,
7147 					     num_section_params,
7148 					     results_buf, &results_offset);
7149 
7150 	/* Read igu_fifo_data section */
7151 	dump_buf += qed_read_section_hdr(dump_buf,
7152 					 &section_name, &num_section_params);
7153 	if (strcmp(section_name, "igu_fifo_data"))
7154 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7155 	dump_buf += qed_read_param(dump_buf,
7156 				   &param_name, &param_str_val, &param_num_val);
7157 	if (strcmp(param_name, "size"))
7158 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7159 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7160 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7161 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7162 	elements = (struct igu_fifo_element *)dump_buf;
7163 
7164 	/* Decode elements */
7165 	for (i = 0; i < num_elements; i++) {
7166 		status = qed_parse_igu_fifo_element(&elements[i],
7167 						    results_buf,
7168 						    &results_offset);
7169 		if (status != DBG_STATUS_OK)
7170 			return status;
7171 	}
7172 
7173 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7174 						  results_offset),
7175 				  "fifo contained %d elements", num_elements);
7176 
7177 	/* Add 1 for string NULL termination */
7178 	*parsed_results_bytes = results_offset + 1;
7179 
7180 	return DBG_STATUS_OK;
7181 }
7182 
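/* Parses a Protection Override dump buffer.
 * If results_buf is not NULL, the Protection Override results are printed
 * to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */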
7183 static enum dbg_status
7184 qed_parse_protection_override_dump(u32 *dump_buf,
7185 				   char *results_buf,
7186 				   u32 *parsed_results_bytes)
7187 {
7188 	const char *section_name, *param_name, *param_str_val;
7189 	u32 param_num_val, num_section_params, num_elements;
7190 	struct protection_override_element *elements;
7191 	u32 results_offset = 0;
7192 	u8 i;
7193 
7194 	/* Read global_params section */
7195 	dump_buf += qed_read_section_hdr(dump_buf,
7196 					 &section_name, &num_section_params);
7197 	if (strcmp(section_name, "global_params"))
7198 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7199 
7200 	/* Print global params */
7201 	dump_buf += qed_print_section_params(dump_buf,
7202 					     num_section_params,
7203 					     results_buf, &results_offset);
7204 
7205 	/* Read protection_override_data section */
7206 	dump_buf += qed_read_section_hdr(dump_buf,
7207 					 &section_name, &num_section_params);
7208 	if (strcmp(section_name, "protection_override_data"))
7209 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7210 	dump_buf += qed_read_param(dump_buf,
7211 				   &param_name, &param_str_val, &param_num_val);
7212 	if (strcmp(param_name, "size"))
7213 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7214 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7215 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7216 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7217 	elements = (struct protection_override_element *)dump_buf;
7218 
7219 	/* Decode elements */
7220 	for (i = 0; i < num_elements; i++) {
7221 		u32 address = GET_FIELD(elements[i].data,
7222 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7223 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7224 
7225 		results_offset +=
7226 		    sprintf(qed_get_buf_ptr(results_buf,
7227 					    results_offset),
7228 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7229 			    i, address,
7230 			    (u32)GET_FIELD(elements[i].data,
7231 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7232 			    (u32)GET_FIELD(elements[i].data,
7233 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7234 			    (u32)GET_FIELD(elements[i].data,
7235 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7236 			    s_protection_strs[GET_FIELD(elements[i].data,
7237 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7238 			    s_protection_strs[GET_FIELD(elements[i].data,
7239 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7240 	}
7241 
7242 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7243 						  results_offset),
7244 				  "protection override contained %d elements",
7245 				  num_elements);
7246 
7247 	/* Add 1 for string NULL termination */
7248 	*parsed_results_bytes = results_offset + 1;
7249 
7250 	return DBG_STATUS_OK;
7251 }
7252 
7253 /* Parses a FW Asserts dump buffer.
7254  * If results_buf is not NULL, the FW Asserts results are printed to it.
7255  * In any case, the required results buffer size is assigned to
7256  * parsed_results_bytes.
7257  * The parsing status is returned.
7258  */
7259 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7260 						 char *results_buf,
7261 						 u32 *parsed_results_bytes)
7262 {
7263 	u32 num_section_params, param_num_val, i, results_offset = 0;
7264 	const char *param_name, *param_str_val, *section_name;
7265 	bool last_section_found = false;
7266 
7267 	*parsed_results_bytes = 0;
7268 
7269 	/* Read global_params section */
7270 	dump_buf += qed_read_section_hdr(dump_buf,
7271 					 &section_name, &num_section_params);
7272 	if (strcmp(section_name, "global_params"))
7273 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7274 
7275 	/* Print global params */
7276 	dump_buf += qed_print_section_params(dump_buf,
7277 					     num_section_params,
7278 					     results_buf, &results_offset);
7279 
7280 	while (!last_section_found) {
7281 		dump_buf += qed_read_section_hdr(dump_buf,
7282 						 &section_name,
7283 						 &num_section_params);
7284 		if (!strcmp(section_name, "fw_asserts")) {
7285 			/* Extract params */
7286 			const char *storm_letter = NULL;
7287 			u32 storm_dump_size = 0;
7288 
7289 			for (i = 0; i < num_section_params; i++) {
7290 				dump_buf += qed_read_param(dump_buf,
7291 							   &param_name,
7292 							   &param_str_val,
7293 							   &param_num_val);
7294 				if (!strcmp(param_name, "storm"))
7295 					storm_letter = param_str_val;
7296 				else if (!strcmp(param_name, "size"))
7297 					storm_dump_size = param_num_val;
7298 				else
7299 					return
7300 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7301 			}
7302 
7303 			if (!storm_letter || !storm_dump_size)
7304 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7305 
7306 			/* Print data */
7307 			results_offset +=
7308 			    sprintf(qed_get_buf_ptr(results_buf,
7309 						    results_offset),
7310 				    "\n%sSTORM_ASSERT: size=%d\n",
7311 				    storm_letter, storm_dump_size);
7312 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7313 				results_offset +=
7314 				    sprintf(qed_get_buf_ptr(results_buf,
7315 							    results_offset),
7316 					    "%08x\n", *dump_buf);
7317 		} else if (!strcmp(section_name, "last")) {
7318 			last_section_found = true;
7319 		} else {
7320 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7321 		}
7322 	}
7323 
7324 	/* Add 1 for string NULL termination */
7325 	*parsed_results_bytes = results_offset + 1;
7326 
7327 	return DBG_STATUS_OK;
7328 }
7329 
7330 /***************************** Public Functions *******************************/
7331 
7332 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7333 {
7334 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7335 	u8 buf_id;
7336 
7337 	/* Convert binary data to debug arrays */
7338 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7339 		s_user_dbg_arrays[buf_id].ptr =
7340 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7341 		s_user_dbg_arrays[buf_id].size_in_dwords =
7342 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7343 	}
7344 
7345 	return DBG_STATUS_OK;
7346 }
7347 
7348 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
7349 {
7350 	p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
7351 					GFP_KERNEL);
7352 	if (!p_hwfn->dbg_user_info)
7353 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7354 
7355 	return DBG_STATUS_OK;
7356 }
7357 
7358 const char *qed_dbg_get_status_str(enum dbg_status status)
7359 {
7360 	return (status < MAX_DBG_STATUS) ?
7361 	       s_status_str[status] : "Invalid debug status";
7362 }
7363 
7364 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7365 						  u32 *dump_buf,
7366 						  u32 num_dumped_dwords,
7367 						  u32 *results_buf_size)
7368 {
7369 	u32 num_errors, num_warnings;
7370 
7371 	return qed_parse_idle_chk_dump(dump_buf,
7372 				       num_dumped_dwords,
7373 				       NULL,
7374 				       results_buf_size,
7375 				       &num_errors, &num_warnings);
7376 }
7377 
7378 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7379 					   u32 *dump_buf,
7380 					   u32 num_dumped_dwords,
7381 					   char *results_buf,
7382 					   u32 *num_errors,
7383 					   u32 *num_warnings)
7384 {
7385 	u32 parsed_buf_size;
7386 
7387 	return qed_parse_idle_chk_dump(dump_buf,
7388 				       num_dumped_dwords,
7389 				       results_buf,
7390 				       &parsed_buf_size,
7391 				       num_errors, num_warnings);
7392 }
7393 
7394 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7395 				     const u32 *meta_buf)
7396 {
7397 	struct dbg_tools_user_data *dev_user_data =
7398 		qed_dbg_get_user_data(p_hwfn);
7399 
7400 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7401 }
7402 
7403 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7404 						   u32 *dump_buf,
7405 						   u32 num_dumped_dwords,
7406 						   u32 *results_buf_size)
7407 {
7408 	return qed_parse_mcp_trace_dump(p_hwfn,
7409 					dump_buf, NULL, results_buf_size, true);
7410 }
7411 
7412 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7413 					    u32 *dump_buf,
7414 					    u32 num_dumped_dwords,
7415 					    char *results_buf)
7416 {
7417 	u32 parsed_buf_size;
7418 
7419 	return qed_parse_mcp_trace_dump(p_hwfn,
7420 					dump_buf,
7421 					results_buf, &parsed_buf_size, true);
7422 }
7423 
7424 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7425 						 u32 *dump_buf,
7426 						 char *results_buf)
7427 {
7428 	u32 parsed_buf_size;
7429 
7430 	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7431 					&parsed_buf_size, false);
7432 }
7433 
7434 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7435 					 u8 *dump_buf,
7436 					 u32 num_dumped_bytes,
7437 					 char *results_buf)
7438 {
7439 	u32 parsed_results_bytes;
7440 
7441 	return qed_parse_mcp_trace_buf(p_hwfn,
7442 				       dump_buf,
7443 				       num_dumped_bytes,
7444 				       0,
7445 				       num_dumped_bytes,
7446 				       results_buf, &parsed_results_bytes);
7447 }
7448 
7449 /* Frees the specified MCP Trace meta data */
7450 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7451 {
7452 	struct dbg_tools_user_data *dev_user_data;
7453 	struct mcp_trace_meta *meta;
7454 	u32 i;
7455 
7456 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7457 	meta = &dev_user_data->mcp_trace_meta;
7458 	if (!meta->is_allocated)
7459 		return;
7460 
7461 	/* Release modules */
7462 	if (meta->modules) {
7463 		for (i = 0; i < meta->modules_num; i++)
7464 			kfree(meta->modules[i]);
7465 		kfree(meta->modules);
7466 	}
7467 
7468 	/* Release formats */
7469 	if (meta->formats) {
7470 		for (i = 0; i < meta->formats_num; i++)
7471 			kfree(meta->formats[i].format_str);
7472 		kfree(meta->formats);
7473 	}
7474 
7475 	meta->is_allocated = false;
7476 }
7477 
7478 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7479 						  u32 *dump_buf,
7480 						  u32 num_dumped_dwords,
7481 						  u32 *results_buf_size)
7482 {
7483 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7484 }
7485 
7486 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7487 					   u32 *dump_buf,
7488 					   u32 num_dumped_dwords,
7489 					   char *results_buf)
7490 {
7491 	u32 parsed_buf_size;
7492 
7493 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7494 }
7495 
7496 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7497 						  u32 *dump_buf,
7498 						  u32 num_dumped_dwords,
7499 						  u32 *results_buf_size)
7500 {
7501 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7502 }
7503 
7504 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7505 					   u32 *dump_buf,
7506 					   u32 num_dumped_dwords,
7507 					   char *results_buf)
7508 {
7509 	u32 parsed_buf_size;
7510 
7511 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7512 }
7513 
7514 enum dbg_status
7515 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7516 					     u32 *dump_buf,
7517 					     u32 num_dumped_dwords,
7518 					     u32 *results_buf_size)
7519 {
7520 	return qed_parse_protection_override_dump(dump_buf,
7521 						  NULL, results_buf_size);
7522 }
7523 
7524 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7525 						      u32 *dump_buf,
7526 						      u32 num_dumped_dwords,
7527 						      char *results_buf)
7528 {
7529 	u32 parsed_buf_size;
7530 
7531 	return qed_parse_protection_override_dump(dump_buf,
7532 						  results_buf,
7533 						  &parsed_buf_size);
7534 }
7535 
7536 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7537 						    u32 *dump_buf,
7538 						    u32 num_dumped_dwords,
7539 						    u32 *results_buf_size)
7540 {
7541 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7542 }
7543 
7544 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7545 					     u32 *dump_buf,
7546 					     u32 num_dumped_dwords,
7547 					     char *results_buf)
7548 {
7549 	u32 parsed_buf_size;
7550 
7551 	return qed_parse_fw_asserts_dump(dump_buf,
7552 					 results_buf, &parsed_buf_size);
7553 }
7554 
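/* Parses the attention results of a single block: for every register with a
 * non-zero attention status, each set (and non-skipped) status bit is
 * resolved to its attention name via the parsing-strings array and reported
 * through DP_NOTICE, together with its type (Interrupt/Parity), status
 * register address and masked state.
 */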
7555 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7556 				   struct dbg_attn_block_result *results)
7557 {
7558 	struct user_dbg_array *block_attn, *pstrings;
7559 	const u32 *block_attn_name_offsets;
7560 	enum dbg_attn_type attn_type;
7561 	const char *block_name;
7562 	u8 num_regs, i, j;
7563 
7564 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7565 	attn_type = (enum dbg_attn_type)
7566 		    GET_FIELD(results->data,
7567 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7568 	block_name = s_block_info_arr[results->block_id].name;
7569 
7570 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7571 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7572 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7573 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7574 
7575 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7576 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7577 
7578 	/* Go over registers with a non-zero attention status */
7579 	for (i = 0; i < num_regs; i++) {
7580 		struct dbg_attn_bit_mapping *bit_mapping;
7581 		struct dbg_attn_reg_result *reg_result;
7582 		u8 num_reg_attn, bit_idx = 0;
7583 
7584 		reg_result = &results->reg_results[i];
7585 		num_reg_attn = GET_FIELD(reg_result->data,
7586 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7587 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7588 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7589 				block_attn->ptr)[reg_result->block_attn_offset];
7590 
7591 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7592 
7593 		/* Go over attention status bits */
7594 		for (j = 0; j < num_reg_attn; j++) {
7595 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7596 						     DBG_ATTN_BIT_MAPPING_VAL);
7597 			const char *attn_name, *attn_type_str, *masked_str;
7598 			u32 attn_name_offset, sts_addr;
7599 
7600 			/* Check if bit mask should be advanced (due to unused
7601 			 * bits).
7602 			 */
7603 			if (GET_FIELD(bit_mapping[j].data,
7604 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7605 				bit_idx += (u8)attn_idx_val;
7606 				continue;
7607 			}
7608 
7609 			/* Check current bit index */
7610 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7611 				bit_idx++;
7612 				continue;
7613 			}
7614 
7615 			/* Find attention name */
7616 			attn_name_offset =
7617 				block_attn_name_offsets[attn_idx_val];
7618 			attn_name = &((const char *)
7619 				      pstrings->ptr)[attn_name_offset];
7620 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7621 					"Interrupt" : "Parity";
7622 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7623 				     " [masked]" : "";
7624 			sts_addr = GET_FIELD(reg_result->data,
7625 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7626 			DP_NOTICE(p_hwfn,
7627 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7628 				  block_name, attn_type_str, attn_name,
7629 				  sts_addr, bit_idx, masked_str);
7630 
7631 			bit_idx++;
7632 		}
7633 	}
7634 
7635 	return DBG_STATUS_OK;
7636 }
7637 
7638 /* Wrapper for unifying the idle_chk and mcp_trace APIs */
7639 static enum dbg_status
7640 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7641 				   u32 *dump_buf,
7642 				   u32 num_dumped_dwords,
7643 				   char *results_buf)
7644 {
7645 	u32 num_errors, num_warnings;
7646 
7647 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7648 					  results_buf, &num_errors,
7649 					  &num_warnings);
7650 }
7651 
7652 /* Feature meta data lookup table */
7653 static struct {
7654 	char *name;
7655 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7656 				    struct qed_ptt *p_ptt, u32 *size);
7657 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7658 					struct qed_ptt *p_ptt, u32 *dump_buf,
7659 					u32 buf_size, u32 *dumped_dwords);
7660 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7661 					 u32 *dump_buf, u32 num_dumped_dwords,
7662 					 char *results_buf);
7663 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7664 					    u32 *dump_buf,
7665 					    u32 num_dumped_dwords,
7666 					    u32 *results_buf_size);
7667 } qed_features_lookup[] = {
7668 	{
7669 	"grc", qed_dbg_grc_get_dump_buf_size,
7670 		    qed_dbg_grc_dump, NULL, NULL}, {
7671 	"idle_chk",
7672 		    qed_dbg_idle_chk_get_dump_buf_size,
7673 		    qed_dbg_idle_chk_dump,
7674 		    qed_print_idle_chk_results_wrapper,
7675 		    qed_get_idle_chk_results_buf_size}, {
7676 	"mcp_trace",
7677 		    qed_dbg_mcp_trace_get_dump_buf_size,
7678 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7679 		    qed_get_mcp_trace_results_buf_size}, {
7680 	"reg_fifo",
7681 		    qed_dbg_reg_fifo_get_dump_buf_size,
7682 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7683 		    qed_get_reg_fifo_results_buf_size}, {
7684 	"igu_fifo",
7685 		    qed_dbg_igu_fifo_get_dump_buf_size,
7686 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7687 		    qed_get_igu_fifo_results_buf_size}, {
7688 	"protection_override",
7689 		    qed_dbg_protection_override_get_dump_buf_size,
7690 		    qed_dbg_protection_override_dump,
7691 		    qed_print_protection_override_results,
7692 		    qed_get_protection_override_results_buf_size}, {
7693 	"fw_asserts",
7694 		    qed_dbg_fw_asserts_get_dump_buf_size,
7695 		    qed_dbg_fw_asserts_dump,
7696 		    qed_print_fw_asserts_results,
7697 		    qed_get_fw_asserts_results_buf_size},};
7698 
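/* Prints a formatted (newline-padded) results buffer to the kernel log in
 * 80-character chunks, using pr_notice for the first chunk and pr_cont for
 * the continuations.
 */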
7699 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7700 {
7701 	u32 i, precision = 80;
7702 
7703 	if (!p_text_buf)
7704 		return;
7705 
7706 	pr_notice("\n%.*s", precision, p_text_buf);
7707 	for (i = precision; i < text_size; i += precision)
7708 		pr_cont("%.*s", precision, p_text_buf + i);
7709 	pr_cont("\n");
7710 }
7711 
7712 #define QED_RESULTS_BUF_MIN_SIZE 16
7713 /* Generic function for decoding debug feature info */
7714 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7715 				      enum qed_dbg_features feature_idx)
7716 {
7717 	struct qed_dbg_feature *feature =
7718 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7719 	u32 text_size_bytes, null_char_pos, i;
7720 	enum dbg_status rc;
7721 	char *text_buf;
7722 
7723 	/* Check if feature supports formatting capability */
7724 	if (!qed_features_lookup[feature_idx].results_buf_size)
7725 		return DBG_STATUS_OK;
7726 
7727 	/* Obtain size of formatted output */
7728 	rc = qed_features_lookup[feature_idx].
7729 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7730 				 feature->dumped_dwords, &text_size_bytes);
7731 	if (rc != DBG_STATUS_OK)
7732 		return rc;
7733 
7734 	/* Make sure that the allocated size is a multiple of a dword (4 bytes) */
7735 	null_char_pos = text_size_bytes - 1;
7736 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7737 
7738 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7739 		DP_NOTICE(p_hwfn->cdev,
7740 			  "formatted size of feature was too small %d. Aborting\n",
7741 			  text_size_bytes);
7742 		return DBG_STATUS_INVALID_ARGS;
7743 	}
7744 
7745 	/* Allocate temp text buf */
7746 	text_buf = vzalloc(text_size_bytes);
7747 	if (!text_buf)
7748 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7749 
7750 	/* Decode feature opcodes to string on temp buf */
7751 	rc = qed_features_lookup[feature_idx].
7752 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7753 			      feature->dumped_dwords, text_buf);
7754 	if (rc != DBG_STATUS_OK) {
7755 		vfree(text_buf);
7756 		return rc;
7757 	}
7758 
7759 	/* Replace the original null character with a '\n' character.
7760 	 * The bytes that were added as a result of the dword alignment are also
7761 	 * padded with '\n' characters.
7762 	 */
7763 	for (i = null_char_pos; i < text_size_bytes; i++)
7764 		text_buf[i] = '\n';
7765 
7766 	/* Dump printable feature to log */
7767 	if (p_hwfn->cdev->dbg_params.print_data)
7768 		qed_dbg_print_feature(text_buf, text_size_bytes);
7769 
7770 	/* Free the old dump_buf and point the dump_buf to the newly allocated
7771 	 * and formatted text buffer.
7772 	 */
7773 	vfree(feature->dump_buf);
7774 	feature->dump_buf = text_buf;
7775 	feature->buf_size = text_size_bytes;
7776 	feature->dumped_dwords = text_size_bytes / 4;
7777 	return rc;
7778 }
7779 
7780 /* Generic function for performing the dump of a debug feature. */
7781 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7782 				    struct qed_ptt *p_ptt,
7783 				    enum qed_dbg_features feature_idx)
7784 {
7785 	struct qed_dbg_feature *feature =
7786 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7787 	u32 buf_size_dwords;
7788 	enum dbg_status rc;
7789 
7790 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7791 		  qed_features_lookup[feature_idx].name);
7792 
7793 	/* If dump_buf was already allocated, free it (this can happen if a dump
7794 	 * was called but the file was never read).
7795 	 * We can't reuse the buffer as is, since its size may have changed.
7796 	 */
7797 	if (feature->dump_buf) {
7798 		vfree(feature->dump_buf);
7799 		feature->dump_buf = NULL;
7800 	}
7801 
7802 	/* Get buffer size from hsi, allocate accordingly, and perform the
7803 	 * dump.
7804 	 */
7805 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7806 						       &buf_size_dwords);
7807 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7808 		return rc;
7809 	feature->buf_size = buf_size_dwords * sizeof(u32);
7810 	feature->dump_buf = vmalloc(feature->buf_size);
7811 	if (!feature->dump_buf)
7812 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7813 
7814 	rc = qed_features_lookup[feature_idx].
7815 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7816 			     feature->buf_size / sizeof(u32),
7817 			     &feature->dumped_dwords);
7818 
7819 	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7820 	 * In this case the buffer holds valid binary data, but we won't be able
7821 	 * to parse it (since parsing relies on data in NVRAM which is only
7822 	 * accessible when the MFW is responsive). Skip the formatting but return
7823 	 * success so that the binary data is provided.
7824 	 */
7825 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7826 		return DBG_STATUS_OK;
7827 
7828 	if (rc != DBG_STATUS_OK)
7829 		return rc;
7830 
7831 	/* Format output */
7832 	rc = format_feature(p_hwfn, feature_idx);
7833 	return rc;
7834 }
7835 
7836 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7837 {
7838 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7839 }
7840 
7841 int qed_dbg_grc_size(struct qed_dev *cdev)
7842 {
7843 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7844 }
7845 
7846 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7847 {
7848 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7849 			       num_dumped_bytes);
7850 }
7851 
7852 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7853 {
7854 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7855 }
7856 
7857 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7858 {
7859 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7860 			       num_dumped_bytes);
7861 }
7862 
7863 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7864 {
7865 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7866 }
7867 
7868 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7869 {
7870 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7871 			       num_dumped_bytes);
7872 }
7873 
7874 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7875 {
7876 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7877 }
7878 
7879 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7880 				    enum qed_nvm_images image_id, u32 *length)
7881 {
7882 	struct qed_nvm_image_att image_att;
7883 	int rc;
7884 
7885 	*length = 0;
7886 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7887 	if (rc)
7888 		return rc;
7889 
7890 	*length = image_att.length;
7891 
7892 	return rc;
7893 }
7894 
7895 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7896 			     u32 *num_dumped_bytes,
7897 			     enum qed_nvm_images image_id)
7898 {
7899 	struct qed_hwfn *p_hwfn =
7900 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7901 	u32 len_rounded, i;
7902 	__be32 val;
7903 	int rc;
7904 
7905 	*num_dumped_bytes = 0;
7906 	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7907 	if (rc)
7908 		return rc;
7909 
7910 	DP_NOTICE(p_hwfn->cdev,
7911 		  "Collecting a debug feature [\"nvram image %d\"]\n",
7912 		  image_id);
7913 
7914 	len_rounded = roundup(len_rounded, sizeof(u32));
7915 	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7916 	if (rc)
7917 		return rc;
7918 
7919 	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7920 	if (image_id != QED_NVM_IMAGE_NVM_META)
7921 		for (i = 0; i < len_rounded; i += 4) {
7922 			val = cpu_to_be32(*(u32 *)(buffer + i));
7923 			*(u32 *)(buffer + i) = val;
7924 		}
7925 
7926 	*num_dumped_bytes = len_rounded;
7927 
7928 	return rc;
7929 }
7930 
7931 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7932 				u32 *num_dumped_bytes)
7933 {
7934 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7935 			       num_dumped_bytes);
7936 }
7937 
7938 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7939 {
7940 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7941 }
7942 
7943 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7944 		       u32 *num_dumped_bytes)
7945 {
7946 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7947 			       num_dumped_bytes);
7948 }
7949 
7950 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7951 {
7952 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7953 }
7954 
7955 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7956 		      u32 *num_dumped_bytes)
7957 {
7958 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7959 			       num_dumped_bytes);
7960 }
7961 
7962 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7963 {
7964 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7965 }
7966 
7967 /* Defines the number of bytes allocated for recording the length of a
7968  * debugfs feature buffer.
7969  */
7970 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7971 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7972 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7973 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7974 enum debug_print_features {
7975 	OLD_MODE = 0,
7976 	IDLE_CHK = 1,
7977 	GRC_DUMP = 2,
7978 	MCP_TRACE = 3,
7979 	REG_FIFO = 4,
7980 	PROTECTION_OVERRIDE = 5,
7981 	IGU_FIFO = 6,
7982 	PHY = 7,
7983 	FW_ASSERTS = 8,
7984 	NVM_CFG1 = 9,
7985 	DEFAULT_CFG = 10,
7986 	NVM_META = 11,
7987 };
7988 
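/* Layout of the regdump header dword built by qed_calc_regdump_header()
 * (derived from the shift definitions above):
 *   bits  0-23: feature data size in bytes
 *   bits 24-29: feature index (enum debug_print_features)
 *   bit     30: omit_engine flag (set when the device has a single engine)
 *   bit     31: engine index
 * A consumer could, for example, recover the feature index with
 * (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f.
 */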
7989 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7990 				   int engine, u32 feature_size, u8 omit_engine)
7991 {
7992 	/* Insert the engine index, feature index and omit_engine flag into the
7993 	 * header and combine them with the feature size.
7994 	 */
7995 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7996 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7997 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7998 }
7999 
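/* Collects all debug features into the caller-supplied buffer as a sequence
 * of [regdump header dword][feature data] records. For each engine this
 * includes two idle_chk passes, reg_fifo, igu_fifo, protection_override,
 * fw_asserts and finally the GRC dump, followed by the engine-common
 * mcp_trace and the NVM cfg1, default cfg and meta images.
 */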
8000 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
8001 {
8002 	u8 cur_engine, omit_engine = 0, org_engine;
8003 	u32 offset = 0, feature_size;
8004 	int rc;
8005 
8006 	if (cdev->num_hwfns == 1)
8007 		omit_engine = 1;
8008 
8009 	org_engine = qed_get_debug_engine(cdev);
8010 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8011 		/* Collect idle_chk and GRC dumps for each HW function */
8012 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8013 			   "obtaining idle_chk and grcdump for current engine\n");
8014 		qed_set_debug_engine(cdev, cur_engine);
8015 
8016 		/* First idle_chk */
8017 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8018 				      REGDUMP_HEADER_SIZE, &feature_size);
8019 		if (!rc) {
8020 			*(u32 *)((u8 *)buffer + offset) =
8021 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8022 						    feature_size, omit_engine);
8023 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8024 		} else {
8025 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8026 		}
8027 
8028 		/* Second idle_chk */
8029 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8030 				      REGDUMP_HEADER_SIZE, &feature_size);
8031 		if (!rc) {
8032 			*(u32 *)((u8 *)buffer + offset) =
8033 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8034 						    feature_size, omit_engine);
8035 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8036 		} else {
8037 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8038 		}
8039 
8040 		/* reg_fifo dump */
8041 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8042 				      REGDUMP_HEADER_SIZE, &feature_size);
8043 		if (!rc) {
8044 			*(u32 *)((u8 *)buffer + offset) =
8045 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
8046 						    feature_size, omit_engine);
8047 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8048 		} else {
8049 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8050 		}
8051 
8052 		/* igu_fifo dump */
8053 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8054 				      REGDUMP_HEADER_SIZE, &feature_size);
8055 		if (!rc) {
8056 			*(u32 *)((u8 *)buffer + offset) =
8057 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
8058 						    feature_size, omit_engine);
8059 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8060 		} else {
8061 			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
8062 		}
8063 
8064 		/* protection_override dump */
8065 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8066 						 REGDUMP_HEADER_SIZE,
8067 						 &feature_size);
8068 		if (!rc) {
8069 			*(u32 *)((u8 *)buffer + offset) =
8070 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
8071 						    cur_engine,
8072 						    feature_size, omit_engine);
8073 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8074 		} else {
8075 			DP_ERR(cdev,
8076 			       "qed_dbg_protection_override failed. rc = %d\n",
8077 			       rc);
8078 		}
8079 
8080 		/* fw_asserts dump */
8081 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8082 					REGDUMP_HEADER_SIZE, &feature_size);
8083 		if (!rc) {
8084 			*(u32 *)((u8 *)buffer + offset) =
8085 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
8086 						    feature_size, omit_engine);
8087 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8088 		} else {
8089 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8090 			       rc);
8091 		}
8092 
8093 		/* GRC dump - must be last because when the MCP is stuck it will
8094 		 * clutter idle_chk, reg_fifo, ...
8095 		 */
8096 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8097 				 REGDUMP_HEADER_SIZE, &feature_size);
8098 		if (!rc) {
8099 			*(u32 *)((u8 *)buffer + offset) =
8100 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
8101 						    feature_size, omit_engine);
8102 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8103 		} else {
8104 			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
8105 		}
8106 	}
8107 
8108 	qed_set_debug_engine(cdev, org_engine);
8109 	/* mcp_trace */
8110 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8111 			       REGDUMP_HEADER_SIZE, &feature_size);
8112 	if (!rc) {
8113 		*(u32 *)((u8 *)buffer + offset) =
8114 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
8115 					    feature_size, omit_engine);
8116 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8117 	} else {
8118 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8119 	}
8120 
8121 	/* nvm cfg1 */
8122 	rc = qed_dbg_nvm_image(cdev,
8123 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8124 			       &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8125 	if (!rc) {
8126 		*(u32 *)((u8 *)buffer + offset) =
8127 		    qed_calc_regdump_header(NVM_CFG1, cur_engine,
8128 					    feature_size, omit_engine);
8129 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8130 	} else if (rc != -ENOENT) {
8131 		DP_ERR(cdev,
8132 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8133 		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8134 	}
8135 
8136 	/* nvm default */
8137 	rc = qed_dbg_nvm_image(cdev,
8138 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8139 			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8140 	if (!rc) {
8141 		*(u32 *)((u8 *)buffer + offset) =
8142 		    qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8143 					    feature_size, omit_engine);
8144 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8145 	} else if (rc != -ENOENT) {
8146 		DP_ERR(cdev,
8147 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8148 		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8149 		       rc);
8150 	}
8151 
8152 	/* nvm meta */
8153 	rc = qed_dbg_nvm_image(cdev,
8154 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8155 			       &feature_size, QED_NVM_IMAGE_NVM_META);
8156 	if (!rc) {
8157 		*(u32 *)((u8 *)buffer + offset) =
8158 		    qed_calc_regdump_header(NVM_META, cur_engine,
8159 					    feature_size, omit_engine);
8160 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8161 	} else if (rc != -ENOENT) {
8162 		DP_ERR(cdev,
8163 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8164 		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8165 	}
8166 
8167 	return 0;
8168 }
8169 
8170 int qed_dbg_all_data_size(struct qed_dev *cdev)
8171 {
8172 	struct qed_hwfn *p_hwfn =
8173 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8174 	u32 regs_len = 0, image_len = 0;
8175 	u8 cur_engine, org_engine;
8176 
8177 	org_engine = qed_get_debug_engine(cdev);
8178 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8179 		/* Engine specific */
8180 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8181 			   "calculating idle_chk and grcdump register length for current engine\n");
8182 		qed_set_debug_engine(cdev, cur_engine);
8183 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8184 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8185 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8186 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8187 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8188 			    REGDUMP_HEADER_SIZE +
8189 			    qed_dbg_protection_override_size(cdev) +
8190 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8191 	}
8192 
8193 	qed_set_debug_engine(cdev, org_engine);
8194 
8195 	/* Engine common */
8196 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8197 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8198 	if (image_len)
8199 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8200 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8201 	if (image_len)
8202 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8203 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8204 	if (image_len)
8205 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8206 
8207 	return regs_len;
8208 }
8209 
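/* Dumps and formats a single debug feature into the caller-supplied buffer:
 * acquires a PTT window, performs the dump (which also formats the results
 * when the feature supports formatting) and copies the resulting buffer out,
 * returning the number of dumped bytes via num_dumped_bytes.
 */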
8210 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8211 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8212 {
8213 	struct qed_hwfn *p_hwfn =
8214 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8215 	struct qed_dbg_feature *qed_feature =
8216 		&cdev->dbg_params.features[feature];
8217 	enum dbg_status dbg_rc;
8218 	struct qed_ptt *p_ptt;
8219 	int rc = 0;
8220 
8221 	/* Acquire ptt */
8222 	p_ptt = qed_ptt_acquire(p_hwfn);
8223 	if (!p_ptt)
8224 		return -EINVAL;
8225 
8226 	/* Get dump */
8227 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8228 	if (dbg_rc != DBG_STATUS_OK) {
8229 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8230 			   qed_dbg_get_status_str(dbg_rc));
8231 		*num_dumped_bytes = 0;
8232 		rc = -EINVAL;
8233 		goto out;
8234 	}
8235 
8236 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8237 		   "copying debugfs feature to external buffer\n");
8238 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8239 	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
8240 			    4;
8241 
8242 out:
8243 	qed_ptt_release(p_hwfn, p_ptt);
8244 	return rc;
8245 }
8246 
8247 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8248 {
8249 	struct qed_hwfn *p_hwfn =
8250 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8251 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8252 	struct qed_dbg_feature *qed_feature =
8253 		&cdev->dbg_params.features[feature];
8254 	u32 buf_size_dwords;
8255 	enum dbg_status rc;
8256 
8257 	if (!p_ptt)
8258 		return -EINVAL;
8259 
8260 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8261 						   &buf_size_dwords);
8262 	if (rc != DBG_STATUS_OK)
8263 		buf_size_dwords = 0;
8264 
8265 	qed_ptt_release(p_hwfn, p_ptt);
8266 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8267 	return qed_feature->buf_size;
8268 }
8269 
8270 u8 qed_get_debug_engine(struct qed_dev *cdev)
8271 {
8272 	return cdev->dbg_params.engine_for_debug;
8273 }
8274 
8275 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8276 {
8277 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8278 		   engine_number);
8279 	cdev->dbg_params.engine_for_debug = engine_number;
8280 }
8281 
8282 void qed_dbg_pf_init(struct qed_dev *cdev)
8283 {
8284 	const u8 *dbg_values;
8285 
8286 	/* The debug data is located after the init data in the firmware file.
8287 	 * Its offset is stored in the first dword of the file.
8288 	 */
8289 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8290 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8291 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8292 }
8293 
8294 void qed_dbg_pf_exit(struct qed_dev *cdev)
8295 {
8296 	struct qed_dbg_feature *feature = NULL;
8297 	enum qed_dbg_features feature_idx;
8298 
8299 	/* Debug features' buffers may be allocated if a debug feature was used
8300 	 * but dump wasn't called.
8301 	 */
8302 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8303 		feature = &cdev->dbg_params.features[feature_idx];
8304 		if (feature->dump_buf) {
8305 			vfree(feature->dump_buf);
8306 			feature->dump_buf = NULL;
8307 		}
8308 	}
8309 }
8310