1 // SPDX-License-Identifier: GPL-2.0-only
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015 QLogic Corporation
4  */
5 
6 #include <linux/module.h>
7 #include <linux/vmalloc.h>
8 #include <linux/crc32.h>
9 #include "qed.h"
10 #include "qed_hsi.h"
11 #include "qed_hw.h"
12 #include "qed_mcp.h"
13 #include "qed_reg_addr.h"
14 
15 /* Memory groups enum */
16 enum mem_groups {
17 	MEM_GROUP_PXP_MEM,
18 	MEM_GROUP_DMAE_MEM,
19 	MEM_GROUP_CM_MEM,
20 	MEM_GROUP_QM_MEM,
21 	MEM_GROUP_DORQ_MEM,
22 	MEM_GROUP_BRB_RAM,
23 	MEM_GROUP_BRB_MEM,
24 	MEM_GROUP_PRS_MEM,
25 	MEM_GROUP_IOR,
26 	MEM_GROUP_BTB_RAM,
27 	MEM_GROUP_CONN_CFC_MEM,
28 	MEM_GROUP_TASK_CFC_MEM,
29 	MEM_GROUP_CAU_PI,
30 	MEM_GROUP_CAU_MEM,
31 	MEM_GROUP_PXP_ILT,
32 	MEM_GROUP_TM_MEM,
33 	MEM_GROUP_SDM_MEM,
34 	MEM_GROUP_PBUF,
35 	MEM_GROUP_RAM,
36 	MEM_GROUP_MULD_MEM,
37 	MEM_GROUP_BTB_MEM,
38 	MEM_GROUP_RDIF_CTX,
39 	MEM_GROUP_TDIF_CTX,
40 	MEM_GROUP_CFC_MEM,
41 	MEM_GROUP_IGU_MEM,
42 	MEM_GROUP_IGU_MSIX,
43 	MEM_GROUP_CAU_SB,
44 	MEM_GROUP_BMB_RAM,
45 	MEM_GROUP_BMB_MEM,
46 	MEM_GROUPS_NUM
47 };
48 
49 /* Memory group names */
50 static const char * const s_mem_group_names[] = {
51 	"PXP_MEM",
52 	"DMAE_MEM",
53 	"CM_MEM",
54 	"QM_MEM",
55 	"DORQ_MEM",
56 	"BRB_RAM",
57 	"BRB_MEM",
58 	"PRS_MEM",
59 	"IOR",
60 	"BTB_RAM",
61 	"CONN_CFC_MEM",
62 	"TASK_CFC_MEM",
63 	"CAU_PI",
64 	"CAU_MEM",
65 	"PXP_ILT",
66 	"TM_MEM",
67 	"SDM_MEM",
68 	"PBUF",
69 	"RAM",
70 	"MULD_MEM",
71 	"BTB_MEM",
72 	"RDIF_CTX",
73 	"TDIF_CTX",
74 	"CFC_MEM",
75 	"IGU_MEM",
76 	"IGU_MSIX",
77 	"CAU_SB",
78 	"BMB_RAM",
79 	"BMB_MEM",
80 };
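/* Indexed by enum mem_groups and must be kept in the same order as the enum
 * above. A build-time guard along these lines could catch a length mismatch
 * (a sketch, not part of the original; it would need to live inside a
 * function, e.g. an init routine):
 *
 *	BUILD_BUG_ON(ARRAY_SIZE(s_mem_group_names) != MEM_GROUPS_NUM);
 */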
81 
82 /* Idle check conditions */
83 
84 static u32 cond5(const u32 *r, const u32 *imm)
85 {
86 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
87 }
88 
89 static u32 cond7(const u32 *r, const u32 *imm)
90 {
91 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
92 }
93 
94 static u32 cond6(const u32 *r, const u32 *imm)
95 {
96 	return (r[0] & imm[0]) != imm[1];
97 }
98 
99 static u32 cond9(const u32 *r, const u32 *imm)
100 {
101 	return ((r[0] & imm[0]) >> imm[1]) !=
102 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
103 }
104 
105 static u32 cond10(const u32 *r, const u32 *imm)
106 {
107 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
108 }
109 
110 static u32 cond4(const u32 *r, const u32 *imm)
111 {
112 	return (r[0] & ~imm[0]) != imm[1];
113 }
114 
115 static u32 cond0(const u32 *r, const u32 *imm)
116 {
117 	return (r[0] & ~r[1]) != imm[0];
118 }
119 
120 static u32 cond1(const u32 *r, const u32 *imm)
121 {
122 	return r[0] != imm[0];
123 }
124 
125 static u32 cond11(const u32 *r, const u32 *imm)
126 {
127 	return r[0] != r[1] && r[2] == imm[0];
128 }
129 
130 static u32 cond12(const u32 *r, const u32 *imm)
131 {
132 	return r[0] != r[1] && r[2] > imm[0];
133 }
134 
135 static u32 cond3(const u32 *r, const u32 *imm)
136 {
137 	return r[0] != r[1];
138 }
139 
140 static u32 cond13(const u32 *r, const u32 *imm)
141 {
142 	return r[0] & imm[0];
143 }
144 
145 static u32 cond8(const u32 *r, const u32 *imm)
146 {
147 	return r[0] < (r[1] - imm[0]);
148 }
149 
150 static u32 cond2(const u32 *r, const u32 *imm)
151 {
152 	return r[0] > imm[0];
153 }
154 
155 /* Array of Idle Check conditions */
156 static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
157 	cond0,
158 	cond1,
159 	cond2,
160 	cond3,
161 	cond4,
162 	cond5,
163 	cond6,
164 	cond7,
165 	cond8,
166 	cond9,
167 	cond10,
168 	cond11,
169 	cond12,
170 	cond13,
171 };
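/* An idle-check rule references one of these handlers by index; evaluating a
 * rule is roughly (the field and variable names below are illustrative, not
 * necessarily the ones used later in this file):
 *
 *	if ((*cond_arr[rule->cond_id])(cond_reg_values, imm_values))
 *		record the rule as failing;
 *
 * where cond_reg_values holds the register values read for the rule and
 * imm_values points at its immediate operands.
 */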
172 
173 /******************************* Data Types **********************************/
174 
175 enum platform_ids {
176 	PLATFORM_ASIC,
177 	PLATFORM_RESERVED,
178 	PLATFORM_RESERVED2,
179 	PLATFORM_RESERVED3,
180 	MAX_PLATFORM_IDS
181 };
182 
183 /* Chip constant definitions */
184 struct chip_defs {
185 	const char *name;
186 };
187 
188 /* Platform constant definitions */
189 struct platform_defs {
190 	const char *name;
191 	u32 delay_factor;
192 	u32 dmae_thresh;
193 	u32 log_thresh;
194 };
195 
196 /* Storm constant definitions.
197  * Addresses are in bytes, sizes are in quad-regs.
198  */
199 struct storm_defs {
200 	char letter;
201 	enum block_id block_id;
202 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
203 	bool has_vfc;
204 	u32 sem_fast_mem_addr;
205 	u32 sem_frame_mode_addr;
206 	u32 sem_slow_enable_addr;
207 	u32 sem_slow_mode_addr;
208 	u32 sem_slow_mode1_conf_addr;
209 	u32 sem_sync_dbg_empty_addr;
210 	u32 sem_slow_dbg_empty_addr;
211 	u32 cm_ctx_wr_addr;
212 	u32 cm_conn_ag_ctx_lid_size;
213 	u32 cm_conn_ag_ctx_rd_addr;
214 	u32 cm_conn_st_ctx_lid_size;
215 	u32 cm_conn_st_ctx_rd_addr;
216 	u32 cm_task_ag_ctx_lid_size;
217 	u32 cm_task_ag_ctx_rd_addr;
218 	u32 cm_task_st_ctx_lid_size;
219 	u32 cm_task_st_ctx_rd_addr;
220 };
221 
222 /* Block constant definitions */
223 struct block_defs {
224 	const char *name;
225 	bool exists[MAX_CHIP_IDS];
226 	bool associated_to_storm;
227 
228 	/* Valid only if associated_to_storm is true */
229 	u32 storm_id;
230 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
231 	u32 dbg_select_addr;
232 	u32 dbg_enable_addr;
233 	u32 dbg_shift_addr;
234 	u32 dbg_force_valid_addr;
235 	u32 dbg_force_frame_addr;
236 	bool has_reset_bit;
237 
238 	/* If true, block is taken out of reset before dump */
239 	bool unreset;
240 	enum dbg_reset_regs reset_reg;
241 
242 	/* Bit offset in reset register */
243 	u8 reset_bit_offset;
244 };
245 
246 /* Reset register definitions */
247 struct reset_reg_defs {
248 	u32 addr;
249 	bool exists[MAX_CHIP_IDS];
250 	u32 unreset_val[MAX_CHIP_IDS];
251 };
252 
253 struct grc_param_defs {
254 	u32 default_val[MAX_CHIP_IDS];
255 	u32 min;
256 	u32 max;
257 	bool is_preset;
258 	bool is_persistent;
259 	u32 exclude_all_preset_val;
260 	u32 crash_preset_val;
261 };
262 
263 /* Address is in 128b units. Width is in bits. */
264 struct rss_mem_defs {
265 	const char *mem_name;
266 	const char *type_name;
267 	u32 addr;
268 	u32 entry_width;
269 	u32 num_entries[MAX_CHIP_IDS];
270 };
271 
272 struct vfc_ram_defs {
273 	const char *mem_name;
274 	const char *type_name;
275 	u32 base_row;
276 	u32 num_rows;
277 };
278 
279 struct big_ram_defs {
280 	const char *instance_name;
281 	enum mem_groups mem_group_id;
282 	enum mem_groups ram_mem_group_id;
283 	enum dbg_grc_params grc_param;
284 	u32 addr_reg_addr;
285 	u32 data_reg_addr;
286 	u32 is_256b_reg_addr;
287 	u32 is_256b_bit_offset[MAX_CHIP_IDS];
288 	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
289 };
290 
291 struct phy_defs {
292 	const char *phy_name;
293 
294 	/* PHY base GRC address */
295 	u32 base_addr;
296 
297 	/* Relative address of indirect TBUS address register (bits 0..7) */
298 	u32 tbus_addr_lo_addr;
299 
300 	/* Relative address of indirect TBUS address register (bits 8..10) */
301 	u32 tbus_addr_hi_addr;
302 
303 	/* Relative address of indirect TBUS data register (bits 0..7) */
304 	u32 tbus_data_lo_addr;
305 
306 	/* Relative address of indirect TBUS data register (bits 8..11) */
307 	u32 tbus_data_hi_addr;
308 };
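/* Reading one TBUS location is therefore a four-register sequence (roughly):
 * write the low and high halves of the TBUS address to tbus_addr_lo_addr and
 * tbus_addr_hi_addr, then read the result back from tbus_data_lo_addr
 * (bits 0..7) and tbus_data_hi_addr (bits 8..11), all offsets relative to
 * base_addr.
 */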
309 
310 /* Split type definitions */
311 struct split_type_defs {
312 	const char *name;
313 };
314 
315 /******************************** Constants **********************************/
316 
317 #define MAX_LCIDS			320
318 #define MAX_LTIDS			320
319 
320 #define NUM_IOR_SETS			2
321 #define IORS_PER_SET			176
322 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
323 
324 #define BYTES_IN_DWORD			sizeof(u32)
325 
326 /* In the macros below, size and offset are specified in bits */
327 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
328 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
329 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
330 #define FIELD_DWORD_OFFSET(type, field) \
331 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
332 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
333 #define FIELD_BIT_MASK(type, field) \
334 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
335 	 FIELD_DWORD_SHIFT(type, field))
336 
337 #define SET_VAR_FIELD(var, type, field, val) \
338 	do { \
339 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
340 		(~FIELD_BIT_MASK(type, field));	\
341 		var[FIELD_DWORD_OFFSET(type, field)] |= \
342 		(val) << FIELD_DWORD_SHIFT(type, field); \
343 	} while (0)
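/* For example, using the VFC CAM command layout defined further down in this
 * file (a sketch; 'cam_cmd' and 'row' are local variables of the caller):
 *
 *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
 *
 *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
 *
 * FIELD_DWORD_OFFSET() selects the dword that holds the field,
 * FIELD_BIT_MASK() clears its previous value, and the new value is shifted
 * into place by FIELD_DWORD_SHIFT().
 */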
344 
345 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
346 	do { \
347 		for (i = 0; i < (arr_size); i++) \
348 			qed_wr(dev, ptt, addr, (arr)[i]); \
349 	} while (0)
350 
351 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
352 	do { \
353 		for (i = 0; i < (arr_size); i++) \
354 			(arr)[i] = qed_rd(dev, ptt, addr); \
355 	} while (0)
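/* Both array macros rely on a loop counter named 'i' that must be declared
 * by the caller, e.g.:
 *
 *	u32 i;
 *
 *	ARR_REG_WR(p_hwfn, p_ptt, addr, buf, len_in_dwords);
 */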
356 
357 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
358 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
359 
360 /* Extra lines include a signature line + optional latency events line */
361 #define NUM_EXTRA_DBG_LINES(block_desc) \
362 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
363 #define NUM_DBG_LINES(block_desc) \
364 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
365 
366 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
367 #define RAM_LINES_TO_BYTES(lines) \
368 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
369 
370 #define REG_DUMP_LEN_SHIFT		24
371 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
372 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
373 
374 #define IDLE_CHK_RULE_SIZE_DWORDS \
375 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
376 
377 #define IDLE_CHK_RESULT_HDR_DWORDS \
378 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
379 
380 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
381 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
382 
383 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
384 
385 /* The sizes and offsets below are specified in bits */
386 #define VFC_CAM_CMD_STRUCT_SIZE		64
387 #define VFC_CAM_CMD_ROW_OFFSET		48
388 #define VFC_CAM_CMD_ROW_SIZE		9
389 #define VFC_CAM_ADDR_STRUCT_SIZE	16
390 #define VFC_CAM_ADDR_OP_OFFSET		0
391 #define VFC_CAM_ADDR_OP_SIZE		4
392 #define VFC_CAM_RESP_STRUCT_SIZE	256
393 #define VFC_RAM_ADDR_STRUCT_SIZE	16
394 #define VFC_RAM_ADDR_OP_OFFSET		0
395 #define VFC_RAM_ADDR_OP_SIZE		2
396 #define VFC_RAM_ADDR_ROW_OFFSET		2
397 #define VFC_RAM_ADDR_ROW_SIZE		10
398 #define VFC_RAM_RESP_STRUCT_SIZE	256
399 
400 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
401 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
402 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
403 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
404 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
405 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
406 
407 #define NUM_VFC_RAM_TYPES		4
408 
409 #define VFC_CAM_NUM_ROWS		512
410 
411 #define VFC_OPCODE_CAM_RD		14
412 #define VFC_OPCODE_RAM_RD		0
413 
414 #define NUM_RSS_MEM_TYPES		5
415 
416 #define NUM_BIG_RAM_TYPES		3
417 #define BIG_RAM_NAME_LEN		3
418 
419 #define NUM_PHY_TBUS_ADDRESSES		2048
420 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
421 
422 #define RESET_REG_UNRESET_OFFSET	4
423 
424 #define STALL_DELAY_MS			500
425 
426 #define STATIC_DEBUG_LINE_DWORDS	9
427 
428 #define NUM_COMMON_GLOBAL_PARAMS	8
429 
430 #define FW_IMG_MAIN			1
431 
432 #define REG_FIFO_ELEMENT_DWORDS		2
433 #define REG_FIFO_DEPTH_ELEMENTS		32
434 #define REG_FIFO_DEPTH_DWORDS \
435 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
436 
437 #define IGU_FIFO_ELEMENT_DWORDS		4
438 #define IGU_FIFO_DEPTH_ELEMENTS		64
439 #define IGU_FIFO_DEPTH_DWORDS \
440 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
441 
442 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
443 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
444 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
445 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
446 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
447 
448 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
449 	(MCP_REG_SCRATCH + \
450 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
451 
452 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
453 #define EMPTY_FW_IMAGE_STR		"???????????????"
454 
455 /***************************** Constant Arrays *******************************/
456 
457 struct dbg_array {
458 	const u32 *ptr;
459 	u32 size_in_dwords;
460 };
461 
462 /* Debug arrays */
463 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
464 
465 /* Chip constant definitions array */
466 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
467 	{"bb"},
468 	{"ah"},
469 	{"reserved"},
470 };
471 
472 /* Storm constant definitions array */
473 static struct storm_defs s_storm_defs[] = {
474 	/* Tstorm */
475 	{'T', BLOCK_TSEM,
476 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
477 	  DBG_BUS_CLIENT_RBCT}, true,
478 	 TSEM_REG_FAST_MEMORY,
479 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
480 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
481 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
482 	 TCM_REG_CTX_RBC_ACCS,
483 	 4, TCM_REG_AGG_CON_CTX,
484 	 16, TCM_REG_SM_CON_CTX,
485 	 2, TCM_REG_AGG_TASK_CTX,
486 	 4, TCM_REG_SM_TASK_CTX},
487 
488 	/* Mstorm */
489 	{'M', BLOCK_MSEM,
490 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM,
491 	  DBG_BUS_CLIENT_RBCM}, false,
492 	 MSEM_REG_FAST_MEMORY,
493 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
494 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
495 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
496 	 MCM_REG_CTX_RBC_ACCS,
497 	 1, MCM_REG_AGG_CON_CTX,
498 	 10, MCM_REG_SM_CON_CTX,
499 	 2, MCM_REG_AGG_TASK_CTX,
500 	 7, MCM_REG_SM_TASK_CTX},
501 
502 	/* Ustorm */
503 	{'U', BLOCK_USEM,
504 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
505 	  DBG_BUS_CLIENT_RBCU}, false,
506 	 USEM_REG_FAST_MEMORY,
507 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
508 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
509 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
510 	 UCM_REG_CTX_RBC_ACCS,
511 	 2, UCM_REG_AGG_CON_CTX,
512 	 13, UCM_REG_SM_CON_CTX,
513 	 3, UCM_REG_AGG_TASK_CTX,
514 	 3, UCM_REG_SM_TASK_CTX},
515 
516 	/* Xstorm */
517 	{'X', BLOCK_XSEM,
518 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
519 	  DBG_BUS_CLIENT_RBCX}, false,
520 	 XSEM_REG_FAST_MEMORY,
521 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
522 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
523 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
524 	 XCM_REG_CTX_RBC_ACCS,
525 	 9, XCM_REG_AGG_CON_CTX,
526 	 15, XCM_REG_SM_CON_CTX,
527 	 0, 0,
528 	 0, 0},
529 
530 	/* Ystorm */
531 	{'Y', BLOCK_YSEM,
532 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY,
533 	  DBG_BUS_CLIENT_RBCY}, false,
534 	 YSEM_REG_FAST_MEMORY,
535 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
536 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
537 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
538 	 YCM_REG_CTX_RBC_ACCS,
539 	 2, YCM_REG_AGG_CON_CTX,
540 	 3, YCM_REG_SM_CON_CTX,
541 	 2, YCM_REG_AGG_TASK_CTX,
542 	 12, YCM_REG_SM_TASK_CTX},
543 
544 	/* Pstorm */
545 	{'P', BLOCK_PSEM,
546 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
547 	  DBG_BUS_CLIENT_RBCS}, true,
548 	 PSEM_REG_FAST_MEMORY,
549 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
550 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
551 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
552 	 PCM_REG_CTX_RBC_ACCS,
553 	 0, 0,
554 	 10, PCM_REG_SM_CON_CTX,
555 	 0, 0,
556 	 0, 0}
557 };
558 
559 /* Block definitions array */
560 
561 static struct block_defs block_grc_defs = {
562 	"grc",
563 	{true, true, true}, false, 0,
564 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
565 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
566 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
567 	GRC_REG_DBG_FORCE_FRAME,
568 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
569 };
570 
571 static struct block_defs block_miscs_defs = {
572 	"miscs", {true, true, true}, false, 0,
573 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
574 	0, 0, 0, 0, 0,
575 	false, false, MAX_DBG_RESET_REGS, 0
576 };
577 
578 static struct block_defs block_misc_defs = {
579 	"misc", {true, true, true}, false, 0,
580 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
581 	0, 0, 0, 0, 0,
582 	false, false, MAX_DBG_RESET_REGS, 0
583 };
584 
585 static struct block_defs block_dbu_defs = {
586 	"dbu", {true, true, true}, false, 0,
587 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
588 	0, 0, 0, 0, 0,
589 	false, false, MAX_DBG_RESET_REGS, 0
590 };
591 
592 static struct block_defs block_pglue_b_defs = {
593 	"pglue_b",
594 	{true, true, true}, false, 0,
595 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
596 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
597 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
598 	PGLUE_B_REG_DBG_FORCE_FRAME,
599 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
600 };
601 
602 static struct block_defs block_cnig_defs = {
603 	"cnig",
604 	{true, true, true}, false, 0,
605 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW,
606 	 DBG_BUS_CLIENT_RBCW},
607 	CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5,
608 	CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5,
609 	CNIG_REG_DBG_FORCE_FRAME_K2_E5,
610 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
611 };
612 
613 static struct block_defs block_cpmu_defs = {
614 	"cpmu", {true, true, true}, false, 0,
615 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
616 	0, 0, 0, 0, 0,
617 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
618 };
619 
620 static struct block_defs block_ncsi_defs = {
621 	"ncsi",
622 	{true, true, true}, false, 0,
623 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
624 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
625 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
626 	NCSI_REG_DBG_FORCE_FRAME,
627 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
628 };
629 
630 static struct block_defs block_opte_defs = {
631 	"opte", {true, true, false}, false, 0,
632 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
633 	0, 0, 0, 0, 0,
634 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
635 };
636 
637 static struct block_defs block_bmb_defs = {
638 	"bmb",
639 	{true, true, true}, false, 0,
640 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB},
641 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
642 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
643 	BMB_REG_DBG_FORCE_FRAME,
644 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
645 };
646 
647 static struct block_defs block_pcie_defs = {
648 	"pcie",
649 	{true, true, true}, false, 0,
650 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
651 	 DBG_BUS_CLIENT_RBCH},
652 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
653 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
654 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
655 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
656 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
657 	false, false, MAX_DBG_RESET_REGS, 0
658 };
659 
660 static struct block_defs block_mcp_defs = {
661 	"mcp", {true, true, true}, false, 0,
662 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
663 	0, 0, 0, 0, 0,
664 	false, false, MAX_DBG_RESET_REGS, 0
665 };
666 
667 static struct block_defs block_mcp2_defs = {
668 	"mcp2",
669 	{true, true, true}, false, 0,
670 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
671 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
672 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
673 	MCP2_REG_DBG_FORCE_FRAME,
674 	false, false, MAX_DBG_RESET_REGS, 0
675 };
676 
677 static struct block_defs block_pswhst_defs = {
678 	"pswhst",
679 	{true, true, true}, false, 0,
680 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
681 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
682 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
683 	PSWHST_REG_DBG_FORCE_FRAME,
684 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
685 };
686 
687 static struct block_defs block_pswhst2_defs = {
688 	"pswhst2",
689 	{true, true, true}, false, 0,
690 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
691 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
692 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
693 	PSWHST2_REG_DBG_FORCE_FRAME,
694 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
695 };
696 
697 static struct block_defs block_pswrd_defs = {
698 	"pswrd",
699 	{true, true, true}, false, 0,
700 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
701 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
702 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
703 	PSWRD_REG_DBG_FORCE_FRAME,
704 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
705 };
706 
707 static struct block_defs block_pswrd2_defs = {
708 	"pswrd2",
709 	{true, true, true}, false, 0,
710 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
711 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
712 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
713 	PSWRD2_REG_DBG_FORCE_FRAME,
714 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
715 };
716 
717 static struct block_defs block_pswwr_defs = {
718 	"pswwr",
719 	{true, true, true}, false, 0,
720 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
721 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
722 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
723 	PSWWR_REG_DBG_FORCE_FRAME,
724 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
725 };
726 
727 static struct block_defs block_pswwr2_defs = {
728 	"pswwr2", {true, true, true}, false, 0,
729 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
730 	0, 0, 0, 0, 0,
731 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
732 };
733 
734 static struct block_defs block_pswrq_defs = {
735 	"pswrq",
736 	{true, true, true}, false, 0,
737 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
738 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
739 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
740 	PSWRQ_REG_DBG_FORCE_FRAME,
741 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
742 };
743 
744 static struct block_defs block_pswrq2_defs = {
745 	"pswrq2",
746 	{true, true, true}, false, 0,
747 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
748 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
749 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
750 	PSWRQ2_REG_DBG_FORCE_FRAME,
751 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
752 };
753 
754 static struct block_defs block_pglcs_defs = {
755 	"pglcs",
756 	{true, true, true}, false, 0,
757 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
758 	 DBG_BUS_CLIENT_RBCH},
759 	PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5,
760 	PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5,
761 	PGLCS_REG_DBG_FORCE_FRAME_K2_E5,
762 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
763 };
764 
765 static struct block_defs block_ptu_defs = {
766 	"ptu",
767 	{true, true, true}, false, 0,
768 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
769 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
770 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
771 	PTU_REG_DBG_FORCE_FRAME,
772 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
773 };
774 
775 static struct block_defs block_dmae_defs = {
776 	"dmae",
777 	{true, true, true}, false, 0,
778 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
779 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
780 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
781 	DMAE_REG_DBG_FORCE_FRAME,
782 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
783 };
784 
785 static struct block_defs block_tcm_defs = {
786 	"tcm",
787 	{true, true, true}, true, DBG_TSTORM_ID,
788 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
789 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
790 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
791 	TCM_REG_DBG_FORCE_FRAME,
792 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
793 };
794 
795 static struct block_defs block_mcm_defs = {
796 	"mcm",
797 	{true, true, true}, true, DBG_MSTORM_ID,
798 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
799 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
800 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
801 	MCM_REG_DBG_FORCE_FRAME,
802 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
803 };
804 
805 static struct block_defs block_ucm_defs = {
806 	"ucm",
807 	{true, true, true}, true, DBG_USTORM_ID,
808 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
809 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
810 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
811 	UCM_REG_DBG_FORCE_FRAME,
812 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
813 };
814 
815 static struct block_defs block_xcm_defs = {
816 	"xcm",
817 	{true, true, true}, true, DBG_XSTORM_ID,
818 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
819 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
820 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
821 	XCM_REG_DBG_FORCE_FRAME,
822 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
823 };
824 
825 static struct block_defs block_ycm_defs = {
826 	"ycm",
827 	{true, true, true}, true, DBG_YSTORM_ID,
828 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
829 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
830 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
831 	YCM_REG_DBG_FORCE_FRAME,
832 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
833 };
834 
835 static struct block_defs block_pcm_defs = {
836 	"pcm",
837 	{true, true, true}, true, DBG_PSTORM_ID,
838 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
839 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
840 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
841 	PCM_REG_DBG_FORCE_FRAME,
842 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
843 };
844 
845 static struct block_defs block_qm_defs = {
846 	"qm",
847 	{true, true, true}, false, 0,
848 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ},
849 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
850 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
851 	QM_REG_DBG_FORCE_FRAME,
852 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
853 };
854 
855 static struct block_defs block_tm_defs = {
856 	"tm",
857 	{true, true, true}, false, 0,
858 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
859 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
860 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
861 	TM_REG_DBG_FORCE_FRAME,
862 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
863 };
864 
865 static struct block_defs block_dorq_defs = {
866 	"dorq",
867 	{true, true, true}, false, 0,
868 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
869 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
870 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
871 	DORQ_REG_DBG_FORCE_FRAME,
872 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
873 };
874 
875 static struct block_defs block_brb_defs = {
876 	"brb",
877 	{true, true, true}, false, 0,
878 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
879 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
880 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
881 	BRB_REG_DBG_FORCE_FRAME,
882 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
883 };
884 
885 static struct block_defs block_src_defs = {
886 	"src",
887 	{true, true, true}, false, 0,
888 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
889 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
890 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
891 	SRC_REG_DBG_FORCE_FRAME,
892 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
893 };
894 
895 static struct block_defs block_prs_defs = {
896 	"prs",
897 	{true, true, true}, false, 0,
898 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
899 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
900 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
901 	PRS_REG_DBG_FORCE_FRAME,
902 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
903 };
904 
905 static struct block_defs block_tsdm_defs = {
906 	"tsdm",
907 	{true, true, true}, true, DBG_TSTORM_ID,
908 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
909 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
910 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
911 	TSDM_REG_DBG_FORCE_FRAME,
912 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
913 };
914 
915 static struct block_defs block_msdm_defs = {
916 	"msdm",
917 	{true, true, true}, true, DBG_MSTORM_ID,
918 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
919 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
920 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
921 	MSDM_REG_DBG_FORCE_FRAME,
922 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
923 };
924 
925 static struct block_defs block_usdm_defs = {
926 	"usdm",
927 	{true, true, true}, true, DBG_USTORM_ID,
928 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
929 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
930 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
931 	USDM_REG_DBG_FORCE_FRAME,
932 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
933 };
934 
935 static struct block_defs block_xsdm_defs = {
936 	"xsdm",
937 	{true, true, true}, true, DBG_XSTORM_ID,
938 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
939 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
940 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
941 	XSDM_REG_DBG_FORCE_FRAME,
942 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
943 };
944 
945 static struct block_defs block_ysdm_defs = {
946 	"ysdm",
947 	{true, true, true}, true, DBG_YSTORM_ID,
948 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
949 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
950 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
951 	YSDM_REG_DBG_FORCE_FRAME,
952 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
953 };
954 
955 static struct block_defs block_psdm_defs = {
956 	"psdm",
957 	{true, true, true}, true, DBG_PSTORM_ID,
958 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
959 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
960 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
961 	PSDM_REG_DBG_FORCE_FRAME,
962 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
963 };
964 
965 static struct block_defs block_tsem_defs = {
966 	"tsem",
967 	{true, true, true}, true, DBG_TSTORM_ID,
968 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
969 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
970 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
971 	TSEM_REG_DBG_FORCE_FRAME,
972 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
973 };
974 
975 static struct block_defs block_msem_defs = {
976 	"msem",
977 	{true, true, true}, true, DBG_MSTORM_ID,
978 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
979 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
980 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
981 	MSEM_REG_DBG_FORCE_FRAME,
982 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
983 };
984 
985 static struct block_defs block_usem_defs = {
986 	"usem",
987 	{true, true, true}, true, DBG_USTORM_ID,
988 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
989 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
990 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
991 	USEM_REG_DBG_FORCE_FRAME,
992 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
993 };
994 
995 static struct block_defs block_xsem_defs = {
996 	"xsem",
997 	{true, true, true}, true, DBG_XSTORM_ID,
998 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
999 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1000 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1001 	XSEM_REG_DBG_FORCE_FRAME,
1002 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1003 };
1004 
1005 static struct block_defs block_ysem_defs = {
1006 	"ysem",
1007 	{true, true, true}, true, DBG_YSTORM_ID,
1008 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY},
1009 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1010 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1011 	YSEM_REG_DBG_FORCE_FRAME,
1012 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1013 };
1014 
1015 static struct block_defs block_psem_defs = {
1016 	"psem",
1017 	{true, true, true}, true, DBG_PSTORM_ID,
1018 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1019 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1020 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1021 	PSEM_REG_DBG_FORCE_FRAME,
1022 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1023 };
1024 
1025 static struct block_defs block_rss_defs = {
1026 	"rss",
1027 	{true, true, true}, false, 0,
1028 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1029 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1030 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1031 	RSS_REG_DBG_FORCE_FRAME,
1032 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1033 };
1034 
1035 static struct block_defs block_tmld_defs = {
1036 	"tmld",
1037 	{true, true, true}, false, 0,
1038 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1039 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1040 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1041 	TMLD_REG_DBG_FORCE_FRAME,
1042 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1043 };
1044 
1045 static struct block_defs block_muld_defs = {
1046 	"muld",
1047 	{true, true, true}, false, 0,
1048 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1049 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1050 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1051 	MULD_REG_DBG_FORCE_FRAME,
1052 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1053 };
1054 
1055 static struct block_defs block_yuld_defs = {
1056 	"yuld",
1057 	{true, true, false}, false, 0,
1058 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
1059 	 MAX_DBG_BUS_CLIENTS},
1060 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1061 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1062 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1063 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1064 	15
1065 };
1066 
1067 static struct block_defs block_xyld_defs = {
1068 	"xyld",
1069 	{true, true, true}, false, 0,
1070 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1071 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1072 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1073 	XYLD_REG_DBG_FORCE_FRAME,
1074 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1075 };
1076 
1077 static struct block_defs block_ptld_defs = {
1078 	"ptld",
1079 	{false, false, true}, false, 0,
1080 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT},
1081 	PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5,
1082 	PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5,
1083 	PTLD_REG_DBG_FORCE_FRAME_E5,
1084 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1085 	28
1086 };
1087 
1088 static struct block_defs block_ypld_defs = {
1089 	"ypld",
1090 	{false, false, true}, false, 0,
1091 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS},
1092 	YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5,
1093 	YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5,
1094 	YPLD_REG_DBG_FORCE_FRAME_E5,
1095 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1096 	27
1097 };
1098 
1099 static struct block_defs block_prm_defs = {
1100 	"prm",
1101 	{true, true, true}, false, 0,
1102 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1103 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1104 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1105 	PRM_REG_DBG_FORCE_FRAME,
1106 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1107 };
1108 
1109 static struct block_defs block_pbf_pb1_defs = {
1110 	"pbf_pb1",
1111 	{true, true, true}, false, 0,
1112 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1113 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1114 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1115 	PBF_PB1_REG_DBG_FORCE_FRAME,
1116 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1117 	11
1118 };
1119 
1120 static struct block_defs block_pbf_pb2_defs = {
1121 	"pbf_pb2",
1122 	{true, true, true}, false, 0,
1123 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1124 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1125 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1126 	PBF_PB2_REG_DBG_FORCE_FRAME,
1127 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1128 	12
1129 };
1130 
1131 static struct block_defs block_rpb_defs = {
1132 	"rpb",
1133 	{true, true, true}, false, 0,
1134 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1135 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1136 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1137 	RPB_REG_DBG_FORCE_FRAME,
1138 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1139 };
1140 
1141 static struct block_defs block_btb_defs = {
1142 	"btb",
1143 	{true, true, true}, false, 0,
1144 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1145 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1146 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1147 	BTB_REG_DBG_FORCE_FRAME,
1148 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1149 };
1150 
1151 static struct block_defs block_pbf_defs = {
1152 	"pbf",
1153 	{true, true, true}, false, 0,
1154 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV},
1155 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1156 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1157 	PBF_REG_DBG_FORCE_FRAME,
1158 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1159 };
1160 
1161 static struct block_defs block_rdif_defs = {
1162 	"rdif",
1163 	{true, true, true}, false, 0,
1164 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM},
1165 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1166 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1167 	RDIF_REG_DBG_FORCE_FRAME,
1168 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1169 };
1170 
1171 static struct block_defs block_tdif_defs = {
1172 	"tdif",
1173 	{true, true, true}, false, 0,
1174 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1175 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1176 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1177 	TDIF_REG_DBG_FORCE_FRAME,
1178 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1179 };
1180 
1181 static struct block_defs block_cdu_defs = {
1182 	"cdu",
1183 	{true, true, true}, false, 0,
1184 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1185 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1186 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1187 	CDU_REG_DBG_FORCE_FRAME,
1188 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1189 };
1190 
1191 static struct block_defs block_ccfc_defs = {
1192 	"ccfc",
1193 	{true, true, true}, false, 0,
1194 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1195 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1196 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1197 	CCFC_REG_DBG_FORCE_FRAME,
1198 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1199 };
1200 
1201 static struct block_defs block_tcfc_defs = {
1202 	"tcfc",
1203 	{true, true, true}, false, 0,
1204 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1205 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1206 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1207 	TCFC_REG_DBG_FORCE_FRAME,
1208 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1209 };
1210 
1211 static struct block_defs block_igu_defs = {
1212 	"igu",
1213 	{true, true, true}, false, 0,
1214 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1215 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1216 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1217 	IGU_REG_DBG_FORCE_FRAME,
1218 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1219 };
1220 
1221 static struct block_defs block_cau_defs = {
1222 	"cau",
1223 	{true, true, true}, false, 0,
1224 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1225 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1226 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1227 	CAU_REG_DBG_FORCE_FRAME,
1228 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1229 };
1230 
1231 static struct block_defs block_rgfs_defs = {
1232 	"rgfs", {false, false, true}, false, 0,
1233 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1234 	0, 0, 0, 0, 0,
1235 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29
1236 };
1237 
1238 static struct block_defs block_rgsrc_defs = {
1239 	"rgsrc",
1240 	{false, false, true}, false, 0,
1241 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1242 	RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5,
1243 	RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5,
1244 	RGSRC_REG_DBG_FORCE_FRAME_E5,
1245 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1246 	30
1247 };
1248 
1249 static struct block_defs block_tgfs_defs = {
1250 	"tgfs", {false, false, true}, false, 0,
1251 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1252 	0, 0, 0, 0, 0,
1253 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30
1254 };
1255 
1256 static struct block_defs block_tgsrc_defs = {
1257 	"tgsrc",
1258 	{false, false, true}, false, 0,
1259 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV},
1260 	TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5,
1261 	TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5,
1262 	TGSRC_REG_DBG_FORCE_FRAME_E5,
1263 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1264 	31
1265 };
1266 
1267 static struct block_defs block_umac_defs = {
1268 	"umac",
1269 	{true, true, true}, false, 0,
1270 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ,
1271 	 DBG_BUS_CLIENT_RBCZ},
1272 	UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5,
1273 	UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5,
1274 	UMAC_REG_DBG_FORCE_FRAME_K2_E5,
1275 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1276 };
1277 
1278 static struct block_defs block_xmac_defs = {
1279 	"xmac", {true, false, false}, false, 0,
1280 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1281 	0, 0, 0, 0, 0,
1282 	false, false, MAX_DBG_RESET_REGS, 0
1283 };
1284 
1285 static struct block_defs block_dbg_defs = {
1286 	"dbg", {true, true, true}, false, 0,
1287 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1288 	0, 0, 0, 0, 0,
1289 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1290 };
1291 
1292 static struct block_defs block_nig_defs = {
1293 	"nig",
1294 	{true, true, true}, false, 0,
1295 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1296 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1297 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1298 	NIG_REG_DBG_FORCE_FRAME,
1299 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1300 };
1301 
1302 static struct block_defs block_wol_defs = {
1303 	"wol",
1304 	{false, true, true}, false, 0,
1305 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1306 	WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5,
1307 	WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5,
1308 	WOL_REG_DBG_FORCE_FRAME_K2_E5,
1309 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1310 };
1311 
1312 static struct block_defs block_bmbn_defs = {
1313 	"bmbn",
1314 	{false, true, true}, false, 0,
1315 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB,
1316 	 DBG_BUS_CLIENT_RBCB},
1317 	BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5,
1318 	BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5,
1319 	BMBN_REG_DBG_FORCE_FRAME_K2_E5,
1320 	false, false, MAX_DBG_RESET_REGS, 0
1321 };
1322 
1323 static struct block_defs block_ipc_defs = {
1324 	"ipc", {true, true, true}, false, 0,
1325 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1326 	0, 0, 0, 0, 0,
1327 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1328 };
1329 
1330 static struct block_defs block_nwm_defs = {
1331 	"nwm",
1332 	{false, true, true}, false, 0,
1333 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1334 	NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5,
1335 	NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5,
1336 	NWM_REG_DBG_FORCE_FRAME_K2_E5,
1337 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1338 };
1339 
1340 static struct block_defs block_nws_defs = {
1341 	"nws",
1342 	{false, true, true}, false, 0,
1343 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW},
1344 	NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5,
1345 	NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5,
1346 	NWS_REG_DBG_FORCE_FRAME_K2_E5,
1347 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1348 };
1349 
1350 static struct block_defs block_ms_defs = {
1351 	"ms",
1352 	{false, true, true}, false, 0,
1353 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
1354 	MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5,
1355 	MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5,
1356 	MS_REG_DBG_FORCE_FRAME_K2_E5,
1357 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1358 };
1359 
1360 static struct block_defs block_phy_pcie_defs = {
1361 	"phy_pcie",
1362 	{false, true, true}, false, 0,
1363 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH,
1364 	 DBG_BUS_CLIENT_RBCH},
1365 	PCIE_REG_DBG_COMMON_SELECT_K2_E5,
1366 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5,
1367 	PCIE_REG_DBG_COMMON_SHIFT_K2_E5,
1368 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5,
1369 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5,
1370 	false, false, MAX_DBG_RESET_REGS, 0
1371 };
1372 
1373 static struct block_defs block_led_defs = {
1374 	"led", {false, true, true}, false, 0,
1375 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1376 	0, 0, 0, 0, 0,
1377 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1378 };
1379 
1380 static struct block_defs block_avs_wrap_defs = {
1381 	"avs_wrap", {false, true, false}, false, 0,
1382 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1383 	0, 0, 0, 0, 0,
1384 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1385 };
1386 
1387 static struct block_defs block_pxpreqbus_defs = {
1388 	"pxpreqbus", {false, false, false}, false, 0,
1389 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1390 	0, 0, 0, 0, 0,
1391 	false, false, MAX_DBG_RESET_REGS, 0
1392 };
1393 
1394 static struct block_defs block_misc_aeu_defs = {
1395 	"misc_aeu", {true, true, true}, false, 0,
1396 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1397 	0, 0, 0, 0, 0,
1398 	false, false, MAX_DBG_RESET_REGS, 0
1399 };
1400 
1401 static struct block_defs block_bar0_map_defs = {
1402 	"bar0_map", {true, true, true}, false, 0,
1403 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1404 	0, 0, 0, 0, 0,
1405 	false, false, MAX_DBG_RESET_REGS, 0
1406 };
1407 
1408 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1409 	&block_grc_defs,
1410 	&block_miscs_defs,
1411 	&block_misc_defs,
1412 	&block_dbu_defs,
1413 	&block_pglue_b_defs,
1414 	&block_cnig_defs,
1415 	&block_cpmu_defs,
1416 	&block_ncsi_defs,
1417 	&block_opte_defs,
1418 	&block_bmb_defs,
1419 	&block_pcie_defs,
1420 	&block_mcp_defs,
1421 	&block_mcp2_defs,
1422 	&block_pswhst_defs,
1423 	&block_pswhst2_defs,
1424 	&block_pswrd_defs,
1425 	&block_pswrd2_defs,
1426 	&block_pswwr_defs,
1427 	&block_pswwr2_defs,
1428 	&block_pswrq_defs,
1429 	&block_pswrq2_defs,
1430 	&block_pglcs_defs,
1431 	&block_dmae_defs,
1432 	&block_ptu_defs,
1433 	&block_tcm_defs,
1434 	&block_mcm_defs,
1435 	&block_ucm_defs,
1436 	&block_xcm_defs,
1437 	&block_ycm_defs,
1438 	&block_pcm_defs,
1439 	&block_qm_defs,
1440 	&block_tm_defs,
1441 	&block_dorq_defs,
1442 	&block_brb_defs,
1443 	&block_src_defs,
1444 	&block_prs_defs,
1445 	&block_tsdm_defs,
1446 	&block_msdm_defs,
1447 	&block_usdm_defs,
1448 	&block_xsdm_defs,
1449 	&block_ysdm_defs,
1450 	&block_psdm_defs,
1451 	&block_tsem_defs,
1452 	&block_msem_defs,
1453 	&block_usem_defs,
1454 	&block_xsem_defs,
1455 	&block_ysem_defs,
1456 	&block_psem_defs,
1457 	&block_rss_defs,
1458 	&block_tmld_defs,
1459 	&block_muld_defs,
1460 	&block_yuld_defs,
1461 	&block_xyld_defs,
1462 	&block_ptld_defs,
1463 	&block_ypld_defs,
1464 	&block_prm_defs,
1465 	&block_pbf_pb1_defs,
1466 	&block_pbf_pb2_defs,
1467 	&block_rpb_defs,
1468 	&block_btb_defs,
1469 	&block_pbf_defs,
1470 	&block_rdif_defs,
1471 	&block_tdif_defs,
1472 	&block_cdu_defs,
1473 	&block_ccfc_defs,
1474 	&block_tcfc_defs,
1475 	&block_igu_defs,
1476 	&block_cau_defs,
1477 	&block_rgfs_defs,
1478 	&block_rgsrc_defs,
1479 	&block_tgfs_defs,
1480 	&block_tgsrc_defs,
1481 	&block_umac_defs,
1482 	&block_xmac_defs,
1483 	&block_dbg_defs,
1484 	&block_nig_defs,
1485 	&block_wol_defs,
1486 	&block_bmbn_defs,
1487 	&block_ipc_defs,
1488 	&block_nwm_defs,
1489 	&block_nws_defs,
1490 	&block_ms_defs,
1491 	&block_phy_pcie_defs,
1492 	&block_led_defs,
1493 	&block_avs_wrap_defs,
1494 	&block_pxpreqbus_defs,
1495 	&block_misc_aeu_defs,
1496 	&block_bar0_map_defs,
1497 };
1498 
1499 static struct platform_defs s_platform_defs[] = {
1500 	{"asic", 1, 256, 32768},
1501 	{"reserved", 0, 0, 0},
1502 	{"reserved2", 0, 0, 0},
1503 	{"reserved3", 0, 0, 0}
1504 };
1505 
1506 static struct grc_param_defs s_grc_param_defs[] = {
1507 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1508 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1509 
1510 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1511 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1512 
1513 	/* DBG_GRC_PARAM_DUMP_USTORM */
1514 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1515 
1516 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1517 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1518 
1519 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1520 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1521 
1522 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1523 	{{1, 1, 1}, 0, 1, false, false, 1, 1},
1524 
1525 	/* DBG_GRC_PARAM_DUMP_REGS */
1526 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1527 
1528 	/* DBG_GRC_PARAM_DUMP_RAM */
1529 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1530 
1531 	/* DBG_GRC_PARAM_DUMP_PBUF */
1532 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1533 
1534 	/* DBG_GRC_PARAM_DUMP_IOR */
1535 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1536 
1537 	/* DBG_GRC_PARAM_DUMP_VFC */
1538 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1539 
1540 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1541 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1542 
1543 	/* DBG_GRC_PARAM_DUMP_ILT */
1544 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1545 
1546 	/* DBG_GRC_PARAM_DUMP_RSS */
1547 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1548 
1549 	/* DBG_GRC_PARAM_DUMP_CAU */
1550 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1551 
1552 	/* DBG_GRC_PARAM_DUMP_QM */
1553 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1554 
1555 	/* DBG_GRC_PARAM_DUMP_MCP */
1556 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1557 
1558 	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
1559 	{{1, 1, 1}, 1, 0xffffffff, false, true, 0, 1},
1560 
1561 	/* DBG_GRC_PARAM_DUMP_CFC */
1562 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1563 
1564 	/* DBG_GRC_PARAM_DUMP_IGU */
1565 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1566 
1567 	/* DBG_GRC_PARAM_DUMP_BRB */
1568 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1569 
1570 	/* DBG_GRC_PARAM_DUMP_BTB */
1571 	{{0, 0, 0}, 0, 1, false, false, 0, 1},
1572 
1573 	/* DBG_GRC_PARAM_DUMP_BMB */
1574 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1575 
1576 	/* DBG_GRC_PARAM_DUMP_NIG */
1577 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1578 
1579 	/* DBG_GRC_PARAM_DUMP_MULD */
1580 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1581 
1582 	/* DBG_GRC_PARAM_DUMP_PRS */
1583 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1584 
1585 	/* DBG_GRC_PARAM_DUMP_DMAE */
1586 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1587 
1588 	/* DBG_GRC_PARAM_DUMP_TM */
1589 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1590 
1591 	/* DBG_GRC_PARAM_DUMP_SDM */
1592 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1593 
1594 	/* DBG_GRC_PARAM_DUMP_DIF */
1595 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1596 
1597 	/* DBG_GRC_PARAM_DUMP_STATIC */
1598 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1599 
1600 	/* DBG_GRC_PARAM_UNSTALL */
1601 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1602 
1603 	/* DBG_GRC_PARAM_NUM_LCIDS */
1604 	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, false,
1605 	 MAX_LCIDS, MAX_LCIDS},
1606 
1607 	/* DBG_GRC_PARAM_NUM_LTIDS */
1608 	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, false,
1609 	 MAX_LTIDS, MAX_LTIDS},
1610 
1611 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1612 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1613 
1614 	/* DBG_GRC_PARAM_CRASH */
1615 	{{0, 0, 0}, 0, 1, true, false, 0, 0},
1616 
1617 	/* DBG_GRC_PARAM_PARITY_SAFE */
1618 	{{0, 0, 0}, 0, 1, false, false, 1, 0},
1619 
1620 	/* DBG_GRC_PARAM_DUMP_CM */
1621 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1622 
1623 	/* DBG_GRC_PARAM_DUMP_PHY */
1624 	{{1, 1, 1}, 0, 1, false, false, 0, 1},
1625 
1626 	/* DBG_GRC_PARAM_NO_MCP */
1627 	{{0, 0, 0}, 0, 1, false, false, 0, 0},
1628 
1629 	/* DBG_GRC_PARAM_NO_FW_VER */
1630 	{{0, 0, 0}, 0, 1, false, false, 0, 0}
1631 };
1632 
1633 static struct rss_mem_defs s_rss_mem_defs[] = {
1634 	{ "rss_mem_cid", "rss_cid", 0, 32,
1635 	  {256, 320, 512} },
1636 
1637 	{ "rss_mem_key_msb", "rss_key", 1024, 256,
1638 	  {128, 208, 257} },
1639 
1640 	{ "rss_mem_key_lsb", "rss_key", 2048, 64,
1641 	  {128, 208, 257} },
1642 
1643 	{ "rss_mem_info", "rss_info", 3072, 16,
1644 	  {128, 208, 256} },
1645 
1646 	{ "rss_mem_ind", "rss_ind", 4096, 16,
1647 	  {16384, 26624, 32768} }
1648 };
1649 
1650 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1651 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1652 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1653 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1654 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1655 };
1656 
1657 static struct big_ram_defs s_big_ram_defs[] = {
1658 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1659 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1660 	  MISC_REG_BLOCK_256B_EN, {0, 0, 0},
1661 	  {153600, 180224, 282624} },
1662 
1663 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1664 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1665 	  MISC_REG_BLOCK_256B_EN, {0, 1, 1},
1666 	  {92160, 117760, 168960} },
1667 
1668 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1669 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1670 	  MISCS_REG_BLOCK_256B_EN, {0, 0, 0},
1671 	  {36864, 36864, 36864} }
1672 };
1673 
1674 static struct reset_reg_defs s_reset_regs_defs[] = {
1675 	/* DBG_RESET_REG_MISCS_PL_UA */
1676 	{ MISCS_REG_RESET_PL_UA,
1677 	  {true, true, true}, {0x0, 0x0, 0x0} },
1678 
1679 	/* DBG_RESET_REG_MISCS_PL_HV */
1680 	{ MISCS_REG_RESET_PL_HV,
1681 	  {true, true, true}, {0x0, 0x400, 0x600} },
1682 
1683 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1684 	{ MISCS_REG_RESET_PL_HV_2_K2_E5,
1685 	  {false, true, true}, {0x0, 0x0, 0x0} },
1686 
1687 	/* DBG_RESET_REG_MISC_PL_UA */
1688 	{ MISC_REG_RESET_PL_UA,
1689 	  {true, true, true}, {0x0, 0x0, 0x0} },
1690 
1691 	/* DBG_RESET_REG_MISC_PL_HV */
1692 	{ MISC_REG_RESET_PL_HV,
1693 	  {true, true, true}, {0x0, 0x0, 0x0} },
1694 
1695 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1696 	{ MISC_REG_RESET_PL_PDA_VMAIN_1,
1697 	  {true, true, true}, {0x4404040, 0x4404040, 0x404040} },
1698 
1699 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1700 	{ MISC_REG_RESET_PL_PDA_VMAIN_2,
1701 	  {true, true, true}, {0x7, 0x7c00007, 0x5c08007} },
1702 
1703 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1704 	{ MISC_REG_RESET_PL_PDA_VAUX,
1705 	  {true, true, true}, {0x2, 0x2, 0x2} },
1706 };
1707 
1708 static struct phy_defs s_phy_defs[] = {
1709 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1710 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
1711 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
1712 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
1713 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
1714 	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
1715 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1716 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1717 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1718 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1719 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
1720 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1721 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1722 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1723 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1724 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
1725 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
1726 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
1727 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
1728 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
1729 };
1730 
1731 static struct split_type_defs s_split_type_defs[] = {
1732 	/* SPLIT_TYPE_NONE */
1733 	{"eng"},
1734 
1735 	/* SPLIT_TYPE_PORT */
1736 	{"port"},
1737 
1738 	/* SPLIT_TYPE_PF */
1739 	{"pf"},
1740 
1741 	/* SPLIT_TYPE_PORT_PF */
1742 	{"port"},
1743 
1744 	/* SPLIT_TYPE_VF */
1745 	{"vf"}
1746 };
1747 
1748 /**************************** Private Functions ******************************/
1749 
1750 /* Reads and returns a single dword from the specified unaligned buffer */
1751 static u32 qed_read_unaligned_dword(u8 *buf)
1752 {
1753 	u32 dword;
1754 
1755 	memcpy((u8 *)&dword, buf, sizeof(dword));
1756 	return dword;
1757 }
1758 
1759 /* Returns the value of the specified GRC param */
1760 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1761 			     enum dbg_grc_params grc_param)
1762 {
1763 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1764 
1765 	return dev_data->grc.param_val[grc_param];
1766 }
1767 
1768 /* Initializes the GRC parameters */
1769 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1770 {
1771 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1772 
1773 	if (!dev_data->grc.params_initialized) {
1774 		qed_dbg_grc_set_params_default(p_hwfn);
1775 		dev_data->grc.params_initialized = 1;
1776 	}
1777 }
1778 
1779 /* Initializes debug data for the specified device */
1780 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1781 					struct qed_ptt *p_ptt)
1782 {
1783 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1784 	u8 num_pfs = 0, max_pfs_per_port = 0;
1785 
1786 	if (dev_data->initialized)
1787 		return DBG_STATUS_OK;
1788 
1789 	/* Set chip */
1790 	if (QED_IS_K2(p_hwfn->cdev)) {
1791 		dev_data->chip_id = CHIP_K2;
1792 		dev_data->mode_enable[MODE_K2] = 1;
1793 		dev_data->num_vfs = MAX_NUM_VFS_K2;
1794 		num_pfs = MAX_NUM_PFS_K2;
1795 		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
1796 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1797 		dev_data->chip_id = CHIP_BB;
1798 		dev_data->mode_enable[MODE_BB] = 1;
1799 		dev_data->num_vfs = MAX_NUM_VFS_BB;
1800 		num_pfs = MAX_NUM_PFS_BB;
1801 		max_pfs_per_port = MAX_NUM_PFS_BB;
1802 	} else {
1803 		return DBG_STATUS_UNKNOWN_CHIP;
1804 	}
1805 
	/* Set platform */
1807 	dev_data->platform_id = PLATFORM_ASIC;
1808 	dev_data->mode_enable[MODE_ASIC] = 1;
1809 
1810 	/* Set port mode */
1811 	switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
1812 	case 0:
1813 		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
1814 		break;
1815 	case 1:
1816 		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
1817 		break;
1818 	case 2:
1819 		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
1820 		break;
1821 	}
1822 
1823 	/* Set 100G mode */
1824 	if (dev_data->chip_id == CHIP_BB &&
1825 	    qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2)
1826 		dev_data->mode_enable[MODE_100G] = 1;
1827 
1828 	/* Set number of ports */
1829 	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1830 	    dev_data->mode_enable[MODE_100G])
1831 		dev_data->num_ports = 1;
1832 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1833 		dev_data->num_ports = 2;
1834 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1835 		dev_data->num_ports = 4;
1836 
1837 	/* Set number of PFs per port */
1838 	dev_data->num_pfs_per_port = min_t(u32,
1839 					   num_pfs / dev_data->num_ports,
1840 					   max_pfs_per_port);
1841 
1842 	/* Initializes the GRC parameters */
1843 	qed_dbg_grc_init_params(p_hwfn);
1844 
1845 	dev_data->use_dmae = true;
1846 	dev_data->initialized = 1;
1847 
1848 	return DBG_STATUS_OK;
1849 }
1850 
1851 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1852 						    enum block_id block_id)
1853 {
1854 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1855 
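	/* dbg_bus_blocks is a flat array indexed by (block_id, chip_id) */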
1856 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1857 						       MAX_CHIP_IDS +
1858 						       dev_data->chip_id];
1859 }
1860 
1861 /* Reads the FW info structure for the specified Storm from the chip,
1862  * and writes it to the specified fw_info pointer.
1863  */
1864 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1865 				   struct qed_ptt *p_ptt,
1866 				   u8 storm_id, struct fw_info *fw_info)
1867 {
1868 	struct storm_defs *storm = &s_storm_defs[storm_id];
1869 	struct fw_info_location fw_info_location;
1870 	u32 addr, i, *dest;
1871 
1872 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1873 	memset(fw_info, 0, sizeof(*fw_info));
1874 
	/* First, read the address that points to the fw_info location.
1876 	 * The address is located in the last line of the Storm RAM.
1877 	 */
1878 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1879 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) -
1880 	       sizeof(fw_info_location);
1881 	dest = (u32 *)&fw_info_location;
1882 
1883 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1884 	     i++, addr += BYTES_IN_DWORD)
1885 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1886 
	/* Read FW version info from Storm RAM (only if a valid fw_info size
	 * was read).
	 */
1888 	if (fw_info_location.size > 0 && fw_info_location.size <=
1889 	    sizeof(*fw_info)) {
1890 		addr = fw_info_location.grc_addr;
1891 		dest = (u32 *)fw_info;
1892 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1893 		     i++, addr += BYTES_IN_DWORD)
1894 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1895 	}
1896 }
1897 
1898 /* Dumps the specified string to the specified buffer.
1899  * Returns the dumped size in bytes.
1900  */
1901 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1902 {
1903 	if (dump)
1904 		strcpy(dump_buf, str);
1905 
1906 	return (u32)strlen(str) + 1;
1907 }
1908 
1909 /* Dumps zeros to align the specified buffer to dwords.
1910  * Returns the dumped size in bytes.
1911  */
1912 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1913 {
1914 	u8 offset_in_dword, align_size;
1915 
1916 	offset_in_dword = (u8)(byte_offset & 0x3);
1917 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1918 
1919 	if (dump && align_size)
1920 		memset(dump_buf, 0, align_size);
1921 
1922 	return align_size;
1923 }
1924 
1925 /* Writes the specified string param to the specified buffer.
1926  * Returns the dumped size in dwords.
1927  */
1928 static u32 qed_dump_str_param(u32 *dump_buf,
1929 			      bool dump,
1930 			      const char *param_name, const char *param_val)
1931 {
1932 	char *char_buf = (char *)dump_buf;
1933 	u32 offset = 0;
1934 
1935 	/* Dump param name */
1936 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1937 
1938 	/* Indicate a string param value */
1939 	if (dump)
1940 		*(char_buf + offset) = 1;
1941 	offset++;
1942 
1943 	/* Dump param value */
1944 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1945 
1946 	/* Align buffer to next dword */
1947 	offset += qed_dump_align(char_buf + offset, dump, offset);
1948 
1949 	return BYTES_TO_DWORDS(offset);
1950 }
1951 
1952 /* Writes the specified numeric param to the specified buffer.
1953  * Returns the dumped size in dwords.
1954  */
1955 static u32 qed_dump_num_param(u32 *dump_buf,
1956 			      bool dump, const char *param_name, u32 param_val)
1957 {
1958 	char *char_buf = (char *)dump_buf;
1959 	u32 offset = 0;
1960 
1961 	/* Dump param name */
1962 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1963 
1964 	/* Indicate a numeric param value */
1965 	if (dump)
1966 		*(char_buf + offset) = 0;
1967 	offset++;
1968 
1969 	/* Align buffer to next dword */
1970 	offset += qed_dump_align(char_buf + offset, dump, offset);
1971 
1972 	/* Dump param value (and change offset from bytes to dwords) */
1973 	offset = BYTES_TO_DWORDS(offset);
1974 	if (dump)
1975 		*(dump_buf + offset) = param_val;
1976 	offset++;
1977 
1978 	return offset;
1979 }
1980 
1981 /* Reads the FW version and writes it as a param to the specified buffer.
1982  * Returns the dumped size in dwords.
1983  */
1984 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1985 				 struct qed_ptt *p_ptt,
1986 				 u32 *dump_buf, bool dump)
1987 {
1988 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1989 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1990 	struct fw_info fw_info = { {0}, {0} };
1991 	u32 offset = 0;
1992 
1993 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1994 		/* Read FW info from chip */
1995 		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1996 
1997 		/* Create FW version/image strings */
1998 		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1999 			     "%d_%d_%d_%d", fw_info.ver.num.major,
2000 			     fw_info.ver.num.minor, fw_info.ver.num.rev,
2001 			     fw_info.ver.num.eng) < 0)
2002 			DP_NOTICE(p_hwfn,
2003 				  "Unexpected debug error: invalid FW version string\n");
2004 		switch (fw_info.ver.image_id) {
2005 		case FW_IMG_MAIN:
2006 			strcpy(fw_img_str, "main");
2007 			break;
2008 		default:
2009 			strcpy(fw_img_str, "unknown");
2010 			break;
2011 		}
2012 	}
2013 
2014 	/* Dump FW version, image and timestamp */
2015 	offset += qed_dump_str_param(dump_buf + offset,
2016 				     dump, "fw-version", fw_ver_str);
2017 	offset += qed_dump_str_param(dump_buf + offset,
2018 				     dump, "fw-image", fw_img_str);
2019 	offset += qed_dump_num_param(dump_buf + offset,
2020 				     dump,
2021 				     "fw-timestamp", fw_info.ver.timestamp);
2022 
2023 	return offset;
2024 }
2025 
2026 /* Reads the MFW version and writes it as a param to the specified buffer.
2027  * Returns the dumped size in dwords.
2028  */
2029 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
2030 				  struct qed_ptt *p_ptt,
2031 				  u32 *dump_buf, bool dump)
2032 {
2033 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
2034 
2035 	if (dump &&
2036 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
2037 		u32 global_section_offsize, global_section_addr, mfw_ver;
2038 		u32 public_data_addr, global_section_offsize_addr;
2039 
2040 		/* Find MCP public data GRC address. Needs to be ORed with
2041 		 * MCP_REG_SCRATCH due to a HW bug.
2042 		 */
2043 		public_data_addr = qed_rd(p_hwfn,
2044 					  p_ptt,
2045 					  MISC_REG_SHARED_MEM_ADDR) |
2046 				   MCP_REG_SCRATCH;
2047 
2048 		/* Find MCP public global section offset */
2049 		global_section_offsize_addr = public_data_addr +
2050 					      offsetof(struct mcp_public_data,
2051 						       sections) +
2052 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2053 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2054 						global_section_offsize_addr);
2055 		global_section_addr =
2056 			MCP_REG_SCRATCH +
2057 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2058 
2059 		/* Read MFW version from MCP public global section */
2060 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2061 				 global_section_addr +
2062 				 offsetof(struct public_global, mfw_ver));
2063 
2064 		/* Dump MFW version param */
2065 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2066 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2067 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2068 			DP_NOTICE(p_hwfn,
2069 				  "Unexpected debug error: invalid MFW version string\n");
2070 	}
2071 
2072 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2073 }
2074 
2075 /* Writes a section header to the specified buffer.
2076  * Returns the dumped size in dwords.
2077  */
2078 static u32 qed_dump_section_hdr(u32 *dump_buf,
2079 				bool dump, const char *name, u32 num_params)
2080 {
2081 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2082 }
2083 
2084 /* Writes the common global params to the specified buffer.
2085  * Returns the dumped size in dwords.
2086  */
2087 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2088 					 struct qed_ptt *p_ptt,
2089 					 u32 *dump_buf,
2090 					 bool dump,
2091 					 u8 num_specific_global_params)
2092 {
2093 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2094 	u32 offset = 0;
2095 	u8 num_params;
2096 
2097 	/* Dump global params section header */
2098 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2099 	offset += qed_dump_section_hdr(dump_buf + offset,
2100 				       dump, "global_params", num_params);
2101 
2102 	/* Store params */
2103 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2104 	offset += qed_dump_mfw_ver_param(p_hwfn,
2105 					 p_ptt, dump_buf + offset, dump);
2106 	offset += qed_dump_num_param(dump_buf + offset,
2107 				     dump, "tools-version", TOOLS_VERSION);
2108 	offset += qed_dump_str_param(dump_buf + offset,
2109 				     dump,
2110 				     "chip",
2111 				     s_chip_defs[dev_data->chip_id].name);
2112 	offset += qed_dump_str_param(dump_buf + offset,
2113 				     dump,
2114 				     "platform",
2115 				     s_platform_defs[dev_data->platform_id].
2116 				     name);
2117 	offset +=
2118 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2119 			       p_hwfn->abs_pf_id);
2120 
2121 	return offset;
2122 }
2123 
2124 /* Writes the "last" section (including CRC) to the specified buffer at the
2125  * given offset. Returns the dumped size in dwords.
2126  */
2127 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
2128 {
2129 	u32 start_offset = offset;
2130 
2131 	/* Dump CRC section header */
2132 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2133 
2134 	/* Calculate CRC32 and add it to the dword after the "last" section */
2135 	if (dump)
2136 		*(dump_buf + offset) = ~crc32(0xffffffff,
2137 					      (u8 *)dump_buf,
2138 					      DWORDS_TO_BYTES(offset));
2139 
2140 	offset++;
2141 
2142 	return offset - start_offset;
2143 }
2144 
/* Update blocks reset state */
2146 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2147 					  struct qed_ptt *p_ptt)
2148 {
2149 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2150 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2151 	u32 i;
2152 
2153 	/* Read reset registers */
2154 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2155 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2156 			reg_val[i] = qed_rd(p_hwfn,
2157 					    p_ptt, s_reset_regs_defs[i].addr);
2158 
2159 	/* Check if blocks are in reset */
2160 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2161 		struct block_defs *block = s_block_defs[i];
2162 
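		/* A block is considered in reset when it has a reset bit and
		 * that bit is cleared in its reset register.
		 */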
2163 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2164 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2165 	}
2166 }
2167 
2168 /* Enable / disable the Debug block */
2169 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2170 				     struct qed_ptt *p_ptt, bool enable)
2171 {
2172 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2173 }
2174 
2175 /* Resets the Debug block */
2176 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2177 				    struct qed_ptt *p_ptt)
2178 {
2179 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2180 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2181 
2182 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2183 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2184 	new_reset_reg_val =
2185 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2186 
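	/* Assert the Debug block reset (reset bit cleared = in reset), then
	 * restore the original register value to release it.
	 */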
2187 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2188 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2189 }
2190 
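/* Sets the Debug Bus framing mode */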
2191 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2192 				     struct qed_ptt *p_ptt,
2193 				     enum dbg_bus_frame_modes mode)
2194 {
2195 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2196 }
2197 
2198 /* Enable / disable Debug Bus clients according to the specified mask
2199  * (1 = enable, 0 = disable).
2200  */
2201 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2202 				   struct qed_ptt *p_ptt, u32 client_mask)
2203 {
2204 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2205 }
2206 
2207 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2208 {
2209 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2210 	bool arg1, arg2;
2211 	const u32 *ptr;
2212 	u8 tree_val;
2213 
2214 	/* Get next element from modes tree buffer */
2215 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2216 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2217 
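	/* Tree values below MAX_INIT_MODE_OPS are operators (NOT/OR/AND) that
	 * are evaluated recursively; other values are mode indices that are
	 * checked against the enabled modes.
	 */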
2218 	switch (tree_val) {
2219 	case INIT_MODE_OP_NOT:
2220 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2221 	case INIT_MODE_OP_OR:
2222 	case INIT_MODE_OP_AND:
2223 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2224 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2225 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2226 							arg2) : (arg1 && arg2);
2227 	default:
2228 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2229 	}
2230 }
2231 
2232 /* Returns true if the specified entity (indicated by GRC param) should be
2233  * included in the dump, false otherwise.
2234  */
2235 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2236 				enum dbg_grc_params grc_param)
2237 {
2238 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2239 }
2240 
/* Returns true if the specified Storm should be included in the dump, false
2242  * otherwise.
2243  */
2244 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2245 				      enum dbg_storms storm)
2246 {
2247 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2248 }
2249 
2250 /* Returns true if the specified memory should be included in the dump, false
2251  * otherwise.
2252  */
2253 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2254 				    enum block_id block_id, u8 mem_group_id)
2255 {
2256 	struct block_defs *block = s_block_defs[block_id];
2257 	u8 i;
2258 
2259 	/* Check Storm match */
2260 	if (block->associated_to_storm &&
2261 	    !qed_grc_is_storm_included(p_hwfn,
2262 				       (enum dbg_storms)block->storm_id))
2263 		return false;
2264 
2265 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2266 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2267 
2268 		if (mem_group_id == big_ram->mem_group_id ||
2269 		    mem_group_id == big_ram->ram_mem_group_id)
2270 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2271 	}
2272 
2273 	switch (mem_group_id) {
2274 	case MEM_GROUP_PXP_ILT:
2275 	case MEM_GROUP_PXP_MEM:
2276 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2277 	case MEM_GROUP_RAM:
2278 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2279 	case MEM_GROUP_PBUF:
2280 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2281 	case MEM_GROUP_CAU_MEM:
2282 	case MEM_GROUP_CAU_SB:
2283 	case MEM_GROUP_CAU_PI:
2284 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2285 	case MEM_GROUP_QM_MEM:
2286 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2287 	case MEM_GROUP_CFC_MEM:
2288 	case MEM_GROUP_CONN_CFC_MEM:
2289 	case MEM_GROUP_TASK_CFC_MEM:
2290 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
2291 		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
2292 	case MEM_GROUP_IGU_MEM:
2293 	case MEM_GROUP_IGU_MSIX:
2294 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2295 	case MEM_GROUP_MULD_MEM:
2296 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2297 	case MEM_GROUP_PRS_MEM:
2298 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2299 	case MEM_GROUP_DMAE_MEM:
2300 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2301 	case MEM_GROUP_TM_MEM:
2302 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2303 	case MEM_GROUP_SDM_MEM:
2304 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2305 	case MEM_GROUP_TDIF_CTX:
2306 	case MEM_GROUP_RDIF_CTX:
2307 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2308 	case MEM_GROUP_CM_MEM:
2309 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2310 	case MEM_GROUP_IOR:
2311 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2312 	default:
2313 		return true;
2314 	}
2315 }
2316 
/* Stalls or unstalls all Storms, based on the 'stall' argument */
2318 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2319 				 struct qed_ptt *p_ptt, bool stall)
2320 {
2321 	u32 reg_addr;
2322 	u8 storm_id;
2323 
2324 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2325 		if (!qed_grc_is_storm_included(p_hwfn,
2326 					       (enum dbg_storms)storm_id))
2327 			continue;
2328 
2329 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2330 		    SEM_FAST_REG_STALL_0_BB_K2;
2331 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2332 	}
2333 
2334 	msleep(STALL_DELAY_MS);
2335 }
2336 
2337 /* Takes all blocks out of reset */
2338 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2339 				   struct qed_ptt *p_ptt)
2340 {
2341 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2342 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2343 	u32 block_id, i;
2344 
2345 	/* Fill reset regs values */
2346 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2347 		struct block_defs *block = s_block_defs[block_id];
2348 
2349 		if (block->exists[dev_data->chip_id] && block->has_reset_bit &&
2350 		    block->unreset)
2351 			reg_val[block->reset_reg] |=
2352 			    BIT(block->reset_bit_offset);
2353 	}
2354 
2355 	/* Write reset registers */
2356 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2357 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2358 			continue;
2359 
2360 		reg_val[i] |=
2361 			s_reset_regs_defs[i].unreset_val[dev_data->chip_id];
2362 
2363 		if (reg_val[i])
2364 			qed_wr(p_hwfn,
2365 			       p_ptt,
2366 			       s_reset_regs_defs[i].addr +
2367 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2368 	}
2369 }
2370 
2371 /* Returns the attention block data of the specified block */
2372 static const struct dbg_attn_block_type_data *
2373 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2374 {
2375 	const struct dbg_attn_block *base_attn_block_arr =
2376 		(const struct dbg_attn_block *)
2377 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2378 
2379 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2380 }
2381 
2382 /* Returns the attention registers of the specified block */
2383 static const struct dbg_attn_reg *
2384 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2385 			u8 *num_attn_regs)
2386 {
2387 	const struct dbg_attn_block_type_data *block_type_data =
2388 		qed_get_block_attn_data(block_id, attn_type);
2389 
2390 	*num_attn_regs = block_type_data->num_regs;
2391 
2392 	return &((const struct dbg_attn_reg *)
2393 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2394 							  regs_offset];
2395 }
2396 
2397 /* For each block, clear the status of all parities */
2398 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2399 				   struct qed_ptt *p_ptt)
2400 {
2401 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2402 	const struct dbg_attn_reg *attn_reg_arr;
2403 	u8 reg_idx, num_attn_regs;
2404 	u32 block_id;
2405 
2406 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2407 		if (dev_data->block_in_reset[block_id])
2408 			continue;
2409 
2410 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2411 						       ATTN_TYPE_PARITY,
2412 						       &num_attn_regs);
2413 
2414 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2415 			const struct dbg_attn_reg *reg_data =
2416 				&attn_reg_arr[reg_idx];
2417 			u16 modes_buf_offset;
2418 			bool eval_mode;
2419 
2420 			/* Check mode */
2421 			eval_mode = GET_FIELD(reg_data->mode.data,
2422 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2423 			modes_buf_offset =
2424 				GET_FIELD(reg_data->mode.data,
2425 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2426 
2427 			/* If Mode match: clear parity status */
2428 			if (!eval_mode ||
2429 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2430 				qed_rd(p_hwfn, p_ptt,
2431 				       DWORDS_TO_BYTES(reg_data->
2432 						       sts_clr_address));
2433 		}
2434 	}
2435 }
2436 
2437 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2438  * The following parameters are dumped:
2439  * - count: no. of dumped entries
2440  * - split_type: split type
 * - split_id: split ID (dumped only if split_type != SPLIT_TYPE_NONE)
 * - param_name/param_val: user parameter name and value (dumped only if
 *		 both are not NULL).
2444  */
2445 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2446 				 bool dump,
2447 				 u32 num_reg_entries,
2448 				 enum init_split_types split_type,
2449 				 u8 split_id,
2450 				 const char *param_name, const char *param_val)
2451 {
2452 	u8 num_params = 2 +
2453 	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0);
2454 	u32 offset = 0;
2455 
2456 	offset += qed_dump_section_hdr(dump_buf + offset,
2457 				       dump, "grc_regs", num_params);
2458 	offset += qed_dump_num_param(dump_buf + offset,
2459 				     dump, "count", num_reg_entries);
2460 	offset += qed_dump_str_param(dump_buf + offset,
2461 				     dump, "split",
2462 				     s_split_type_defs[split_type].name);
2463 	if (split_type != SPLIT_TYPE_NONE)
2464 		offset += qed_dump_num_param(dump_buf + offset,
2465 					     dump, "id", split_id);
2466 	if (param_name && param_val)
2467 		offset += qed_dump_str_param(dump_buf + offset,
2468 					     dump, param_name, param_val);
2469 
2470 	return offset;
2471 }
2472 
2473 /* Reads the specified registers into the specified buffer.
2474  * The addr and len arguments are specified in dwords.
2475  */
2476 void qed_read_regs(struct qed_hwfn *p_hwfn,
2477 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
2478 {
2479 	u32 i;
2480 
2481 	for (i = 0; i < len; i++)
2482 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
2483 }
2484 
2485 /* Dumps the GRC registers in the specified address range.
2486  * Returns the dumped size in dwords.
2487  * The addr and len arguments are specified in dwords.
2488  */
2489 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2490 				   struct qed_ptt *p_ptt,
2491 				   u32 *dump_buf,
2492 				   bool dump, u32 addr, u32 len, bool wide_bus,
2493 				   enum init_split_types split_type,
2494 				   u8 split_id)
2495 {
2496 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2497 	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
2498 
2499 	if (!dump)
2500 		return len;
2501 
2502 	/* Print log if needed */
2503 	dev_data->num_regs_read += len;
2504 	if (dev_data->num_regs_read >=
2505 	    s_platform_defs[dev_data->platform_id].log_thresh) {
2506 		DP_VERBOSE(p_hwfn,
2507 			   QED_MSG_DEBUG,
2508 			   "Dumping %d registers...\n",
2509 			   dev_data->num_regs_read);
2510 		dev_data->num_regs_read = 0;
2511 	}
2512 
2513 	switch (split_type) {
2514 	case SPLIT_TYPE_PORT:
2515 		port_id = split_id;
2516 		break;
2517 	case SPLIT_TYPE_PF:
2518 		pf_id = split_id;
2519 		break;
2520 	case SPLIT_TYPE_PORT_PF:
2521 		port_id = split_id / dev_data->num_pfs_per_port;
2522 		pf_id = port_id + dev_data->num_ports *
2523 		    (split_id % dev_data->num_pfs_per_port);
2524 		break;
2525 	case SPLIT_TYPE_VF:
2526 		vf_id = split_id;
2527 		break;
2528 	default:
2529 		break;
2530 	}
2531 
	/* Try reading using DMAE. DMAE is used only for non-split reads that
	 * are either long or from a wide-bus memory.
	 */
2533 	if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE &&
2534 	    (len >= s_platform_defs[dev_data->platform_id].dmae_thresh ||
2535 	     wide_bus)) {
2536 		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
2537 				       (u64)(uintptr_t)(dump_buf), len, NULL))
2538 			return len;
2539 		dev_data->use_dmae = 0;
2540 		DP_VERBOSE(p_hwfn,
2541 			   QED_MSG_DEBUG,
2542 			   "Failed reading from chip using DMAE, using GRC instead\n");
2543 	}
2544 
2545 	/* If not read using DMAE, read using GRC */
2546 
	/* Set pretend. GRC pretend redirects register accesses to the
	 * specified port/PF/VF context, so it is reconfigured only when the
	 * requested split differs from the currently pretended one.
	 */
2548 	if (split_type != dev_data->pretend.split_type || split_id !=
2549 	    dev_data->pretend.split_id) {
2550 		switch (split_type) {
2551 		case SPLIT_TYPE_PORT:
2552 			qed_port_pretend(p_hwfn, p_ptt, port_id);
2553 			break;
2554 		case SPLIT_TYPE_PF:
2555 			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2556 			qed_fid_pretend(p_hwfn, p_ptt, fid);
2557 			break;
2558 		case SPLIT_TYPE_PORT_PF:
2559 			fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2560 			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2561 			break;
2562 		case SPLIT_TYPE_VF:
2563 			fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
2564 			      (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);
2565 			qed_fid_pretend(p_hwfn, p_ptt, fid);
2566 			break;
2567 		default:
2568 			break;
2569 		}
2570 
2571 		dev_data->pretend.split_type = (u8)split_type;
2572 		dev_data->pretend.split_id = split_id;
2573 	}
2574 
2575 	/* Read registers using GRC */
2576 	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2577 
2578 	return len;
2579 }
2580 
2581 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2582  * The addr and len arguments are specified in dwords.
2583  */
2584 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2585 				      bool dump, u32 addr, u32 len)
2586 {
2587 	if (dump)
2588 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2589 
2590 	return 1;
2591 }
2592 
2593 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2594  * The addr and len arguments are specified in dwords.
2595  */
2596 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2597 				  struct qed_ptt *p_ptt,
2598 				  u32 *dump_buf,
2599 				  bool dump, u32 addr, u32 len, bool wide_bus,
2600 				  enum init_split_types split_type, u8 split_id)
2601 {
2602 	u32 offset = 0;
2603 
2604 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2605 	offset += qed_grc_dump_addr_range(p_hwfn,
2606 					  p_ptt,
2607 					  dump_buf + offset,
2608 					  dump, addr, len, wide_bus,
2609 					  split_type, split_id);
2610 
2611 	return offset;
2612 }
2613 
2614 /* Dumps GRC registers sequence with skip cycle.
2615  * Returns the dumped size in dwords.
2616  * - addr:	start GRC address in dwords
2617  * - total_len:	total no. of dwords to dump
2618  * - read_len:	no. consecutive dwords to read
2619  * - skip_len:	no. of dwords to skip (and fill with zeros)
2620  */
2621 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2622 				       struct qed_ptt *p_ptt,
2623 				       u32 *dump_buf,
2624 				       bool dump,
2625 				       u32 addr,
2626 				       u32 total_len,
2627 				       u32 read_len, u32 skip_len)
2628 {
2629 	u32 offset = 0, reg_offset = 0;
2630 
2631 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2632 
2633 	if (!dump)
2634 		return offset + total_len;
2635 
2636 	while (reg_offset < total_len) {
2637 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2638 
2639 		offset += qed_grc_dump_addr_range(p_hwfn,
2640 						  p_ptt,
2641 						  dump_buf + offset,
2642 						  dump,  addr, curr_len, false,
2643 						  SPLIT_TYPE_NONE, 0);
2644 		reg_offset += curr_len;
2645 		addr += curr_len;
2646 
2647 		if (reg_offset < total_len) {
			curr_len = min_t(u32, skip_len,
					 total_len - reg_offset);
2649 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2650 			offset += curr_len;
2651 			reg_offset += curr_len;
2652 			addr += curr_len;
2653 		}
2654 	}
2655 
2656 	return offset;
2657 }
2658 
2659 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2660 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2661 				     struct qed_ptt *p_ptt,
2662 				     struct dbg_array input_regs_arr,
2663 				     u32 *dump_buf,
2664 				     bool dump,
2665 				     enum init_split_types split_type,
2666 				     u8 split_id,
2667 				     bool block_enable[MAX_BLOCK_ID],
2668 				     u32 *num_dumped_reg_entries)
2669 {
2670 	u32 i, offset = 0, input_offset = 0;
2671 	bool mode_match = true;
2672 
2673 	*num_dumped_reg_entries = 0;
2674 
2675 	while (input_offset < input_regs_arr.size_in_dwords) {
2676 		const struct dbg_dump_cond_hdr *cond_hdr =
2677 		    (const struct dbg_dump_cond_hdr *)
2678 		    &input_regs_arr.ptr[input_offset++];
2679 		u16 modes_buf_offset;
2680 		bool eval_mode;
2681 
2682 		/* Check mode/block */
2683 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2684 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2685 		if (eval_mode) {
2686 			modes_buf_offset =
2687 				GET_FIELD(cond_hdr->mode.data,
2688 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2689 			mode_match = qed_is_mode_match(p_hwfn,
2690 						       &modes_buf_offset);
2691 		}
2692 
2693 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2694 			input_offset += cond_hdr->data_size;
2695 			continue;
2696 		}
2697 
2698 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2699 			const struct dbg_dump_reg *reg =
2700 			    (const struct dbg_dump_reg *)
2701 			    &input_regs_arr.ptr[input_offset];
2702 			u32 addr, len;
2703 			bool wide_bus;
2704 
2705 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2706 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2707 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2708 			offset += qed_grc_dump_reg_entry(p_hwfn,
2709 							 p_ptt,
2710 							 dump_buf + offset,
2711 							 dump,
2712 							 addr,
2713 							 len,
2714 							 wide_bus,
2715 							 split_type, split_id);
2716 			(*num_dumped_reg_entries)++;
2717 		}
2718 	}
2719 
2720 	return offset;
2721 }
2722 
/* Dumps GRC registers entries for the specified split type and ID.
 * Returns the dumped size in dwords.
 */
2724 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2725 				   struct qed_ptt *p_ptt,
2726 				   struct dbg_array input_regs_arr,
2727 				   u32 *dump_buf,
2728 				   bool dump,
2729 				   bool block_enable[MAX_BLOCK_ID],
2730 				   enum init_split_types split_type,
2731 				   u8 split_id,
2732 				   const char *param_name,
2733 				   const char *param_val)
2734 {
2735 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2736 	enum init_split_types hdr_split_type = split_type;
2737 	u32 num_dumped_reg_entries, offset;
2738 	u8 hdr_split_id = split_id;
2739 
2740 	/* In PORT_PF split type, print a port split header */
2741 	if (split_type == SPLIT_TYPE_PORT_PF) {
2742 		hdr_split_type = SPLIT_TYPE_PORT;
2743 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2744 	}
2745 
2746 	/* Calculate register dump header size (and skip it for now) */
2747 	offset = qed_grc_dump_regs_hdr(dump_buf,
2748 				       false,
2749 				       0,
2750 				       hdr_split_type,
2751 				       hdr_split_id, param_name, param_val);
2752 
2753 	/* Dump registers */
2754 	offset += qed_grc_dump_regs_entries(p_hwfn,
2755 					    p_ptt,
2756 					    input_regs_arr,
2757 					    dump_buf + offset,
2758 					    dump,
2759 					    split_type,
2760 					    split_id,
2761 					    block_enable,
2762 					    &num_dumped_reg_entries);
2763 
2764 	/* Write register dump header */
2765 	if (dump && num_dumped_reg_entries > 0)
2766 		qed_grc_dump_regs_hdr(dump_buf,
2767 				      dump,
2768 				      num_dumped_reg_entries,
2769 				      hdr_split_type,
2770 				      hdr_split_id, param_name, param_val);
2771 
2772 	return num_dumped_reg_entries > 0 ? offset : 0;
2773 }
2774 
2775 /* Dumps registers according to the input registers array. Returns the dumped
2776  * size in dwords.
2777  */
2778 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2779 				  struct qed_ptt *p_ptt,
2780 				  u32 *dump_buf,
2781 				  bool dump,
2782 				  bool block_enable[MAX_BLOCK_ID],
2783 				  const char *param_name, const char *param_val)
2784 {
2785 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2786 	u32 offset = 0, input_offset = 0;
	u16 fid;

	while (input_offset <
2789 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2790 		const struct dbg_dump_split_hdr *split_hdr;
2791 		struct dbg_array curr_input_regs_arr;
2792 		enum init_split_types split_type;
2793 		u16 split_count = 0;
2794 		u32 split_data_size;
2795 		u8 split_id;
2796 
2797 		split_hdr =
2798 			(const struct dbg_dump_split_hdr *)
2799 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2800 		split_type =
2801 			GET_FIELD(split_hdr->hdr,
2802 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2803 		split_data_size =
2804 			GET_FIELD(split_hdr->hdr,
2805 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2806 		curr_input_regs_arr.ptr =
2807 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2808 		curr_input_regs_arr.size_in_dwords = split_data_size;
2809 
2810 		switch (split_type) {
2811 		case SPLIT_TYPE_NONE:
2812 			split_count = 1;
2813 			break;
2814 		case SPLIT_TYPE_PORT:
2815 			split_count = dev_data->num_ports;
2816 			break;
2817 		case SPLIT_TYPE_PF:
2818 		case SPLIT_TYPE_PORT_PF:
2819 			split_count = dev_data->num_ports *
2820 			    dev_data->num_pfs_per_port;
2821 			break;
2822 		case SPLIT_TYPE_VF:
2823 			split_count = dev_data->num_vfs;
2824 			break;
2825 		default:
2826 			return 0;
2827 		}
2828 
2829 		for (split_id = 0; split_id < split_count; split_id++)
2830 			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2831 							  curr_input_regs_arr,
2832 							  dump_buf + offset,
2833 							  dump, block_enable,
2834 							  split_type,
2835 							  split_id,
2836 							  param_name,
2837 							  param_val);
2838 
2839 		input_offset += split_data_size;
2840 	}
2841 
2842 	/* Cancel pretends (pretend to original PF) */
2843 	if (dump) {
2844 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2845 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2846 		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2847 		dev_data->pretend.split_id = 0;
2848 	}
2849 
2850 	return offset;
2851 }
2852 
2853 /* Dump reset registers. Returns the dumped size in dwords. */
2854 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2855 				   struct qed_ptt *p_ptt,
2856 				   u32 *dump_buf, bool dump)
2857 {
2858 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2859 	u32 i, offset = 0, num_regs = 0;
2860 
2861 	/* Calculate header size */
2862 	offset += qed_grc_dump_regs_hdr(dump_buf,
2863 					false, 0,
2864 					SPLIT_TYPE_NONE, 0, NULL, NULL);
2865 
2866 	/* Write reset registers */
2867 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2868 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2869 			continue;
2870 
2871 		offset += qed_grc_dump_reg_entry(p_hwfn,
2872 						 p_ptt,
2873 						 dump_buf + offset,
2874 						 dump,
2875 						 BYTES_TO_DWORDS
2876 						 (s_reset_regs_defs[i].addr), 1,
2877 						 false, SPLIT_TYPE_NONE, 0);
2878 		num_regs++;
2879 	}
2880 
2881 	/* Write header */
2882 	if (dump)
2883 		qed_grc_dump_regs_hdr(dump_buf,
2884 				      true, num_regs, SPLIT_TYPE_NONE,
2885 				      0, NULL, NULL);
2886 
2887 	return offset;
2888 }
2889 
2890 /* Dump registers that are modified during GRC Dump and therefore must be
2891  * dumped first. Returns the dumped size in dwords.
2892  */
2893 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2894 				      struct qed_ptt *p_ptt,
2895 				      u32 *dump_buf, bool dump)
2896 {
2897 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2898 	u32 block_id, offset = 0, num_reg_entries = 0;
2899 	const struct dbg_attn_reg *attn_reg_arr;
2900 	u8 storm_id, reg_idx, num_attn_regs;
2901 
2902 	/* Calculate header size */
2903 	offset += qed_grc_dump_regs_hdr(dump_buf,
2904 					false, 0, SPLIT_TYPE_NONE,
2905 					0, NULL, NULL);
2906 
2907 	/* Write parity registers */
2908 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2909 		if (dev_data->block_in_reset[block_id] && dump)
2910 			continue;
2911 
2912 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2913 						       ATTN_TYPE_PARITY,
2914 						       &num_attn_regs);
2915 
2916 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2917 			const struct dbg_attn_reg *reg_data =
2918 				&attn_reg_arr[reg_idx];
2919 			u16 modes_buf_offset;
2920 			bool eval_mode;
2921 			u32 addr;
2922 
2923 			/* Check mode */
2924 			eval_mode = GET_FIELD(reg_data->mode.data,
2925 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2926 			modes_buf_offset =
2927 				GET_FIELD(reg_data->mode.data,
2928 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2929 			if (eval_mode &&
2930 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2931 				continue;
2932 
			/* Mode match: dump the parity mask register, followed
			 * by the parity status register.
			 */
2934 			addr = reg_data->mask_address;
2935 			offset += qed_grc_dump_reg_entry(p_hwfn,
2936 							 p_ptt,
2937 							 dump_buf + offset,
2938 							 dump,
2939 							 addr,
2940 							 1, false,
2941 							 SPLIT_TYPE_NONE, 0);
2942 			addr = GET_FIELD(reg_data->data,
2943 					 DBG_ATTN_REG_STS_ADDRESS);
2944 			offset += qed_grc_dump_reg_entry(p_hwfn,
2945 							 p_ptt,
2946 							 dump_buf + offset,
2947 							 dump,
2948 							 addr,
2949 							 1, false,
2950 							 SPLIT_TYPE_NONE, 0);
2951 			num_reg_entries += 2;
2952 		}
2953 	}
2954 
2955 	/* Write Storm stall status registers */
2956 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2957 		struct storm_defs *storm = &s_storm_defs[storm_id];
2958 		u32 addr;
2959 
2960 		if (dev_data->block_in_reset[storm->block_id] && dump)
2961 			continue;
2962 
		addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				       SEM_FAST_REG_STALLED);
2966 		offset += qed_grc_dump_reg_entry(p_hwfn,
2967 						 p_ptt,
2968 						 dump_buf + offset,
2969 						 dump,
2970 						 addr,
2971 						 1,
2972 						 false, SPLIT_TYPE_NONE, 0);
2973 		num_reg_entries++;
2974 	}
2975 
2976 	/* Write header */
2977 	if (dump)
2978 		qed_grc_dump_regs_hdr(dump_buf,
2979 				      true,
2980 				      num_reg_entries, SPLIT_TYPE_NONE,
2981 				      0, NULL, NULL);
2982 
2983 	return offset;
2984 }
2985 
2986 /* Dumps registers that can't be represented in the debug arrays */
2987 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2988 				     struct qed_ptt *p_ptt,
2989 				     u32 *dump_buf, bool dump)
2990 {
2991 	u32 offset = 0, addr;
2992 
2993 	offset += qed_grc_dump_regs_hdr(dump_buf,
2994 					dump, 2, SPLIT_TYPE_NONE, 0,
2995 					NULL, NULL);
2996 
	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is skipped) */
3000 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
3001 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
3002 					      p_ptt,
3003 					      dump_buf + offset,
3004 					      dump,
3005 					      addr,
3006 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
3007 					      7,
3008 					      1);
3009 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
3010 	offset +=
3011 	    qed_grc_dump_reg_entry_skip(p_hwfn,
3012 					p_ptt,
3013 					dump_buf + offset,
3014 					dump,
3015 					addr,
3016 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
3017 					7,
3018 					1);
3019 
3020 	return offset;
3021 }
3022 
3023 /* Dumps a GRC memory header (section and params). Returns the dumped size in
3024  * dwords. The following parameters are dumped:
3025  * - name:	   dumped only if it's not NULL.
3026  * - addr:	   in dwords, dumped only if name is NULL.
3027  * - len:	   in dwords, always dumped.
3028  * - width:	   dumped if it's not zero.
 * - packed:	   dumped only if it's true.
3030  * - mem_group:	   always dumped.
3031  * - is_storm:	   true only if the memory is related to a Storm.
3032  * - storm_letter: valid only if is_storm is true.
3033  *
3034  */
3035 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
3036 				u32 *dump_buf,
3037 				bool dump,
3038 				const char *name,
3039 				u32 addr,
3040 				u32 len,
3041 				u32 bit_width,
3042 				bool packed,
3043 				const char *mem_group,
3044 				bool is_storm, char storm_letter)
3045 {
3046 	u8 num_params = 3;
3047 	u32 offset = 0;
3048 	char buf[64];
3049 
3050 	if (!len)
3051 		DP_NOTICE(p_hwfn,
3052 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
3053 
3054 	if (bit_width)
3055 		num_params++;
3056 	if (packed)
3057 		num_params++;
3058 
3059 	/* Dump section header */
3060 	offset += qed_dump_section_hdr(dump_buf + offset,
3061 				       dump, "grc_mem", num_params);
3062 
3063 	if (name) {
3064 		/* Dump name */
3065 		if (is_storm) {
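			/* Prefix the name with "<storm_letter>STORM_" */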
3066 			strcpy(buf, "?STORM_");
3067 			buf[0] = storm_letter;
3068 			strcpy(buf + strlen(buf), name);
3069 		} else {
3070 			strcpy(buf, name);
3071 		}
3072 
3073 		offset += qed_dump_str_param(dump_buf + offset,
3074 					     dump, "name", buf);
3075 	} else {
3076 		/* Dump address */
3077 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
3078 
3079 		offset += qed_dump_num_param(dump_buf + offset,
3080 					     dump, "addr", addr_in_bytes);
3081 	}
3082 
3083 	/* Dump len */
3084 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
3085 
3086 	/* Dump bit width */
3087 	if (bit_width)
3088 		offset += qed_dump_num_param(dump_buf + offset,
3089 					     dump, "width", bit_width);
3090 
3091 	/* Dump packed */
3092 	if (packed)
3093 		offset += qed_dump_num_param(dump_buf + offset,
3094 					     dump, "packed", 1);
3095 
3096 	/* Dump reg type */
3097 	if (is_storm) {
3098 		strcpy(buf, "?STORM_");
3099 		buf[0] = storm_letter;
3100 		strcpy(buf + strlen(buf), mem_group);
3101 	} else {
3102 		strcpy(buf, mem_group);
3103 	}
3104 
3105 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3106 
3107 	return offset;
3108 }
3109 
3110 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3111  * Returns the dumped size in dwords.
3112  * The addr and len arguments are specified in dwords.
3113  */
3114 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3115 			    struct qed_ptt *p_ptt,
3116 			    u32 *dump_buf,
3117 			    bool dump,
3118 			    const char *name,
3119 			    u32 addr,
3120 			    u32 len,
3121 			    bool wide_bus,
3122 			    u32 bit_width,
3123 			    bool packed,
3124 			    const char *mem_group,
3125 			    bool is_storm, char storm_letter)
3126 {
3127 	u32 offset = 0;
3128 
3129 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3130 				       dump_buf + offset,
3131 				       dump,
3132 				       name,
3133 				       addr,
3134 				       len,
3135 				       bit_width,
3136 				       packed,
3137 				       mem_group, is_storm, storm_letter);
3138 	offset += qed_grc_dump_addr_range(p_hwfn,
3139 					  p_ptt,
3140 					  dump_buf + offset,
3141 					  dump, addr, len, wide_bus,
3142 					  SPLIT_TYPE_NONE, 0);
3143 
3144 	return offset;
3145 }
3146 
3147 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3148 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3149 				    struct qed_ptt *p_ptt,
3150 				    struct dbg_array input_mems_arr,
3151 				    u32 *dump_buf, bool dump)
3152 {
3153 	u32 i, offset = 0, input_offset = 0;
3154 	bool mode_match = true;
3155 
3156 	while (input_offset < input_mems_arr.size_in_dwords) {
3157 		const struct dbg_dump_cond_hdr *cond_hdr;
3158 		u16 modes_buf_offset;
3159 		u32 num_entries;
3160 		bool eval_mode;
3161 
3162 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3163 			   &input_mems_arr.ptr[input_offset++];
3164 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3165 
3166 		/* Check required mode */
3167 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3168 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3169 		if (eval_mode) {
3170 			modes_buf_offset =
3171 				GET_FIELD(cond_hdr->mode.data,
3172 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3173 			mode_match = qed_is_mode_match(p_hwfn,
3174 						       &modes_buf_offset);
3175 		}
3176 
3177 		if (!mode_match) {
3178 			input_offset += cond_hdr->data_size;
3179 			continue;
3180 		}
3181 
3182 		for (i = 0; i < num_entries;
3183 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3184 			const struct dbg_dump_mem *mem =
3185 				(const struct dbg_dump_mem *)
3186 				&input_mems_arr.ptr[input_offset];
3187 			u8 mem_group_id = GET_FIELD(mem->dword0,
3188 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3189 			bool is_storm = false, mem_wide_bus;
3190 			enum dbg_grc_params grc_param;
3191 			char storm_letter = 'a';
3192 			enum block_id block_id;
3193 			u32 mem_addr, mem_len;
3194 
3195 			if (mem_group_id >= MEM_GROUPS_NUM) {
3196 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3197 				return 0;
3198 			}
3199 
3200 			block_id = (enum block_id)cond_hdr->block_id;
3201 			if (!qed_grc_is_mem_included(p_hwfn,
3202 						     block_id,
3203 						     mem_group_id))
3204 				continue;
3205 
3206 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3207 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3208 			mem_wide_bus = GET_FIELD(mem->dword1,
3209 						 DBG_DUMP_MEM_WIDE_BUS);
3210 
3211 			/* Update memory length for CCFC/TCFC memories
3212 			 * according to number of LCIDs/LTIDs.
3213 			 */
3214 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3215 				if (mem_len % MAX_LCIDS) {
3216 					DP_NOTICE(p_hwfn,
3217 						  "Invalid CCFC connection memory size\n");
3218 					return 0;
3219 				}
3220 
3221 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3222 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3223 					  (mem_len / MAX_LCIDS);
3224 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3225 				if (mem_len % MAX_LTIDS) {
3226 					DP_NOTICE(p_hwfn,
3227 						  "Invalid TCFC task memory size\n");
3228 					return 0;
3229 				}
3230 
3231 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3232 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3233 					  (mem_len / MAX_LTIDS);
3234 			}
3235 
3236 			/* If memory is associated with Storm, update Storm
3237 			 * details.
3238 			 */
3239 			if (s_block_defs
3240 			    [cond_hdr->block_id]->associated_to_storm) {
3241 				is_storm = true;
3242 				storm_letter =
3243 				    s_storm_defs[s_block_defs
3244 						 [cond_hdr->block_id]->
3245 						 storm_id].letter;
3246 			}
3247 
3248 			/* Dump memory */
3249 			offset += qed_grc_dump_mem(p_hwfn,
3250 						p_ptt,
3251 						dump_buf + offset,
3252 						dump,
3253 						NULL,
3254 						mem_addr,
3255 						mem_len,
3256 						mem_wide_bus,
3257 						0,
3258 						false,
3259 						s_mem_group_names[mem_group_id],
3260 						is_storm,
3261 						storm_letter);
3262 		}
3263 	}
3264 
3265 	return offset;
3266 }
3267 
3268 /* Dumps GRC memories according to the input array dump_mem.
3269  * Returns the dumped size in dwords.
3270  */
3271 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3272 				 struct qed_ptt *p_ptt,
3273 				 u32 *dump_buf, bool dump)
3274 {
3275 	u32 offset = 0, input_offset = 0;
3276 
3277 	while (input_offset <
3278 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3279 		const struct dbg_dump_split_hdr *split_hdr;
3280 		struct dbg_array curr_input_mems_arr;
3281 		enum init_split_types split_type;
3282 		u32 split_data_size;
3283 
3284 		split_hdr = (const struct dbg_dump_split_hdr *)
3285 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3286 		split_type =
3287 			GET_FIELD(split_hdr->hdr,
3288 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3289 		split_data_size =
3290 			GET_FIELD(split_hdr->hdr,
3291 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3292 		curr_input_mems_arr.ptr =
3293 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3294 		curr_input_mems_arr.size_in_dwords = split_data_size;
3295 
3296 		if (split_type == SPLIT_TYPE_NONE)
3297 			offset += qed_grc_dump_mem_entries(p_hwfn,
3298 							   p_ptt,
3299 							   curr_input_mems_arr,
3300 							   dump_buf + offset,
3301 							   dump);
3302 		else
3303 			DP_NOTICE(p_hwfn,
3304 				  "Dumping split memories is currently not supported\n");
3305 
3306 		input_offset += split_data_size;
3307 	}
3308 
3309 	return offset;
3310 }
3311 
3312 /* Dumps GRC context data for the specified Storm.
3313  * Returns the dumped size in dwords.
3314  * The lid_size argument is specified in quad-regs.
3315  */
3316 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3317 				 struct qed_ptt *p_ptt,
3318 				 u32 *dump_buf,
3319 				 bool dump,
3320 				 const char *name,
3321 				 u32 num_lids,
3322 				 u32 lid_size,
3323 				 u32 rd_reg_addr,
3324 				 u8 storm_id)
3325 {
3326 	struct storm_defs *storm = &s_storm_defs[storm_id];
3327 	u32 i, lid, total_size, offset = 0;
3328 
3329 	if (!lid_size)
3330 		return 0;
3331 
3332 	lid_size *= BYTES_IN_DWORD;
3333 	total_size = num_lids * lid_size;
3334 
3335 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3336 				       dump_buf + offset,
3337 				       dump,
3338 				       name,
3339 				       0,
3340 				       total_size,
3341 				       lid_size * 32,
3342 				       false, name, true, storm->letter);
3343 
3344 	if (!dump)
3345 		return offset + total_size;
3346 
3347 	/* Dump context data */
3348 	for (lid = 0; lid < num_lids; lid++) {
3349 		for (i = 0; i < lid_size; i++, offset++) {
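			/* Select the context line to read: dword index above
			 * the lid (low 9 bits).
			 */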
3350 			qed_wr(p_hwfn,
3351 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3352 			*(dump_buf + offset) = qed_rd(p_hwfn,
3353 						      p_ptt, rd_reg_addr);
3354 		}
3355 	}
3356 
3357 	return offset;
3358 }
3359 
3360 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3361 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3362 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3363 {
3364 	enum dbg_grc_params grc_param;
3365 	u32 offset = 0;
3366 	u8 storm_id;
3367 
3368 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3369 		struct storm_defs *storm = &s_storm_defs[storm_id];
3370 
3371 		if (!qed_grc_is_storm_included(p_hwfn,
3372 					       (enum dbg_storms)storm_id))
3373 			continue;
3374 
3375 		/* Dump Conn AG context size */
3376 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3377 		offset +=
3378 			qed_grc_dump_ctx_data(p_hwfn,
3379 					      p_ptt,
3380 					      dump_buf + offset,
3381 					      dump,
3382 					      "CONN_AG_CTX",
3383 					      qed_grc_get_param(p_hwfn,
3384 								grc_param),
3385 					      storm->cm_conn_ag_ctx_lid_size,
3386 					      storm->cm_conn_ag_ctx_rd_addr,
3387 					      storm_id);
3388 
3389 		/* Dump Conn ST context size */
3390 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3391 		offset +=
3392 			qed_grc_dump_ctx_data(p_hwfn,
3393 					      p_ptt,
3394 					      dump_buf + offset,
3395 					      dump,
3396 					      "CONN_ST_CTX",
3397 					      qed_grc_get_param(p_hwfn,
3398 								grc_param),
3399 					      storm->cm_conn_st_ctx_lid_size,
3400 					      storm->cm_conn_st_ctx_rd_addr,
3401 					      storm_id);
3402 
3403 		/* Dump Task AG context size */
3404 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3405 		offset +=
3406 			qed_grc_dump_ctx_data(p_hwfn,
3407 					      p_ptt,
3408 					      dump_buf + offset,
3409 					      dump,
3410 					      "TASK_AG_CTX",
3411 					      qed_grc_get_param(p_hwfn,
3412 								grc_param),
3413 					      storm->cm_task_ag_ctx_lid_size,
3414 					      storm->cm_task_ag_ctx_rd_addr,
3415 					      storm_id);
3416 
3417 		/* Dump Task ST context size */
3418 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3419 		offset +=
3420 			qed_grc_dump_ctx_data(p_hwfn,
3421 					      p_ptt,
3422 					      dump_buf + offset,
3423 					      dump,
3424 					      "TASK_ST_CTX",
3425 					      qed_grc_get_param(p_hwfn,
3426 								grc_param),
3427 					      storm->cm_task_st_ctx_lid_size,
3428 					      storm->cm_task_st_ctx_rd_addr,
3429 					      storm_id);
3430 	}
3431 
3432 	return offset;
3433 }
3434 
3435 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3436 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3437 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3438 {
3439 	char buf[10] = "IOR_SET_?";
3440 	u32 addr, offset = 0;
3441 	u8 storm_id, set_id;
3442 
3443 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3444 		struct storm_defs *storm = &s_storm_defs[storm_id];
3445 
3446 		if (!qed_grc_is_storm_included(p_hwfn,
3447 					       (enum dbg_storms)storm_id))
3448 			continue;
3449 
3450 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3451 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3452 					       SEM_FAST_REG_STORM_REG_FILE) +
3453 			       IOR_SET_OFFSET(set_id);
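			/* Replace the trailing '?' in "IOR_SET_?" with the
			 * set index.
			 */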
3454 			if (strlen(buf) > 0)
3455 				buf[strlen(buf) - 1] = '0' + set_id;
3456 			offset += qed_grc_dump_mem(p_hwfn,
3457 						   p_ptt,
3458 						   dump_buf + offset,
3459 						   dump,
3460 						   buf,
3461 						   addr,
3462 						   IORS_PER_SET,
3463 						   false,
3464 						   32,
3465 						   false,
3466 						   "ior",
3467 						   true,
3468 						   storm->letter);
3469 		}
3470 	}
3471 
3472 	return offset;
3473 }
3474 
3475 /* Dump VFC CAM. Returns the dumped size in dwords. */
3476 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3477 				struct qed_ptt *p_ptt,
3478 				u32 *dump_buf, bool dump, u8 storm_id)
3479 {
3480 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3481 	struct storm_defs *storm = &s_storm_defs[storm_id];
3482 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3483 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3484 	u32 row, i, offset = 0;
3485 
3486 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3487 				       dump_buf + offset,
3488 				       dump,
3489 				       "vfc_cam",
3490 				       0,
3491 				       total_size,
3492 				       256,
3493 				       false, "vfc_cam", true, storm->letter);
3494 
3495 	if (!dump)
3496 		return offset + total_size;
3497 
3498 	/* Prepare CAM address */
3499 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3500 
3501 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3502 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3503 		/* Write VFC CAM command */
3504 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3505 		ARR_REG_WR(p_hwfn,
3506 			   p_ptt,
3507 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3508 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3509 
3510 		/* Write VFC CAM address */
3511 		ARR_REG_WR(p_hwfn,
3512 			   p_ptt,
3513 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3514 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3515 
3516 		/* Read VFC CAM read response */
3517 		ARR_REG_RD(p_hwfn,
3518 			   p_ptt,
3519 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3520 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3521 	}
3522 
3523 	return offset;
3524 }
3525 
3526 /* Dump VFC RAM. Returns the dumped size in dwords. */
3527 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3528 				struct qed_ptt *p_ptt,
3529 				u32 *dump_buf,
3530 				bool dump,
3531 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3532 {
3533 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3534 	struct storm_defs *storm = &s_storm_defs[storm_id];
3535 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3536 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3537 	u32 row, i, offset = 0;
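	/* Note: i is used by the ARR_REG_WR/ARR_REG_RD macros below */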
3538 
3539 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3540 				       dump_buf + offset,
3541 				       dump,
3542 				       ram_defs->mem_name,
3543 				       0,
3544 				       total_size,
3545 				       256,
3546 				       false,
3547 				       ram_defs->type_name,
3548 				       true, storm->letter);
3549 
3550 	/* Prepare RAM address */
3551 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3552 
3553 	if (!dump)
3554 		return offset + total_size;
3555 
3556 	for (row = ram_defs->base_row;
3557 	     row < ram_defs->base_row + ram_defs->num_rows;
3558 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3559 		/* Write VFC RAM command */
3560 		ARR_REG_WR(p_hwfn,
3561 			   p_ptt,
3562 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3563 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3564 
3565 		/* Write VFC RAM address */
3566 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3567 		ARR_REG_WR(p_hwfn,
3568 			   p_ptt,
3569 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3570 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3571 
3572 		/* Read VFC RAM read response */
3573 		ARR_REG_RD(p_hwfn,
3574 			   p_ptt,
3575 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3576 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3577 	}
3578 
3579 	return offset;
3580 }
3581 
3582 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3583 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3584 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3585 {
3586 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3587 	u8 storm_id, i;
3588 	u32 offset = 0;
3589 
3590 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3591 		if (!qed_grc_is_storm_included(p_hwfn,
3592 					       (enum dbg_storms)storm_id) ||
3593 		    !s_storm_defs[storm_id].has_vfc ||
3594 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3595 		     PLATFORM_ASIC))
3596 			continue;
3597 
3598 		/* Read CAM */
3599 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3600 					       p_ptt,
3601 					       dump_buf + offset,
3602 					       dump, storm_id);
3603 
3604 		/* Read RAM */
3605 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3606 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3607 						       p_ptt,
3608 						       dump_buf + offset,
3609 						       dump,
3610 						       storm_id,
3611 						       &s_vfc_ram_defs[i]);
3612 	}
3613 
3614 	return offset;
3615 }
3616 
3617 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3618 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3619 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3620 {
3621 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3622 	u32 offset = 0;
3623 	u8 rss_mem_id;
3624 
3625 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3626 		u32 rss_addr, num_entries, total_dwords;
3627 		struct rss_mem_defs *rss_defs;
3628 		u32 addr, num_dwords_to_read;
3629 		bool packed;
3630 
3631 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3632 		rss_addr = rss_defs->addr;
3633 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3634 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3635 		packed = (rss_defs->entry_width == 16);
3636 
3637 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3638 					       dump_buf + offset,
3639 					       dump,
3640 					       rss_defs->mem_name,
3641 					       0,
3642 					       total_dwords,
3643 					       rss_defs->entry_width,
3644 					       packed,
3645 					       rss_defs->type_name, false, 0);
3646 
3647 		/* Dump RSS data */
3648 		if (!dump) {
3649 			offset += total_dwords;
3650 			continue;
3651 		}
3652 
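		/* The RSS RAM is read indirectly: write the RAM address to
		 * RSS_REG_RSS_RAM_ADDR, then read the data registers.
		 */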
3653 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3654 		while (total_dwords) {
3655 			num_dwords_to_read = min_t(u32,
3656 						   RSS_REG_RSS_RAM_DATA_SIZE,
3657 						   total_dwords);
3658 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3659 			offset += qed_grc_dump_addr_range(p_hwfn,
3660 							  p_ptt,
3661 							  dump_buf + offset,
3662 							  dump,
3663 							  addr,
3664 							  num_dwords_to_read,
3665 							  false,
3666 							  SPLIT_TYPE_NONE, 0);
3667 			total_dwords -= num_dwords_to_read;
3668 			rss_addr++;
3669 		}
3670 	}
3671 
3672 	return offset;
3673 }
3674 
3675 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3676 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3677 				struct qed_ptt *p_ptt,
3678 				u32 *dump_buf, bool dump, u8 big_ram_id)
3679 {
3680 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3681 	u32 block_size, ram_size, offset = 0, reg_val, i;
3682 	char mem_name[12] = "???_BIG_RAM";
3683 	char type_name[8] = "???_RAM";
3684 	struct big_ram_defs *big_ram;
3685 
3686 	big_ram = &s_big_ram_defs[big_ram_id];
3687 	ram_size = big_ram->ram_size[dev_data->chip_id];
3688 
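	/* The is_256b register bit selects whether the Big RAM is organized
	 * in 256-byte or 128-byte blocks.
	 */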
3689 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3690 	block_size = reg_val &
3691 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3692 									 : 128;
3693 
3694 	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3695 	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3696 
3697 	/* Dump memory header */
3698 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3699 				       dump_buf + offset,
3700 				       dump,
3701 				       mem_name,
3702 				       0,
3703 				       ram_size,
3704 				       block_size * 8,
3705 				       false, type_name, false, 0);
3706 
3707 	/* Read and dump Big RAM data */
3708 	if (!dump)
3709 		return offset + ram_size;
3710 
3711 	/* Dump Big RAM */
3712 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3713 	     i++) {
3714 		u32 addr, len;
3715 
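		/* Select the next Big RAM window and read its data */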
3716 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3717 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3718 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3719 		offset += qed_grc_dump_addr_range(p_hwfn,
3720 						  p_ptt,
3721 						  dump_buf + offset,
3722 						  dump,
3723 						  addr,
3724 						  len,
3725 						  false, SPLIT_TYPE_NONE, 0);
3726 	}
3727 
3728 	return offset;
3729 }
3730 
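/* Dumps GRC MCP data. Returns the dumped size in dwords. */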
3731 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3732 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3733 {
3734 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3735 	u32 offset = 0, addr;
3736 	bool halted = false;
3737 
3738 	/* Halt MCP */
3739 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3740 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3741 		if (!halted)
3742 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3743 	}
3744 
3745 	/* Dump MCP scratchpad */
3746 	offset += qed_grc_dump_mem(p_hwfn,
3747 				   p_ptt,
3748 				   dump_buf + offset,
3749 				   dump,
3750 				   NULL,
3751 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3752 				   MCP_REG_SCRATCH_SIZE_BB_K2,
3753 				   false, 0, false, "MCP", false, 0);
3754 
3755 	/* Dump MCP cpu_reg_file */
3756 	offset += qed_grc_dump_mem(p_hwfn,
3757 				   p_ptt,
3758 				   dump_buf + offset,
3759 				   dump,
3760 				   NULL,
3761 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3762 				   MCP_REG_CPU_REG_FILE_SIZE,
3763 				   false, 0, false, "MCP", false, 0);
3764 
3765 	/* Dump MCP registers */
3766 	block_enable[BLOCK_MCP] = true;
3767 	offset += qed_grc_dump_registers(p_hwfn,
3768 					 p_ptt,
3769 					 dump_buf + offset,
3770 					 dump, block_enable, "block", "MCP");
3771 
3772 	/* Dump required non-MCP registers */
3773 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3774 					dump, 1, SPLIT_TYPE_NONE, 0,
3775 					"block", "MCP");
3776 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3777 	offset += qed_grc_dump_reg_entry(p_hwfn,
3778 					 p_ptt,
3779 					 dump_buf + offset,
3780 					 dump,
3781 					 addr,
3782 					 1,
3783 					 false, SPLIT_TYPE_NONE, 0);
3784 
3785 	/* Release MCP */
3786 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3787 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3788 
3789 	return offset;
3790 }
3791 
3792 /* Dumps the tbus indirect memory for all PHYs. Returns the dumped size in dwords. */
3793 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3794 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3795 {
3796 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3797 	char mem_name[32];
3798 	u8 phy_id;
3799 
3800 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3801 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3802 		struct phy_defs *phy_defs;
3803 		u8 *bytes_buf;
3804 
3805 		phy_defs = &s_phy_defs[phy_id];
3806 		addr_lo_addr = phy_defs->base_addr +
3807 			       phy_defs->tbus_addr_lo_addr;
3808 		addr_hi_addr = phy_defs->base_addr +
3809 			       phy_defs->tbus_addr_hi_addr;
3810 		data_lo_addr = phy_defs->base_addr +
3811 			       phy_defs->tbus_data_lo_addr;
3812 		data_hi_addr = phy_defs->base_addr +
3813 			       phy_defs->tbus_data_hi_addr;
3814 
3815 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3816 			     phy_defs->phy_name) < 0)
3817 			DP_NOTICE(p_hwfn,
3818 				  "Unexpected debug error: invalid PHY memory name\n");
3819 
3820 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3821 					       dump_buf + offset,
3822 					       dump,
3823 					       mem_name,
3824 					       0,
3825 					       PHY_DUMP_SIZE_DWORDS,
3826 					       16, true, mem_name, false, 0);
3827 
3828 		if (!dump) {
3829 			offset += PHY_DUMP_SIZE_DWORDS;
3830 			continue;
3831 		}
3832 
3833 		bytes_buf = (u8 *)(dump_buf + offset);
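		/* For each tbus address (hi:lo pair), the low and high data
		 * bytes are read separately.
		 */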
3834 		for (tbus_hi_offset = 0;
3835 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3836 		     tbus_hi_offset++) {
3837 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3838 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3839 			     tbus_lo_offset++) {
3840 				qed_wr(p_hwfn,
3841 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3842 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3843 							    p_ptt,
3844 							    data_lo_addr);
3845 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3846 							    p_ptt,
3847 							    data_hi_addr);
3848 			}
3849 		}
3850 
3851 		offset += PHY_DUMP_SIZE_DWORDS;
3852 	}
3853 
3854 	return offset;
3855 }
3856 
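/* Configures the debug line of the specified block: line selection, enable
 * mask, right shift and force valid/frame masks.
 */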
3857 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3858 				struct qed_ptt *p_ptt,
3859 				enum block_id block_id,
3860 				u8 line_id,
3861 				u8 enable_mask,
3862 				u8 right_shift,
3863 				u8 force_valid_mask, u8 force_frame_mask)
3864 {
3865 	struct block_defs *block = s_block_defs[block_id];
3866 
3867 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3868 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3869 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3870 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3871 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3872 }
3873 
3874 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3875 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3876 				     struct qed_ptt *p_ptt,
3877 				     u32 *dump_buf, bool dump)
3878 {
3879 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3880 	u32 block_id, line_id, offset = 0;
3881 
3882 	/* Don't dump static debug if a debug bus recording is in progress */
3883 	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3884 		return 0;
3885 
3886 	if (dump) {
3887 		/* Disable all blocks debug output */
3888 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3889 			struct block_defs *block = s_block_defs[block_id];
3890 
3891 			if (block->dbg_client_id[dev_data->chip_id] !=
3892 			    MAX_DBG_BUS_CLIENTS)
3893 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3894 				       0);
3895 		}
3896 
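		/* Reset the debug block and configure it to output to the
		 * internal buffer in full mode.
		 */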
3897 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3898 		qed_bus_set_framing_mode(p_hwfn,
3899 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3900 		qed_wr(p_hwfn,
3901 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3902 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3903 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3904 	}
3905 
3906 	/* Dump all static debug lines for each relevant block */
3907 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3908 		struct block_defs *block = s_block_defs[block_id];
3909 		struct dbg_bus_block *block_desc;
3910 		u32 block_dwords, addr, len;
3911 		u8 dbg_client_id;
3912 
3913 		if (block->dbg_client_id[dev_data->chip_id] ==
3914 		    MAX_DBG_BUS_CLIENTS)
3915 			continue;
3916 
3917 		block_desc = get_dbg_bus_block_desc(p_hwfn,
3918 						    (enum block_id)block_id);
3919 		block_dwords = NUM_DBG_LINES(block_desc) *
3920 			       STATIC_DEBUG_LINE_DWORDS;
3921 
3922 		/* Dump static section params */
3923 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3924 					       dump_buf + offset,
3925 					       dump,
3926 					       block->name,
3927 					       0,
3928 					       block_dwords,
3929 					       32, false, "STATIC", false, 0);
3930 
3931 		if (!dump) {
3932 			offset += block_dwords;
3933 			continue;
3934 		}
3935 
3936 		/* If all lines are invalid - dump zeros */
3937 		if (dev_data->block_in_reset[block_id]) {
3938 			memset(dump_buf + offset, 0,
3939 			       DWORDS_TO_BYTES(block_dwords));
3940 			offset += block_dwords;
3941 			continue;
3942 		}
3943 
3944 		/* Enable block's client */
3945 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3946 		qed_bus_enable_clients(p_hwfn,
3947 				       p_ptt,
3948 				       BIT(dbg_client_id));
3949 
3950 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3951 		len = STATIC_DEBUG_LINE_DWORDS;
3952 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3953 		     line_id++) {
3954 			/* Configure debug line ID */
3955 			qed_config_dbg_line(p_hwfn,
3956 					    p_ptt,
3957 					    (enum block_id)block_id,
3958 					    (u8)line_id, 0xf, 0, 0, 0);
3959 
3960 			/* Read debug line info */
3961 			offset += qed_grc_dump_addr_range(p_hwfn,
3962 							  p_ptt,
3963 							  dump_buf + offset,
3964 							  dump,
3965 							  addr,
3966 							  len,
3967 							  true, SPLIT_TYPE_NONE,
3968 							  0);
3969 		}
3970 
3971 		/* Disable block's client and debug output */
3972 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3973 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3974 	}
3975 
3976 	if (dump) {
3977 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3978 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3979 	}
3980 
3981 	return offset;
3982 }
3983 
3984 /* Performs GRC Dump to the specified buffer. Returns a debug status; the
3985  * dumped size in dwords is returned via num_dumped_dwords.
3986  */
3987 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3988 				    struct qed_ptt *p_ptt,
3989 				    u32 *dump_buf,
3990 				    bool dump, u32 *num_dumped_dwords)
3991 {
3992 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3993 	bool parities_masked = false;
3994 	u32 offset = 0;
3995 	u8 i;
3996 
3997 	*num_dumped_dwords = 0;
3998 	dev_data->num_regs_read = 0;
3999 
4000 	/* Update reset state */
4001 	if (dump)
4002 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4003 
4004 	/* Dump global params */
4005 	offset += qed_dump_common_global_params(p_hwfn,
4006 						p_ptt,
4007 						dump_buf + offset, dump, 4);
4008 	offset += qed_dump_str_param(dump_buf + offset,
4009 				     dump, "dump-type", "grc-dump");
4010 	offset += qed_dump_num_param(dump_buf + offset,
4011 				     dump,
4012 				     "num-lcids",
4013 				     qed_grc_get_param(p_hwfn,
4014 						DBG_GRC_PARAM_NUM_LCIDS));
4015 	offset += qed_dump_num_param(dump_buf + offset,
4016 				     dump,
4017 				     "num-ltids",
4018 				     qed_grc_get_param(p_hwfn,
4019 						DBG_GRC_PARAM_NUM_LTIDS));
4020 	offset += qed_dump_num_param(dump_buf + offset,
4021 				     dump, "num-ports", dev_data->num_ports);
4022 
4023 	/* Dump reset registers (dumped before taking blocks out of reset) */
4024 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4025 		offset += qed_grc_dump_reset_regs(p_hwfn,
4026 						  p_ptt,
4027 						  dump_buf + offset, dump);
4028 
4029 	/* Take all blocks out of reset (using reset registers) */
4030 	if (dump) {
4031 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
4032 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
4033 	}
4034 
4035 	/* Disable all parities using MFW command */
4036 	if (dump &&
4037 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
4038 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
4039 		if (!parities_masked) {
4040 			DP_NOTICE(p_hwfn,
4041 				  "Failed to mask parities using MFW\n");
4042 			if (qed_grc_get_param(p_hwfn,
4043 					      DBG_GRC_PARAM_PARITY_SAFE))
4044 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
4045 		}
4046 	}
4047 
4048 	/* Dump modified registers (dumped before modifying them) */
4049 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
4050 		offset += qed_grc_dump_modified_regs(p_hwfn,
4051 						     p_ptt,
4052 						     dump_buf + offset, dump);
4053 
4054 	/* Stall storms */
4055 	if (dump &&
4056 	    (qed_grc_is_included(p_hwfn,
4057 				 DBG_GRC_PARAM_DUMP_IOR) ||
4058 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
4059 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
4060 
4061 	/* Dump all regs */
4062 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
4063 		bool block_enable[MAX_BLOCK_ID];
4064 
4065 		/* Dump all blocks except MCP */
4066 		for (i = 0; i < MAX_BLOCK_ID; i++)
4067 			block_enable[i] = true;
4068 		block_enable[BLOCK_MCP] = false;
4069 		offset += qed_grc_dump_registers(p_hwfn,
4070 						 p_ptt,
4071 						 dump_buf + offset,
4072 						 dump,
4073 						 block_enable,
4074 						 NULL, NULL);
4075 
4076 		/* Dump special registers */
4077 		offset += qed_grc_dump_special_regs(p_hwfn,
4078 						    p_ptt,
4079 						    dump_buf + offset, dump);
4080 	}
4081 
4082 	/* Dump memories */
4083 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
4084 
4085 	/* Dump MCP */
4086 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4087 		offset += qed_grc_dump_mcp(p_hwfn,
4088 					   p_ptt, dump_buf + offset, dump);
4089 
4090 	/* Dump context */
4091 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4092 		offset += qed_grc_dump_ctx(p_hwfn,
4093 					   p_ptt, dump_buf + offset, dump);
4094 
4095 	/* Dump RSS memories */
4096 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4097 		offset += qed_grc_dump_rss(p_hwfn,
4098 					   p_ptt, dump_buf + offset, dump);
4099 
4100 	/* Dump Big RAM */
4101 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4102 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4103 			offset += qed_grc_dump_big_ram(p_hwfn,
4104 						       p_ptt,
4105 						       dump_buf + offset,
4106 						       dump, i);
4107 
4108 	/* Dump IORs */
4109 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4110 		offset += qed_grc_dump_iors(p_hwfn,
4111 					    p_ptt, dump_buf + offset, dump);
4112 
4113 	/* Dump VFC */
4114 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4115 		offset += qed_grc_dump_vfc(p_hwfn,
4116 					   p_ptt, dump_buf + offset, dump);
4117 
4118 	/* Dump PHY tbus */
4119 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PHY) &&
4120 	    dev_data->chip_id == CHIP_K2 &&
4121 	    dev_data->platform_id == PLATFORM_ASIC)
4122 		offset += qed_grc_dump_phy(p_hwfn,
4123 					   p_ptt, dump_buf + offset, dump);
4124 
4125 	/* Dump static debug data (only if not during debug bus recording) */
4126 	if (qed_grc_is_included(p_hwfn,
4127 				DBG_GRC_PARAM_DUMP_STATIC) &&
4128 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
4129 		offset += qed_grc_dump_static_debug(p_hwfn,
4130 						    p_ptt,
4131 						    dump_buf + offset, dump);
4132 
4133 	/* Dump last section */
4134 	offset += qed_dump_last_section(dump_buf, offset, dump);
4135 
4136 	if (dump) {
4137 		/* Unstall storms */
4138 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4139 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4140 
4141 		/* Clear parity status */
4142 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4143 
4144 		/* Enable all parities using MFW command */
4145 		if (parities_masked)
4146 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4147 	}
4148 
4149 	*num_dumped_dwords = offset;
4150 
4151 	return DBG_STATUS_OK;
4152 }
4153 
4154 /* Writes the specified failing Idle Check rule to the specified buffer.
4155  * Returns the dumped size in dwords.
4156  */
4157 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4158 				     struct qed_ptt *p_ptt,
4159 				     u32 *dump_buf,
4160 				     bool dump,
4162 				     u16 rule_id,
4163 				     const struct dbg_idle_chk_rule *rule,
4164 				     u16 fail_entry_id, u32 *cond_reg_values)
4165 {
4166 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4167 	const struct dbg_idle_chk_cond_reg *cond_regs;
4168 	const struct dbg_idle_chk_info_reg *info_regs;
4169 	u32 i, next_reg_offset = 0, offset = 0;
4170 	struct dbg_idle_chk_result_hdr *hdr;
4171 	const union dbg_idle_chk_reg *regs;
4172 	u8 reg_id;
4173 
4174 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4175 	regs = &((const union dbg_idle_chk_reg *)
4176 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4177 	cond_regs = &regs[0].cond_reg;
4178 	info_regs = &regs[rule->num_cond_regs].info_reg;
4179 
4180 	/* Dump rule data */
4181 	if (dump) {
4182 		memset(hdr, 0, sizeof(*hdr));
4183 		hdr->rule_id = rule_id;
4184 		hdr->mem_entry_id = fail_entry_id;
4185 		hdr->severity = rule->severity;
4186 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4187 	}
4188 
4189 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4190 
4191 	/* Dump condition register values */
4192 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4193 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4194 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4195 
4196 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4197 			  (dump_buf + offset);
4198 
4199 		/* Write register header */
4200 		if (!dump) {
4201 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4202 			    reg->entry_size;
4203 			continue;
4204 		}
4205 
4206 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4207 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4208 		reg_hdr->start_entry = reg->start_entry;
4209 		reg_hdr->size = reg->entry_size;
4210 		SET_FIELD(reg_hdr->data,
4211 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4212 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4213 		SET_FIELD(reg_hdr->data,
4214 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4215 
4216 		/* Write register values */
4217 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4218 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4219 	}
4220 
4221 	/* Dump info register values */
4222 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4223 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4224 		u32 block_id;
4225 
4226 		/* Check if register's block is in reset */
4227 		if (!dump) {
4228 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4229 			continue;
4230 		}
4231 
4232 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4233 		if (block_id >= MAX_BLOCK_ID) {
4234 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4235 			return 0;
4236 		}
4237 
4238 		if (!dev_data->block_in_reset[block_id]) {
4239 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4240 			bool wide_bus, eval_mode, mode_match = true;
4241 			u16 modes_buf_offset;
4242 			u32 addr;
4243 
4244 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4245 				  (dump_buf + offset);
4246 
4247 			/* Check mode */
4248 			eval_mode = GET_FIELD(reg->mode.data,
4249 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4250 			if (eval_mode) {
4251 				modes_buf_offset =
4252 				    GET_FIELD(reg->mode.data,
4253 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4254 				mode_match =
4255 					qed_is_mode_match(p_hwfn,
4256 							  &modes_buf_offset);
4257 			}
4258 
4259 			if (!mode_match)
4260 				continue;
4261 
4262 			addr = GET_FIELD(reg->data,
4263 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4264 			wide_bus = GET_FIELD(reg->data,
4265 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4266 
4267 			/* Write register header */
4268 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4269 			hdr->num_dumped_info_regs++;
4270 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4271 			reg_hdr->size = reg->size;
4272 			SET_FIELD(reg_hdr->data,
4273 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4274 				  rule->num_cond_regs + reg_id);
4275 
4276 			/* Write register values */
4277 			offset += qed_grc_dump_addr_range(p_hwfn,
4278 							  p_ptt,
4279 							  dump_buf + offset,
4280 							  dump,
4281 							  addr,
4282 							  reg->size, wide_bus,
4283 							  SPLIT_TYPE_NONE, 0);
4284 		}
4285 	}
4286 
4287 	return offset;
4288 }
4289 
4290 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4291 static u32
4292 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4293 			       u32 *dump_buf, bool dump,
4294 			       const struct dbg_idle_chk_rule *input_rules,
4295 			       u32 num_input_rules, u32 *num_failing_rules)
4296 {
4297 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4298 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4299 	u32 i, offset = 0;
4300 	u16 entry_id;
4301 	u8 reg_id;
4302 
4303 	*num_failing_rules = 0;
4304 
4305 	for (i = 0; i < num_input_rules; i++) {
4306 		const struct dbg_idle_chk_cond_reg *cond_regs;
4307 		const struct dbg_idle_chk_rule *rule;
4308 		const union dbg_idle_chk_reg *regs;
4309 		u16 num_reg_entries = 1;
4310 		bool check_rule = true;
4311 		const u32 *imm_values;
4312 
4313 		rule = &input_rules[i];
4314 		regs = &((const union dbg_idle_chk_reg *)
4315 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4316 			[rule->reg_offset];
4317 		cond_regs = &regs[0].cond_reg;
4318 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4319 			     [rule->imm_offset];
4320 
4321 		/* Check if all condition register blocks are out of reset, and
4322 		 * find maximal number of entries (all condition registers that
4323 		 * are memories must have the same size, which is > 1).
4324 		 */
4325 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4326 		     reg_id++) {
4327 			u32 block_id =
4328 				GET_FIELD(cond_regs[reg_id].data,
4329 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4330 
4331 			if (block_id >= MAX_BLOCK_ID) {
4332 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4333 				return 0;
4334 			}
4335 
4336 			check_rule = !dev_data->block_in_reset[block_id];
4337 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4338 				num_reg_entries = cond_regs[reg_id].num_entries;
4339 		}
4340 
4341 		if (!check_rule && dump)
4342 			continue;
4343 
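		/* Size estimation only: reserve room for a failure in every
		 * entry (worst case).
		 */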
4344 		if (!dump) {
4345 			u32 entry_dump_size =
4346 				qed_idle_chk_dump_failure(p_hwfn,
4347 							  p_ptt,
4348 							  dump_buf + offset,
4349 							  false,
4350 							  rule->rule_id,
4351 							  rule,
4352 							  0,
4353 							  NULL);
4354 
4355 			offset += num_reg_entries * entry_dump_size;
4356 			(*num_failing_rules) += num_reg_entries;
4357 			continue;
4358 		}
4359 
4360 		/* Go over all register entries (number of entries is the same
4361 		 * for all condition registers).
4362 		 */
4363 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4364 			u32 next_reg_offset = 0;
4365 
4366 			/* Read current entry of all condition registers */
4367 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4368 			     reg_id++) {
4369 				const struct dbg_idle_chk_cond_reg *reg =
4370 					&cond_regs[reg_id];
4371 				u32 padded_entry_size, addr;
4372 				bool wide_bus;
4373 
4374 				/* Find GRC address (if it's a memory, the
4375 				 * address of the specific entry is calculated).
4376 				 */
4377 				addr = GET_FIELD(reg->data,
4378 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4379 				wide_bus =
4380 				    GET_FIELD(reg->data,
4381 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4382 				if (reg->num_entries > 1 ||
4383 				    reg->start_entry > 0) {
4384 					padded_entry_size =
4385 					   reg->entry_size > 1 ?
4386 					   roundup_pow_of_two(reg->entry_size) :
4387 					   1;
4388 					addr += (reg->start_entry + entry_id) *
4389 						padded_entry_size;
4390 				}
4391 
4392 				/* Read registers */
4393 				if (next_reg_offset + reg->entry_size >=
4394 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4395 					DP_NOTICE(p_hwfn,
4396 						  "idle check registers entry is too large\n");
4397 					return 0;
4398 				}
4399 
4400 				next_reg_offset +=
4401 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4402 							    cond_reg_values +
4403 							    next_reg_offset,
4404 							    dump, addr,
4405 							    reg->entry_size,
4406 							    wide_bus,
4407 							    SPLIT_TYPE_NONE, 0);
4408 			}
4409 
4410 			/* Call rule condition function.
4411 			 * If returns true, it's a failure.
4412 			 */
4413 			if ((*cond_arr[rule->cond_id])(cond_reg_values,
4414 						       imm_values)) {
4415 				offset += qed_idle_chk_dump_failure(p_hwfn,
4416 							p_ptt,
4417 							dump_buf + offset,
4418 							dump,
4419 							rule->rule_id,
4420 							rule,
4421 							entry_id,
4422 							cond_reg_values);
4423 				(*num_failing_rules)++;
4424 			}
4425 		}
4426 	}
4427 
4428 	return offset;
4429 }
4430 
4431 /* Performs Idle Check Dump to the specified buffer.
4432  * Returns the dumped size in dwords.
4433  */
4434 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4435 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4436 {
4437 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4438 	u32 num_failing_rules = 0;
4439 
4440 	/* Dump global params */
4441 	offset += qed_dump_common_global_params(p_hwfn,
4442 						p_ptt,
4443 						dump_buf + offset, dump, 1);
4444 	offset += qed_dump_str_param(dump_buf + offset,
4445 				     dump, "dump-type", "idle-chk");
4446 
4447 	/* Dump idle check section header with a single parameter */
4448 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4449 	num_failing_rules_offset = offset;
4450 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4451 
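	/* The rules are arranged in groups, each preceded by a condition
	 * header that specifies the mode in which the group applies.
	 */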
4452 	while (input_offset <
4453 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4454 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4455 			(const struct dbg_idle_chk_cond_hdr *)
4456 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4457 			[input_offset++];
4458 		bool eval_mode, mode_match = true;
4459 		u32 curr_failing_rules;
4460 		u16 modes_buf_offset;
4461 
4462 		/* Check mode */
4463 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4464 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4465 		if (eval_mode) {
4466 			modes_buf_offset =
4467 				GET_FIELD(cond_hdr->mode.data,
4468 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4469 			mode_match = qed_is_mode_match(p_hwfn,
4470 						       &modes_buf_offset);
4471 		}
4472 
4473 		if (mode_match) {
4474 			offset +=
4475 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4476 				p_ptt,
4477 				dump_buf + offset,
4478 				dump,
4479 				(const struct dbg_idle_chk_rule *)
4480 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4481 				ptr[input_offset],
4482 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4483 				&curr_failing_rules);
4484 			num_failing_rules += curr_failing_rules;
4485 		}
4486 
4487 		input_offset += cond_hdr->data_size;
4488 	}
4489 
4490 	/* Overwrite num_rules parameter */
4491 	if (dump)
4492 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4493 				   dump, "num_rules", num_failing_rules);
4494 
4495 	/* Dump last section */
4496 	offset += qed_dump_last_section(dump_buf, offset, dump);
4497 
4498 	return offset;
4499 }
4500 
4501 /* Finds the meta data image in NVRAM */
4502 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4503 					    struct qed_ptt *p_ptt,
4504 					    u32 image_type,
4505 					    u32 *nvram_offset_bytes,
4506 					    u32 *nvram_size_bytes)
4507 {
4508 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4509 	struct mcp_file_att file_att;
4510 	int nvm_result;
4511 
4512 	/* Call NVRAM get file command */
4513 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4514 					p_ptt,
4515 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4516 					image_type,
4517 					&ret_mcp_resp,
4518 					&ret_mcp_param,
4519 					&ret_txn_size, (u32 *)&file_att);
4520 
4521 	/* Check response */
4522 	if (nvm_result ||
4523 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4524 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4525 
4526 	/* Update return values */
4527 	*nvram_offset_bytes = file_att.nvm_start_addr;
4528 	*nvram_size_bytes = file_att.len;
4529 
4530 	DP_VERBOSE(p_hwfn,
4531 		   QED_MSG_DEBUG,
4532 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4533 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4534 
4535 	/* Check alignment */
4536 	if (*nvram_size_bytes & 0x3)
4537 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4538 
4539 	return DBG_STATUS_OK;
4540 }
4541 
4542 /* Reads data from NVRAM */
4543 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4544 				      struct qed_ptt *p_ptt,
4545 				      u32 nvram_offset_bytes,
4546 				      u32 nvram_size_bytes, u32 *ret_buf)
4547 {
4548 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4549 	s32 bytes_left = nvram_size_bytes;
4550 	u32 read_offset = 0;
4551 
4552 	DP_VERBOSE(p_hwfn,
4553 		   QED_MSG_DEBUG,
4554 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4555 		   nvram_size_bytes);
4556 
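	/* Read the image in chunks of up to MCP_DRV_NVM_BUF_LEN bytes each */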
4557 	do {
4558 		bytes_to_copy = (bytes_left > MCP_DRV_NVM_BUF_LEN) ?
4559 				MCP_DRV_NVM_BUF_LEN : bytes_left;
4561 
4562 		/* Call NVRAM read command */
4563 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4564 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4565 				       (nvram_offset_bytes +
4566 					read_offset) |
4567 				       (bytes_to_copy <<
4568 					DRV_MB_PARAM_NVM_LEN_OFFSET),
4569 				       &ret_mcp_resp, &ret_mcp_param,
4570 				       &ret_read_size,
4571 				       (u32 *)((u8 *)ret_buf + read_offset)))
4572 			return DBG_STATUS_NVRAM_READ_FAILED;
4573 
4574 		/* Check response */
4575 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4576 			return DBG_STATUS_NVRAM_READ_FAILED;
4577 
4578 		/* Update read offset */
4579 		read_offset += ret_read_size;
4580 		bytes_left -= ret_read_size;
4581 	} while (bytes_left > 0);
4582 
4583 	return DBG_STATUS_OK;
4584 }
4585 
4586 /* Get info on the MCP Trace data in the scratchpad:
4587  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4588  * - trace_data_size (OUT): trace data size in bytes (without the header)
4589  */
4590 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4591 						   struct qed_ptt *p_ptt,
4592 						   u32 *trace_data_grc_addr,
4593 						   u32 *trace_data_size)
4594 {
4595 	u32 spad_trace_offsize, signature;
4596 
4597 	/* Read trace section offsize structure from MCP scratchpad */
4598 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4599 
4600 	/* Extract trace section address from offsize (in scratchpad) */
4601 	*trace_data_grc_addr =
4602 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4603 
4604 	/* Read signature from MCP trace section */
4605 	signature = qed_rd(p_hwfn, p_ptt,
4606 			   *trace_data_grc_addr +
4607 			   offsetof(struct mcp_trace, signature));
4608 
4609 	if (signature != MFW_TRACE_SIGNATURE)
4610 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4611 
4612 	/* Read trace size from MCP trace section */
4613 	*trace_data_size = qed_rd(p_hwfn,
4614 				  p_ptt,
4615 				  *trace_data_grc_addr +
4616 				  offsetof(struct mcp_trace, size));
4617 
4618 	return DBG_STATUS_OK;
4619 }
4620 
4621 /* Reads MCP trace meta data image from NVRAM
4622  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4623  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4624  *			      loaded from file).
4625  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4626  */
4627 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4628 						   struct qed_ptt *p_ptt,
4629 						   u32 trace_data_size_bytes,
4630 						   u32 *running_bundle_id,
4631 						   u32 *trace_meta_offset,
4632 						   u32 *trace_meta_size)
4633 {
4634 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4635 
4636 	/* Read MCP trace section offsize structure from MCP scratchpad */
4637 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4638 
4639 	/* Find running bundle ID */
4640 	running_mfw_addr =
4641 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4642 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4643 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4644 	if (*running_bundle_id > 1)
4645 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4646 
4647 	/* Find image in NVRAM */
4648 	nvram_image_type =
4649 	    (*running_bundle_id ==
4650 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4651 	return qed_find_nvram_image(p_hwfn,
4652 				    p_ptt,
4653 				    nvram_image_type,
4654 				    trace_meta_offset, trace_meta_size);
4655 }
4656 
4657 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4658 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4659 					       struct qed_ptt *p_ptt,
4660 					       u32 nvram_offset_in_bytes,
4661 					       u32 size_in_bytes, u32 *buf)
4662 {
4663 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4664 	enum dbg_status status;
4665 	u32 signature;
4666 
4667 	/* Read meta data from NVRAM */
4668 	status = qed_nvram_read(p_hwfn,
4669 				p_ptt,
4670 				nvram_offset_in_bytes, size_in_bytes, buf);
4671 	if (status != DBG_STATUS_OK)
4672 		return status;
4673 
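	/* Meta image layout: a signature, the number of modules, a
	 * length-prefixed record per module, and a trailing signature.
	 */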
4674 	/* Extract and check first signature */
4675 	signature = qed_read_unaligned_dword(byte_buf);
4676 	byte_buf += sizeof(signature);
4677 	if (signature != NVM_MAGIC_VALUE)
4678 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4679 
4680 	/* Extract number of modules */
4681 	modules_num = *(byte_buf++);
4682 
4683 	/* Skip all modules */
4684 	for (i = 0; i < modules_num; i++) {
4685 		module_len = *(byte_buf++);
4686 		byte_buf += module_len;
4687 	}
4688 
4689 	/* Extract and check second signature */
4690 	signature = qed_read_unaligned_dword(byte_buf);
4691 	byte_buf += sizeof(signature);
4692 	if (signature != NVM_MAGIC_VALUE)
4693 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4694 
4695 	return DBG_STATUS_OK;
4696 }
4697 
4698 /* Dump MCP Trace */
4699 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4700 					  struct qed_ptt *p_ptt,
4701 					  u32 *dump_buf,
4702 					  bool dump, u32 *num_dumped_dwords)
4703 {
4704 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4705 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4706 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4707 	enum dbg_status status;
4708 	bool mcp_access;
4709 	bool halted = false;
4710 
4711 	*num_dumped_dwords = 0;
4712 
4713 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4714 
4715 	/* Get trace data info */
4716 	status = qed_mcp_trace_get_data_info(p_hwfn,
4717 					     p_ptt,
4718 					     &trace_data_grc_addr,
4719 					     &trace_data_size_bytes);
4720 	if (status != DBG_STATUS_OK)
4721 		return status;
4722 
4723 	/* Dump global params */
4724 	offset += qed_dump_common_global_params(p_hwfn,
4725 						p_ptt,
4726 						dump_buf + offset, dump, 1);
4727 	offset += qed_dump_str_param(dump_buf + offset,
4728 				     dump, "dump-type", "mcp-trace");
4729 
4730 	/* Halt MCP while reading from scratchpad so the read data will be
4731 	 * consistent. If halt fails, MCP trace is taken anyway, with a small
4732 	 * risk that it may be corrupt.
4733 	 */
4734 	if (dump && mcp_access) {
4735 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4736 		if (!halted)
4737 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4738 	}
4739 
4740 	/* Find trace data size */
4741 	trace_data_size_dwords =
4742 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4743 			 BYTES_IN_DWORD);
4744 
4745 	/* Dump trace data section header and param */
4746 	offset += qed_dump_section_hdr(dump_buf + offset,
4747 				       dump, "mcp_trace_data", 1);
4748 	offset += qed_dump_num_param(dump_buf + offset,
4749 				     dump, "size", trace_data_size_dwords);
4750 
4751 	/* Read trace data from scratchpad into dump buffer */
4752 	offset += qed_grc_dump_addr_range(p_hwfn,
4753 					  p_ptt,
4754 					  dump_buf + offset,
4755 					  dump,
4756 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4757 					  trace_data_size_dwords, false,
4758 					  SPLIT_TYPE_NONE, 0);
4759 
4760 	/* Resume MCP (only if halt succeeded) */
4761 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4762 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4763 
4764 	/* Dump trace meta section header */
4765 	offset += qed_dump_section_hdr(dump_buf + offset,
4766 				       dump, "mcp_trace_meta", 1);
4767 
4768 	/* If MCP Trace meta size parameter was set, use it.
4769 	 * Otherwise, read trace meta.
4770 	 * trace_meta_size_bytes is dword-aligned.
4771 	 */
4772 	trace_meta_size_bytes =
4773 		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4774 	if ((!trace_meta_size_bytes || dump) && mcp_access) {
4775 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4776 						     p_ptt,
4777 						     trace_data_size_bytes,
4778 						     &running_bundle_id,
4779 						     &trace_meta_offset_bytes,
4780 						     &trace_meta_size_bytes);
4781 		if (status == DBG_STATUS_OK)
4782 			trace_meta_size_dwords =
4783 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4784 	}
4785 
4786 	/* Dump trace meta size param */
4787 	offset += qed_dump_num_param(dump_buf + offset,
4788 				     dump, "size", trace_meta_size_dwords);
4789 
4790 	/* Read trace meta image into dump buffer */
4791 	if (dump && trace_meta_size_dwords)
4792 		status = qed_mcp_trace_read_meta(p_hwfn,
4793 						 p_ptt,
4794 						 trace_meta_offset_bytes,
4795 						 trace_meta_size_bytes,
4796 						 dump_buf + offset);
4797 	if (status == DBG_STATUS_OK)
4798 		offset += trace_meta_size_dwords;
4799 
4800 	/* Dump last section */
4801 	offset += qed_dump_last_section(dump_buf, offset, dump);
4802 
4803 	*num_dumped_dwords = offset;
4804 
4805 	/* If no MCP access, indicate that the dump doesn't contain the meta
4806 	 * data from NVRAM.
4807 	 */
4808 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4809 }
4810 
4811 /* Dump GRC FIFO */
4812 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4813 					 struct qed_ptt *p_ptt,
4814 					 u32 *dump_buf,
4815 					 bool dump, u32 *num_dumped_dwords)
4816 {
4817 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4818 	bool fifo_has_data;
4819 
4820 	*num_dumped_dwords = 0;
4821 
4822 	/* Dump global params */
4823 	offset += qed_dump_common_global_params(p_hwfn,
4824 						p_ptt,
4825 						dump_buf + offset, dump, 1);
4826 	offset += qed_dump_str_param(dump_buf + offset,
4827 				     dump, "dump-type", "reg-fifo");
4828 
4829 	/* Dump fifo data section header and param. The size param is 0 for
4830 	 * now, and is overwritten after reading the FIFO.
4831 	 */
4832 	offset += qed_dump_section_hdr(dump_buf + offset,
4833 				       dump, "reg_fifo_data", 1);
4834 	size_param_offset = offset;
4835 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4836 
4837 	if (!dump) {
4838 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4839 		 * test how much data is available, except for reading it.
4840 		 */
4841 		offset += REG_FIFO_DEPTH_DWORDS;
4842 		goto out;
4843 	}
4844 
4845 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4846 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4847 
4848 	/* Pull available data from the FIFO. Use DMAE since this is wide-bus
4849 	 * memory and must be accessed atomically. Also make sure dwords_read
4850 	 * doesn't exceed the FIFO depth, since more entries could be added
4851 	 * while we are emptying it.
4852 	 */
4853 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4854 	len = REG_FIFO_ELEMENT_DWORDS;
4855 	for (dwords_read = 0;
4856 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4857 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4858 		offset += qed_grc_dump_addr_range(p_hwfn,
4859 						  p_ptt,
4860 						  dump_buf + offset,
4861 						  true,
4862 						  addr,
4863 						  len,
4864 						  true, SPLIT_TYPE_NONE,
4865 						  0);
4866 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4867 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4868 	}
4869 
4870 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4871 			   dwords_read);
4872 out:
4873 	/* Dump last section */
4874 	offset += qed_dump_last_section(dump_buf, offset, dump);
4875 
4876 	*num_dumped_dwords = offset;
4877 
4878 	return DBG_STATUS_OK;
4879 }
4880 
4881 /* Dump IGU FIFO */
4882 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4883 					 struct qed_ptt *p_ptt,
4884 					 u32 *dump_buf,
4885 					 bool dump, u32 *num_dumped_dwords)
4886 {
4887 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4888 	bool fifo_has_data;
4889 
4890 	*num_dumped_dwords = 0;
4891 
4892 	/* Dump global params */
4893 	offset += qed_dump_common_global_params(p_hwfn,
4894 						p_ptt,
4895 						dump_buf + offset, dump, 1);
4896 	offset += qed_dump_str_param(dump_buf + offset,
4897 				     dump, "dump-type", "igu-fifo");
4898 
4899 	/* Dump fifo data section header and param. The size param is 0 for
4900 	 * now, and is overwritten after reading the FIFO.
4901 	 */
4902 	offset += qed_dump_section_hdr(dump_buf + offset,
4903 				       dump, "igu_fifo_data", 1);
4904 	size_param_offset = offset;
4905 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4906 
4907 	if (!dump) {
4908 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4909 		 * test how much data is available, except for reading it.
4910 		 */
4911 		offset += IGU_FIFO_DEPTH_DWORDS;
4912 		goto out;
4913 	}
4914 
4915 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4916 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4917 
4918 	/* Pull available data from the FIFO. Use DMAE since this is wide-bus
4919 	 * memory and must be accessed atomically. Also make sure dwords_read
4920 	 * doesn't exceed the FIFO depth, since more entries could be added
4921 	 * while we are emptying it.
4922 	 */
4923 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4924 	len = IGU_FIFO_ELEMENT_DWORDS;
4925 	for (dwords_read = 0;
4926 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4927 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4928 		offset += qed_grc_dump_addr_range(p_hwfn,
4929 						  p_ptt,
4930 						  dump_buf + offset,
4931 						  true,
4932 						  addr,
4933 						  len,
4934 						  true, SPLIT_TYPE_NONE,
4935 						  0);
4936 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4937 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4938 	}
4939 
4940 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4941 			   dwords_read);
4942 out:
4943 	/* Dump last section */
4944 	offset += qed_dump_last_section(dump_buf, offset, dump);
4945 
4946 	*num_dumped_dwords = offset;
4947 
4948 	return DBG_STATUS_OK;
4949 }
4950 
4951 /* Protection Override dump */
4952 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4953 						    struct qed_ptt *p_ptt,
4954 						    u32 *dump_buf,
4955 						    bool dump,
4956 						    u32 *num_dumped_dwords)
4957 {
4958 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4959 
4960 	*num_dumped_dwords = 0;
4961 
4962 	/* Dump global params */
4963 	offset += qed_dump_common_global_params(p_hwfn,
4964 						p_ptt,
4965 						dump_buf + offset, dump, 1);
4966 	offset += qed_dump_str_param(dump_buf + offset,
4967 				     dump, "dump-type", "protection-override");
4968 
4969 	/* Dump data section header and param. The size param is 0 for now,
4970 	 * and is overwritten after reading the data.
4971 	 */
4972 	offset += qed_dump_section_hdr(dump_buf + offset,
4973 				       dump, "protection_override_data", 1);
4974 	size_param_offset = offset;
4975 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4976 
4977 	if (!dump) {
4978 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4979 		goto out;
4980 	}
4981 
4982 	/* Add override window info to buffer */
4983 	override_window_dwords =
4984 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4985 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4986 	addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4987 	offset += qed_grc_dump_addr_range(p_hwfn,
4988 					  p_ptt,
4989 					  dump_buf + offset,
4990 					  true,
4991 					  addr,
4992 					  override_window_dwords,
4993 					  true, SPLIT_TYPE_NONE, 0);
4994 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4995 			   override_window_dwords);
4996 out:
4997 	/* Dump last section */
4998 	offset += qed_dump_last_section(dump_buf, offset, dump);
4999 
5000 	*num_dumped_dwords = offset;
5001 
5002 	return DBG_STATUS_OK;
5003 }
5004 
5005 /* Performs FW Asserts Dump to the specified buffer.
5006  * Returns the dumped size in dwords.
5007  */
5008 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5009 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
5010 {
5011 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5012 	struct fw_asserts_ram_section *asserts;
5013 	char storm_letter_str[2] = "?";
5014 	struct fw_info fw_info;
5015 	u32 offset = 0;
5016 	u8 storm_id;
5017 
5018 	/* Dump global params */
5019 	offset += qed_dump_common_global_params(p_hwfn,
5020 						p_ptt,
5021 						dump_buf + offset, dump, 1);
5022 	offset += qed_dump_str_param(dump_buf + offset,
5023 				     dump, "dump-type", "fw-asserts");
5024 
5025 	/* Find Storm dump size */
5026 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5027 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
5028 		struct storm_defs *storm = &s_storm_defs[storm_id];
5029 		u32 last_list_idx, addr;
5030 
5031 		if (dev_data->block_in_reset[storm->block_id])
5032 			continue;
5033 
5034 		/* Read FW info for the current Storm */
5035 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
5036 
5037 		asserts = &fw_info.fw_asserts_section;
5038 
5039 		/* Dump FW Asserts section header and params */
5040 		storm_letter_str[0] = storm->letter;
5041 		offset += qed_dump_section_hdr(dump_buf + offset,
5042 					       dump, "fw_asserts", 2);
5043 		offset += qed_dump_str_param(dump_buf + offset,
5044 					     dump, "storm", storm_letter_str);
5045 		offset += qed_dump_num_param(dump_buf + offset,
5046 					     dump,
5047 					     "size",
5048 					     asserts->list_element_dword_size);
5049 
5050 		/* Read and dump FW Asserts data */
5051 		if (!dump) {
5052 			offset += asserts->list_element_dword_size;
5053 			continue;
5054 		}
5055 
5056 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
5057 			SEM_FAST_REG_INT_RAM +
5058 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
5059 		next_list_idx_addr = fw_asserts_section_addr +
5060 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
5061 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
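		/* The most recent assert is at index next_list_idx - 1,
		 * wrapping to the last list element when next_list_idx is 0.
		 */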
5062 		last_list_idx = (next_list_idx > 0 ?
5063 				 next_list_idx :
5064 				 asserts->list_num_elements) - 1;
5065 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
5066 		       asserts->list_dword_offset +
5067 		       last_list_idx * asserts->list_element_dword_size;
5068 		offset +=
5069 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
5070 					    dump_buf + offset,
5071 					    dump, addr,
5072 					    asserts->list_element_dword_size,
5073 						  false, SPLIT_TYPE_NONE, 0);
5074 	}
5075 
5076 	/* Dump last section */
5077 	offset += qed_dump_last_section(dump_buf, offset, dump);
5078 
5079 	return offset;
5080 }
5081 
5082 /***************************** Public Functions *******************************/
5083 
5084 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
5085 {
5086 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
5087 	u8 buf_id;
5088 
5089 	/* convert binary data to debug arrays */
5090 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
5091 		s_dbg_arrays[buf_id].ptr =
5092 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
5093 		s_dbg_arrays[buf_id].size_in_dwords =
5094 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
5095 	}
5096 
5097 	return DBG_STATUS_OK;
5098 }
5099 
5100 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5101 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5102 {
5103 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5104 	u8 storm_id;
5105 
5106 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5107 		struct storm_defs *storm = &s_storm_defs[storm_id];
5108 
5109 		/* Skip Storm if it's in reset */
5110 		if (dev_data->block_in_reset[storm->block_id])
5111 			continue;
5112 
5113 		/* Read FW info for the current Storm */
5114 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5115 
5116 		return true;
5117 	}
5118 
5119 	return false;
5120 }
5121 
5122 /* Assign default GRC param values */
5123 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5124 {
5125 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5126 	u32 i;
5127 
5128 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5129 		if (!s_grc_param_defs[i].is_persistent)
5130 			dev_data->grc.param_val[i] =
5131 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5132 }
5133 
5134 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5135 					      struct qed_ptt *p_ptt,
5136 					      u32 *buf_size)
5137 {
5138 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5139 
5140 	*buf_size = 0;
5141 
5142 	if (status != DBG_STATUS_OK)
5143 		return status;
5144 
5145 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5146 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5147 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5148 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5149 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5150 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5151 
5152 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5153 }
5154 
5155 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5156 				 struct qed_ptt *p_ptt,
5157 				 u32 *dump_buf,
5158 				 u32 buf_size_in_dwords,
5159 				 u32 *num_dumped_dwords)
5160 {
5161 	u32 needed_buf_size_in_dwords;
5162 	enum dbg_status status;
5163 
5164 	*num_dumped_dwords = 0;
5165 
5166 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5167 					       p_ptt,
5168 					       &needed_buf_size_in_dwords);
5169 	if (status != DBG_STATUS_OK)
5170 		return status;
5171 
5172 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5173 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5174 
5175 	/* GRC Dump */
5176 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5177 
5178 	/* Revert GRC params to their default */
5179 	qed_dbg_grc_set_params_default(p_hwfn);
5180 
5181 	return status;
5182 }
5183 
5184 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5185 						   struct qed_ptt *p_ptt,
5186 						   u32 *buf_size)
5187 {
5188 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5189 	struct idle_chk_data *idle_chk;
5190 	enum dbg_status status;
5191 
5192 	idle_chk = &dev_data->idle_chk;
5193 	*buf_size = 0;
5194 
5195 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5196 	if (status != DBG_STATUS_OK)
5197 		return status;
5198 
5199 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5200 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5201 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5202 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5203 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5204 
5205 	if (!idle_chk->buf_size_set) {
5206 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5207 						       p_ptt, NULL, false);
5208 		idle_chk->buf_size_set = true;
5209 	}
5210 
5211 	*buf_size = idle_chk->buf_size;
5212 
5213 	return DBG_STATUS_OK;
5214 }
5215 
5216 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5217 				      struct qed_ptt *p_ptt,
5218 				      u32 *dump_buf,
5219 				      u32 buf_size_in_dwords,
5220 				      u32 *num_dumped_dwords)
5221 {
5222 	u32 needed_buf_size_in_dwords;
5223 	enum dbg_status status;
5224 
5225 	*num_dumped_dwords = 0;
5226 
5227 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5228 						    p_ptt,
5229 						    &needed_buf_size_in_dwords);
5230 	if (status != DBG_STATUS_OK)
5231 		return status;
5232 
5233 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5234 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5235 
5236 	/* Update reset state */
5237 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5238 
5239 	/* Idle Check Dump */
5240 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5241 
5242 	/* Revert GRC params to their default */
5243 	qed_dbg_grc_set_params_default(p_hwfn);
5244 
5245 	return DBG_STATUS_OK;
5246 }
5247 
5248 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5249 						    struct qed_ptt *p_ptt,
5250 						    u32 *buf_size)
5251 {
5252 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5253 
5254 	*buf_size = 0;
5255 
5256 	if (status != DBG_STATUS_OK)
5257 		return status;
5258 
5259 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5260 }
5261 
5262 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5263 				       struct qed_ptt *p_ptt,
5264 				       u32 *dump_buf,
5265 				       u32 buf_size_in_dwords,
5266 				       u32 *num_dumped_dwords)
5267 {
5268 	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status =
5272 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5273 						    p_ptt,
5274 						    &needed_buf_size_in_dwords);
5275 	if (status != DBG_STATUS_OK && status !=
5276 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5277 		return status;
5278 
5279 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5280 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5281 
5282 	/* Update reset state */
5283 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5284 
5285 	/* Perform dump */
5286 	status = qed_mcp_trace_dump(p_hwfn,
5287 				    p_ptt, dump_buf, true, num_dumped_dwords);
5288 
5289 	/* Revert GRC params to their default */
5290 	qed_dbg_grc_set_params_default(p_hwfn);
5291 
5292 	return status;
5293 }
5294 
5295 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5296 						   struct qed_ptt *p_ptt,
5297 						   u32 *buf_size)
5298 {
5299 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5300 
5301 	*buf_size = 0;
5302 
5303 	if (status != DBG_STATUS_OK)
5304 		return status;
5305 
5306 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5307 }
5308 
5309 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5310 				      struct qed_ptt *p_ptt,
5311 				      u32 *dump_buf,
5312 				      u32 buf_size_in_dwords,
5313 				      u32 *num_dumped_dwords)
5314 {
5315 	u32 needed_buf_size_in_dwords;
5316 	enum dbg_status status;
5317 
5318 	*num_dumped_dwords = 0;
5319 
5320 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5321 						    p_ptt,
5322 						    &needed_buf_size_in_dwords);
5323 	if (status != DBG_STATUS_OK)
5324 		return status;
5325 
5326 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5327 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5328 
5329 	/* Update reset state */
5330 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5331 
5332 	status = qed_reg_fifo_dump(p_hwfn,
5333 				   p_ptt, dump_buf, true, num_dumped_dwords);
5334 
5335 	/* Revert GRC params to their default */
5336 	qed_dbg_grc_set_params_default(p_hwfn);
5337 
5338 	return status;
5339 }
5340 
5341 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5342 						   struct qed_ptt *p_ptt,
5343 						   u32 *buf_size)
5344 {
5345 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5346 
5347 	*buf_size = 0;
5348 
5349 	if (status != DBG_STATUS_OK)
5350 		return status;
5351 
5352 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5353 }
5354 
5355 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5356 				      struct qed_ptt *p_ptt,
5357 				      u32 *dump_buf,
5358 				      u32 buf_size_in_dwords,
5359 				      u32 *num_dumped_dwords)
5360 {
5361 	u32 needed_buf_size_in_dwords;
5362 	enum dbg_status status;
5363 
5364 	*num_dumped_dwords = 0;
5365 
5366 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5367 						    p_ptt,
5368 						    &needed_buf_size_in_dwords);
5369 	if (status != DBG_STATUS_OK)
5370 		return status;
5371 
5372 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5373 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5374 
5375 	/* Update reset state */
5376 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5377 
5378 	status = qed_igu_fifo_dump(p_hwfn,
5379 				   p_ptt, dump_buf, true, num_dumped_dwords);
5380 	/* Revert GRC params to their default */
5381 	qed_dbg_grc_set_params_default(p_hwfn);
5382 
5383 	return status;
5384 }
5385 
5386 enum dbg_status
5387 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5388 					      struct qed_ptt *p_ptt,
5389 					      u32 *buf_size)
5390 {
5391 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5392 
5393 	*buf_size = 0;
5394 
5395 	if (status != DBG_STATUS_OK)
5396 		return status;
5397 
5398 	return qed_protection_override_dump(p_hwfn,
5399 					    p_ptt, NULL, false, buf_size);
5400 }
5401 
5402 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5403 						 struct qed_ptt *p_ptt,
5404 						 u32 *dump_buf,
5405 						 u32 buf_size_in_dwords,
5406 						 u32 *num_dumped_dwords)
5407 {
5408 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5409 	enum dbg_status status;
5410 
5411 	*num_dumped_dwords = 0;
5412 
5413 	status =
5414 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5415 							      p_ptt,
5416 							      p_size);
5417 	if (status != DBG_STATUS_OK)
5418 		return status;
5419 
5420 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5421 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5422 
5423 	/* Update reset state */
5424 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5425 
5426 	status = qed_protection_override_dump(p_hwfn,
5427 					      p_ptt,
5428 					      dump_buf,
5429 					      true, num_dumped_dwords);
5430 
5431 	/* Revert GRC params to their default */
5432 	qed_dbg_grc_set_params_default(p_hwfn);
5433 
5434 	return status;
5435 }
5436 
5437 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5438 						     struct qed_ptt *p_ptt,
5439 						     u32 *buf_size)
5440 {
5441 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5442 
5443 	*buf_size = 0;
5444 
5445 	if (status != DBG_STATUS_OK)
5446 		return status;
5447 
5448 	/* Update reset state */
5449 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5450 
5451 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5452 
5453 	return DBG_STATUS_OK;
5454 }
5455 
5456 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5457 					struct qed_ptt *p_ptt,
5458 					u32 *dump_buf,
5459 					u32 buf_size_in_dwords,
5460 					u32 *num_dumped_dwords)
5461 {
5462 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5463 	enum dbg_status status;
5464 
5465 	*num_dumped_dwords = 0;
5466 
5467 	status =
5468 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5469 						     p_ptt,
5470 						     p_size);
5471 	if (status != DBG_STATUS_OK)
5472 		return status;
5473 
5474 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5475 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5476 
5477 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5478 
5479 	/* Revert GRC params to their default */
5480 	qed_dbg_grc_set_params_default(p_hwfn);
5481 
5482 	return DBG_STATUS_OK;
5483 }
5484 
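/* Reads the attention registers of the specified block and attention type,
 * and fills the specified results struct. When clear_status is set, the
 * status registers are read through their clear addresses.
 */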
5485 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5486 				  struct qed_ptt *p_ptt,
5487 				  enum block_id block_id,
5488 				  enum dbg_attn_type attn_type,
5489 				  bool clear_status,
5490 				  struct dbg_attn_block_result *results)
5491 {
5492 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5493 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5494 	const struct dbg_attn_reg *attn_reg_arr;
5495 
5496 	if (status != DBG_STATUS_OK)
5497 		return status;
5498 
5499 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5500 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5501 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5502 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5503 
5504 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5505 					       attn_type, &num_attn_regs);
5506 
5507 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5508 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5509 		struct dbg_attn_reg_result *reg_result;
5510 		u32 sts_addr, sts_val;
5511 		u16 modes_buf_offset;
5512 		bool eval_mode;
5513 
5514 		/* Check mode */
5515 		eval_mode = GET_FIELD(reg_data->mode.data,
5516 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5517 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5518 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5519 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5520 			continue;
5521 
5522 		/* Mode match - read attention status register */
5523 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5524 					   reg_data->sts_clr_address :
5525 					   GET_FIELD(reg_data->data,
5526 						     DBG_ATTN_REG_STS_ADDRESS));
5527 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5528 		if (!sts_val)
5529 			continue;
5530 
5531 		/* Non-zero attention status - add to results */
5532 		reg_result = &results->reg_results[num_result_regs];
5533 		SET_FIELD(reg_result->data,
5534 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5535 		SET_FIELD(reg_result->data,
5536 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5537 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5538 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5539 		reg_result->sts_val = sts_val;
5540 		reg_result->mask_val = qed_rd(p_hwfn,
5541 					      p_ptt,
5542 					      DWORDS_TO_BYTES
5543 					      (reg_data->mask_address));
5544 		num_result_regs++;
5545 	}
5546 
5547 	results->block_id = (u8)block_id;
5548 	results->names_offset =
5549 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5550 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5551 	SET_FIELD(results->data,
5552 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5553 
5554 	return DBG_STATUS_OK;
5555 }
5556 
5557 /******************************* Data Types **********************************/
5558 
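/* Block name and ID pair, used for looking up blocks by name */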
5559 struct block_info {
5560 	const char *name;
5561 	enum block_id id;
5562 };
5563 
5564 /* REG fifo element */
5565 struct reg_fifo_element {
5566 	u64 data;
5567 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5568 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5569 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5570 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5571 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5572 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5573 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5574 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5575 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5576 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5577 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5578 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5579 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5580 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5581 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5582 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5583 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5584 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5585 };
5586 
5587 /* IGU fifo element */
5588 struct igu_fifo_element {
5589 	u32 dword0;
5590 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5591 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5592 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5593 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5594 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5595 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5596 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5597 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5598 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5599 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5600 	u32 dword1;
5601 	u32 dword2;
5602 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5603 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5604 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5605 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5606 	u32 reserved;
5607 };
5608 
5609 struct igu_fifo_wr_data {
5610 	u32 data;
5611 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5612 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5613 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5614 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5615 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5616 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5617 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5618 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5619 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5620 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5621 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5622 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5623 };
5624 
5625 struct igu_fifo_cleanup_wr_data {
5626 	u32 data;
5627 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5628 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5629 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5630 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5631 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5632 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5633 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5634 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5635 };
5636 
5637 /* Protection override element */
5638 struct protection_override_element {
5639 	u64 data;
5640 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5641 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5642 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5643 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5644 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5645 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5646 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5647 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5648 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5649 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5650 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5651 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5652 };
5653 
5654 enum igu_fifo_sources {
5655 	IGU_SRC_PXP0,
5656 	IGU_SRC_PXP1,
5657 	IGU_SRC_PXP2,
5658 	IGU_SRC_PXP3,
5659 	IGU_SRC_PXP4,
5660 	IGU_SRC_PXP5,
5661 	IGU_SRC_PXP6,
5662 	IGU_SRC_PXP7,
5663 	IGU_SRC_CAU,
5664 	IGU_SRC_ATTN,
5665 	IGU_SRC_GRC
5666 };
5667 
5668 enum igu_fifo_addr_types {
5669 	IGU_ADDR_TYPE_MSIX_MEM,
5670 	IGU_ADDR_TYPE_WRITE_PBA,
5671 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5672 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5673 	IGU_ADDR_TYPE_READ_INT,
5674 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5675 	IGU_ADDR_TYPE_RESERVED
5676 };
5677 
5678 struct igu_fifo_addr_data {
5679 	u16 start_addr;
5680 	u16 end_addr;
5681 	char *desc;
5682 	char *vf_desc;
5683 	enum igu_fifo_addr_types type;
5684 };
5685 
5686 struct mcp_trace_meta {
5687 	u32 modules_num;
5688 	char **modules;
5689 	u32 formats_num;
5690 	struct mcp_trace_format *formats;
5691 	bool is_allocated;
5692 };
5693 
5694 /* Debug Tools user data */
5695 struct dbg_tools_user_data {
5696 	struct mcp_trace_meta mcp_trace_meta;
5697 	const u32 *mcp_trace_user_meta_buf;
5698 };
5699 
5700 /******************************** Constants **********************************/
5701 
5702 #define MAX_MSG_LEN				1024
5703 
5704 #define MCP_TRACE_MAX_MODULE_LEN		8
5705 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
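/* Width (in bits) of a single per-parameter size field in the format data.
 * The per-parameter size fields are packed consecutively, so the width equals
 * the distance between the P1 and P2 size field offsets.
 */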
5706 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5707 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5708 
5709 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5710 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5711 
5712 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5713 
5714 /***************************** Constant Arrays *******************************/
5715 
5716 struct user_dbg_array {
5717 	const u32 *ptr;
5718 	u32 size_in_dwords;
5719 };
5720 
5721 /* Debug arrays */
5722 static struct user_dbg_array
5723 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5724 
5725 /* Block names array */
5726 static struct block_info s_block_info_arr[] = {
5727 	{"grc", BLOCK_GRC},
5728 	{"miscs", BLOCK_MISCS},
5729 	{"misc", BLOCK_MISC},
5730 	{"dbu", BLOCK_DBU},
5731 	{"pglue_b", BLOCK_PGLUE_B},
5732 	{"cnig", BLOCK_CNIG},
5733 	{"cpmu", BLOCK_CPMU},
5734 	{"ncsi", BLOCK_NCSI},
5735 	{"opte", BLOCK_OPTE},
5736 	{"bmb", BLOCK_BMB},
5737 	{"pcie", BLOCK_PCIE},
5738 	{"mcp", BLOCK_MCP},
5739 	{"mcp2", BLOCK_MCP2},
5740 	{"pswhst", BLOCK_PSWHST},
5741 	{"pswhst2", BLOCK_PSWHST2},
5742 	{"pswrd", BLOCK_PSWRD},
5743 	{"pswrd2", BLOCK_PSWRD2},
5744 	{"pswwr", BLOCK_PSWWR},
5745 	{"pswwr2", BLOCK_PSWWR2},
5746 	{"pswrq", BLOCK_PSWRQ},
5747 	{"pswrq2", BLOCK_PSWRQ2},
5748 	{"pglcs", BLOCK_PGLCS},
5749 	{"ptu", BLOCK_PTU},
5750 	{"dmae", BLOCK_DMAE},
5751 	{"tcm", BLOCK_TCM},
5752 	{"mcm", BLOCK_MCM},
5753 	{"ucm", BLOCK_UCM},
5754 	{"xcm", BLOCK_XCM},
5755 	{"ycm", BLOCK_YCM},
5756 	{"pcm", BLOCK_PCM},
5757 	{"qm", BLOCK_QM},
5758 	{"tm", BLOCK_TM},
5759 	{"dorq", BLOCK_DORQ},
5760 	{"brb", BLOCK_BRB},
5761 	{"src", BLOCK_SRC},
5762 	{"prs", BLOCK_PRS},
5763 	{"tsdm", BLOCK_TSDM},
5764 	{"msdm", BLOCK_MSDM},
5765 	{"usdm", BLOCK_USDM},
5766 	{"xsdm", BLOCK_XSDM},
5767 	{"ysdm", BLOCK_YSDM},
5768 	{"psdm", BLOCK_PSDM},
5769 	{"tsem", BLOCK_TSEM},
5770 	{"msem", BLOCK_MSEM},
5771 	{"usem", BLOCK_USEM},
5772 	{"xsem", BLOCK_XSEM},
5773 	{"ysem", BLOCK_YSEM},
5774 	{"psem", BLOCK_PSEM},
5775 	{"rss", BLOCK_RSS},
5776 	{"tmld", BLOCK_TMLD},
5777 	{"muld", BLOCK_MULD},
5778 	{"yuld", BLOCK_YULD},
5779 	{"xyld", BLOCK_XYLD},
5780 	{"ptld", BLOCK_PTLD},
5781 	{"ypld", BLOCK_YPLD},
5782 	{"prm", BLOCK_PRM},
5783 	{"pbf_pb1", BLOCK_PBF_PB1},
5784 	{"pbf_pb2", BLOCK_PBF_PB2},
5785 	{"rpb", BLOCK_RPB},
5786 	{"btb", BLOCK_BTB},
5787 	{"pbf", BLOCK_PBF},
5788 	{"rdif", BLOCK_RDIF},
5789 	{"tdif", BLOCK_TDIF},
5790 	{"cdu", BLOCK_CDU},
5791 	{"ccfc", BLOCK_CCFC},
5792 	{"tcfc", BLOCK_TCFC},
5793 	{"igu", BLOCK_IGU},
5794 	{"cau", BLOCK_CAU},
5795 	{"rgfs", BLOCK_RGFS},
5796 	{"rgsrc", BLOCK_RGSRC},
5797 	{"tgfs", BLOCK_TGFS},
5798 	{"tgsrc", BLOCK_TGSRC},
5799 	{"umac", BLOCK_UMAC},
5800 	{"xmac", BLOCK_XMAC},
5801 	{"dbg", BLOCK_DBG},
5802 	{"nig", BLOCK_NIG},
5803 	{"wol", BLOCK_WOL},
5804 	{"bmbn", BLOCK_BMBN},
5805 	{"ipc", BLOCK_IPC},
5806 	{"nwm", BLOCK_NWM},
5807 	{"nws", BLOCK_NWS},
5808 	{"ms", BLOCK_MS},
5809 	{"phy_pcie", BLOCK_PHY_PCIE},
5810 	{"led", BLOCK_LED},
5811 	{"avs_wrap", BLOCK_AVS_WRAP},
5812 	{"pxpreqbus", BLOCK_PXPREQBUS},
5813 	{"misc_aeu", BLOCK_MISC_AEU},
5814 	{"bar0_map", BLOCK_BAR0_MAP}
5815 };
5816 
5817 /* Status string array */
5818 static const char * const s_status_str[] = {
5819 	/* DBG_STATUS_OK */
5820 	"Operation completed successfully",
5821 
5822 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5823 	"Debug application version wasn't set",
5824 
5825 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5826 	"Unsupported debug application version",
5827 
5828 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5829 	"The debug block wasn't reset since the last recording",
5830 
5831 	/* DBG_STATUS_INVALID_ARGS */
5832 	"Invalid arguments",
5833 
5834 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5835 	"The debug output was already set",
5836 
5837 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5838 	"Invalid PCI buffer size",
5839 
5840 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5841 	"PCI buffer allocation failed",
5842 
5843 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5844 	"A PCI buffer wasn't allocated",
5845 
5846 	/* DBG_STATUS_TOO_MANY_INPUTS */
	"Too many inputs were enabled. Enable fewer inputs, or set 'unifyInputs' to true",
5848 
5849 	/* DBG_STATUS_INPUT_OVERLAP */
5850 	"Overlapping debug bus inputs",
5851 
5852 	/* DBG_STATUS_HW_ONLY_RECORDING */
5853 	"Cannot record Storm data since the entire recording cycle is used by HW",
5854 
5855 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5856 	"The Storm was already enabled",
5857 
5858 	/* DBG_STATUS_STORM_NOT_ENABLED */
5859 	"The specified Storm wasn't enabled",
5860 
5861 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5862 	"The block was already enabled",
5863 
5864 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5865 	"The specified block wasn't enabled",
5866 
5867 	/* DBG_STATUS_NO_INPUT_ENABLED */
5868 	"No input was enabled for recording",
5869 
5870 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5871 	"Filters and triggers are not allowed when recording in 64b units",
5872 
5873 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5874 	"The filter was already enabled",
5875 
5876 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5877 	"The trigger was already enabled",
5878 
5879 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5880 	"The trigger wasn't enabled",
5881 
5882 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5883 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5884 
5885 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5886 	"Cannot add more than 3 trigger states",
5887 
5888 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5889 	"Cannot add more than 4 constraints per filter or trigger state",
5890 
5891 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5892 	"The recording wasn't started",
5893 
5894 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5895 	"A trigger was configured, but it didn't trigger",
5896 
5897 	/* DBG_STATUS_NO_DATA_RECORDED */
5898 	"No data was recorded",
5899 
5900 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5901 	"Dump buffer is too small",
5902 
5903 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5904 	"Dumped data is not aligned to chunks",
5905 
5906 	/* DBG_STATUS_UNKNOWN_CHIP */
5907 	"Unknown chip",
5908 
5909 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5910 	"Failed allocating virtual memory",
5911 
5912 	/* DBG_STATUS_BLOCK_IN_RESET */
5913 	"The input block is in reset",
5914 
5915 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5916 	"Invalid MCP trace signature found in NVRAM",
5917 
5918 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5919 	"Invalid bundle ID found in NVRAM",
5920 
5921 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5922 	"Failed getting NVRAM image",
5923 
5924 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5925 	"NVRAM image is not dword-aligned",
5926 
5927 	/* DBG_STATUS_NVRAM_READ_FAILED */
5928 	"Failed reading from NVRAM",
5929 
5930 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5931 	"Idle check parsing failed",
5932 
5933 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5934 	"MCP Trace data is corrupt",
5935 
5936 	/* DBG_STATUS_MCP_TRACE_NO_META */
5937 	"Dump doesn't contain meta data - it must be provided in image file",
5938 
5939 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5940 	"Failed to halt MCP",
5941 
5942 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5943 	"Failed to resume MCP after halt",
5944 
5945 	/* DBG_STATUS_RESERVED2 */
5946 	"Reserved debug status - shouldn't be returned",
5947 
5948 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5949 	"Failed to empty SEMI sync FIFO",
5950 
5951 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5952 	"IGU FIFO data is corrupt",
5953 
5954 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5955 	"MCP failed to mask parities",
5956 
5957 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5958 	"FW Asserts parsing failed",
5959 
5960 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5961 	"GRC FIFO data is corrupt",
5962 
5963 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5964 	"Protection Override data is corrupt",
5965 
5966 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5967 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5968 
5969 	/* DBG_STATUS_FILTER_BUG */
5970 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5971 
5972 	/* DBG_STATUS_NON_MATCHING_LINES */
5973 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5974 
5975 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5976 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5977 
5978 	/* DBG_STATUS_DBG_BUS_IN_USE */
5979 	"The debug bus is in use"
5980 };
5981 
5982 /* Idle check severity names array */
5983 static const char * const s_idle_chk_severity_str[] = {
5984 	"Error",
5985 	"Error if no traffic",
5986 	"Warning"
5987 };
5988 
5989 /* MCP Trace level names array */
5990 static const char * const s_mcp_trace_level_str[] = {
5991 	"ERROR",
5992 	"TRACE",
5993 	"DEBUG"
5994 };
5995 
5996 /* Access type names array */
5997 static const char * const s_access_strs[] = {
5998 	"read",
5999 	"write"
6000 };
6001 
6002 /* Privilege type names array */
6003 static const char * const s_privilege_strs[] = {
6004 	"VF",
6005 	"PDA",
6006 	"HV",
6007 	"UA"
6008 };
6009 
6010 /* Protection type names array */
6011 static const char * const s_protection_strs[] = {
6012 	"(default)",
6013 	"(default)",
6014 	"(default)",
6015 	"(default)",
6016 	"override VF",
6017 	"override PDA",
6018 	"override HV",
6019 	"override UA"
6020 };
6021 
6022 /* Master type names array */
6023 static const char * const s_master_strs[] = {
6024 	"???",
6025 	"pxp",
6026 	"mcp",
6027 	"msdm",
6028 	"psdm",
6029 	"ysdm",
6030 	"usdm",
6031 	"tsdm",
6032 	"xsdm",
6033 	"dbu",
6034 	"dmae",
6035 	"???",
6036 	"???",
6037 	"???",
6038 	"???",
6039 	"???"
6040 };
6041 
6042 /* REG FIFO error messages array */
6043 static const char * const s_reg_fifo_error_strs[] = {
6044 	"grc timeout",
6045 	"address doesn't belong to any block",
6046 	"reserved address in block or write to read-only address",
6047 	"privilege/protection mismatch",
6048 	"path isolation error"
6049 };
6050 
6051 /* IGU FIFO sources array */
6052 static const char * const s_igu_fifo_source_strs[] = {
6053 	"TSTORM",
6054 	"MSTORM",
6055 	"USTORM",
6056 	"XSTORM",
6057 	"YSTORM",
6058 	"PSTORM",
6059 	"PCIE",
6060 	"NIG_QM_PBF",
6061 	"CAU",
6062 	"ATTN",
6063 	"GRC",
6064 };
6065 
6066 /* IGU FIFO error messages */
6067 static const char * const s_igu_fifo_error_strs[] = {
6068 	"no error",
6069 	"length error",
6070 	"function disabled",
6071 	"VF sent command to attention address",
6072 	"host sent prod update command",
	"read of 'during interrupt' register while in MIMD mode",
6074 	"access to PXP BAR reserved address",
6075 	"producer update command to attention index",
6076 	"unknown error",
6077 	"SB index not valid",
6078 	"SB relative index and FID not found",
	"FID mismatch",
6080 	"command with error flag asserted (PCI error or CAU discard)",
6081 	"VF sent cleanup and RF cleanup is disabled",
6082 	"cleanup command on type bigger than 4"
6083 };
6084 
6085 /* IGU FIFO address data */
6086 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6087 	{0x0, 0x101, "MSI-X Memory", NULL,
6088 	 IGU_ADDR_TYPE_MSIX_MEM},
6089 	{0x102, 0x1ff, "reserved", NULL,
6090 	 IGU_ADDR_TYPE_RESERVED},
6091 	{0x200, 0x200, "Write PBA[0:63]", NULL,
6092 	 IGU_ADDR_TYPE_WRITE_PBA},
6093 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6094 	 IGU_ADDR_TYPE_WRITE_PBA},
6095 	{0x202, 0x202, "Write PBA[128]", "reserved",
6096 	 IGU_ADDR_TYPE_WRITE_PBA},
6097 	{0x203, 0x3ff, "reserved", NULL,
6098 	 IGU_ADDR_TYPE_RESERVED},
6099 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6100 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6101 	{0x5f0, 0x5f0, "Attention bits update", NULL,
6102 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6103 	{0x5f1, 0x5f1, "Attention bits set", NULL,
6104 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6105 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6106 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6107 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6108 	 IGU_ADDR_TYPE_READ_INT},
6109 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6110 	 IGU_ADDR_TYPE_READ_INT},
6111 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6112 	 IGU_ADDR_TYPE_READ_INT},
6113 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6114 	 IGU_ADDR_TYPE_READ_INT},
6115 	{0x5f7, 0x5ff, "reserved", NULL,
6116 	 IGU_ADDR_TYPE_RESERVED},
6117 	{0x600, 0x7ff, "Producer update", NULL,
6118 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6119 };
6120 
6121 /******************************** Variables **********************************/
6122 
6123 /* Temporary buffer, used for print size calculations */
6124 static char s_temp_buf[MAX_MSG_LEN];
6125 
6126 /**************************** Private Functions ******************************/
6127 
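/* Adds b to a, wrapping around the specified cyclic buffer size */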
6128 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6129 {
6130 	return (a + b) % size;
6131 }
6132 
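/* Subtracts b from a, wrapping around the specified cyclic buffer size */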
6133 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6134 {
6135 	return (size + a - b) % size;
6136 }
6137 
/* Reads the specified number of bytes (up to 4) from the specified cyclic
 * buffer and returns them as a dword value. The specified buffer offset is
 * updated.
 */
6142 static u32 qed_read_from_cyclic_buf(void *buf,
6143 				    u32 *offset,
6144 				    u32 buf_size, u8 num_bytes_to_read)
6145 {
6146 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6147 	u32 val = 0;
6148 
6149 	val_ptr = (u8 *)&val;
6150 
6151 	/* Assume running on a LITTLE ENDIAN and the buffer is network order
6152 	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
6153 	 */
6154 	for (i = 0; i < num_bytes_to_read; i++) {
6155 		val_ptr[i] = bytes_buf[*offset];
6156 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6157 	}
6158 
6159 	return val;
6160 }
6161 
6162 /* Reads and returns the next byte from the specified buffer.
6163  * The specified buffer offset is updated.
6164  */
6165 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6166 {
6167 	return ((u8 *)buf)[(*offset)++];
6168 }
6169 
6170 /* Reads and returns the next dword from the specified buffer.
6171  * The specified buffer offset is updated.
6172  */
6173 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6174 {
6175 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6176 
6177 	*offset += 4;
6178 
6179 	return dword_val;
6180 }
6181 
6182 /* Reads the next string from the specified buffer, and copies it to the
6183  * specified pointer. The specified buffer offset is updated.
6184  */
6185 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6186 {
6187 	const char *source_str = &((const char *)buf)[*offset];
6188 
6189 	strncpy(dest, source_str, size);
6190 	dest[size - 1] = '\0';
6191 	*offset += size;
6192 }
6193 
6194 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
 * If the specified buffer is NULL, a temporary buffer pointer is returned.
6196  */
6197 static char *qed_get_buf_ptr(void *buf, u32 offset)
6198 {
6199 	return buf ? (char *)buf + offset : s_temp_buf;
6200 }
6201 
/* Reads a param from the specified buffer. Returns the number of dwords read.
 * If the returned param_str_val is NULL, the param is numeric and its value
 * is returned in param_num_val. Otherwise, the param is a string and its
 * pointer is returned in param_str_val.
 */
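/* Illustrative param layout in the dump buffer (each param is padded to a
 * dword boundary):
 *   numeric param: "<name>\0" <type byte = 0>  <pad>       <u32 value>
 *   string param:  "<name>\0" <type byte != 0> "<value>\0" <pad>
 */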
6207 static u32 qed_read_param(u32 *dump_buf,
6208 			  const char **param_name,
6209 			  const char **param_str_val, u32 *param_num_val)
6210 {
6211 	char *char_buf = (char *)dump_buf;
6212 	size_t offset = 0;
6213 
6214 	/* Extract param name */
6215 	*param_name = char_buf;
6216 	offset += strlen(*param_name) + 1;
6217 
6218 	/* Check param type */
6219 	if (*(char_buf + offset++)) {
6220 		/* String param */
6221 		*param_str_val = char_buf + offset;
6222 		*param_num_val = 0;
6223 		offset += strlen(*param_str_val) + 1;
6224 		if (offset & 0x3)
6225 			offset += (4 - (offset & 0x3));
6226 	} else {
6227 		/* Numeric param */
6228 		*param_str_val = NULL;
6229 		if (offset & 0x3)
6230 			offset += (4 - (offset & 0x3));
6231 		*param_num_val = *(u32 *)(char_buf + offset);
6232 		offset += 4;
6233 	}
6234 
6235 	return (u32)offset / 4;
6236 }
6237 
6238 /* Reads a section header from the specified buffer.
6239  * Returns the number of dwords read.
6240  */
6241 static u32 qed_read_section_hdr(u32 *dump_buf,
6242 				const char **section_name,
6243 				u32 *num_section_params)
6244 {
6245 	const char *param_str_val;
6246 
6247 	return qed_read_param(dump_buf,
6248 			      section_name, &param_str_val, num_section_params);
6249 }
6250 
6251 /* Reads section params from the specified buffer and prints them to the results
6252  * buffer. Returns the number of dwords read.
6253  */
6254 static u32 qed_print_section_params(u32 *dump_buf,
6255 				    u32 num_section_params,
6256 				    char *results_buf, u32 *num_chars_printed)
6257 {
6258 	u32 i, dump_offset = 0, results_offset = 0;
6259 
6260 	for (i = 0; i < num_section_params; i++) {
6261 		const char *param_name, *param_str_val;
6262 		u32 param_num_val = 0;
6263 
6264 		dump_offset += qed_read_param(dump_buf + dump_offset,
6265 					      &param_name,
6266 					      &param_str_val, &param_num_val);
6267 
6268 		if (param_str_val)
6269 			results_offset +=
6270 				sprintf(qed_get_buf_ptr(results_buf,
6271 							results_offset),
6272 					"%s: %s\n", param_name, param_str_val);
6273 		else if (strcmp(param_name, "fw-timestamp"))
6274 			results_offset +=
6275 				sprintf(qed_get_buf_ptr(results_buf,
6276 							results_offset),
6277 					"%s: %d\n", param_name, param_num_val);
6278 	}
6279 
6280 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6281 				  "\n");
6282 
6283 	*num_chars_printed = results_offset;
6284 
6285 	return dump_offset;
6286 }
6287 
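/* Returns the debug tools user data of the specified HW function */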
6288 static struct dbg_tools_user_data *
6289 qed_dbg_get_user_data(struct qed_hwfn *p_hwfn)
6290 {
6291 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6292 }
6293 
6294 /* Parses the idle check rules and returns the number of characters printed.
6295  * In case of parsing error, returns 0.
6296  */
6297 static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf,
6298 					 u32 *dump_buf_end,
6299 					 u32 num_rules,
6300 					 bool print_fw_idle_chk,
6301 					 char *results_buf,
6302 					 u32 *num_errors, u32 *num_warnings)
6303 {
6304 	/* Offset in results_buf in bytes */
6305 	u32 results_offset = 0;
6306 
6307 	u32 rule_idx;
6308 	u16 i, j;
6309 
6310 	*num_errors = 0;
6311 	*num_warnings = 0;
6312 
6313 	/* Go over dumped results */
6314 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6315 	     rule_idx++) {
6316 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6317 		struct dbg_idle_chk_result_hdr *hdr;
6318 		const char *parsing_str, *lsi_msg;
6319 		u32 parsing_str_offset;
6320 		bool has_fw_msg;
6321 		u8 curr_reg_id;
6322 
6323 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6324 		rule_parsing_data =
6325 			(const struct dbg_idle_chk_rule_parsing_data *)
6326 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6327 			ptr[hdr->rule_id];
6328 		parsing_str_offset =
6329 			GET_FIELD(rule_parsing_data->data,
6330 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6331 		has_fw_msg =
6332 			GET_FIELD(rule_parsing_data->data,
6333 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6334 		parsing_str =
6335 			&((const char *)
6336 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6337 			[parsing_str_offset];
6338 		lsi_msg = parsing_str;
6339 		curr_reg_id = 0;
6340 
6341 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6342 			return 0;
6343 
6344 		/* Skip rule header */
6345 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6346 
6347 		/* Update errors/warnings count */
6348 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6349 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6350 			(*num_errors)++;
6351 		else
6352 			(*num_warnings)++;
6353 
6354 		/* Print rule severity */
6355 		results_offset +=
6356 		    sprintf(qed_get_buf_ptr(results_buf,
6357 					    results_offset), "%s: ",
6358 			    s_idle_chk_severity_str[hdr->severity]);
6359 
6360 		/* Print rule message */
6361 		if (has_fw_msg)
6362 			parsing_str += strlen(parsing_str) + 1;
6363 		results_offset +=
6364 		    sprintf(qed_get_buf_ptr(results_buf,
6365 					    results_offset), "%s.",
6366 			    has_fw_msg &&
6367 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6368 		parsing_str += strlen(parsing_str) + 1;
6369 
6370 		/* Print register values */
6371 		results_offset +=
6372 		    sprintf(qed_get_buf_ptr(results_buf,
6373 					    results_offset), " Registers:");
6374 		for (i = 0;
6375 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6376 		     i++) {
6377 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6378 			bool is_mem;
6379 			u8 reg_id;
6380 
6381 			reg_hdr =
6382 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6383 			is_mem = GET_FIELD(reg_hdr->data,
6384 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6385 			reg_id = GET_FIELD(reg_hdr->data,
6386 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6387 
6388 			/* Skip reg header */
6389 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6390 
6391 			/* Skip register names until the required reg_id is
6392 			 * reached.
6393 			 */
6394 			for (; reg_id > curr_reg_id;
6395 			     curr_reg_id++,
6396 			     parsing_str += strlen(parsing_str) + 1);
6397 
6398 			results_offset +=
6399 			    sprintf(qed_get_buf_ptr(results_buf,
6400 						    results_offset), " %s",
6401 				    parsing_str);
6402 			if (i < hdr->num_dumped_cond_regs && is_mem)
6403 				results_offset +=
6404 				    sprintf(qed_get_buf_ptr(results_buf,
6405 							    results_offset),
6406 					    "[%d]", hdr->mem_entry_id +
6407 					    reg_hdr->start_entry);
6408 			results_offset +=
6409 			    sprintf(qed_get_buf_ptr(results_buf,
6410 						    results_offset), "=");
6411 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6412 				results_offset +=
6413 				    sprintf(qed_get_buf_ptr(results_buf,
6414 							    results_offset),
6415 					    "0x%x", *dump_buf);
6416 				if (j < reg_hdr->size - 1)
6417 					results_offset +=
6418 					    sprintf(qed_get_buf_ptr
6419 						    (results_buf,
6420 						     results_offset), ",");
6421 			}
6422 		}
6423 
6424 		results_offset +=
6425 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6426 	}
6427 
6428 	/* Check if end of dump buffer was exceeded */
6429 	if (dump_buf > dump_buf_end)
6430 		return 0;
6431 
6432 	return results_offset;
6433 }
6434 
6435 /* Parses an idle check dump buffer.
 * If results_buf is not NULL, the idle check results are printed to it.
6437  * In any case, the required results buffer size is assigned to
6438  * parsed_results_bytes.
6439  * The parsing status is returned.
6440  */
6441 static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf,
6442 					       u32 num_dumped_dwords,
6443 					       char *results_buf,
6444 					       u32 *parsed_results_bytes,
6445 					       u32 *num_errors,
6446 					       u32 *num_warnings)
6447 {
6448 	const char *section_name, *param_name, *param_str_val;
6449 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6450 	u32 num_section_params = 0, num_rules;
6451 
6452 	/* Offset in results_buf in bytes */
6453 	u32 results_offset = 0;
6454 
6455 	*parsed_results_bytes = 0;
6456 	*num_errors = 0;
6457 	*num_warnings = 0;
6458 
6459 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6460 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6461 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6462 
6463 	/* Read global_params section */
6464 	dump_buf += qed_read_section_hdr(dump_buf,
6465 					 &section_name, &num_section_params);
6466 	if (strcmp(section_name, "global_params"))
6467 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6468 
6469 	/* Print global params */
6470 	dump_buf += qed_print_section_params(dump_buf,
6471 					     num_section_params,
6472 					     results_buf, &results_offset);
6473 
6474 	/* Read idle_chk section */
6475 	dump_buf += qed_read_section_hdr(dump_buf,
6476 					 &section_name, &num_section_params);
6477 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6478 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6479 	dump_buf += qed_read_param(dump_buf,
6480 				   &param_name, &param_str_val, &num_rules);
6481 	if (strcmp(param_name, "num_rules"))
6482 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6483 
6484 	if (num_rules) {
6485 		u32 rules_print_size;
6486 
6487 		/* Print FW output */
6488 		results_offset +=
6489 		    sprintf(qed_get_buf_ptr(results_buf,
6490 					    results_offset),
6491 			    "FW_IDLE_CHECK:\n");
6492 		rules_print_size =
6493 			qed_parse_idle_chk_dump_rules(dump_buf,
6494 						      dump_buf_end,
6495 						      num_rules,
6496 						      true,
6497 						      results_buf ?
6498 						      results_buf +
6499 						      results_offset :
6500 						      NULL,
6501 						      num_errors,
6502 						      num_warnings);
6503 		results_offset += rules_print_size;
6504 		if (!rules_print_size)
6505 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6506 
6507 		/* Print LSI output */
6508 		results_offset +=
6509 		    sprintf(qed_get_buf_ptr(results_buf,
6510 					    results_offset),
6511 			    "\nLSI_IDLE_CHECK:\n");
6512 		rules_print_size =
6513 			qed_parse_idle_chk_dump_rules(dump_buf,
6514 						      dump_buf_end,
6515 						      num_rules,
6516 						      false,
6517 						      results_buf ?
6518 						      results_buf +
6519 						      results_offset :
6520 						      NULL,
6521 						      num_errors,
6522 						      num_warnings);
6523 		results_offset += rules_print_size;
6524 		if (!rules_print_size)
6525 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6526 	}
6527 
6528 	/* Print errors/warnings count */
6529 	if (*num_errors)
6530 		results_offset +=
6531 		    sprintf(qed_get_buf_ptr(results_buf,
6532 					    results_offset),
6533 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6534 			    *num_errors, *num_warnings);
6535 	else if (*num_warnings)
6536 		results_offset +=
6537 		    sprintf(qed_get_buf_ptr(results_buf,
6538 					    results_offset),
6539 			    "\nIdle Check completed successfully (with %d warnings)\n",
6540 			    *num_warnings);
6541 	else
6542 		results_offset +=
6543 		    sprintf(qed_get_buf_ptr(results_buf,
6544 					    results_offset),
6545 			    "\nIdle Check completed successfully\n");
6546 
6547 	/* Add 1 for string NULL termination */
6548 	*parsed_results_bytes = results_offset + 1;
6549 
6550 	return DBG_STATUS_OK;
6551 }
6552 
6553 /* Allocates and fills MCP Trace meta data based on the specified meta data
6554  * dump buffer.
6555  * Returns debug status code.
6556  */
6557 static enum dbg_status
6558 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6559 			      const u32 *meta_buf)
6560 {
6561 	struct dbg_tools_user_data *dev_user_data;
6562 	u32 offset = 0, signature, i;
6563 	struct mcp_trace_meta *meta;
6564 	u8 *meta_buf_bytes;
6565 
6566 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6567 	meta = &dev_user_data->mcp_trace_meta;
6568 	meta_buf_bytes = (u8 *)meta_buf;
6569 
6570 	/* Free the previous meta before loading a new one. */
6571 	if (meta->is_allocated)
6572 		qed_mcp_trace_free_meta_data(p_hwfn);
6573 
6574 	memset(meta, 0, sizeof(*meta));
6575 
6576 	/* Read first signature */
6577 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6578 	if (signature != NVM_MAGIC_VALUE)
6579 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6580 
6581 	/* Read no. of modules and allocate memory for their pointers */
6582 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6583 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6584 				GFP_KERNEL);
6585 	if (!meta->modules)
6586 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6587 
6588 	/* Allocate and read all module strings */
6589 	for (i = 0; i < meta->modules_num; i++) {
6590 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6591 
6592 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6593 		if (!(*(meta->modules + i))) {
6594 			/* Update number of modules to be released */
6595 			meta->modules_num = i ? i - 1 : 0;
6596 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6597 		}
6598 
6599 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6600 				      *(meta->modules + i));
6601 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6602 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6603 	}
6604 
6605 	/* Read second signature */
6606 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6607 	if (signature != NVM_MAGIC_VALUE)
6608 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6609 
6610 	/* Read number of formats and allocate memory for all formats */
6611 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6612 	meta->formats = kcalloc(meta->formats_num,
6613 				sizeof(struct mcp_trace_format),
6614 				GFP_KERNEL);
6615 	if (!meta->formats)
6616 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6617 
6618 	/* Allocate and read all strings */
6619 	for (i = 0; i < meta->formats_num; i++) {
6620 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6621 		u8 format_len;
6622 
6623 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6624 							   &offset);
6625 		format_len =
6626 		    (format_ptr->data &
6627 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6628 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6629 		if (!format_ptr->format_str) {
			/* Update number of formats to be released */
6631 			meta->formats_num = i ? i - 1 : 0;
6632 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6633 		}
6634 
6635 		qed_read_str_from_buf(meta_buf_bytes,
6636 				      &offset,
6637 				      format_len, format_ptr->format_str);
6638 	}
6639 
6640 	meta->is_allocated = true;
6641 	return DBG_STATUS_OK;
6642 }
6643 
/* Parses an MCP trace buffer. If parsed_buf is not NULL, the MCP Trace results
6645  * are printed to it. The parsing status is returned.
6646  * Arguments:
6647  * trace_buf - MCP trace cyclic buffer
6648  * trace_buf_size - MCP trace cyclic buffer size in bytes
6649  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6650  *               buffer.
6651  * data_size - size in bytes of data to parse.
6652  * parsed_buf - destination buffer for parsed data.
6653  * parsed_results_bytes - size of parsed data in bytes.
6654  */
6655 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6656 					       u8 *trace_buf,
6657 					       u32 trace_buf_size,
6658 					       u32 data_offset,
6659 					       u32 data_size,
6660 					       char *parsed_buf,
6661 					       u32 *parsed_results_bytes)
6662 {
6663 	struct dbg_tools_user_data *dev_user_data;
6664 	struct mcp_trace_meta *meta;
6665 	u32 param_mask, param_shift;
6666 	enum dbg_status status;
6667 
6668 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6669 	meta = &dev_user_data->mcp_trace_meta;
6670 	*parsed_results_bytes = 0;
6671 
6672 	if (!meta->is_allocated)
6673 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6674 
6675 	status = DBG_STATUS_OK;
6676 
6677 	while (data_size) {
6678 		struct mcp_trace_format *format_ptr;
6679 		u8 format_level, format_module;
6680 		u32 params[3] = { 0, 0, 0 };
6681 		u32 header, format_idx, i;
6682 
6683 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6684 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6685 
6686 		header = qed_read_from_cyclic_buf(trace_buf,
6687 						  &data_offset,
6688 						  trace_buf_size,
6689 						  MFW_TRACE_ENTRY_SIZE);
6690 		data_size -= MFW_TRACE_ENTRY_SIZE;
6691 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6692 
6693 		/* Skip message if its index doesn't exist in the meta data */
6694 		if (format_idx >= meta->formats_num) {
6695 			u8 format_size =
6696 				(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
6697 				     MFW_TRACE_PRM_SIZE_SHIFT);
6698 
6699 			if (data_size < format_size)
6700 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6701 
6702 			data_offset = qed_cyclic_add(data_offset,
6703 						     format_size,
6704 						     trace_buf_size);
6705 			data_size -= format_size;
6706 			continue;
6707 		}
6708 
6709 		format_ptr = &meta->formats[format_idx];
6710 
6711 		for (i = 0,
6712 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK,
6713 		     param_shift = MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6714 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6715 		     i++,
6716 		     param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6717 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6718 			/* Extract param size (0..3) */
6719 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6720 					     param_shift);
6721 
6722 			/* If the param size is zero, there are no other
6723 			 * parameters.
6724 			 */
6725 			if (!param_size)
6726 				break;
6727 
6728 			/* Size is encoded using 2 bits, where 3 is used to
6729 			 * encode 4.
6730 			 */
6731 			if (param_size == 3)
6732 				param_size = 4;
6733 
6734 			if (data_size < param_size)
6735 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6736 
6737 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6738 							     &data_offset,
6739 							     trace_buf_size,
6740 							     param_size);
6741 			data_size -= param_size;
6742 		}
6743 
6744 		format_level = (u8)((format_ptr->data &
6745 				     MCP_TRACE_FORMAT_LEVEL_MASK) >>
6746 				    MCP_TRACE_FORMAT_LEVEL_SHIFT);
6747 		format_module = (u8)((format_ptr->data &
6748 				      MCP_TRACE_FORMAT_MODULE_MASK) >>
6749 				     MCP_TRACE_FORMAT_MODULE_SHIFT);
6750 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6751 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6752 
6753 		/* Print current message to results buffer */
6754 		*parsed_results_bytes +=
6755 			sprintf(qed_get_buf_ptr(parsed_buf,
6756 						*parsed_results_bytes),
6757 				"%s %-8s: ",
6758 				s_mcp_trace_level_str[format_level],
6759 				meta->modules[format_module]);
6760 		*parsed_results_bytes +=
6761 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6762 			    format_ptr->format_str,
6763 			    params[0], params[1], params[2]);
6764 	}
6765 
6766 	/* Add string NULL terminator */
6767 	(*parsed_results_bytes)++;
6768 
6769 	return status;
6770 }
6771 
6772 /* Parses an MCP Trace dump buffer.
 * If results_buf is not NULL, the MCP Trace results are printed to it.
6774  * In any case, the required results buffer size is assigned to
6775  * parsed_results_bytes.
6776  * The parsing status is returned.
6777  */
6778 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6779 						u32 *dump_buf,
6780 						char *results_buf,
6781 						u32 *parsed_results_bytes,
6782 						bool free_meta_data)
6783 {
6784 	const char *section_name, *param_name, *param_str_val;
6785 	u32 data_size, trace_data_dwords, trace_meta_dwords;
6786 	u32 offset, results_offset, results_buf_bytes;
6787 	u32 param_num_val, num_section_params;
6788 	struct mcp_trace *trace;
6789 	enum dbg_status status;
6790 	const u32 *meta_buf;
6791 	u8 *trace_buf;
6792 
6793 	*parsed_results_bytes = 0;
6794 
6795 	/* Read global_params section */
6796 	dump_buf += qed_read_section_hdr(dump_buf,
6797 					 &section_name, &num_section_params);
6798 	if (strcmp(section_name, "global_params"))
6799 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6800 
6801 	/* Print global params */
6802 	dump_buf += qed_print_section_params(dump_buf,
6803 					     num_section_params,
6804 					     results_buf, &results_offset);
6805 
6806 	/* Read trace_data section */
6807 	dump_buf += qed_read_section_hdr(dump_buf,
6808 					 &section_name, &num_section_params);
6809 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6810 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6811 	dump_buf += qed_read_param(dump_buf,
6812 				   &param_name, &param_str_val, &param_num_val);
6813 	if (strcmp(param_name, "size"))
6814 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6815 	trace_data_dwords = param_num_val;
6816 
6817 	/* Prepare trace info */
6818 	trace = (struct mcp_trace *)dump_buf;
6819 	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6820 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6821 
6822 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6823 	offset = trace->trace_oldest;
6824 	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6825 	dump_buf += trace_data_dwords;
6826 
6827 	/* Read meta_data section */
6828 	dump_buf += qed_read_section_hdr(dump_buf,
6829 					 &section_name, &num_section_params);
6830 	if (strcmp(section_name, "mcp_trace_meta"))
6831 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6832 	dump_buf += qed_read_param(dump_buf,
6833 				   &param_name, &param_str_val, &param_num_val);
6834 	if (strcmp(param_name, "size"))
6835 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6836 	trace_meta_dwords = param_num_val;
6837 
6838 	/* Choose meta data buffer */
6839 	if (!trace_meta_dwords) {
6840 		/* Dump doesn't include meta data */
6841 		struct dbg_tools_user_data *dev_user_data =
6842 			qed_dbg_get_user_data(p_hwfn);
6843 
6844 		if (!dev_user_data->mcp_trace_user_meta_buf)
6845 			return DBG_STATUS_MCP_TRACE_NO_META;
6846 
6847 		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6848 	} else {
6849 		/* Dump includes meta data */
6850 		meta_buf = dump_buf;
6851 	}
6852 
6853 	/* Allocate meta data memory */
6854 	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6855 	if (status != DBG_STATUS_OK)
6856 		return status;
6857 
6858 	status = qed_parse_mcp_trace_buf(p_hwfn,
6859 					 trace_buf,
6860 					 trace->size,
6861 					 offset,
6862 					 data_size,
6863 					 results_buf ?
6864 					 results_buf + results_offset :
6865 					 NULL,
6866 					 &results_buf_bytes);
6867 	if (status != DBG_STATUS_OK)
6868 		return status;
6869 
6870 	if (free_meta_data)
6871 		qed_mcp_trace_free_meta_data(p_hwfn);
6872 
6873 	*parsed_results_bytes = results_offset + results_buf_bytes;
6874 
6875 	return DBG_STATUS_OK;
6876 }
6877 
6878 /* Parses a Reg FIFO dump buffer.
 * If results_buf is not NULL, the Reg FIFO results are printed to it.
6880  * In any case, the required results buffer size is assigned to
6881  * parsed_results_bytes.
6882  * The parsing status is returned.
6883  */
6884 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6885 					       char *results_buf,
6886 					       u32 *parsed_results_bytes)
6887 {
6888 	const char *section_name, *param_name, *param_str_val;
6889 	u32 param_num_val, num_section_params, num_elements;
6890 	struct reg_fifo_element *elements;
	u32 results_offset = 0, i;
	u8 j, err_val, vf_val;
6893 	char vf_str[4];
6894 
6895 	/* Read global_params section */
6896 	dump_buf += qed_read_section_hdr(dump_buf,
6897 					 &section_name, &num_section_params);
6898 	if (strcmp(section_name, "global_params"))
6899 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6900 
6901 	/* Print global params */
6902 	dump_buf += qed_print_section_params(dump_buf,
6903 					     num_section_params,
6904 					     results_buf, &results_offset);
6905 
6906 	/* Read reg_fifo_data section */
6907 	dump_buf += qed_read_section_hdr(dump_buf,
6908 					 &section_name, &num_section_params);
6909 	if (strcmp(section_name, "reg_fifo_data"))
6910 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6911 	dump_buf += qed_read_param(dump_buf,
6912 				   &param_name, &param_str_val, &param_num_val);
6913 	if (strcmp(param_name, "size"))
6914 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6915 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6916 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6917 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6918 	elements = (struct reg_fifo_element *)dump_buf;
6919 
6920 	/* Decode elements */
6921 	for (i = 0; i < num_elements; i++) {
6922 		bool err_printed = false;
6923 
6924 		/* Discover if element belongs to a VF or a PF */
6925 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6926 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6927 			sprintf(vf_str, "%s", "N/A");
6928 		else
6929 			sprintf(vf_str, "%d", vf_val);
6930 
6931 		/* Add parsed element to parsed buffer */
6932 		results_offset +=
6933 		    sprintf(qed_get_buf_ptr(results_buf,
6934 					    results_offset),
6935 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6936 			    elements[i].data,
6937 			    (u32)GET_FIELD(elements[i].data,
6938 					   REG_FIFO_ELEMENT_ADDRESS) *
6939 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6940 			    s_access_strs[GET_FIELD(elements[i].data,
6941 						    REG_FIFO_ELEMENT_ACCESS)],
6942 			    (u32)GET_FIELD(elements[i].data,
6943 					   REG_FIFO_ELEMENT_PF),
6944 			    vf_str,
6945 			    (u32)GET_FIELD(elements[i].data,
6946 					   REG_FIFO_ELEMENT_PORT),
6947 			    s_privilege_strs[GET_FIELD(elements[i].data,
6948 						REG_FIFO_ELEMENT_PRIVILEGE)],
6949 			    s_protection_strs[GET_FIELD(elements[i].data,
6950 						REG_FIFO_ELEMENT_PROTECTION)],
6951 			    s_master_strs[GET_FIELD(elements[i].data,
6952 						REG_FIFO_ELEMENT_MASTER)]);
6953 
6954 		/* Print errors */
6955 		for (j = 0,
6956 		     err_val = GET_FIELD(elements[i].data,
6957 					 REG_FIFO_ELEMENT_ERROR);
6958 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6959 		     j++, err_val >>= 1) {
6960 			if (err_val & 0x1) {
6961 				if (err_printed)
6962 					results_offset +=
6963 					    sprintf(qed_get_buf_ptr
6964 						    (results_buf,
6965 						     results_offset), ", ");
6966 				results_offset +=
6967 				    sprintf(qed_get_buf_ptr
6968 					    (results_buf, results_offset), "%s",
6969 					    s_reg_fifo_error_strs[j]);
6970 				err_printed = true;
6971 			}
6972 		}
6973 
6974 		results_offset +=
6975 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6976 	}
6977 
6978 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6979 						  results_offset),
6980 				  "fifo contained %d elements", num_elements);
6981 
6982 	/* Add 1 for string NULL termination */
6983 	*parsed_results_bytes = results_offset + 1;
6984 
6985 	return DBG_STATUS_OK;
6986 }
6987 
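/* Parses a single IGU FIFO element and prints it to results_buf, advancing
 * *results_offset accordingly. Returns DBG_STATUS_IGU_FIFO_BAD_DATA if the
 * element contains an unknown source, error type or command address.
 */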
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  *element,
						  char *results_buf,
						  u32 *results_offset)
6992 {
6993 	const struct igu_fifo_addr_data *found_addr = NULL;
6994 	u8 source, err_type, i, is_cleanup;
6995 	char parsed_addr_data[32];
6996 	char parsed_wr_data[256];
6997 	u32 wr_data, prod_cons;
6998 	bool is_wr_cmd, is_pf;
6999 	u16 cmd_addr;
7000 	u64 dword12;
7001 
7002 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7003 	 * FIFO element.
7004 	 */
7005 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
7006 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7007 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7008 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7009 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7010 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7011 
7012 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7013 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7014 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7015 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7016 
7017 	/* Find address data */
7018 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7019 		const struct igu_fifo_addr_data *curr_addr =
7020 			&s_igu_fifo_addr_data[i];
7021 
7022 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7023 		    curr_addr->end_addr)
7024 			found_addr = curr_addr;
7025 	}
7026 
7027 	if (!found_addr)
7028 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7029 
7030 	/* Prepare parsed address data */
7031 	switch (found_addr->type) {
7032 	case IGU_ADDR_TYPE_MSIX_MEM:
7033 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7034 		break;
7035 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
7036 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7037 		sprintf(parsed_addr_data,
7038 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
7039 		break;
7040 	default:
7041 		parsed_addr_data[0] = '\0';
7042 	}
7043 
7044 	if (!is_wr_cmd) {
7045 		parsed_wr_data[0] = '\0';
7046 		goto out;
7047 	}
7048 
7049 	/* Prepare parsed write data */
7050 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7051 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7052 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7053 
7054 	if (source == IGU_SRC_ATTN) {
7055 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7056 	} else {
7057 		if (is_cleanup) {
7058 			u8 cleanup_val, cleanup_type;
7059 
7060 			cleanup_val =
7061 				GET_FIELD(wr_data,
7062 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7063 			cleanup_type =
7064 			    GET_FIELD(wr_data,
7065 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7066 
7067 			sprintf(parsed_wr_data,
7068 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7069 				cleanup_val ? "set" : "clear",
7070 				cleanup_type);
7071 		} else {
7072 			u8 update_flag, en_dis_int_for_sb, segment;
7073 			u8 timer_mask;
7074 
7075 			update_flag = GET_FIELD(wr_data,
7076 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7077 			en_dis_int_for_sb =
7078 				GET_FIELD(wr_data,
7079 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7080 			segment = GET_FIELD(wr_data,
7081 					    IGU_FIFO_WR_DATA_SEGMENT);
7082 			timer_mask = GET_FIELD(wr_data,
7083 					       IGU_FIFO_WR_DATA_TIMER_MASK);
7084 
7085 			sprintf(parsed_wr_data,
7086 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7087 				prod_cons,
7088 				update_flag ? "update" : "nop",
7089 				en_dis_int_for_sb ?
7090 				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7091 				"enable",
7092 				segment ? "attn" : "regular",
7093 				timer_mask);
7094 		}
7095 	}
7096 out:
7097 	/* Add parsed element to parsed buffer */
7098 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7099 						   *results_offset),
7100 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7101 				   element->dword2, element->dword1,
7102 				   element->dword0,
7103 				   is_pf ? "pf" : "vf",
7104 				   GET_FIELD(element->dword0,
7105 					     IGU_FIFO_ELEMENT_DWORD0_FID),
7106 				   s_igu_fifo_source_strs[source],
7107 				   is_wr_cmd ? "wr" : "rd",
7108 				   cmd_addr,
7109 				   (!is_pf && found_addr->vf_desc)
7110 				   ? found_addr->vf_desc
7111 				   : found_addr->desc,
7112 				   parsed_addr_data,
7113 				   parsed_wr_data,
7114 				   s_igu_fifo_error_strs[err_type]);
7115 
7116 	return DBG_STATUS_OK;
7117 }
7118 
/* Parses an IGU FIFO dump buffer.
 * If results_buf is not NULL, the IGU FIFO results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
7125 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7126 					       char *results_buf,
7127 					       u32 *parsed_results_bytes)
7128 {
7129 	const char *section_name, *param_name, *param_str_val;
7130 	u32 param_num_val, num_section_params, num_elements;
7131 	struct igu_fifo_element *elements;
7132 	enum dbg_status status;
7133 	u32 results_offset = 0;
7134 	u8 i;
7135 
7136 	/* Read global_params section */
7137 	dump_buf += qed_read_section_hdr(dump_buf,
7138 					 &section_name, &num_section_params);
7139 	if (strcmp(section_name, "global_params"))
7140 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7141 
7142 	/* Print global params */
7143 	dump_buf += qed_print_section_params(dump_buf,
7144 					     num_section_params,
7145 					     results_buf, &results_offset);
7146 
7147 	/* Read igu_fifo_data section */
7148 	dump_buf += qed_read_section_hdr(dump_buf,
7149 					 &section_name, &num_section_params);
7150 	if (strcmp(section_name, "igu_fifo_data"))
7151 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7152 	dump_buf += qed_read_param(dump_buf,
7153 				   &param_name, &param_str_val, &param_num_val);
7154 	if (strcmp(param_name, "size"))
7155 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7156 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7157 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7158 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7159 	elements = (struct igu_fifo_element *)dump_buf;
7160 
7161 	/* Decode elements */
7162 	for (i = 0; i < num_elements; i++) {
7163 		status = qed_parse_igu_fifo_element(&elements[i],
7164 						    results_buf,
7165 						    &results_offset);
7166 		if (status != DBG_STATUS_OK)
7167 			return status;
7168 	}
7169 
7170 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7171 						  results_offset),
7172 				  "fifo contained %d elements", num_elements);
7173 
7174 	/* Add 1 for string NULL termination */
7175 	*parsed_results_bytes = results_offset + 1;
7176 
7177 	return DBG_STATUS_OK;
7178 }
7179 
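/* Parses a Protection Override dump buffer.
 * If results_buf is not NULL, the Protection Override results are printed to
 * it. In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */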
7180 static enum dbg_status
7181 qed_parse_protection_override_dump(u32 *dump_buf,
7182 				   char *results_buf,
7183 				   u32 *parsed_results_bytes)
7184 {
7185 	const char *section_name, *param_name, *param_str_val;
7186 	u32 param_num_val, num_section_params, num_elements;
7187 	struct protection_override_element *elements;
7188 	u32 results_offset = 0;
7189 	u8 i;
7190 
7191 	/* Read global_params section */
7192 	dump_buf += qed_read_section_hdr(dump_buf,
7193 					 &section_name, &num_section_params);
7194 	if (strcmp(section_name, "global_params"))
7195 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7196 
7197 	/* Print global params */
7198 	dump_buf += qed_print_section_params(dump_buf,
7199 					     num_section_params,
7200 					     results_buf, &results_offset);
7201 
7202 	/* Read protection_override_data section */
7203 	dump_buf += qed_read_section_hdr(dump_buf,
7204 					 &section_name, &num_section_params);
7205 	if (strcmp(section_name, "protection_override_data"))
7206 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7207 	dump_buf += qed_read_param(dump_buf,
7208 				   &param_name, &param_str_val, &param_num_val);
7209 	if (strcmp(param_name, "size"))
7210 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7211 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7212 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7213 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7214 	elements = (struct protection_override_element *)dump_buf;
7215 
7216 	/* Decode elements */
7217 	for (i = 0; i < num_elements; i++) {
7218 		u32 address = GET_FIELD(elements[i].data,
7219 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7220 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7221 
7222 		results_offset +=
7223 		    sprintf(qed_get_buf_ptr(results_buf,
7224 					    results_offset),
7225 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7226 			    i, address,
7227 			    (u32)GET_FIELD(elements[i].data,
7228 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7229 			    (u32)GET_FIELD(elements[i].data,
7230 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7231 			    (u32)GET_FIELD(elements[i].data,
7232 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7233 			    s_protection_strs[GET_FIELD(elements[i].data,
7234 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7235 			    s_protection_strs[GET_FIELD(elements[i].data,
7236 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7237 	}
7238 
7239 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7240 						  results_offset),
7241 				  "protection override contained %d elements",
7242 				  num_elements);
7243 
7244 	/* Add 1 for string NULL termination */
7245 	*parsed_results_bytes = results_offset + 1;
7246 
7247 	return DBG_STATUS_OK;
7248 }
7249 
/* Parses a FW Asserts dump buffer.
 * If results_buf is not NULL, the FW Asserts results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 */
7256 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7257 						 char *results_buf,
7258 						 u32 *parsed_results_bytes)
7259 {
7260 	u32 num_section_params, param_num_val, i, results_offset = 0;
7261 	const char *param_name, *param_str_val, *section_name;
7262 	bool last_section_found = false;
7263 
7264 	*parsed_results_bytes = 0;
7265 
7266 	/* Read global_params section */
7267 	dump_buf += qed_read_section_hdr(dump_buf,
7268 					 &section_name, &num_section_params);
7269 	if (strcmp(section_name, "global_params"))
7270 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7271 
7272 	/* Print global params */
7273 	dump_buf += qed_print_section_params(dump_buf,
7274 					     num_section_params,
7275 					     results_buf, &results_offset);
7276 
7277 	while (!last_section_found) {
7278 		dump_buf += qed_read_section_hdr(dump_buf,
7279 						 &section_name,
7280 						 &num_section_params);
7281 		if (!strcmp(section_name, "fw_asserts")) {
7282 			/* Extract params */
7283 			const char *storm_letter = NULL;
7284 			u32 storm_dump_size = 0;
7285 
7286 			for (i = 0; i < num_section_params; i++) {
7287 				dump_buf += qed_read_param(dump_buf,
7288 							   &param_name,
7289 							   &param_str_val,
7290 							   &param_num_val);
7291 				if (!strcmp(param_name, "storm"))
7292 					storm_letter = param_str_val;
7293 				else if (!strcmp(param_name, "size"))
7294 					storm_dump_size = param_num_val;
7295 				else
7296 					return
7297 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7298 			}
7299 
7300 			if (!storm_letter || !storm_dump_size)
7301 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7302 
7303 			/* Print data */
7304 			results_offset +=
7305 			    sprintf(qed_get_buf_ptr(results_buf,
7306 						    results_offset),
7307 				    "\n%sSTORM_ASSERT: size=%d\n",
7308 				    storm_letter, storm_dump_size);
7309 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7310 				results_offset +=
7311 				    sprintf(qed_get_buf_ptr(results_buf,
7312 							    results_offset),
7313 					    "%08x\n", *dump_buf);
7314 		} else if (!strcmp(section_name, "last")) {
7315 			last_section_found = true;
7316 		} else {
7317 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7318 		}
7319 	}
7320 
7321 	/* Add 1 for string NULL termination */
7322 	*parsed_results_bytes = results_offset + 1;
7323 
7324 	return DBG_STATUS_OK;
7325 }
7326 
7327 /***************************** Public Functions *******************************/
7328 
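/* Binds the user (parsing) debug arrays to the debug data embedded in the
 * firmware binary pointed to by bin_ptr. The parsing functions below rely on
 * these arrays being set.
 */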
7329 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7330 {
7331 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7332 	u8 buf_id;
7333 
7334 	/* Convert binary data to debug arrays */
7335 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7336 		s_user_dbg_arrays[buf_id].ptr =
7337 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7338 		s_user_dbg_arrays[buf_id].size_in_dwords =
7339 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7340 	}
7341 
7342 	return DBG_STATUS_OK;
7343 }
7344 
7345 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn)
7346 {
7347 	p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data),
7348 					GFP_KERNEL);
7349 	if (!p_hwfn->dbg_user_info)
7350 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7351 
7352 	return DBG_STATUS_OK;
7353 }
7354 
7355 const char *qed_dbg_get_status_str(enum dbg_status status)
7356 {
	return (status < MAX_DBG_STATUS) ?
	       s_status_str[status] : "Invalid debug status";
7359 }
7360 
7361 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7362 						  u32 *dump_buf,
7363 						  u32 num_dumped_dwords,
7364 						  u32 *results_buf_size)
7365 {
7366 	u32 num_errors, num_warnings;
7367 
7368 	return qed_parse_idle_chk_dump(dump_buf,
7369 				       num_dumped_dwords,
7370 				       NULL,
7371 				       results_buf_size,
7372 				       &num_errors, &num_warnings);
7373 }
7374 
7375 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7376 					   u32 *dump_buf,
7377 					   u32 num_dumped_dwords,
7378 					   char *results_buf,
7379 					   u32 *num_errors,
7380 					   u32 *num_warnings)
7381 {
7382 	u32 parsed_buf_size;
7383 
7384 	return qed_parse_idle_chk_dump(dump_buf,
7385 				       num_dumped_dwords,
7386 				       results_buf,
7387 				       &parsed_buf_size,
7388 				       num_errors, num_warnings);
7389 }
7390 
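/* Registers a user-supplied MCP Trace meta data buffer, used when parsing
 * MCP Trace dumps that do not embed the meta data themselves.
 */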
7391 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7392 				     const u32 *meta_buf)
7393 {
7394 	struct dbg_tools_user_data *dev_user_data =
7395 		qed_dbg_get_user_data(p_hwfn);
7396 
7397 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7398 }
7399 
7400 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7401 						   u32 *dump_buf,
7402 						   u32 num_dumped_dwords,
7403 						   u32 *results_buf_size)
7404 {
7405 	return qed_parse_mcp_trace_dump(p_hwfn,
7406 					dump_buf, NULL, results_buf_size, true);
7407 }
7408 
7409 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7410 					    u32 *dump_buf,
7411 					    u32 num_dumped_dwords,
7412 					    char *results_buf)
7413 {
7414 	u32 parsed_buf_size;
7415 
7416 	return qed_parse_mcp_trace_dump(p_hwfn,
7417 					dump_buf,
7418 					results_buf, &parsed_buf_size, true);
7419 }
7420 
7421 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7422 						 u32 *dump_buf,
7423 						 char *results_buf)
7424 {
7425 	u32 parsed_buf_size;
7426 
7427 	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7428 					&parsed_buf_size, false);
7429 }
7430 
7431 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7432 					 u8 *dump_buf,
7433 					 u32 num_dumped_bytes,
7434 					 char *results_buf)
7435 {
7436 	u32 parsed_results_bytes;
7437 
7438 	return qed_parse_mcp_trace_buf(p_hwfn,
7439 				       dump_buf,
7440 				       num_dumped_bytes,
7441 				       0,
7442 				       num_dumped_bytes,
7443 				       results_buf, &parsed_results_bytes);
7444 }
7445 
7446 /* Frees the specified MCP Trace meta data */
7447 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7448 {
7449 	struct dbg_tools_user_data *dev_user_data;
7450 	struct mcp_trace_meta *meta;
7451 	u32 i;
7452 
7453 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7454 	meta = &dev_user_data->mcp_trace_meta;
7455 	if (!meta->is_allocated)
7456 		return;
7457 
7458 	/* Release modules */
7459 	if (meta->modules) {
7460 		for (i = 0; i < meta->modules_num; i++)
7461 			kfree(meta->modules[i]);
7462 		kfree(meta->modules);
7463 	}
7464 
7465 	/* Release formats */
7466 	if (meta->formats) {
7467 		for (i = 0; i < meta->formats_num; i++)
7468 			kfree(meta->formats[i].format_str);
7469 		kfree(meta->formats);
7470 	}
7471 
7472 	meta->is_allocated = false;
7473 }
7474 
7475 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7476 						  u32 *dump_buf,
7477 						  u32 num_dumped_dwords,
7478 						  u32 *results_buf_size)
7479 {
7480 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7481 }
7482 
7483 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7484 					   u32 *dump_buf,
7485 					   u32 num_dumped_dwords,
7486 					   char *results_buf)
7487 {
7488 	u32 parsed_buf_size;
7489 
7490 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7491 }
7492 
7493 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7494 						  u32 *dump_buf,
7495 						  u32 num_dumped_dwords,
7496 						  u32 *results_buf_size)
7497 {
7498 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7499 }
7500 
7501 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7502 					   u32 *dump_buf,
7503 					   u32 num_dumped_dwords,
7504 					   char *results_buf)
7505 {
7506 	u32 parsed_buf_size;
7507 
7508 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7509 }
7510 
7511 enum dbg_status
7512 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7513 					     u32 *dump_buf,
7514 					     u32 num_dumped_dwords,
7515 					     u32 *results_buf_size)
7516 {
7517 	return qed_parse_protection_override_dump(dump_buf,
7518 						  NULL, results_buf_size);
7519 }
7520 
7521 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7522 						      u32 *dump_buf,
7523 						      u32 num_dumped_dwords,
7524 						      char *results_buf)
7525 {
7526 	u32 parsed_buf_size;
7527 
7528 	return qed_parse_protection_override_dump(dump_buf,
7529 						  results_buf,
7530 						  &parsed_buf_size);
7531 }
7532 
7533 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7534 						    u32 *dump_buf,
7535 						    u32 num_dumped_dwords,
7536 						    u32 *results_buf_size)
7537 {
7538 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7539 }
7540 
7541 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7542 					     u32 *dump_buf,
7543 					     u32 num_dumped_dwords,
7544 					     char *results_buf)
7545 {
7546 	u32 parsed_buf_size;
7547 
7548 	return qed_parse_fw_asserts_dump(dump_buf,
7549 					 results_buf, &parsed_buf_size);
7550 }
7551 
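/* Parses an attention block result: for each register with a non-zero
 * attention status, every asserted bit is resolved to its attention name and
 * reported via DP_NOTICE. Requires the user debug arrays to be set.
 */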
7552 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7553 				   struct dbg_attn_block_result *results)
7554 {
7555 	struct user_dbg_array *block_attn, *pstrings;
7556 	const u32 *block_attn_name_offsets;
7557 	enum dbg_attn_type attn_type;
7558 	const char *block_name;
7559 	u8 num_regs, i, j;
7560 
7561 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7562 	attn_type = (enum dbg_attn_type)
7563 		    GET_FIELD(results->data,
7564 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7565 	block_name = s_block_info_arr[results->block_id].name;
7566 
7567 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7568 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7569 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7570 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7571 
7572 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7573 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7574 
7575 	/* Go over registers with a non-zero attention status */
7576 	for (i = 0; i < num_regs; i++) {
7577 		struct dbg_attn_bit_mapping *bit_mapping;
7578 		struct dbg_attn_reg_result *reg_result;
7579 		u8 num_reg_attn, bit_idx = 0;
7580 
7581 		reg_result = &results->reg_results[i];
7582 		num_reg_attn = GET_FIELD(reg_result->data,
7583 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7584 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7585 		bit_mapping = &((struct dbg_attn_bit_mapping *)
7586 				block_attn->ptr)[reg_result->block_attn_offset];
7587 
7588 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7589 
7590 		/* Go over attention status bits */
7591 		for (j = 0; j < num_reg_attn; j++) {
7592 			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7593 						     DBG_ATTN_BIT_MAPPING_VAL);
7594 			const char *attn_name, *attn_type_str, *masked_str;
7595 			u32 attn_name_offset, sts_addr;
7596 
7597 			/* Check if bit mask should be advanced (due to unused
7598 			 * bits).
7599 			 */
7600 			if (GET_FIELD(bit_mapping[j].data,
7601 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7602 				bit_idx += (u8)attn_idx_val;
7603 				continue;
7604 			}
7605 
7606 			/* Check current bit index */
7607 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7608 				bit_idx++;
7609 				continue;
7610 			}
7611 
7612 			/* Find attention name */
7613 			attn_name_offset =
7614 				block_attn_name_offsets[attn_idx_val];
7615 			attn_name = &((const char *)
7616 				      pstrings->ptr)[attn_name_offset];
7617 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7618 					"Interrupt" : "Parity";
7619 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7620 				     " [masked]" : "";
7621 			sts_addr = GET_FIELD(reg_result->data,
7622 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7623 			DP_NOTICE(p_hwfn,
7624 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7625 				  block_name, attn_type_str, attn_name,
7626 				  sts_addr, bit_idx, masked_str);
7627 
7628 			bit_idx++;
7629 		}
7630 	}
7631 
7632 	return DBG_STATUS_OK;
7633 }
7634 
/* Wrapper for unifying the idle_chk and mcp_trace APIs */
7636 static enum dbg_status
7637 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7638 				   u32 *dump_buf,
7639 				   u32 num_dumped_dwords,
7640 				   char *results_buf)
7641 {
	u32 num_errors, num_warnings;

	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					  results_buf, &num_errors,
					  &num_warnings);
7647 }
7648 
7649 /* Feature meta data lookup table */
7650 static struct {
7651 	char *name;
7652 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7653 				    struct qed_ptt *p_ptt, u32 *size);
7654 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7655 					struct qed_ptt *p_ptt, u32 *dump_buf,
7656 					u32 buf_size, u32 *dumped_dwords);
7657 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7658 					 u32 *dump_buf, u32 num_dumped_dwords,
7659 					 char *results_buf);
7660 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7661 					    u32 *dump_buf,
7662 					    u32 num_dumped_dwords,
7663 					    u32 *results_buf_size);
} qed_features_lookup[] = {
	{"grc", qed_dbg_grc_get_dump_buf_size,
	 qed_dbg_grc_dump, NULL, NULL},
	{"idle_chk", qed_dbg_idle_chk_get_dump_buf_size,
	 qed_dbg_idle_chk_dump,
	 qed_print_idle_chk_results_wrapper,
	 qed_get_idle_chk_results_buf_size},
	{"mcp_trace", qed_dbg_mcp_trace_get_dump_buf_size,
	 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
	 qed_get_mcp_trace_results_buf_size},
	{"reg_fifo", qed_dbg_reg_fifo_get_dump_buf_size,
	 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
	 qed_get_reg_fifo_results_buf_size},
	{"igu_fifo", qed_dbg_igu_fifo_get_dump_buf_size,
	 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
	 qed_get_igu_fifo_results_buf_size},
	{"protection_override",
	 qed_dbg_protection_override_get_dump_buf_size,
	 qed_dbg_protection_override_dump,
	 qed_print_protection_override_results,
	 qed_get_protection_override_results_buf_size},
	{"fw_asserts", qed_dbg_fw_asserts_get_dump_buf_size,
	 qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results,
	 qed_get_fw_asserts_results_buf_size},
};
7695 
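/* Prints a formatted (text) feature buffer to the kernel log in 80-character
 * chunks.
 */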
7696 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7697 {
7698 	u32 i, precision = 80;
7699 
7700 	if (!p_text_buf)
7701 		return;
7702 
7703 	pr_notice("\n%.*s", precision, p_text_buf);
7704 	for (i = precision; i < text_size; i += precision)
7705 		pr_cont("%.*s", precision, p_text_buf + i);
7706 	pr_cont("\n");
7707 }
7708 
7709 #define QED_RESULTS_BUF_MIN_SIZE 16
7710 /* Generic function for decoding debug feature info */
7711 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7712 				      enum qed_dbg_features feature_idx)
7713 {
7714 	struct qed_dbg_feature *feature =
7715 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7716 	u32 text_size_bytes, null_char_pos, i;
7717 	enum dbg_status rc;
7718 	char *text_buf;
7719 
7720 	/* Check if feature supports formatting capability */
7721 	if (!qed_features_lookup[feature_idx].results_buf_size)
7722 		return DBG_STATUS_OK;
7723 
7724 	/* Obtain size of formatted output */
7725 	rc = qed_features_lookup[feature_idx].
7726 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7727 				 feature->dumped_dwords, &text_size_bytes);
7728 	if (rc != DBG_STATUS_OK)
7729 		return rc;
7730 
	/* Make sure the allocated size is a dword multiple (4 bytes) */
7732 	null_char_pos = text_size_bytes - 1;
7733 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7734 
7735 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7736 		DP_NOTICE(p_hwfn->cdev,
7737 			  "formatted size of feature was too small %d. Aborting\n",
7738 			  text_size_bytes);
7739 		return DBG_STATUS_INVALID_ARGS;
7740 	}
7741 
7742 	/* Allocate temp text buf */
7743 	text_buf = vzalloc(text_size_bytes);
7744 	if (!text_buf)
7745 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7746 
7747 	/* Decode feature opcodes to string on temp buf */
7748 	rc = qed_features_lookup[feature_idx].
7749 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7750 			      feature->dumped_dwords, text_buf);
7751 	if (rc != DBG_STATUS_OK) {
7752 		vfree(text_buf);
7753 		return rc;
7754 	}
7755 
7756 	/* Replace the original null character with a '\n' character.
7757 	 * The bytes that were added as a result of the dword alignment are also
7758 	 * padded with '\n' characters.
7759 	 */
7760 	for (i = null_char_pos; i < text_size_bytes; i++)
7761 		text_buf[i] = '\n';
7762 
7763 	/* Dump printable feature to log */
7764 	if (p_hwfn->cdev->dbg_params.print_data)
7765 		qed_dbg_print_feature(text_buf, text_size_bytes);
7766 
	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer.
	 */
7770 	vfree(feature->dump_buf);
7771 	feature->dump_buf = text_buf;
7772 	feature->buf_size = text_size_bytes;
7773 	feature->dumped_dwords = text_size_bytes / 4;
7774 	return rc;
7775 }
7776 
7777 /* Generic function for performing the dump of a debug feature. */
7778 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7779 				    struct qed_ptt *p_ptt,
7780 				    enum qed_dbg_features feature_idx)
7781 {
7782 	struct qed_dbg_feature *feature =
7783 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7784 	u32 buf_size_dwords;
7785 	enum dbg_status rc;
7786 
7787 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7788 		  qed_features_lookup[feature_idx].name);
7789 
	/* If dump_buf is already allocated, free it first (this can happen if
	 * a dump was triggered but the file was never read).
	 * The buffer can't be reused as-is, since its size may have changed.
	 */
7794 	if (feature->dump_buf) {
7795 		vfree(feature->dump_buf);
7796 		feature->dump_buf = NULL;
7797 	}
7798 
7799 	/* Get buffer size from hsi, allocate accordingly, and perform the
7800 	 * dump.
7801 	 */
7802 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7803 						       &buf_size_dwords);
7804 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7805 		return rc;
7806 	feature->buf_size = buf_size_dwords * sizeof(u32);
7807 	feature->dump_buf = vmalloc(feature->buf_size);
7808 	if (!feature->dump_buf)
7809 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7810 
7811 	rc = qed_features_lookup[feature_idx].
7812 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7813 			     feature->buf_size / sizeof(u32),
7814 			     &feature->dumped_dwords);
7815 
	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
	 * error. In this case the buffer holds valid binary data, but we
	 * won't be able to parse it (since parsing relies on data in NVRAM
	 * which is only accessible when the MFW is responsive). Skip the
	 * formatting but return success so that the binary data is provided.
	 */
7822 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7823 		return DBG_STATUS_OK;
7824 
7825 	if (rc != DBG_STATUS_OK)
7826 		return rc;
7827 
7828 	/* Format output */
7829 	rc = format_feature(p_hwfn, feature_idx);
7830 	return rc;
7831 }
7832 
7833 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7834 {
7835 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7836 }
7837 
7838 int qed_dbg_grc_size(struct qed_dev *cdev)
7839 {
7840 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7841 }
7842 
7843 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7844 {
7845 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7846 			       num_dumped_bytes);
7847 }
7848 
7849 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7850 {
7851 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7852 }
7853 
7854 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7855 {
7856 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7857 			       num_dumped_bytes);
7858 }
7859 
7860 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7861 {
7862 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7863 }
7864 
7865 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7866 {
7867 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7868 			       num_dumped_bytes);
7869 }
7870 
7871 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7872 {
7873 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7874 }
7875 
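/* Retrieves the length (in bytes) of the specified NVM image. On failure,
 * *length is left at zero.
 */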
7876 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7877 				    enum qed_nvm_images image_id, u32 *length)
7878 {
7879 	struct qed_nvm_image_att image_att;
7880 	int rc;
7881 
7882 	*length = 0;
7883 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7884 	if (rc)
7885 		return rc;
7886 
7887 	*length = image_att.length;
7888 
7889 	return rc;
7890 }
7891 
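/* Reads the specified NVM image into the supplied buffer (length rounded up
 * to a dword multiple) and byte-swaps it to big-endian, except for the
 * NVM_META image, which is copied as-is.
 */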
7892 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7893 			     u32 *num_dumped_bytes,
7894 			     enum qed_nvm_images image_id)
7895 {
7896 	struct qed_hwfn *p_hwfn =
7897 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7898 	u32 len_rounded, i;
7899 	__be32 val;
7900 	int rc;
7901 
7902 	*num_dumped_bytes = 0;
7903 	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7904 	if (rc)
7905 		return rc;
7906 
7907 	DP_NOTICE(p_hwfn->cdev,
7908 		  "Collecting a debug feature [\"nvram image %d\"]\n",
7909 		  image_id);
7910 
7911 	len_rounded = roundup(len_rounded, sizeof(u32));
7912 	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7913 	if (rc)
7914 		return rc;
7915 
7916 	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7917 	if (image_id != QED_NVM_IMAGE_NVM_META)
7918 		for (i = 0; i < len_rounded; i += 4) {
7919 			val = cpu_to_be32(*(u32 *)(buffer + i));
7920 			*(u32 *)(buffer + i) = val;
7921 		}
7922 
7923 	*num_dumped_bytes = len_rounded;
7924 
7925 	return rc;
7926 }
7927 
7928 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7929 				u32 *num_dumped_bytes)
7930 {
7931 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7932 			       num_dumped_bytes);
7933 }
7934 
7935 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7936 {
7937 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7938 }
7939 
7940 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7941 		       u32 *num_dumped_bytes)
7942 {
7943 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7944 			       num_dumped_bytes);
7945 }
7946 
7947 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7948 {
7949 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7950 }
7951 
7952 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7953 		      u32 *num_dumped_bytes)
7954 {
7955 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7956 			       num_dumped_bytes);
7957 }
7958 
7959 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7960 {
7961 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7962 }
7963 
/* Defines the number of bytes used to record the length of a debugfs feature
 * buffer.
 */
7967 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7968 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7969 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7970 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
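
/* Regdump header layout (derived from the shifts above; field widths are
 * implied rather than enforced):
 *   bits  0..23 - feature data size (in dwords)
 *   bits 24..   - feature type (enum debug_print_features)
 *   bit  30     - omit_engine flag
 *   bit  31     - engine index
 */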
7971 enum debug_print_features {
7972 	OLD_MODE = 0,
7973 	IDLE_CHK = 1,
7974 	GRC_DUMP = 2,
7975 	MCP_TRACE = 3,
7976 	REG_FIFO = 4,
7977 	PROTECTION_OVERRIDE = 5,
7978 	IGU_FIFO = 6,
7979 	PHY = 7,
7980 	FW_ASSERTS = 8,
7981 	NVM_CFG1 = 9,
7982 	DEFAULT_CFG = 10,
7983 	NVM_META = 11,
7984 };
7985 
7986 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7987 				   int engine, u32 feature_size, u8 omit_engine)
7988 {
7989 	/* Insert the engine, feature and mode inside the header and combine it
7990 	 * with feature size.
7991 	 */
7992 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7993 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7994 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7995 }
7996 
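/* Collects all debug features into the supplied buffer: per-engine idle_chk
 * (twice), reg_fifo, igu_fifo, protection_override, fw_asserts and GRC dumps,
 * followed by the engine-common mcp_trace and NVM images. Each chunk is
 * preceded by a regdump header. A minimal caller-side sketch, assuming the
 * buffer is sized with qed_dbg_all_data_size():
 *
 *	int size = qed_dbg_all_data_size(cdev);
 *	void *buf = vzalloc(size);
 *
 *	if (buf) {
 *		qed_dbg_all_data(cdev, buf);
 *		... consume the dump ...
 *		vfree(buf);
 *	}
 */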
7997 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7998 {
7999 	u8 cur_engine, omit_engine = 0, org_engine;
8000 	u32 offset = 0, feature_size;
8001 	int rc;
8002 
8003 	if (cdev->num_hwfns == 1)
8004 		omit_engine = 1;
8005 
8006 	org_engine = qed_get_debug_engine(cdev);
8007 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8008 		/* Collect idle_chks and grcDump for each hw function */
8009 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8010 			   "obtaining idle_chk and grcdump for current engine\n");
8011 		qed_set_debug_engine(cdev, cur_engine);
8012 
8013 		/* First idle_chk */
8014 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8015 				      REGDUMP_HEADER_SIZE, &feature_size);
8016 		if (!rc) {
8017 			*(u32 *)((u8 *)buffer + offset) =
8018 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8019 						    feature_size, omit_engine);
8020 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8021 		} else {
8022 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8023 		}
8024 
8025 		/* Second idle_chk */
8026 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8027 				      REGDUMP_HEADER_SIZE, &feature_size);
8028 		if (!rc) {
8029 			*(u32 *)((u8 *)buffer + offset) =
8030 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
8031 						    feature_size, omit_engine);
8032 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8033 		} else {
8034 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8035 		}
8036 
8037 		/* reg_fifo dump */
8038 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8039 				      REGDUMP_HEADER_SIZE, &feature_size);
8040 		if (!rc) {
8041 			*(u32 *)((u8 *)buffer + offset) =
8042 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
8043 						    feature_size, omit_engine);
8044 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8045 		} else {
8046 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8047 		}
8048 
8049 		/* igu_fifo dump */
8050 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8051 				      REGDUMP_HEADER_SIZE, &feature_size);
8052 		if (!rc) {
8053 			*(u32 *)((u8 *)buffer + offset) =
8054 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
8055 						    feature_size, omit_engine);
8056 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8057 		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d\n", rc);
8059 		}
8060 
8061 		/* protection_override dump */
8062 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8063 						 REGDUMP_HEADER_SIZE,
8064 						 &feature_size);
8065 		if (!rc) {
8066 			*(u32 *)((u8 *)buffer + offset) =
8067 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
8068 						    cur_engine,
8069 						    feature_size, omit_engine);
8070 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8071 		} else {
8072 			DP_ERR(cdev,
8073 			       "qed_dbg_protection_override failed. rc = %d\n",
8074 			       rc);
8075 		}
8076 
8077 		/* fw_asserts dump */
8078 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8079 					REGDUMP_HEADER_SIZE, &feature_size);
8080 		if (!rc) {
8081 			*(u32 *)((u8 *)buffer + offset) =
8082 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
8083 						    feature_size, omit_engine);
8084 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8085 		} else {
8086 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8087 			       rc);
8088 		}
8089 
		/* GRC dump - must be last, because when the MCP is stuck it
		 * would clutter the idle_chk, reg_fifo, ... dumps.
		 */
8093 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8094 				 REGDUMP_HEADER_SIZE, &feature_size);
8095 		if (!rc) {
8096 			*(u32 *)((u8 *)buffer + offset) =
8097 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
8098 						    feature_size, omit_engine);
8099 			offset += (feature_size + REGDUMP_HEADER_SIZE);
8100 		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d\n", rc);
8102 		}
8103 	}
8104 
8105 	qed_set_debug_engine(cdev, org_engine);
8106 	/* mcp_trace */
8107 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8108 			       REGDUMP_HEADER_SIZE, &feature_size);
8109 	if (!rc) {
8110 		*(u32 *)((u8 *)buffer + offset) =
8111 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
8112 					    feature_size, omit_engine);
8113 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8114 	} else {
8115 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8116 	}
8117 
8118 	/* nvm cfg1 */
8119 	rc = qed_dbg_nvm_image(cdev,
8120 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8121 			       &feature_size, QED_NVM_IMAGE_NVM_CFG1);
8122 	if (!rc) {
8123 		*(u32 *)((u8 *)buffer + offset) =
8124 		    qed_calc_regdump_header(NVM_CFG1, cur_engine,
8125 					    feature_size, omit_engine);
8126 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8127 	} else if (rc != -ENOENT) {
8128 		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8130 		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
8131 	}
8132 
8133 	/* nvm default */
8134 	rc = qed_dbg_nvm_image(cdev,
8135 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8136 			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
8137 	if (!rc) {
8138 		*(u32 *)((u8 *)buffer + offset) =
8139 		    qed_calc_regdump_header(DEFAULT_CFG, cur_engine,
8140 					    feature_size, omit_engine);
8141 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8142 	} else if (rc != -ENOENT) {
8143 		DP_ERR(cdev,
8144 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8145 		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
8146 		       rc);
8147 	}
8148 
8149 	/* nvm meta */
8150 	rc = qed_dbg_nvm_image(cdev,
8151 			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
8152 			       &feature_size, QED_NVM_IMAGE_NVM_META);
8153 	if (!rc) {
8154 		*(u32 *)((u8 *)buffer + offset) =
8155 		    qed_calc_regdump_header(NVM_META, cur_engine,
8156 					    feature_size, omit_engine);
8157 		offset += (feature_size + REGDUMP_HEADER_SIZE);
8158 	} else if (rc != -ENOENT) {
8159 		DP_ERR(cdev,
8160 		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8161 		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8162 	}
8163 
8164 	return 0;
8165 }
8166 
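/* Returns the worst-case buffer size (in bytes) needed by qed_dbg_all_data(),
 * including one regdump header per chunk.
 */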
8167 int qed_dbg_all_data_size(struct qed_dev *cdev)
8168 {
8169 	struct qed_hwfn *p_hwfn =
8170 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8171 	u32 regs_len = 0, image_len = 0;
8172 	u8 cur_engine, org_engine;
8173 
8174 	org_engine = qed_get_debug_engine(cdev);
8175 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8176 		/* Engine specific */
8177 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8178 			   "calculating idle_chk and grcdump register length for current engine\n");
8179 		qed_set_debug_engine(cdev, cur_engine);
8180 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8181 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8182 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8183 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8184 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8185 			    REGDUMP_HEADER_SIZE +
8186 			    qed_dbg_protection_override_size(cdev) +
8187 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8188 	}
8189 
8190 	qed_set_debug_engine(cdev, org_engine);
8191 
8192 	/* Engine common */
8193 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8194 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8195 	if (image_len)
8196 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8197 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8198 	if (image_len)
8199 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8200 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8201 	if (image_len)
8202 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8203 
8204 	return regs_len;
8205 }
8206 
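/* Dumps and formats the specified debug feature into the supplied buffer and
 * reports the number of dumped bytes. Returns -EINVAL if a PTT cannot be
 * acquired or the dump fails.
 */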
8207 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8208 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8209 {
8210 	struct qed_hwfn *p_hwfn =
8211 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8212 	struct qed_dbg_feature *qed_feature =
8213 		&cdev->dbg_params.features[feature];
8214 	enum dbg_status dbg_rc;
8215 	struct qed_ptt *p_ptt;
8216 	int rc = 0;
8217 
8218 	/* Acquire ptt */
8219 	p_ptt = qed_ptt_acquire(p_hwfn);
8220 	if (!p_ptt)
8221 		return -EINVAL;
8222 
8223 	/* Get dump */
8224 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8225 	if (dbg_rc != DBG_STATUS_OK) {
8226 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8227 			   qed_dbg_get_status_str(dbg_rc));
8228 		*num_dumped_bytes = 0;
8229 		rc = -EINVAL;
8230 		goto out;
8231 	}
8232 
8233 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8234 		   "copying debugfs feature to external buffer\n");
8235 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = qed_feature->dumped_dwords * sizeof(u32);
8238 
8239 out:
8240 	qed_ptt_release(p_hwfn, p_ptt);
8241 	return rc;
8242 }
8243 
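/* Returns the required buffer size (in bytes) for the specified debug
 * feature; a failed size query yields zero. Returns -EINVAL if a PTT cannot
 * be acquired.
 */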
8244 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8245 {
8246 	struct qed_hwfn *p_hwfn =
8247 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
8248 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8249 	struct qed_dbg_feature *qed_feature =
8250 		&cdev->dbg_params.features[feature];
8251 	u32 buf_size_dwords;
8252 	enum dbg_status rc;
8253 
8254 	if (!p_ptt)
8255 		return -EINVAL;
8256 
8257 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8258 						   &buf_size_dwords);
8259 	if (rc != DBG_STATUS_OK)
8260 		buf_size_dwords = 0;
8261 
8262 	qed_ptt_release(p_hwfn, p_ptt);
8263 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8264 	return qed_feature->buf_size;
8265 }
8266 
8267 u8 qed_get_debug_engine(struct qed_dev *cdev)
8268 {
8269 	return cdev->dbg_params.engine_for_debug;
8270 }
8271 
8272 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8273 {
8274 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8275 		   engine_number);
8276 	cdev->dbg_params.engine_for_debug = engine_number;
8277 }
8278 
8279 void qed_dbg_pf_init(struct qed_dev *cdev)
8280 {
8281 	const u8 *dbg_values;
8282 
8283 	/* Debug values are after init values.
8284 	 * The offset is the first dword of the file.
8285 	 */
8286 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8287 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
8288 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
8289 }
8290 
8291 void qed_dbg_pf_exit(struct qed_dev *cdev)
8292 {
8293 	struct qed_dbg_feature *feature = NULL;
8294 	enum qed_dbg_features feature_idx;
8295 
8296 	/* Debug features' buffers may be allocated if debug feature was used
8297 	 * but dump wasn't called.
8298 	 */
8299 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8300 		feature = &cdev->dbg_params.features[feature_idx];
8301 		if (feature->dump_buf) {
8302 			vfree(feature->dump_buf);
8303 			feature->dump_buf = NULL;
8304 		}
8305 	}
8306 }
8307