1 // SPDX-License-Identifier: GPL-2.0-only
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015 QLogic Corporation
4  */
5 
6 #include <linux/module.h>
7 #include <linux/vmalloc.h>
8 #include <linux/crc32.h>
9 #include "qed.h"
10 #include "qed_cxt.h"
11 #include "qed_hsi.h"
12 #include "qed_hw.h"
13 #include "qed_mcp.h"
14 #include "qed_reg_addr.h"
15 
/* Memory groups enum.
 * NOTE: the order must be kept in sync with s_mem_group_names below.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM		/* Must be last */
};
50 
/* Memory group names, indexed by enum mem_groups.
 * NOTE: the order must be kept in sync with enum mem_groups above.
 */
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"CAU_MEM_EXT",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
	"TM_MEM",
	"TASK_CFC_MEM",
};
84 
85 /* Idle check conditions */
86 
87 static u32 cond5(const u32 *r, const u32 *imm)
88 {
89 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
90 }
91 
92 static u32 cond7(const u32 *r, const u32 *imm)
93 {
94 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
95 }
96 
97 static u32 cond6(const u32 *r, const u32 *imm)
98 {
99 	return (r[0] & imm[0]) != imm[1];
100 }
101 
102 static u32 cond9(const u32 *r, const u32 *imm)
103 {
104 	return ((r[0] & imm[0]) >> imm[1]) !=
105 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
106 }
107 
108 static u32 cond10(const u32 *r, const u32 *imm)
109 {
110 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
111 }
112 
113 static u32 cond4(const u32 *r, const u32 *imm)
114 {
115 	return (r[0] & ~imm[0]) != imm[1];
116 }
117 
118 static u32 cond0(const u32 *r, const u32 *imm)
119 {
120 	return (r[0] & ~r[1]) != imm[0];
121 }
122 
123 static u32 cond1(const u32 *r, const u32 *imm)
124 {
125 	return r[0] != imm[0];
126 }
127 
128 static u32 cond11(const u32 *r, const u32 *imm)
129 {
130 	return r[0] != r[1] && r[2] == imm[0];
131 }
132 
133 static u32 cond12(const u32 *r, const u32 *imm)
134 {
135 	return r[0] != r[1] && r[2] > imm[0];
136 }
137 
138 static u32 cond3(const u32 *r, const u32 *imm)
139 {
140 	return r[0] != r[1];
141 }
142 
143 static u32 cond13(const u32 *r, const u32 *imm)
144 {
145 	return r[0] & imm[0];
146 }
147 
148 static u32 cond8(const u32 *r, const u32 *imm)
149 {
150 	return r[0] < (r[1] - imm[0]);
151 }
152 
153 static u32 cond2(const u32 *r, const u32 *imm)
154 {
155 	return r[0] > imm[0];
156 }
157 
/* Array of Idle Check condition functions, indexed by the condition id
 * referenced by the idle-check rules.
 */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
};
175 
/* Number of physical debug blocks */
#define NUM_PHYS_BLOCKS 84

/* Number of debug reset registers */
#define NUM_DBG_RESET_REGS 8
179 
180 /******************************* Data Types **********************************/
181 
/* HW types. Only HW_TYPE_ASIC is used in this driver; the remaining
 * values are placeholders kept for binary compatibility.
 */
enum hw_types {
	HW_TYPE_ASIC,
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};
190 
/* CM context types: connection/task, aggregation/storm contexts.
 * Used to index storm_defs.cm_ctx_rd_addr and cm_ctx_lid_sizes.
 */
enum cm_ctx_types {
	CM_CTX_CONN_AG,
	CM_CTX_CONN_ST,
	CM_CTX_TASK_AG,
	CM_CTX_TASK_ST,
	NUM_CM_CTX_TYPES
};
199 
/* Debug bus frame modes: how the 4 (or 8) dwords of a debug bus frame are
 * split between Storm data and HW data.
 */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dwords, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
	DBG_BUS_NUM_FRAME_MODES
};
209 
/* Chip constant definitions */
struct chip_defs {
	const char *name;	/* Short chip name, e.g. "bb"/"ah" */
	u32 num_ilt_pages;	/* Derived from PSWRQ2 ILT memory size */
};

/* HW type constant definitions */
struct hw_type_defs {
	const char *name;
	u32 delay_factor;
	u32 dmae_thresh;
	u32 log_thresh;
};

/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;		/* GRC address of the reset register */
	u32 reset_val[MAX_CHIP_IDS];	/* Per-chip value to write */
};
229 
/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;			/* Storm letter (T/M/U/X/Y/P) */
	enum block_id sem_block_id;	/* SEM block of this Storm */
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;			/* True if the Storm has a VFC */
	u32 sem_fast_mem_addr;		/* Base for Storm RAM accesses */
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	/* CM context read addresses, indexed by enum cm_ctx_types */
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	/* CM context LID sizes, per chip and per context type */
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};
249 
/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;	/* HW value of the constraint operation */
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
	u32 src_disable_reg_addr;
	u32 src_enable_val;
	bool exists[MAX_CHIP_IDS];	/* True if the mode exists on the chip */
};

/* GRC parameter definitions. See s_grc_param_defs for the table itself. */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* Per-chip default value */
	u32 min;			/* Minimum allowed value */
	u32 max;			/* Maximum allowed value */
	bool is_preset;			/* True for preset pseudo-params */
	bool is_persistent;		/* Value survives a params reset */
	u32 exclude_all_preset_val;
	u32 crash_preset_val[MAX_CHIP_IDS];
};
275 
/* RSS memory definitions. Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];	/* Per-chip number of entries */
};

/* VFC RAM definitions. Rows index into the Storm VFC RAM. */
struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

/* Big RAM (BRB/BTB/BMB) definitions */
struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;	/* GRC param that enables this dump */
	u32 addr_reg_addr;		/* Big RAM address register */
	u32 data_reg_addr;		/* Big RAM data register */
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};
303 
/* PHY constant definitions for indirect TBUS access */
struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/* Split type definitions (see s_split_type_defs) */
struct split_type_defs {
	const char *name;	/* Name used in dump section headers */
};
327 
328 /******************************** Constants **********************************/
329 
#define BYTES_IN_DWORD			sizeof(u32)
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Clears and then sets the given field inside a dword array */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &=	\
		(~FIELD_BIT_MASK(type, field));	\
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* Writes an array to a single GRC address (requires an in-scope 'i') */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr,	(arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)

/* extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE			true
#define PROTECT_WIDE_BUS		true

#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_NAME_LEN		3

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET	4

#define STALL_DELAY_MS			500

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	9

#define MAX_RECURSION_DEPTH		10

#define FW_IMG_MAIN			1

#define REG_FIFO_ELEMENT_DWORDS		2
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS		4
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

/* NOTE(review): "PLTAFORM" is a typo for "PLATFORM", but the macro is
 * referenced elsewhere so the name cannot be changed here.
 */
#define MAX_SW_PLTAFORM_STR_SIZE	64

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
463 
464 /***************************** Constant Arrays *******************************/
465 
/* Chip constant definitions array, indexed by enum chip_ids */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
	{"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};
471 
/* Storm constant definitions array, indexed by storm id.
 * Order: T, M, U, X, Y, P (see struct storm_defs for field meanings).
 */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
		true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
		TCM_REG_CTX_RBC_ACCS,
		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
		 TCM_REG_SM_TASK_CTX},
		{{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
	},

	/* Mstorm */
	{'M', BLOCK_MSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
		false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE_BB_K2,
		MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		MSEM_REG_SLOW_DBG_MODE_BB_K2,
		MSEM_REG_DBG_MODE1_CFG_BB_K2,
		MSEM_REG_SYNC_DBG_EMPTY,
		MSEM_REG_DBG_GPRE_VECT,
		MCM_REG_CTX_RBC_ACCS,
		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
		 MCM_REG_SM_TASK_CTX },
		{{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/
	},

	/* Ustorm */
	{'U', BLOCK_USEM,
		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
		false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE_BB_K2,
		USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		USEM_REG_SLOW_DBG_MODE_BB_K2,
		USEM_REG_DBG_MODE1_CFG_BB_K2,
		USEM_REG_SYNC_DBG_EMPTY,
		USEM_REG_DBG_GPRE_VECT,
		UCM_REG_CTX_RBC_ACCS,
		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
		 UCM_REG_SM_TASK_CTX},
		{{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
	},

	/* Xstorm */
	{'X', BLOCK_XSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
		false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE_BB_K2,
		XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		XSEM_REG_SLOW_DBG_MODE_BB_K2,
		XSEM_REG_DBG_MODE1_CFG_BB_K2,
		XSEM_REG_SYNC_DBG_EMPTY,
		XSEM_REG_DBG_GPRE_VECT,
		XCM_REG_CTX_RBC_ACCS,
		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
		{{9, 15, 0, 0}, {9, 15,	0, 0}} /* {bb} {k2} */
	},

	/* Ystorm */
	{'Y', BLOCK_YSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
		false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE_BB_K2,
		YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		YSEM_REG_SLOW_DBG_MODE_BB_K2,
		YSEM_REG_DBG_MODE1_CFG_BB_K2,
		YSEM_REG_SYNC_DBG_EMPTY,
		YSEM_REG_DBG_GPRE_VECT,
		YCM_REG_CTX_RBC_ACCS,
		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
		 YCM_REG_SM_TASK_CTX},
		{{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
	},

	/* Pstorm */
	{'P', BLOCK_PSEM,
		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
		true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE_BB_K2,
		PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
		PSEM_REG_SLOW_DBG_MODE_BB_K2,
		PSEM_REG_DBG_MODE1_CFG_BB_K2,
		PSEM_REG_SYNC_DBG_EMPTY,
		PSEM_REG_DBG_GPRE_VECT,
		PCM_REG_CTX_RBC_ACCS,
		{0, PCM_REG_SM_CON_CTX, 0, 0},
		{{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
	},
};
571 
/* HW type definitions, indexed by enum hw_types.
 * NOTE(review): only 4 entries are defined while MAX_HW_TYPES is 5
 * (PLATFORM_RESERVED4 has no entry); presumably only HW_TYPE_ASIC is
 * ever used to index this array - confirm before extending.
 */
static struct hw_type_defs s_hw_type_defs[] = {
	/* HW_TYPE_ASIC */
	{"asic", 1, 256, 32768},
	{"reserved", 0, 0, 0},
	{"reserved2", 0, 0, 0},
	{"reserved3", 0, 0, 0}
};
579 
/* GRC parameter definitions, indexed by enum dbg_grc_params.
 * Entry fields: {default_val[bb,k2]}, min, max, is_preset, is_persistent,
 * exclude_all_preset_val, {crash_preset_val[bb,k2]}.
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_VFC */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DORQ */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IGU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BMB */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED1 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_UNSTALL */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED2 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_CRASH */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PHY */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_MCP */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_FW_VER */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED3 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
	{{0, 1}, 0, 1, false, false, 0, {0, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
719 
/* RSS memory definitions: {mem_name, type_name, addr (128b units),
 * entry_width (bits), num_entries[bb, k2]}.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid", "rss_cid", 0, 32,
	 {256, 320}},

	{"rss_mem_key_msb", "rss_key", 1024, 256,
	 {128, 208}},

	{"rss_mem_key_lsb", "rss_key", 2048, 64,
	 {128, 208}},

	{"rss_mem_info", "rss_info", 3072, 16,
	 {128, 208}},

	{"rss_mem_ind", "rss_ind", 4096, 16,
	 {16384, 26624}}
};
736 
/* VFC RAM definitions: {mem_name, type_name, base_row, num_rows} */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};
743 
/* Big RAM definitions for BRB/BTB/BMB (see struct big_ram_defs).
 * Last field is the per-chip RAM size in dwords.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224}},

	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760}},

	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864}}
};
760 
/* RBC reset registers and the per-chip {bb, k2} values written to them */
static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400}},
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040}},
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007}},
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2}},
};
771 
/* PHY definitions: base GRC address plus the four indirect TBUS
 * address/data register offsets (see struct phy_defs).
 */
static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};
794 
/* Split type names, indexed by the split type enum (see comments below) */
static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng"},

	/* SPLIT_TYPE_PORT */
	{"port"},

	/* SPLIT_TYPE_PF */
	{"pf"},

	/* SPLIT_TYPE_PORT_PF */
	{"port"},

	/* SPLIT_TYPE_VF */
	{"vf"}
};
811 
812 /**************************** Private Functions ******************************/
813 
814 /* Reads and returns a single dword from the specified unaligned buffer */
815 static u32 qed_read_unaligned_dword(u8 *buf)
816 {
817 	u32 dword;
818 
819 	memcpy((u8 *)&dword, buf, sizeof(dword));
820 	return dword;
821 }
822 
823 /* Sets the value of the specified GRC param */
824 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
825 			      enum dbg_grc_params grc_param, u32 val)
826 {
827 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
828 
829 	dev_data->grc.param_val[grc_param] = val;
830 }
831 
832 /* Returns the value of the specified GRC param */
833 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
834 			     enum dbg_grc_params grc_param)
835 {
836 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
837 
838 	return dev_data->grc.param_val[grc_param];
839 }
840 
841 /* Initializes the GRC parameters */
842 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
843 {
844 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
845 
846 	if (!dev_data->grc.params_initialized) {
847 		qed_dbg_grc_set_params_default(p_hwfn);
848 		dev_data->grc.params_initialized = 1;
849 	}
850 }
851 
852 /* Sets pointer and size for the specified binary buffer type */
853 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
854 				enum bin_dbg_buffer_type buf_type,
855 				const u32 *ptr, u32 size)
856 {
857 	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
858 
859 	buf->ptr = (void *)ptr;
860 	buf->size = size;
861 }
862 
863 /* Initializes debug data for the specified device */
864 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
865 {
866 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
867 	u8 num_pfs = 0, max_pfs_per_port = 0;
868 
869 	if (dev_data->initialized)
870 		return DBG_STATUS_OK;
871 
872 	/* Set chip */
873 	if (QED_IS_K2(p_hwfn->cdev)) {
874 		dev_data->chip_id = CHIP_K2;
875 		dev_data->mode_enable[MODE_K2] = 1;
876 		dev_data->num_vfs = MAX_NUM_VFS_K2;
877 		num_pfs = MAX_NUM_PFS_K2;
878 		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
879 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
880 		dev_data->chip_id = CHIP_BB;
881 		dev_data->mode_enable[MODE_BB] = 1;
882 		dev_data->num_vfs = MAX_NUM_VFS_BB;
883 		num_pfs = MAX_NUM_PFS_BB;
884 		max_pfs_per_port = MAX_NUM_PFS_BB;
885 	} else {
886 		return DBG_STATUS_UNKNOWN_CHIP;
887 	}
888 
889 	/* Set HW type */
890 	dev_data->hw_type = HW_TYPE_ASIC;
891 	dev_data->mode_enable[MODE_ASIC] = 1;
892 
893 	/* Set port mode */
894 	switch (p_hwfn->cdev->num_ports_in_engine) {
895 	case 1:
896 		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
897 		break;
898 	case 2:
899 		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
900 		break;
901 	case 4:
902 		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
903 		break;
904 	}
905 
906 	/* Set 100G mode */
907 	if (QED_IS_CMT(p_hwfn->cdev))
908 		dev_data->mode_enable[MODE_100G] = 1;
909 
910 	/* Set number of ports */
911 	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
912 	    dev_data->mode_enable[MODE_100G])
913 		dev_data->num_ports = 1;
914 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
915 		dev_data->num_ports = 2;
916 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
917 		dev_data->num_ports = 4;
918 
919 	/* Set number of PFs per port */
920 	dev_data->num_pfs_per_port = min_t(u32,
921 					   num_pfs / dev_data->num_ports,
922 					   max_pfs_per_port);
923 
924 	/* Initializes the GRC parameters */
925 	qed_dbg_grc_init_params(p_hwfn);
926 
927 	dev_data->use_dmae = true;
928 	dev_data->initialized = 1;
929 
930 	return DBG_STATUS_OK;
931 }
932 
933 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
934 					     enum block_id block_id)
935 {
936 	const struct dbg_block *dbg_block;
937 
938 	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
939 	return dbg_block + block_id;
940 }
941 
942 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
943 							       *p_hwfn,
944 							       enum block_id
945 							       block_id)
946 {
947 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
948 
949 	return (const struct dbg_block_chip *)
950 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
951 	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
952 }
953 
954 static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
955 							 *p_hwfn,
956 							 u8 reset_reg_id)
957 {
958 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
959 
960 	return (const struct dbg_reset_reg *)
961 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
962 	    reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
963 }
964 
965 /* Reads the FW info structure for the specified Storm from the chip,
966  * and writes it to the specified fw_info pointer.
967  */
968 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
969 				   struct qed_ptt *p_ptt,
970 				   u8 storm_id, struct fw_info *fw_info)
971 {
972 	struct storm_defs *storm = &s_storm_defs[storm_id];
973 	struct fw_info_location fw_info_location;
974 	u32 addr, i, *dest;
975 
976 	memset(&fw_info_location, 0, sizeof(fw_info_location));
977 	memset(fw_info, 0, sizeof(*fw_info));
978 
979 	/* Read first the address that points to fw_info location.
980 	 * The address is located in the last line of the Storm RAM.
981 	 */
982 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
983 	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
984 	    sizeof(fw_info_location);
985 
986 	dest = (u32 *)&fw_info_location;
987 
988 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
989 	     i++, addr += BYTES_IN_DWORD)
990 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
991 
992 	/* Read FW version info from Storm RAM */
993 	if (fw_info_location.size > 0 && fw_info_location.size <=
994 	    sizeof(*fw_info)) {
995 		addr = fw_info_location.grc_addr;
996 		dest = (u32 *)fw_info;
997 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
998 		     i++, addr += BYTES_IN_DWORD)
999 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1000 	}
1001 }
1002 
1003 /* Dumps the specified string to the specified buffer.
1004  * Returns the dumped size in bytes.
1005  */
1006 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1007 {
1008 	if (dump)
1009 		strcpy(dump_buf, str);
1010 
1011 	return (u32)strlen(str) + 1;
1012 }
1013 
1014 /* Dumps zeros to align the specified buffer to dwords.
1015  * Returns the dumped size in bytes.
1016  */
1017 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1018 {
1019 	u8 offset_in_dword, align_size;
1020 
1021 	offset_in_dword = (u8)(byte_offset & 0x3);
1022 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1023 
1024 	if (dump && align_size)
1025 		memset(dump_buf, 0, align_size);
1026 
1027 	return align_size;
1028 }
1029 
1030 /* Writes the specified string param to the specified buffer.
1031  * Returns the dumped size in dwords.
1032  */
1033 static u32 qed_dump_str_param(u32 *dump_buf,
1034 			      bool dump,
1035 			      const char *param_name, const char *param_val)
1036 {
1037 	char *char_buf = (char *)dump_buf;
1038 	u32 offset = 0;
1039 
1040 	/* Dump param name */
1041 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1042 
1043 	/* Indicate a string param value */
1044 	if (dump)
1045 		*(char_buf + offset) = 1;
1046 	offset++;
1047 
1048 	/* Dump param value */
1049 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1050 
1051 	/* Align buffer to next dword */
1052 	offset += qed_dump_align(char_buf + offset, dump, offset);
1053 
1054 	return BYTES_TO_DWORDS(offset);
1055 }
1056 
1057 /* Writes the specified numeric param to the specified buffer.
1058  * Returns the dumped size in dwords.
1059  */
1060 static u32 qed_dump_num_param(u32 *dump_buf,
1061 			      bool dump, const char *param_name, u32 param_val)
1062 {
1063 	char *char_buf = (char *)dump_buf;
1064 	u32 offset = 0;
1065 
1066 	/* Dump param name */
1067 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1068 
1069 	/* Indicate a numeric param value */
1070 	if (dump)
1071 		*(char_buf + offset) = 0;
1072 	offset++;
1073 
1074 	/* Align buffer to next dword */
1075 	offset += qed_dump_align(char_buf + offset, dump, offset);
1076 
1077 	/* Dump param value (and change offset from bytes to dwords) */
1078 	offset = BYTES_TO_DWORDS(offset);
1079 	if (dump)
1080 		*(dump_buf + offset) = param_val;
1081 	offset++;
1082 
1083 	return offset;
1084 }
1085 
1086 /* Reads the FW version and writes it as a param to the specified buffer.
1087  * Returns the dumped size in dwords.
1088  */
1089 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1090 				 struct qed_ptt *p_ptt,
1091 				 u32 *dump_buf, bool dump)
1092 {
1093 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1094 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1095 	struct fw_info fw_info = { {0}, {0} };
1096 	u32 offset = 0;
1097 
1098 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1099 		/* Read FW info from chip */
1100 		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1101 
1102 		/* Create FW version/image strings */
1103 		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1104 			     "%d_%d_%d_%d", fw_info.ver.num.major,
1105 			     fw_info.ver.num.minor, fw_info.ver.num.rev,
1106 			     fw_info.ver.num.eng) < 0)
1107 			DP_NOTICE(p_hwfn,
1108 				  "Unexpected debug error: invalid FW version string\n");
1109 		switch (fw_info.ver.image_id) {
1110 		case FW_IMG_MAIN:
1111 			strcpy(fw_img_str, "main");
1112 			break;
1113 		default:
1114 			strcpy(fw_img_str, "unknown");
1115 			break;
1116 		}
1117 	}
1118 
1119 	/* Dump FW version, image and timestamp */
1120 	offset += qed_dump_str_param(dump_buf + offset,
1121 				     dump, "fw-version", fw_ver_str);
1122 	offset += qed_dump_str_param(dump_buf + offset,
1123 				     dump, "fw-image", fw_img_str);
1124 	offset += qed_dump_num_param(dump_buf + offset,
1125 				     dump,
1126 				     "fw-timestamp", fw_info.ver.timestamp);
1127 
1128 	return offset;
1129 }
1130 
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = qed_rd(p_hwfn,
					  p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset. The sections table
		 * is an array of offsize_t entries indexed by section ID.
		 */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = qed_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		/* The offsize value holds an offset in dwords; convert to a
		 * byte address relative to the scratchpad.
		 */
		global_section_addr =
			MCP_REG_SCRATCH +
			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = qed_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Dump MFW version param. The version is packed as four
		 * bytes, most-significant byte first.
		 */
		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid MFW version string\n");
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
1179 
1180 /* Reads the chip revision from the chip and writes it as a param to the
1181  * specified buffer. Returns the dumped size in dwords.
1182  */
1183 static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1184 					struct qed_ptt *p_ptt,
1185 					u32 *dump_buf, bool dump)
1186 {
1187 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1188 	char param_str[3] = "??";
1189 
1190 	if (dev_data->hw_type == HW_TYPE_ASIC) {
1191 		u32 chip_rev, chip_metal;
1192 
1193 		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1194 		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1195 
1196 		param_str[0] = 'a' + (u8)chip_rev;
1197 		param_str[1] = '0' + (u8)chip_metal;
1198 	}
1199 
1200 	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1201 }
1202 
1203 /* Writes a section header to the specified buffer.
1204  * Returns the dumped size in dwords.
1205  */
1206 static u32 qed_dump_section_hdr(u32 *dump_buf,
1207 				bool dump, const char *name, u32 num_params)
1208 {
1209 	return qed_dump_num_param(dump_buf, dump, name, num_params);
1210 }
1211 
1212 /* Writes the common global params to the specified buffer.
1213  * Returns the dumped size in dwords.
1214  */
1215 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1216 					 struct qed_ptt *p_ptt,
1217 					 u32 *dump_buf,
1218 					 bool dump,
1219 					 u8 num_specific_global_params)
1220 {
1221 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1222 	u32 offset = 0;
1223 	u8 num_params;
1224 
1225 	/* Dump global params section header */
1226 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1227 		(dev_data->chip_id == CHIP_BB ? 1 : 0);
1228 	offset += qed_dump_section_hdr(dump_buf + offset,
1229 				       dump, "global_params", num_params);
1230 
1231 	/* Store params */
1232 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1233 	offset += qed_dump_mfw_ver_param(p_hwfn,
1234 					 p_ptt, dump_buf + offset, dump);
1235 	offset += qed_dump_chip_revision_param(p_hwfn,
1236 					       p_ptt, dump_buf + offset, dump);
1237 	offset += qed_dump_num_param(dump_buf + offset,
1238 				     dump, "tools-version", TOOLS_VERSION);
1239 	offset += qed_dump_str_param(dump_buf + offset,
1240 				     dump,
1241 				     "chip",
1242 				     s_chip_defs[dev_data->chip_id].name);
1243 	offset += qed_dump_str_param(dump_buf + offset,
1244 				     dump,
1245 				     "platform",
1246 				     s_hw_type_defs[dev_data->hw_type].name);
1247 	offset += qed_dump_num_param(dump_buf + offset,
1248 				     dump, "pci-func", p_hwfn->abs_pf_id);
1249 	if (dev_data->chip_id == CHIP_BB)
1250 		offset += qed_dump_num_param(dump_buf + offset,
1251 					     dump, "path", QED_PATH_ID(p_hwfn));
1252 
1253 	return offset;
1254 }
1255 
1256 /* Writes the "last" section (including CRC) to the specified buffer at the
1257  * given offset. Returns the dumped size in dwords.
1258  */
1259 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1260 {
1261 	u32 start_offset = offset;
1262 
1263 	/* Dump CRC section header */
1264 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1265 
1266 	/* Calculate CRC32 and add it to the dword after the "last" section */
1267 	if (dump)
1268 		*(dump_buf + offset) = ~crc32(0xffffffff,
1269 					      (u8 *)dump_buf,
1270 					      DWORDS_TO_BYTES(offset));
1271 
1272 	offset++;
1273 
1274 	return offset - start_offset;
1275 }
1276 
/* Update blocks reset state: reads all chip reset registers and refreshes
 * dev_data->block_in_reset[] for every physical block accordingly.
 */
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
	u8 rst_reg_id;
	u32 blk_id;

	/* Read reset registers */
	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
		const struct dbg_reset_reg *rst_reg;
		bool rst_reg_removed;
		u32 rst_reg_addr;

		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
		rst_reg_removed = GET_FIELD(rst_reg->data,
					    DBG_RESET_REG_IS_REMOVED);
		/* Register addresses are stored in dword units */
		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
							 DBG_RESET_REG_ADDR));

		/* Removed registers keep their zero-initialized value */
		if (!rst_reg_removed)
			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
						     rst_reg_addr);
	}

	/* Check if blocks are in reset */
	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
		const struct dbg_block_chip *blk;
		bool has_rst_reg;
		bool is_removed;

		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		has_rst_reg = GET_FIELD(blk->flags,
					DBG_BLOCK_CHIP_HAS_RESET_REG);

		/* A cleared bit in the block's reset register means the
		 * block is held in reset.
		 */
		if (!is_removed && has_rst_reg)
			dev_data->block_in_reset[blk_id] =
			    !(reg_val[blk->reset_reg_id] &
			      BIT(blk->reset_reg_bit_offset));
	}
}
1320 
1321 /* is_mode_match recursive function */
1322 static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
1323 				  u16 *modes_buf_offset, u8 rec_depth)
1324 {
1325 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1326 	u8 *dbg_array;
1327 	bool arg1, arg2;
1328 	u8 tree_val;
1329 
1330 	if (rec_depth > MAX_RECURSION_DEPTH) {
1331 		DP_NOTICE(p_hwfn,
1332 			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
1333 		return false;
1334 	}
1335 
1336 	/* Get next element from modes tree buffer */
1337 	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1338 	tree_val = dbg_array[(*modes_buf_offset)++];
1339 
1340 	switch (tree_val) {
1341 	case INIT_MODE_OP_NOT:
1342 		return !qed_is_mode_match_rec(p_hwfn,
1343 					      modes_buf_offset, rec_depth + 1);
1344 	case INIT_MODE_OP_OR:
1345 	case INIT_MODE_OP_AND:
1346 		arg1 = qed_is_mode_match_rec(p_hwfn,
1347 					     modes_buf_offset, rec_depth + 1);
1348 		arg2 = qed_is_mode_match_rec(p_hwfn,
1349 					     modes_buf_offset, rec_depth + 1);
1350 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1351 							arg2) : (arg1 && arg2);
1352 	default:
1353 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1354 	}
1355 }
1356 
/* Returns true if the mode (specified using modes_buf_offset) is enabled.
 * On return, *modes_buf_offset has been advanced past all tree nodes
 * consumed during the evaluation.
 */
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}
1362 
/* Enable / disable the Debug block by writing the block-on register */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, bool enable)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
}
1369 
1370 /* Resets the Debug block */
1371 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1372 				    struct qed_ptt *p_ptt)
1373 {
1374 	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1375 	const struct dbg_reset_reg *reset_reg;
1376 	const struct dbg_block_chip *block;
1377 
1378 	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1379 	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1380 	reset_reg_addr =
1381 	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1382 
1383 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
1384 	new_reset_reg_val =
1385 	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1386 
1387 	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1388 	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1389 }
1390 
/* Enable / disable Debug Bus clients according to the specified mask.
 * Each bit in client_mask corresponds to one client (1 = enable,
 * 0 = disable).
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
1399 
1400 static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
1401 				    struct qed_ptt *p_ptt,
1402 				    enum block_id block_id,
1403 				    u8 line_id,
1404 				    u8 enable_mask,
1405 				    u8 right_shift,
1406 				    u8 force_valid_mask, u8 force_frame_mask)
1407 {
1408 	const struct dbg_block_chip *block =
1409 		qed_get_dbg_block_per_chip(p_hwfn, block_id);
1410 
1411 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
1412 	       line_id);
1413 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
1414 	       enable_mask);
1415 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
1416 	       right_shift);
1417 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
1418 	       force_valid_mask);
1419 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1420 	       force_frame_mask);
1421 }
1422 
1423 /* Disable debug bus in all blocks */
1424 static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
1425 				   struct qed_ptt *p_ptt)
1426 {
1427 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1428 	u32 block_id;
1429 
1430 	/* Disable all blocks */
1431 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
1432 		const struct dbg_block_chip *block_per_chip =
1433 		    qed_get_dbg_block_per_chip(p_hwfn,
1434 					       (enum block_id)block_id);
1435 
1436 		if (GET_FIELD(block_per_chip->flags,
1437 			      DBG_BLOCK_CHIP_IS_REMOVED) ||
1438 		    dev_data->block_in_reset[block_id])
1439 			continue;
1440 
1441 		/* Disable debug bus */
1442 		if (GET_FIELD(block_per_chip->flags,
1443 			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
1444 			u32 dbg_en_addr =
1445 				block_per_chip->dbg_dword_enable_reg_addr;
1446 			u16 modes_buf_offset =
1447 			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1448 				      DBG_MODE_HDR_MODES_BUF_OFFSET);
1449 			bool eval_mode =
1450 			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1451 				      DBG_MODE_HDR_EVAL_MODE) > 0;
1452 
1453 			if (!eval_mode ||
1454 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1455 				qed_wr(p_hwfn, p_ptt,
1456 				       DWORDS_TO_BYTES(dbg_en_addr),
1457 				       0);
1458 		}
1459 	}
1460 }
1461 
/* Returns true if the specified entity (indicated by GRC param) should be
 * included in the dump, false otherwise.
 */
static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
{
	/* A non-zero param value means "include in dump" */
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
1470 
1471 /* Returns the storm_id that matches the specified Storm letter,
1472  * or MAX_DBG_STORMS if invalid storm letter.
1473  */
1474 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1475 {
1476 	u8 storm_id;
1477 
1478 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1479 		if (s_storm_defs[storm_id].letter == storm_letter)
1480 			return (enum dbg_storms)storm_id;
1481 
1482 	return MAX_DBG_STORMS;
1483 }
1484 
/* Returns true if the specified Storm should be included in the dump, false
 * otherwise.
 */
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	/* dbg_storms values double as dbg_grc_params values, hence the cast */
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}
1493 
/* Returns true if the specified memory should be included in the dump, false
 * otherwise. The decision considers, in order: the block's associated Storm,
 * the Big-RAM GRC params, and finally a per-memory-group GRC param.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	const struct dbg_block *block;
	u8 i;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
			return false;
	}

	/* Big-RAM memories are controlled by their own dedicated GRC param */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	/* Map the remaining memory groups to their controlling GRC param */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		/* Groups with no controlling GRC param are always dumped */
		return true;
	}
}
1570 
1571 /* Stalls all Storms */
1572 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1573 				 struct qed_ptt *p_ptt, bool stall)
1574 {
1575 	u32 reg_addr;
1576 	u8 storm_id;
1577 
1578 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1579 		if (!qed_grc_is_storm_included(p_hwfn,
1580 					       (enum dbg_storms)storm_id))
1581 			continue;
1582 
1583 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1584 		    SEM_FAST_REG_STALL_0_BB_K2;
1585 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
1586 	}
1587 
1588 	msleep(STALL_DELAY_MS);
1589 }
1590 
1591 /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1592  * taken out of reset.
1593  */
1594 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1595 				   struct qed_ptt *p_ptt, bool rbc_only)
1596 {
1597 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1598 	u8 chip_id = dev_data->chip_id;
1599 	u32 i;
1600 
1601 	/* Take RBCs out of reset */
1602 	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
1603 		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
1604 			qed_wr(p_hwfn,
1605 			       p_ptt,
1606 			       s_rbc_reset_defs[i].reset_reg_addr +
1607 			       RESET_REG_UNRESET_OFFSET,
1608 			       s_rbc_reset_defs[i].reset_val[chip_id]);
1609 
1610 	if (!rbc_only) {
1611 		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1612 		u8 reset_reg_id;
1613 		u32 block_id;
1614 
1615 		/* Fill reset regs values */
1616 		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1617 			bool is_removed, has_reset_reg, unreset_before_dump;
1618 			const struct dbg_block_chip *block;
1619 
1620 			block = qed_get_dbg_block_per_chip(p_hwfn,
1621 							   (enum block_id)
1622 							   block_id);
1623 			is_removed =
1624 			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1625 			has_reset_reg =
1626 			    GET_FIELD(block->flags,
1627 				      DBG_BLOCK_CHIP_HAS_RESET_REG);
1628 			unreset_before_dump =
1629 			    GET_FIELD(block->flags,
1630 				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
1631 
1632 			if (!is_removed && has_reset_reg && unreset_before_dump)
1633 				reg_val[block->reset_reg_id] |=
1634 				    BIT(block->reset_reg_bit_offset);
1635 		}
1636 
1637 		/* Write reset registers */
1638 		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
1639 		     reset_reg_id++) {
1640 			const struct dbg_reset_reg *reset_reg;
1641 			u32 reset_reg_addr;
1642 
1643 			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
1644 
1645 			if (GET_FIELD
1646 			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
1647 				continue;
1648 
1649 			if (reg_val[reset_reg_id]) {
1650 				reset_reg_addr =
1651 				    GET_FIELD(reset_reg->data,
1652 					      DBG_RESET_REG_ADDR);
1653 				qed_wr(p_hwfn,
1654 				       p_ptt,
1655 				       DWORDS_TO_BYTES(reset_reg_addr) +
1656 				       RESET_REG_UNRESET_OFFSET,
1657 				       reg_val[reset_reg_id]);
1658 			}
1659 		}
1660 	}
1661 }
1662 
1663 /* Returns the attention block data of the specified block */
1664 static const struct dbg_attn_block_type_data *
1665 qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1666 			enum block_id block_id, enum dbg_attn_type attn_type)
1667 {
1668 	const struct dbg_attn_block *base_attn_block_arr =
1669 	    (const struct dbg_attn_block *)
1670 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1671 
1672 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
1673 }
1674 
1675 /* Returns the attention registers of the specified block */
1676 static const struct dbg_attn_reg *
1677 qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1678 			enum block_id block_id, enum dbg_attn_type attn_type,
1679 			u8 *num_attn_regs)
1680 {
1681 	const struct dbg_attn_block_type_data *block_type_data =
1682 	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1683 
1684 	*num_attn_regs = block_type_data->num_regs;
1685 
1686 	return (const struct dbg_attn_reg *)
1687 		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1688 		block_type_data->regs_offset;
1689 }
1690 
1691 /* For each block, clear the status of all parities */
1692 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
1693 				   struct qed_ptt *p_ptt)
1694 {
1695 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1696 	const struct dbg_attn_reg *attn_reg_arr;
1697 	u8 reg_idx, num_attn_regs;
1698 	u32 block_id;
1699 
1700 	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1701 		if (dev_data->block_in_reset[block_id])
1702 			continue;
1703 
1704 		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
1705 						       (enum block_id)block_id,
1706 						       ATTN_TYPE_PARITY,
1707 						       &num_attn_regs);
1708 
1709 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
1710 			const struct dbg_attn_reg *reg_data =
1711 				&attn_reg_arr[reg_idx];
1712 			u16 modes_buf_offset;
1713 			bool eval_mode;
1714 
1715 			/* Check mode */
1716 			eval_mode = GET_FIELD(reg_data->mode.data,
1717 					      DBG_MODE_HDR_EVAL_MODE) > 0;
1718 			modes_buf_offset =
1719 				GET_FIELD(reg_data->mode.data,
1720 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
1721 
1722 			/* If Mode match: clear parity status */
1723 			if (!eval_mode ||
1724 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1725 				qed_rd(p_hwfn, p_ptt,
1726 				       DWORDS_TO_BYTES(reg_data->
1727 						       sts_clr_address));
1728 		}
1729 	}
1730 }
1731 
1732 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1733  * the following parameters are dumped:
1734  * - count: no. of dumped entries
1735  * - split_type: split type
1736  * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1737  * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1738  */
1739 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1740 				 bool dump,
1741 				 u32 num_reg_entries,
1742 				 enum init_split_types split_type,
1743 				 u8 split_id, const char *reg_type_name)
1744 {
1745 	u8 num_params = 2 +
1746 	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1747 	u32 offset = 0;
1748 
1749 	offset += qed_dump_section_hdr(dump_buf + offset,
1750 				       dump, "grc_regs", num_params);
1751 	offset += qed_dump_num_param(dump_buf + offset,
1752 				     dump, "count", num_reg_entries);
1753 	offset += qed_dump_str_param(dump_buf + offset,
1754 				     dump, "split",
1755 				     s_split_type_defs[split_type].name);
1756 	if (split_type != SPLIT_TYPE_NONE)
1757 		offset += qed_dump_num_param(dump_buf + offset,
1758 					     dump, "id", split_id);
1759 	if (reg_type_name)
1760 		offset += qed_dump_str_param(dump_buf + offset,
1761 					     dump, "type", reg_type_name);
1762 
1763 	return offset;
1764 }
1765 
1766 /* Reads the specified registers into the specified buffer.
1767  * The addr and len arguments are specified in dwords.
1768  */
1769 void qed_read_regs(struct qed_hwfn *p_hwfn,
1770 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1771 {
1772 	u32 i;
1773 
1774 	for (i = 0; i < len; i++)
1775 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1776 }
1777 
/* Dumps the GRC registers in the specified address range.
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 * Reads via DMAE when possible, otherwise falls back to GRC reads under
 * the pretend context implied by split_type/split_id.
 */
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
				   u8 split_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
	bool read_using_dmae = false;
	u32 thresh;

	/* In size-calculation mode, just report the length */
	if (!dump)
		return len;

	/* Decode split_id into the entity IDs used below */
	switch (split_type) {
	case SPLIT_TYPE_PORT:
		port_id = split_id;
		break;
	case SPLIT_TYPE_PF:
		pf_id = split_id;
		break;
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);
		break;
	case SPLIT_TYPE_VF:
		vf_id = split_id;
		break;
	default:
		break;
	}

	/* Try reading using DMAE */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct qed_dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			dmae_params.port_id = port_id;
			break;
		case SPLIT_TYPE_PF:
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pfid = pf_id;
			break;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pfid = pf_id;
			break;
		default:
			break;
		}

		/* Execute DMAE command */
		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
						     p_ptt,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
						     len, &dmae_params);
		/* On failure, permanently disable DMAE for this dump and
		 * fall back to GRC reads.
		 */
		if (!read_using_dmae) {
			dev_data->use_dmae = 0;
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Failed reading from chip using DMAE, using GRC instead\n");
		}
	}

	if (read_using_dmae)
		goto print_log;

	/* If not read using DMAE, read using GRC */

	/* Set pretend - only reprogrammed when the cached pretend context
	 * differs from the requested one.
	 */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			qed_port_pretend(p_hwfn, p_ptt, port_id);
			break;
		case SPLIT_TYPE_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			break;
		case SPLIT_TYPE_VF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
					  vf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		default:
			break;
		}

		/* Cache the new pretend context */
		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;
	}

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

print_log:
	/* Print log - emit a progress message each time the running total
	 * crosses a threshold multiple.
	 */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Dumped %d registers...\n", dev_data->num_regs_read);

	return len;
}
1913 
1914 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
1915  * The addr and len arguments are specified in dwords.
1916  */
1917 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
1918 				      bool dump, u32 addr, u32 len)
1919 {
1920 	if (dump)
1921 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
1922 
1923 	return 1;
1924 }
1925 
1926 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
1927  * The addr and len arguments are specified in dwords.
1928  */
1929 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
1930 				  struct qed_ptt *p_ptt,
1931 				  u32 *dump_buf,
1932 				  bool dump, u32 addr, u32 len, bool wide_bus,
1933 				  enum init_split_types split_type, u8 split_id)
1934 {
1935 	u32 offset = 0;
1936 
1937 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
1938 	offset += qed_grc_dump_addr_range(p_hwfn,
1939 					  p_ptt,
1940 					  dump_buf + offset,
1941 					  dump, addr, len, wide_bus,
1942 					  split_type, split_id);
1943 
1944 	return offset;
1945 }
1946 
1947 /* Dumps GRC registers sequence with skip cycle.
1948  * Returns the dumped size in dwords.
1949  * - addr:	start GRC address in dwords
1950  * - total_len:	total no. of dwords to dump
1951  * - read_len:	no. consecutive dwords to read
1952  * - skip_len:	no. of dwords to skip (and fill with zeros)
1953  */
1954 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
1955 				       struct qed_ptt *p_ptt,
1956 				       u32 *dump_buf,
1957 				       bool dump,
1958 				       u32 addr,
1959 				       u32 total_len,
1960 				       u32 read_len, u32 skip_len)
1961 {
1962 	u32 offset = 0, reg_offset = 0;
1963 
1964 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
1965 
1966 	if (!dump)
1967 		return offset + total_len;
1968 
1969 	while (reg_offset < total_len) {
1970 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
1971 
1972 		offset += qed_grc_dump_addr_range(p_hwfn,
1973 						  p_ptt,
1974 						  dump_buf + offset,
1975 						  dump,  addr, curr_len, false,
1976 						  SPLIT_TYPE_NONE, 0);
1977 		reg_offset += curr_len;
1978 		addr += curr_len;
1979 
1980 		if (reg_offset < total_len) {
1981 			curr_len = min_t(u32, skip_len, total_len - skip_len);
1982 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
1983 			offset += curr_len;
1984 			reg_offset += curr_len;
1985 			addr += curr_len;
1986 		}
1987 	}
1988 
1989 	return offset;
1990 }
1991 
/* Dumps GRC registers entries. Returns the dumped size in dwords.
 * The input buffer is a sequence of condition headers, each followed by
 * data_size register descriptors; both are dword-sized, so pointer
 * arithmetic below advances in dword units.
 */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     enum init_split_types split_type,
				     u8 split_id,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip this header's register entries if the mode doesn't
		 * match or the block is not enabled.
		 */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;
			u32 addr, len;
			bool wide_bus;

			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 len,
							 wide_bus,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
2055 
/* Dumps the GRC registers of a single split instance (type + ID).
 * Returns the dumped size in dwords.
 */
2057 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2058 				   struct qed_ptt *p_ptt,
2059 				   struct virt_mem_desc input_regs_arr,
2060 				   u32 *dump_buf,
2061 				   bool dump,
2062 				   bool block_enable[MAX_BLOCK_ID],
2063 				   enum init_split_types split_type,
2064 				   u8 split_id, const char *reg_type_name)
2065 {
2066 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2067 	enum init_split_types hdr_split_type = split_type;
2068 	u32 num_dumped_reg_entries, offset;
2069 	u8 hdr_split_id = split_id;
2070 
2071 	/* In PORT_PF split type, print a port split header */
2072 	if (split_type == SPLIT_TYPE_PORT_PF) {
2073 		hdr_split_type = SPLIT_TYPE_PORT;
2074 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2075 	}
2076 
2077 	/* Calculate register dump header size (and skip it for now) */
2078 	offset = qed_grc_dump_regs_hdr(dump_buf,
2079 				       false,
2080 				       0,
2081 				       hdr_split_type,
2082 				       hdr_split_id, reg_type_name);
2083 
2084 	/* Dump registers */
2085 	offset += qed_grc_dump_regs_entries(p_hwfn,
2086 					    p_ptt,
2087 					    input_regs_arr,
2088 					    dump_buf + offset,
2089 					    dump,
2090 					    split_type,
2091 					    split_id,
2092 					    block_enable,
2093 					    &num_dumped_reg_entries);
2094 
2095 	/* Write register dump header */
2096 	if (dump && num_dumped_reg_entries > 0)
2097 		qed_grc_dump_regs_hdr(dump_buf,
2098 				      dump,
2099 				      num_dumped_reg_entries,
2100 				      hdr_split_type,
2101 				      hdr_split_id, reg_type_name);
2102 
2103 	return num_dumped_reg_entries > 0 ? offset : 0;
2104 }
2105 
/* Dumps registers according to the input registers array. Returns the dumped
 * size in dwords.
 * The debug array is a sequence of split headers, each followed by
 * split_data_size dwords of register data; that data is dumped once per
 * split instance of the header's split type.
 */
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;
		u8 split_id;

		split_hdr =
		    (const struct dbg_dump_split_hdr *)
		    dbg_buf->ptr + input_offset++;
		split_type =
		    GET_FIELD(split_hdr->hdr,
			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		    input_offset;
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Derive the number of split instances to iterate over */
		switch (split_type) {
		case SPLIT_TYPE_NONE:
			split_count = 1;
			break;
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
			break;
		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			break;
		case SPLIT_TYPE_VF:
			split_count = dev_data->num_vfs;
			break;
		default:
			/* Unknown split type - abort the whole dump */
			return 0;
		}

		/* Dump the same register set once per split instance */
		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump, block_enable,
							  split_type,
							  split_id,
							  reg_type_name);

		input_offset += split_data_size;
	}

	/* Cancel pretends (pretend to original PF), since dumping per-split
	 * data may have left a pretend configuration active.
	 */
	if (dump) {
		qed_fid_pretend(p_hwfn, p_ptt,
				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					    p_hwfn->rel_pf_id));
		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
		dev_data->pretend.split_id = 0;
	}

	return offset;
}
2184 
2185 /* Dump reset registers. Returns the dumped size in dwords. */
2186 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2187 				   struct qed_ptt *p_ptt,
2188 				   u32 *dump_buf, bool dump)
2189 {
2190 	u32 offset = 0, num_regs = 0;
2191 	u8 reset_reg_id;
2192 
2193 	/* Calculate header size */
2194 	offset += qed_grc_dump_regs_hdr(dump_buf,
2195 					false,
2196 					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2197 
2198 	/* Write reset registers */
2199 	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2200 	     reset_reg_id++) {
2201 		const struct dbg_reset_reg *reset_reg;
2202 		u32 reset_reg_addr;
2203 
2204 		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2205 
2206 		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2207 			continue;
2208 
2209 		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2210 		offset += qed_grc_dump_reg_entry(p_hwfn,
2211 						 p_ptt,
2212 						 dump_buf + offset,
2213 						 dump,
2214 						 reset_reg_addr,
2215 						 1, false, SPLIT_TYPE_NONE, 0);
2216 		num_regs++;
2217 	}
2218 
2219 	/* Write header */
2220 	if (dump)
2221 		qed_grc_dump_regs_hdr(dump_buf,
2222 				      true, num_regs, SPLIT_TYPE_NONE,
2223 				      0, "RESET_REGS");
2224 
2225 	return offset;
2226 }
2227 
/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 * Two sections are produced: parity/attention registers ("ATTN_REGS") and
 * Storm stall status registers ("REGS"). Each section header is written as
 * a placeholder first and back-patched once its entry count is known.
 */
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, stall_regs_offset;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;
	u32 num_reg_entries = 0;

	/* Write empty header for attention registers */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write parity registers */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks in reset can't be read; skip them only when actually
		 * dumping (the size-calculation pass includes them).
		 */
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;
			u32 addr;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode &&
			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: read & dump registers */
			/* Dump the mask register, then the status register */
			addr = reg_data->mask_address;
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			addr = GET_FIELD(reg_data->data,
					 DBG_ATTN_REG_STS_ADDRESS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			num_reg_entries += 2;
		}
	}

	/* Overwrite header for attention registers */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write empty header for stall registers */
	stall_regs_offset = offset;
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, SPLIT_TYPE_NONE, 0, "REGS");

	/* Write Storm stall status registers */
	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
	     storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 addr;

		/* Skip Storms whose SEM block is in reset (dump only) */
		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
			continue;

		addr =
		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				    SEM_FAST_REG_STALLED);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 addr,
						 1,
						 false, SPLIT_TYPE_NONE, 0);
		num_reg_entries++;
	}

	/* Overwrite header for stall registers */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "REGS");

	return offset;
}
2338 
2339 /* Dumps registers that can't be represented in the debug arrays */
2340 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2341 				     struct qed_ptt *p_ptt,
2342 				     u32 *dump_buf, bool dump)
2343 {
2344 	u32 offset = 0, addr;
2345 
2346 	offset += qed_grc_dump_regs_hdr(dump_buf,
2347 					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2348 
2349 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2350 	 * skipped).
2351 	 */
2352 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2353 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2354 					      p_ptt,
2355 					      dump_buf + offset,
2356 					      dump,
2357 					      addr,
2358 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2359 					      7,
2360 					      1);
2361 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2362 	offset +=
2363 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2364 					p_ptt,
2365 					dump_buf + offset,
2366 					dump,
2367 					addr,
2368 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2369 					7,
2370 					1);
2371 
2372 	return offset;
2373 }
2374 
2375 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2376  * dwords. The following parameters are dumped:
2377  * - name:	   dumped only if it's not NULL.
2378  * - addr:	   in dwords, dumped only if name is NULL.
2379  * - len:	   in dwords, always dumped.
2380  * - width:	   dumped if it's not zero.
2381  * - packed:	   dumped only if it's not false.
2382  * - mem_group:	   always dumped.
2383  * - is_storm:	   true only if the memory is related to a Storm.
2384  * - storm_letter: valid only if is_storm is true.
2385  *
2386  */
2387 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2388 				u32 *dump_buf,
2389 				bool dump,
2390 				const char *name,
2391 				u32 addr,
2392 				u32 len,
2393 				u32 bit_width,
2394 				bool packed,
2395 				const char *mem_group, char storm_letter)
2396 {
2397 	u8 num_params = 3;
2398 	u32 offset = 0;
2399 	char buf[64];
2400 
2401 	if (!len)
2402 		DP_NOTICE(p_hwfn,
2403 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2404 
2405 	if (bit_width)
2406 		num_params++;
2407 	if (packed)
2408 		num_params++;
2409 
2410 	/* Dump section header */
2411 	offset += qed_dump_section_hdr(dump_buf + offset,
2412 				       dump, "grc_mem", num_params);
2413 
2414 	if (name) {
2415 		/* Dump name */
2416 		if (storm_letter) {
2417 			strcpy(buf, "?STORM_");
2418 			buf[0] = storm_letter;
2419 			strcpy(buf + strlen(buf), name);
2420 		} else {
2421 			strcpy(buf, name);
2422 		}
2423 
2424 		offset += qed_dump_str_param(dump_buf + offset,
2425 					     dump, "name", buf);
2426 	} else {
2427 		/* Dump address */
2428 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2429 
2430 		offset += qed_dump_num_param(dump_buf + offset,
2431 					     dump, "addr", addr_in_bytes);
2432 	}
2433 
2434 	/* Dump len */
2435 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2436 
2437 	/* Dump bit width */
2438 	if (bit_width)
2439 		offset += qed_dump_num_param(dump_buf + offset,
2440 					     dump, "width", bit_width);
2441 
2442 	/* Dump packed */
2443 	if (packed)
2444 		offset += qed_dump_num_param(dump_buf + offset,
2445 					     dump, "packed", 1);
2446 
2447 	/* Dump reg type */
2448 	if (storm_letter) {
2449 		strcpy(buf, "?STORM_");
2450 		buf[0] = storm_letter;
2451 		strcpy(buf + strlen(buf), mem_group);
2452 	} else {
2453 		strcpy(buf, mem_group);
2454 	}
2455 
2456 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2457 
2458 	return offset;
2459 }
2460 
2461 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2462  * Returns the dumped size in dwords.
2463  * The addr and len arguments are specified in dwords.
2464  */
2465 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2466 			    struct qed_ptt *p_ptt,
2467 			    u32 *dump_buf,
2468 			    bool dump,
2469 			    const char *name,
2470 			    u32 addr,
2471 			    u32 len,
2472 			    bool wide_bus,
2473 			    u32 bit_width,
2474 			    bool packed,
2475 			    const char *mem_group, char storm_letter)
2476 {
2477 	u32 offset = 0;
2478 
2479 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2480 				       dump_buf + offset,
2481 				       dump,
2482 				       name,
2483 				       addr,
2484 				       len,
2485 				       bit_width,
2486 				       packed, mem_group, storm_letter);
2487 	offset += qed_grc_dump_addr_range(p_hwfn,
2488 					  p_ptt,
2489 					  dump_buf + offset,
2490 					  dump, addr, len, wide_bus,
2491 					  SPLIT_TYPE_NONE, 0);
2492 
2493 	return offset;
2494 }
2495 
/* Dumps GRC memories entries. Returns the dumped size in dwords.
 * The input array is a sequence of condition headers, each followed by
 * memory entries of MEM_DUMP_ENTRY_SIZE_DWORDS dwords each.
 */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct virt_mem_desc input_mems_arr,
				    u32 *dump_buf, bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		/* NOTE(review): offset arithmetic assumes the condition
		 * header occupies exactly one dword - consistent with the
		 * single input_offset++ here; confirm against qed_hsi.h.
		 */
		cond_hdr =
		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
		    input_offset++;
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole chunk if its mode doesn't match */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries;
		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem =
			    (const struct dbg_dump_mem *)((u32 *)
							  input_mems_arr.ptr
							  + input_offset);
			const struct dbg_block *block;
			char storm_letter = 0;
			u32 mem_addr, mem_len;
			bool mem_wide_bus;
			u8 mem_group_id;

			/* A bad group ID means corrupt input - abort */
			mem_group_id = GET_FIELD(mem->dword0,
						 DBG_DUMP_MEM_MEM_GROUP_ID);
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
				return 0;
			}

			/* Skip memories excluded from this dump's config */
			if (!qed_grc_is_mem_included(p_hwfn,
						     (enum block_id)
						     cond_hdr->block_id,
						     mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1,
						 DBG_DUMP_MEM_WIDE_BUS);

			block = get_dbg_block(p_hwfn,
					      cond_hdr->block_id);

			/* If memory is associated with Storm,
			 * update storm details
			 */
			if (block->associated_storm_letter)
				storm_letter = block->associated_storm_letter;

			/* Dump memory */
			offset += qed_grc_dump_mem(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						NULL,
						mem_addr,
						mem_len,
						mem_wide_bus,
						0,
						false,
						s_mem_group_names[mem_group_id],
						storm_letter);
		}
	}

	return offset;
}
2589 
/* Dumps GRC memories according to the input array dump_mem.
 * Returns the dumped size in dwords.
 * The debug array is a sequence of split headers, each followed by
 * split_data_size dwords of memory entries; only the NONE split type is
 * currently supported for memories.
 */
static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_mems_arr;
		enum init_split_types split_type;
		u32 split_data_size;

		split_hdr =
		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
		    input_offset++;
		split_type = GET_FIELD(split_hdr->hdr,
				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);

		if (split_type == SPLIT_TYPE_NONE)
			offset += qed_grc_dump_mem_entries(p_hwfn,
							   p_ptt,
							   curr_input_mems_arr,
							   dump_buf + offset,
							   dump);
		else
			DP_NOTICE(p_hwfn,
				  "Dumping split memories is currently not supported\n");

		/* Advance past this split's data regardless of its type */
		input_offset += split_data_size;
	}

	return offset;
}
2632 
/* Dumps GRC context data for the specified Storm.
 * Returns the dumped size in dwords.
 * The lid_size argument is specified in quad-regs.
 * Reading is done one dword at a time: a lid/dword selector is written to
 * the Storm's CM context control register, then the value is read back.
 */
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf,
				 bool dump,
				 const char *name,
				 u32 num_lids,
				 enum cm_ctx_types ctx_type, u8 storm_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, lid_size, total_size;
	u32 rd_reg_addr, offset = 0;

	/* Convert quad-regs to dwords */
	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;

	/* This Storm has no context of this type - nothing to dump */
	if (!lid_size)
		return 0;

	total_size = num_lids * lid_size;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       name,
				       0,
				       total_size,
				       lid_size * 32,
				       false, name, storm->letter);

	/* When not dumping, only the required size is returned */
	if (!dump)
		return offset + total_size;

	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);

	/* Dump context data */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++) {
			/* Select dword i of this lid's context.
			 * NOTE(review): the encoding places the lid in the
			 * low 9 bits and the dword index above it - confirm
			 * against the CM HW spec.
			 */
			qed_wr(p_hwfn,
			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  rd_reg_addr,
							  1,
							  false,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
2690 
2691 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2692 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2693 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2694 {
2695 	u32 offset = 0;
2696 	u8 storm_id;
2697 
2698 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2699 		if (!qed_grc_is_storm_included(p_hwfn,
2700 					       (enum dbg_storms)storm_id))
2701 			continue;
2702 
2703 		/* Dump Conn AG context size */
2704 		offset += qed_grc_dump_ctx_data(p_hwfn,
2705 						p_ptt,
2706 						dump_buf + offset,
2707 						dump,
2708 						"CONN_AG_CTX",
2709 						NUM_OF_LCIDS,
2710 						CM_CTX_CONN_AG, storm_id);
2711 
2712 		/* Dump Conn ST context size */
2713 		offset += qed_grc_dump_ctx_data(p_hwfn,
2714 						p_ptt,
2715 						dump_buf + offset,
2716 						dump,
2717 						"CONN_ST_CTX",
2718 						NUM_OF_LCIDS,
2719 						CM_CTX_CONN_ST, storm_id);
2720 
2721 		/* Dump Task AG context size */
2722 		offset += qed_grc_dump_ctx_data(p_hwfn,
2723 						p_ptt,
2724 						dump_buf + offset,
2725 						dump,
2726 						"TASK_AG_CTX",
2727 						NUM_OF_LTIDS,
2728 						CM_CTX_TASK_AG, storm_id);
2729 
2730 		/* Dump Task ST context size */
2731 		offset += qed_grc_dump_ctx_data(p_hwfn,
2732 						p_ptt,
2733 						dump_buf + offset,
2734 						dump,
2735 						"TASK_ST_CTX",
2736 						NUM_OF_LTIDS,
2737 						CM_CTX_TASK_ST, storm_id);
2738 	}
2739 
2740 	return offset;
2741 }
2742 
/* VFC status register bit offsets */
#define VFC_STATUS_RESP_READY_BIT	0
#define VFC_STATUS_BUSY_BIT		1
#define VFC_STATUS_SENDING_CMD_BIT	2

/* Base polling interval (ms, scaled by the HW delay factor) and the maximum
 * number of polling attempts before giving up.
 */
#define VFC_POLLING_DELAY_MS	1
#define VFC_POLLING_COUNT		20

/* Reads data from VFC. Returns the number of dwords read (0 on error).
 * Sizes are specified in dwords.
 * The sequence is: write the command, write the address, then read resp_size
 * dwords, polling the status register for readiness before each read.
 */
static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct storm_defs *storm,
				      u32 *cmd_data,
				      u32 cmd_size,
				      u32 *addr_data,
				      u32 addr_size,
				      u32 resp_size, u32 *dump_buf)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 vfc_status, polling_ms, polling_count = 0, i;
	u32 reg_addr, sem_base;
	bool is_ready = false;

	sem_base = storm->sem_fast_mem_addr;
	/* Scale the base polling delay by the platform's delay factor */
	polling_ms = VFC_POLLING_DELAY_MS *
	    s_hw_type_defs[dev_data->hw_type].delay_factor;

	/* Write VFC command */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
		   cmd_data, cmd_size);

	/* Write VFC address */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_ADDR,
		   addr_data, addr_size);

	/* Read response */
	for (i = 0; i < resp_size; i++) {
		/* Poll until ready; the poll budget is shared across the
		 * whole response, not reset per dword.
		 */
		do {
			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
			qed_grc_dump_addr_range(p_hwfn,
						p_ptt,
						&vfc_status,
						true,
						BYTES_TO_DWORDS(reg_addr),
						1,
						false, SPLIT_TYPE_NONE, 0);
			is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);

			if (!is_ready) {
				/* Give up after VFC_POLLING_COUNT attempts */
				if (polling_count++ == VFC_POLLING_COUNT)
					return 0;

				msleep(polling_ms);
			}
		} while (!is_ready);

		/* Read one response dword */
		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
		qed_grc_dump_addr_range(p_hwfn,
					p_ptt,
					dump_buf + i,
					true,
					BYTES_TO_DWORDS(reg_addr),
					1, false, SPLIT_TYPE_NONE, 0);
	}

	return resp_size;
}
2816 
/* Dump VFC CAM. Returns the dumped size in dwords.
 * Issues one CAM-read command per row and appends each row's response to
 * the dump buffer.
 */
static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 *dump_buf, bool dump, u8 storm_id)
{
	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
	u32 row, offset = 0;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       "vfc_cam",
				       0,
				       total_size,
				       256,
				       false, "vfc_cam", storm->letter);

	/* When not dumping, only the required size is returned */
	if (!dump)
		return offset + total_size;

	/* Prepare CAM address */
	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);

	/* Read VFC CAM data - the row is encoded in the command, while the
	 * opcode stays fixed in the address.
	 */
	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
		offset += qed_grc_dump_read_from_vfc(p_hwfn,
						     p_ptt,
						     storm,
						     cam_cmd,
						     VFC_CAM_CMD_DWORDS,
						     cam_addr,
						     VFC_CAM_ADDR_DWORDS,
						     VFC_CAM_RESP_DWORDS,
						     dump_buf + offset);
	}

	return offset;
}
2859 
/* Dump VFC RAM. Returns the dumped size in dwords.
 * Issues one RAM-read command per row in the RAM's row range and appends
 * each row's response to the dump buffer.
 */
static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 *dump_buf,
				bool dump,
				u8 storm_id, struct vfc_ram_defs *ram_defs)
{
	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
	u32 row, offset = 0;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       ram_defs->mem_name,
				       0,
				       total_size,
				       256,
				       false,
				       ram_defs->type_name,
				       storm->letter);

	/* When not dumping, only the required size is returned */
	if (!dump)
		return offset + total_size;

	/* Prepare RAM address */
	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);

	/* Read VFC RAM data - unlike the CAM, the row is encoded in the
	 * address word; the command stays all-zero.
	 */
	for (row = ram_defs->base_row;
	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
		offset += qed_grc_dump_read_from_vfc(p_hwfn,
						     p_ptt,
						     storm,
						     ram_cmd,
						     VFC_RAM_CMD_DWORDS,
						     ram_addr,
						     VFC_RAM_ADDR_DWORDS,
						     VFC_RAM_RESP_DWORDS,
						     dump_buf + offset);
	}

	return offset;
}
2907 
2908 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
2909 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2910 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2911 {
2912 	u8 storm_id, i;
2913 	u32 offset = 0;
2914 
2915 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2916 		if (!qed_grc_is_storm_included(p_hwfn,
2917 					       (enum dbg_storms)storm_id) ||
2918 		    !s_storm_defs[storm_id].has_vfc)
2919 			continue;
2920 
2921 		/* Read CAM */
2922 		offset += qed_grc_dump_vfc_cam(p_hwfn,
2923 					       p_ptt,
2924 					       dump_buf + offset,
2925 					       dump, storm_id);
2926 
2927 		/* Read RAM */
2928 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2929 			offset += qed_grc_dump_vfc_ram(p_hwfn,
2930 						       p_ptt,
2931 						       dump_buf + offset,
2932 						       dump,
2933 						       storm_id,
2934 						       &s_vfc_ram_defs[i]);
2935 	}
2936 
2937 	return offset;
2938 }
2939 
/* Dumps GRC RSS data. Returns the dumped size in dwords.
 * Each RSS memory is read through the RSS RAM address/data register window,
 * one RAM line at a time.
 */
static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 rss_mem_id;

	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
		u32 rss_addr, num_entries, total_dwords;
		struct rss_mem_defs *rss_defs;
		u32 addr, num_dwords_to_read;
		bool packed;

		rss_defs = &s_rss_mem_defs[rss_mem_id];
		rss_addr = rss_defs->addr;
		num_entries = rss_defs->num_entries[dev_data->chip_id];
		total_dwords = (num_entries * rss_defs->entry_width) / 32;
		/* 16-bit entries are stored two per dword */
		packed = (rss_defs->entry_width == 16);

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       rss_defs->mem_name,
					       0,
					       total_dwords,
					       rss_defs->entry_width,
					       packed,
					       rss_defs->type_name, 0);

		/* Dump RSS data */
		if (!dump) {
			/* Size calculation only - skip the HW reads */
			offset += total_dwords;
			continue;
		}

		/* Select a RAM line via RSS_REG_RSS_RAM_ADDR, then read up
		 * to RSS_REG_RSS_RAM_DATA_SIZE dwords from the data window.
		 */
		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
		while (total_dwords) {
			num_dwords_to_read = min_t(u32,
						   RSS_REG_RSS_RAM_DATA_SIZE,
						   total_dwords);
			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  num_dwords_to_read,
							  false,
							  SPLIT_TYPE_NONE, 0);
			total_dwords -= num_dwords_to_read;
			rss_addr++;
		}
	}

	return offset;
}
2997 
/* Dumps GRC Big RAM. Returns the dumped size in dwords.
 * The RAM is read through its address/data register window, one
 * BRB_REG_BIG_RAM_DATA_SIZE chunk per selected line.
 */
static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 *dump_buf, bool dump, u8 big_ram_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_size, ram_size, offset = 0, reg_val, i;
	char mem_name[12] = "???_BIG_RAM";
	char type_name[8] = "???_RAM";
	struct big_ram_defs *big_ram;

	big_ram = &s_big_ram_defs[big_ram_id];
	ram_size = big_ram->ram_size[dev_data->chip_id];

	/* Block width (128b or 256b) is chip-dependent and read from HW */
	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
	block_size = reg_val &
		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
									 : 128;

	/* Deliberately NOT NUL-terminating: copy exactly BIG_RAM_NAME_LEN
	 * chars to replace only the "???" prefix of the pre-initialized
	 * buffers above. NOTE(review): relies on instance_name being exactly
	 * BIG_RAM_NAME_LEN chars - confirm against s_big_ram_defs.
	 */
	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);

	/* Dump memory header */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       mem_name,
				       0,
				       ram_size,
				       block_size * 8,
				       false, type_name, 0);

	/* Read and dump Big RAM data */
	if (!dump)
		return offset + ram_size;

	/* Dump Big RAM: select line i, then read a chunk of data */
	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
	     i++) {
		u32 addr, len;

		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
		len = BRB_REG_BIG_RAM_DATA_SIZE;
		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  dump,
						  addr,
						  len,
						  false, SPLIT_TYPE_NONE, 0);
	}

	return offset;
}
3053 
/* Dumps MCP scratchpad. Returns the dumped size in dwords.
 * The MCP is halted (unless disabled by the NO_MCP GRC param) before the
 * dump so its memories are consistent, and resumed afterwards.
 */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	u32 offset = 0, addr;
	bool halted = false;

	/* Halt MCP */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		/* A failed halt is logged but the dump proceeds anyway */
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
				   MCP_REG_SCRATCH_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP cpu_reg_file */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
				   MCP_REG_CPU_REG_FILE_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP registers */
	block_enable[BLOCK_MCP] = true;
	offset += qed_grc_dump_registers(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump, block_enable, "MCP");

	/* Dump required non-MCP registers */
	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
					dump, 1, SPLIT_TYPE_NONE, 0,
					"MCP");
	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
	offset += qed_grc_dump_reg_entry(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump,
					 addr,
					 1,
					 false, SPLIT_TYPE_NONE, 0);

	/* Release MCP (only if it was successfully halted above) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	return offset;
}
3115 
3116 /* Dumps the tbus indirect memory for all PHYs.
3117  * Returns the dumped size in dwords.
3118  */
3119 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3120 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3121 {
3122 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3123 	char mem_name[32];
3124 	u8 phy_id;
3125 
3126 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3127 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3128 		struct phy_defs *phy_defs;
3129 		u8 *bytes_buf;
3130 
3131 		phy_defs = &s_phy_defs[phy_id];
3132 		addr_lo_addr = phy_defs->base_addr +
3133 			       phy_defs->tbus_addr_lo_addr;
3134 		addr_hi_addr = phy_defs->base_addr +
3135 			       phy_defs->tbus_addr_hi_addr;
3136 		data_lo_addr = phy_defs->base_addr +
3137 			       phy_defs->tbus_data_lo_addr;
3138 		data_hi_addr = phy_defs->base_addr +
3139 			       phy_defs->tbus_data_hi_addr;
3140 
3141 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3142 			     phy_defs->phy_name) < 0)
3143 			DP_NOTICE(p_hwfn,
3144 				  "Unexpected debug error: invalid PHY memory name\n");
3145 
3146 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3147 					       dump_buf + offset,
3148 					       dump,
3149 					       mem_name,
3150 					       0,
3151 					       PHY_DUMP_SIZE_DWORDS,
3152 					       16, true, mem_name, 0);
3153 
3154 		if (!dump) {
3155 			offset += PHY_DUMP_SIZE_DWORDS;
3156 			continue;
3157 		}
3158 
3159 		bytes_buf = (u8 *)(dump_buf + offset);
3160 		for (tbus_hi_offset = 0;
3161 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3162 		     tbus_hi_offset++) {
3163 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3164 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3165 			     tbus_lo_offset++) {
3166 				qed_wr(p_hwfn,
3167 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3168 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3169 							    p_ptt,
3170 							    data_lo_addr);
3171 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3172 							    p_ptt,
3173 							    data_hi_addr);
3174 			}
3175 		}
3176 
3177 		offset += PHY_DUMP_SIZE_DWORDS;
3178 	}
3179 
3180 	return offset;
3181 }
3182 
3183 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3184 					    struct qed_ptt *p_ptt,
3185 					    u32 image_type,
3186 					    u32 *nvram_offset_bytes,
3187 					    u32 *nvram_size_bytes);
3188 
3189 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3190 				      struct qed_ptt *p_ptt,
3191 				      u32 nvram_offset_bytes,
3192 				      u32 nvram_size_bytes, u32 *ret_buf);
3193 
3194 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
3195 static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3196 				    struct qed_ptt *p_ptt,
3197 				    u32 *dump_buf, bool dump)
3198 {
3199 	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3200 	u32 hw_dump_size_dwords = 0, offset = 0;
3201 	enum dbg_status status;
3202 
3203 	/* Read HW dump image from NVRAM */
3204 	status = qed_find_nvram_image(p_hwfn,
3205 				      p_ptt,
3206 				      NVM_TYPE_HW_DUMP_OUT,
3207 				      &hw_dump_offset_bytes,
3208 				      &hw_dump_size_bytes);
3209 	if (status != DBG_STATUS_OK)
3210 		return 0;
3211 
3212 	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3213 
3214 	/* Dump HW dump image section */
3215 	offset += qed_dump_section_hdr(dump_buf + offset,
3216 				       dump, "mcp_hw_dump", 1);
3217 	offset += qed_dump_num_param(dump_buf + offset,
3218 				     dump, "size", hw_dump_size_dwords);
3219 
3220 	/* Read MCP HW dump image into dump buffer */
3221 	if (dump && hw_dump_size_dwords) {
3222 		status = qed_nvram_read(p_hwfn,
3223 					p_ptt,
3224 					hw_dump_offset_bytes,
3225 					hw_dump_size_bytes, dump_buf + offset);
3226 		if (status != DBG_STATUS_OK) {
3227 			DP_NOTICE(p_hwfn,
3228 				  "Failed to read MCP HW Dump image from NVRAM\n");
3229 			return 0;
3230 		}
3231 	}
3232 	offset += hw_dump_size_dwords;
3233 
3234 	return offset;
3235 }
3236 
/* Dumps Static Debug data. Returns the dumped size in dwords. */
static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0, addr, len;

	/* Don't dump static debug if a debug bus recording is in progress */
	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		/* Disable debug bus in all blocks */
		qed_bus_disable_blocks(p_hwfn, p_ptt);

		/* Reset the DBG block and configure it to capture debug
		 * lines into its internal buffer.
		 */
		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip;
		const struct dbg_block *block;
		bool is_removed, has_dbg_bus;
		u16 modes_buf_offset;
		u32 block_dwords;

		block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
		is_removed = GET_FIELD(block_per_chip->flags,
				       DBG_BLOCK_CHIP_IS_REMOVED);
		has_dbg_bus = GET_FIELD(block_per_chip->flags,
					DBG_BLOCK_CHIP_HAS_DBG_BUS);

		/* read+clear for NWS parity is not working, skip NWS block */
		if (block_id == BLOCK_NWS)
			continue;

		/* If the block's debug bus is mode-dependent, treat it as
		 * absent when the current chip mode doesn't match.
		 */
		if (!is_removed && has_dbg_bus &&
		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
			      DBG_MODE_HDR_EVAL_MODE) > 0) {
			modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
				has_dbg_bus = false;
		}

		if (is_removed || !has_dbg_bus)
			continue;

		block_dwords = NUM_DBG_LINES(block_per_chip) *
			       STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       block->name,
					       0,
					       block_dwords,
					       32, false, "STATIC", 0);

		/* When only sizing, account for the data without reading */
		if (!dump) {
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			memset(dump_buf + offset, 0,
			       DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		qed_bus_enable_clients(p_hwfn,
				       p_ptt,
				       BIT(block_per_chip->dbg_client_id));

		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
		len = STATIC_DEBUG_LINE_DWORDS;
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
		     line_id++) {
			/* Configure debug line ID */
			qed_bus_config_dbg_line(p_hwfn,
						p_ptt,
						(enum block_id)block_id,
						(u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  len,
							  true, SPLIT_TYPE_NONE,
							  0);
		}

		/* Disable block's client and debug output */
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
		qed_bus_config_dbg_line(p_hwfn, p_ptt,
					(enum block_id)block_id, 0, 0, 0, 0, 0);
	}

	if (dump) {
		/* Stop the DBG block and disable all clients */
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
3359 
3360 /* Performs GRC Dump to the specified buffer.
3361  * Returns the dumped size in dwords.
3362  */
3363 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3364 				    struct qed_ptt *p_ptt,
3365 				    u32 *dump_buf,
3366 				    bool dump, u32 *num_dumped_dwords)
3367 {
3368 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3369 	u32 dwords_read, offset = 0;
3370 	bool parities_masked = false;
3371 	u8 i;
3372 
3373 	*num_dumped_dwords = 0;
3374 	dev_data->num_regs_read = 0;
3375 
3376 	/* Update reset state */
3377 	if (dump)
3378 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3379 
3380 	/* Dump global params */
3381 	offset += qed_dump_common_global_params(p_hwfn,
3382 						p_ptt,
3383 						dump_buf + offset, dump, 4);
3384 	offset += qed_dump_str_param(dump_buf + offset,
3385 				     dump, "dump-type", "grc-dump");
3386 	offset += qed_dump_num_param(dump_buf + offset,
3387 				     dump,
3388 				     "num-lcids",
3389 				     NUM_OF_LCIDS);
3390 	offset += qed_dump_num_param(dump_buf + offset,
3391 				     dump,
3392 				     "num-ltids",
3393 				     NUM_OF_LTIDS);
3394 	offset += qed_dump_num_param(dump_buf + offset,
3395 				     dump, "num-ports", dev_data->num_ports);
3396 
3397 	/* Dump reset registers (dumped before taking blocks out of reset ) */
3398 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3399 		offset += qed_grc_dump_reset_regs(p_hwfn,
3400 						  p_ptt,
3401 						  dump_buf + offset, dump);
3402 
3403 	/* Take all blocks out of reset (using reset registers) */
3404 	if (dump) {
3405 		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3406 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3407 	}
3408 
3409 	/* Disable all parities using MFW command */
3410 	if (dump &&
3411 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3412 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3413 		if (!parities_masked) {
3414 			DP_NOTICE(p_hwfn,
3415 				  "Failed to mask parities using MFW\n");
3416 			if (qed_grc_get_param
3417 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3418 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3419 		}
3420 	}
3421 
3422 	/* Dump modified registers (dumped before modifying them) */
3423 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3424 		offset += qed_grc_dump_modified_regs(p_hwfn,
3425 						     p_ptt,
3426 						     dump_buf + offset, dump);
3427 
3428 	/* Stall storms */
3429 	if (dump &&
3430 	    (qed_grc_is_included(p_hwfn,
3431 				 DBG_GRC_PARAM_DUMP_IOR) ||
3432 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3433 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3434 
3435 	/* Dump all regs  */
3436 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3437 		bool block_enable[MAX_BLOCK_ID];
3438 
3439 		/* Dump all blocks except MCP */
3440 		for (i = 0; i < MAX_BLOCK_ID; i++)
3441 			block_enable[i] = true;
3442 		block_enable[BLOCK_MCP] = false;
3443 		offset += qed_grc_dump_registers(p_hwfn,
3444 						 p_ptt,
3445 						 dump_buf +
3446 						 offset,
3447 						 dump,
3448 						 block_enable, NULL);
3449 
3450 		/* Dump special registers */
3451 		offset += qed_grc_dump_special_regs(p_hwfn,
3452 						    p_ptt,
3453 						    dump_buf + offset, dump);
3454 	}
3455 
3456 	/* Dump memories */
3457 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3458 
3459 	/* Dump MCP */
3460 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3461 		offset += qed_grc_dump_mcp(p_hwfn,
3462 					   p_ptt, dump_buf + offset, dump);
3463 
3464 	/* Dump context */
3465 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3466 		offset += qed_grc_dump_ctx(p_hwfn,
3467 					   p_ptt, dump_buf + offset, dump);
3468 
3469 	/* Dump RSS memories */
3470 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3471 		offset += qed_grc_dump_rss(p_hwfn,
3472 					   p_ptt, dump_buf + offset, dump);
3473 
3474 	/* Dump Big RAM */
3475 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3476 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3477 			offset += qed_grc_dump_big_ram(p_hwfn,
3478 						       p_ptt,
3479 						       dump_buf + offset,
3480 						       dump, i);
3481 
3482 	/* Dump VFC */
3483 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3484 		dwords_read = qed_grc_dump_vfc(p_hwfn,
3485 					       p_ptt, dump_buf + offset, dump);
3486 		offset += dwords_read;
3487 		if (!dwords_read)
3488 			return DBG_STATUS_VFC_READ_ERROR;
3489 	}
3490 
3491 	/* Dump PHY tbus */
3492 	if (qed_grc_is_included(p_hwfn,
3493 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3494 	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3495 		offset += qed_grc_dump_phy(p_hwfn,
3496 					   p_ptt, dump_buf + offset, dump);
3497 
3498 	/* Dump MCP HW Dump */
3499 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3500 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
3501 		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3502 						   p_ptt,
3503 						   dump_buf + offset, dump);
3504 
3505 	/* Dump static debug data (only if not during debug bus recording) */
3506 	if (qed_grc_is_included(p_hwfn,
3507 				DBG_GRC_PARAM_DUMP_STATIC) &&
3508 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3509 		offset += qed_grc_dump_static_debug(p_hwfn,
3510 						    p_ptt,
3511 						    dump_buf + offset, dump);
3512 
3513 	/* Dump last section */
3514 	offset += qed_dump_last_section(dump_buf, offset, dump);
3515 
3516 	if (dump) {
3517 		/* Unstall storms */
3518 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3519 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3520 
3521 		/* Clear parity status */
3522 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3523 
3524 		/* Enable all parities using MFW command */
3525 		if (parities_masked)
3526 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3527 	}
3528 
3529 	*num_dumped_dwords = offset;
3530 
3531 	return DBG_STATUS_OK;
3532 }
3533 
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *
				     dump_buf,
				     bool dump,
				     u16 rule_id,
				     const struct dbg_idle_chk_rule *rule,
				     u16 fail_entry_id, u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	/* The rule's register array holds num_cond_regs condition registers
	 * followed by its info registers.
	 */
	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
	regs = (const union dbg_idle_chk_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
		rule->reg_offset;
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		memset(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values (already read by the caller into
	 * cond_reg_values; may be NULL when only sizing, i.e. dump == false).
	 */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr =
		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
			    reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		memset(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* A register spanning multiple entries is marked as memory */
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values - unlike condition registers, these are
	 * read from HW here.
	 */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Check if register's block is in reset */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, "Invalid block_id\n");
			return 0;
		}

		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
				  (dump_buf + offset);

			/* Check mode */
			eval_mode = GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset =
				    GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match =
					qed_is_mode_match(p_hwfn,
							  &modes_buf_offset);
			}

			/* Registers whose mode doesn't match the current chip
			 * mode are skipped entirely (nothing is written).
			 */
			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data,
					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data,
					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			memset(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			SET_FIELD(reg_hdr->data,
				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
				  rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  reg->size, wide_bus,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
3670 
/* Dumps idle check rule entries. Returns the dumped size in dwords. */
static u32
qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u32 *dump_buf, bool dump,
			       const struct dbg_idle_chk_rule *input_rules,
			       u32 num_input_rules, u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		/* Locate the rule's condition registers and immediate values
		 * in the binary debug arrays.
		 */
		rule = &input_rules[i];
		regs = (const union dbg_idle_chk_reg *)
			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
			rule->reg_offset;
		cond_regs = &regs[0].cond_reg;
		imm_values =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
		    rule->imm_offset;

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
		     reg_id++) {
			u32 block_id =
				GET_FIELD(cond_regs[reg_id].data,
					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* A rule whose block is in reset can't be evaluated */
		if (!check_rule && dump)
			continue;

		/* When sizing (dump == false), assume the worst case: every
		 * entry of every rule fails.
		 */
		if (!dump) {
			u32 entry_dump_size =
				qed_idle_chk_dump_failure(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  false,
							  rule->rule_id,
							  rule,
							  0,
							  NULL);

			offset += num_reg_entries * entry_dump_size;
			(*num_failing_rules) += num_reg_entries;
			continue;
		}

		/* Go over all register entries (number of entries is the same
		 * for all condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs;
			     reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg =
					&cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the
				 * address of the specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data,
						 DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus =
				    GET_FIELD(reg->data,
					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 ||
				    reg->start_entry > 0) {
					/* Memory entries are padded to a
					 * power of 2 in the address space.
					 */
					padded_entry_size =
					   reg->entry_size > 1 ?
					   roundup_pow_of_two(reg->entry_size) :
					   1;
					addr += (reg->start_entry + entry_id) *
						padded_entry_size;
				}

				/* Read registers */
				if (next_reg_offset + reg->entry_size >=
				    IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn,
						  "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset +=
				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
							    cond_reg_values +
							    next_reg_offset,
							    dump, addr,
							    reg->entry_size,
							    wide_bus,
							    SPLIT_TYPE_NONE, 0);
			}

			/* Call rule condition function.
			 * If returns true, it's a failure.
			 */
			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
							imm_values)) {
				offset += qed_idle_chk_dump_failure(p_hwfn,
							p_ptt,
							dump_buf + offset,
							dump,
							rule->rule_id,
							rule,
							entry_id,
							cond_reg_values);
				(*num_failing_rules)++;
			}
		}
	}

	return offset;
}
3812 
/* Performs Idle Check Dump to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
	u32 num_failing_rules_offset, offset = 0,
	    input_offset = 0, num_failing_rules = 0;

	/* Dump global params  - 1 must match below amount of params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	/* Remember where "num_rules" was written; it is patched with the
	 * actual failure count once all rules were evaluated.
	 */
	num_failing_rules_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* Walk the binary rules buffer: each condition header is followed by
	 * data_size dwords of idle check rules.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr =
		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
		    input_offset++;
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		if (mode_match) {
			const struct dbg_idle_chk_rule *rule =
			    (const struct dbg_idle_chk_rule *)((u32 *)
							       dbg_buf->ptr
							       + input_offset);
			u32 num_input_rules =
				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
			offset +=
			    qed_idle_chk_dump_rule_entries(p_hwfn,
							   p_ptt,
							   dump_buf +
							   offset,
							   dump,
							   rule,
							   num_input_rules,
							   &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter */
	if (dump)
		qed_dump_num_param(dump_buf + num_failing_rules_offset,
				   dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
3887 
3888 /* Finds the meta data image in NVRAM */
3889 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3890 					    struct qed_ptt *p_ptt,
3891 					    u32 image_type,
3892 					    u32 *nvram_offset_bytes,
3893 					    u32 *nvram_size_bytes)
3894 {
3895 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
3896 	struct mcp_file_att file_att;
3897 	int nvm_result;
3898 
3899 	/* Call NVRAM get file command */
3900 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
3901 					p_ptt,
3902 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
3903 					image_type,
3904 					&ret_mcp_resp,
3905 					&ret_mcp_param,
3906 					&ret_txn_size, (u32 *)&file_att);
3907 
3908 	/* Check response */
3909 	if (nvm_result ||
3910 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3911 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3912 
3913 	/* Update return values */
3914 	*nvram_offset_bytes = file_att.nvm_start_addr;
3915 	*nvram_size_bytes = file_att.len;
3916 
3917 	DP_VERBOSE(p_hwfn,
3918 		   QED_MSG_DEBUG,
3919 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
3920 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
3921 
3922 	/* Check alignment */
3923 	if (*nvram_size_bytes & 0x3)
3924 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
3925 
3926 	return DBG_STATUS_OK;
3927 }
3928 
3929 /* Reads data from NVRAM */
3930 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3931 				      struct qed_ptt *p_ptt,
3932 				      u32 nvram_offset_bytes,
3933 				      u32 nvram_size_bytes, u32 *ret_buf)
3934 {
3935 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
3936 	s32 bytes_left = nvram_size_bytes;
3937 	u32 read_offset = 0, param = 0;
3938 
3939 	DP_VERBOSE(p_hwfn,
3940 		   QED_MSG_DEBUG,
3941 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
3942 		   nvram_size_bytes);
3943 
3944 	do {
3945 		bytes_to_copy =
3946 		    (bytes_left >
3947 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
3948 
3949 		/* Call NVRAM read command */
3950 		SET_MFW_FIELD(param,
3951 			      DRV_MB_PARAM_NVM_OFFSET,
3952 			      nvram_offset_bytes + read_offset);
3953 		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
3954 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3955 				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
3956 				       &ret_mcp_resp,
3957 				       &ret_mcp_param, &ret_read_size,
3958 				       (u32 *)((u8 *)ret_buf + read_offset)))
3959 			return DBG_STATUS_NVRAM_READ_FAILED;
3960 
3961 		/* Check response */
3962 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3963 			return DBG_STATUS_NVRAM_READ_FAILED;
3964 
3965 		/* Update read offset */
3966 		read_offset += ret_read_size;
3967 		bytes_left -= ret_read_size;
3968 	} while (bytes_left > 0);
3969 
3970 	return DBG_STATUS_OK;
3971 }
3972 
3973 /* Get info on the MCP Trace data in the scratchpad:
3974  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
3975  * - trace_data_size (OUT): trace data size in bytes (without the header)
3976  */
3977 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
3978 						   struct qed_ptt *p_ptt,
3979 						   u32 *trace_data_grc_addr,
3980 						   u32 *trace_data_size)
3981 {
3982 	u32 spad_trace_offsize, signature;
3983 
3984 	/* Read trace section offsize structure from MCP scratchpad */
3985 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
3986 
3987 	/* Extract trace section address from offsize (in scratchpad) */
3988 	*trace_data_grc_addr =
3989 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
3990 
3991 	/* Read signature from MCP trace section */
3992 	signature = qed_rd(p_hwfn, p_ptt,
3993 			   *trace_data_grc_addr +
3994 			   offsetof(struct mcp_trace, signature));
3995 
3996 	if (signature != MFW_TRACE_SIGNATURE)
3997 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
3998 
3999 	/* Read trace size from MCP trace section */
4000 	*trace_data_size = qed_rd(p_hwfn,
4001 				  p_ptt,
4002 				  *trace_data_grc_addr +
4003 				  offsetof(struct mcp_trace, size));
4004 
4005 	return DBG_STATUS_OK;
4006 }
4007 
4008 /* Reads MCP trace meta data image from NVRAM
4009  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4010  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4011  *			      loaded from file).
4012  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4013  */
4014 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4015 						   struct qed_ptt *p_ptt,
4016 						   u32 trace_data_size_bytes,
4017 						   u32 *running_bundle_id,
4018 						   u32 *trace_meta_offset,
4019 						   u32 *trace_meta_size)
4020 {
4021 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4022 
4023 	/* Read MCP trace section offsize structure from MCP scratchpad */
4024 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4025 
4026 	/* Find running bundle ID */
4027 	running_mfw_addr =
4028 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4029 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4030 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4031 	if (*running_bundle_id > 1)
4032 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4033 
4034 	/* Find image in NVRAM */
4035 	nvram_image_type =
4036 	    (*running_bundle_id ==
4037 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4038 	return qed_find_nvram_image(p_hwfn,
4039 				    p_ptt,
4040 				    nvram_image_type,
4041 				    trace_meta_offset, trace_meta_size);
4042 }
4043 
4044 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4045 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4046 					       struct qed_ptt *p_ptt,
4047 					       u32 nvram_offset_in_bytes,
4048 					       u32 size_in_bytes, u32 *buf)
4049 {
4050 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4051 	enum dbg_status status;
4052 	u32 signature;
4053 
4054 	/* Read meta data from NVRAM */
4055 	status = qed_nvram_read(p_hwfn,
4056 				p_ptt,
4057 				nvram_offset_in_bytes, size_in_bytes, buf);
4058 	if (status != DBG_STATUS_OK)
4059 		return status;
4060 
4061 	/* Extract and check first signature */
4062 	signature = qed_read_unaligned_dword(byte_buf);
4063 	byte_buf += sizeof(signature);
4064 	if (signature != NVM_MAGIC_VALUE)
4065 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4066 
4067 	/* Extract number of modules */
4068 	modules_num = *(byte_buf++);
4069 
4070 	/* Skip all modules */
4071 	for (i = 0; i < modules_num; i++) {
4072 		module_len = *(byte_buf++);
4073 		byte_buf += module_len;
4074 	}
4075 
4076 	/* Extract and check second signature */
4077 	signature = qed_read_unaligned_dword(byte_buf);
4078 	byte_buf += sizeof(signature);
4079 	if (signature != NVM_MAGIC_VALUE)
4080 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4081 
4082 	return DBG_STATUS_OK;
4083 }
4084 
/* Dump MCP Trace.
 * Output layout: common global params, an "mcp_trace_data" section read from
 * the MCP scratchpad, an "mcp_trace_meta" section (meta image read from
 * NVRAM when MCP access is available), and a terminating last section.
 * When dump is false, only computes the required size in dwords.
 */
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 *dump_buf,
					  bool dump, u32 *num_dumped_dwords)
{
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
	enum dbg_status status;
	int halted = 0;
	bool use_mfw;

	*num_dumped_dwords = 0;

	/* MCP (MFW) access is skipped entirely when the NO_MCP GRC param
	 * is set; in that case no meta data is fetched from NVRAM.
	 */
	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     p_ptt,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && use_mfw) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Find trace data size, including the mcp_trace header */
	trace_data_size_dwords =
	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
			 BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump,
					  BYTES_TO_DWORDS(trace_data_grc_addr),
					  trace_data_size_dwords, false,
					  SPLIT_TYPE_NONE, 0);

	/* Resume MCP (only if halt succeeded) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* If MCP Trace meta size parameter was set, use it.
	 * Otherwise, read trace meta.
	 * trace_meta_size_bytes is dword-aligned.
	 */
	trace_meta_size_bytes =
		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
	if ((!trace_meta_size_bytes || dump) && use_mfw)
		status = qed_mcp_trace_get_meta_info(p_hwfn,
						     p_ptt,
						     trace_data_size_bytes,
						     &running_bundle_id,
						     &trace_meta_offset_bytes,
						     &trace_meta_size_bytes);
	/* On success, publish the meta size in dwords; otherwise it stays 0
	 * and the meta section is dumped empty.
	 */
	if (status == DBG_STATUS_OK)
		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);

	/* Dump trace meta size param */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump && trace_meta_size_dwords)
		status = qed_mcp_trace_read_meta(p_hwfn,
						 p_ptt,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
						 dump_buf + offset);
	/* Advance past the meta image only if it was read successfully */
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4195 
4196 /* Dump GRC FIFO */
4197 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4198 					 struct qed_ptt *p_ptt,
4199 					 u32 *dump_buf,
4200 					 bool dump, u32 *num_dumped_dwords)
4201 {
4202 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4203 	bool fifo_has_data;
4204 
4205 	*num_dumped_dwords = 0;
4206 
4207 	/* Dump global params */
4208 	offset += qed_dump_common_global_params(p_hwfn,
4209 						p_ptt,
4210 						dump_buf + offset, dump, 1);
4211 	offset += qed_dump_str_param(dump_buf + offset,
4212 				     dump, "dump-type", "reg-fifo");
4213 
4214 	/* Dump fifo data section header and param. The size param is 0 for
4215 	 * now, and is overwritten after reading the FIFO.
4216 	 */
4217 	offset += qed_dump_section_hdr(dump_buf + offset,
4218 				       dump, "reg_fifo_data", 1);
4219 	size_param_offset = offset;
4220 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4221 
4222 	if (!dump) {
4223 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4224 		 * test how much data is available, except for reading it.
4225 		 */
4226 		offset += REG_FIFO_DEPTH_DWORDS;
4227 		goto out;
4228 	}
4229 
4230 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4231 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4232 
4233 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4234 	 * and must be accessed atomically. Test for dwords_read not passing
4235 	 * buffer size since more entries could be added to the buffer as we are
4236 	 * emptying it.
4237 	 */
4238 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4239 	len = REG_FIFO_ELEMENT_DWORDS;
4240 	for (dwords_read = 0;
4241 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4242 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4243 		offset += qed_grc_dump_addr_range(p_hwfn,
4244 						  p_ptt,
4245 						  dump_buf + offset,
4246 						  true,
4247 						  addr,
4248 						  len,
4249 						  true, SPLIT_TYPE_NONE,
4250 						  0);
4251 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4252 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4253 	}
4254 
4255 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4256 			   dwords_read);
4257 out:
4258 	/* Dump last section */
4259 	offset += qed_dump_last_section(dump_buf, offset, dump);
4260 
4261 	*num_dumped_dwords = offset;
4262 
4263 	return DBG_STATUS_OK;
4264 }
4265 
4266 /* Dump IGU FIFO */
4267 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4268 					 struct qed_ptt *p_ptt,
4269 					 u32 *dump_buf,
4270 					 bool dump, u32 *num_dumped_dwords)
4271 {
4272 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4273 	bool fifo_has_data;
4274 
4275 	*num_dumped_dwords = 0;
4276 
4277 	/* Dump global params */
4278 	offset += qed_dump_common_global_params(p_hwfn,
4279 						p_ptt,
4280 						dump_buf + offset, dump, 1);
4281 	offset += qed_dump_str_param(dump_buf + offset,
4282 				     dump, "dump-type", "igu-fifo");
4283 
4284 	/* Dump fifo data section header and param. The size param is 0 for
4285 	 * now, and is overwritten after reading the FIFO.
4286 	 */
4287 	offset += qed_dump_section_hdr(dump_buf + offset,
4288 				       dump, "igu_fifo_data", 1);
4289 	size_param_offset = offset;
4290 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4291 
4292 	if (!dump) {
4293 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4294 		 * test how much data is available, except for reading it.
4295 		 */
4296 		offset += IGU_FIFO_DEPTH_DWORDS;
4297 		goto out;
4298 	}
4299 
4300 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4301 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4302 
4303 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4304 	 * and must be accessed atomically. Test for dwords_read not passing
4305 	 * buffer size since more entries could be added to the buffer as we are
4306 	 * emptying it.
4307 	 */
4308 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4309 	len = IGU_FIFO_ELEMENT_DWORDS;
4310 	for (dwords_read = 0;
4311 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4312 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4313 		offset += qed_grc_dump_addr_range(p_hwfn,
4314 						  p_ptt,
4315 						  dump_buf + offset,
4316 						  true,
4317 						  addr,
4318 						  len,
4319 						  true, SPLIT_TYPE_NONE,
4320 						  0);
4321 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4322 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4323 	}
4324 
4325 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4326 			   dwords_read);
4327 out:
4328 	/* Dump last section */
4329 	offset += qed_dump_last_section(dump_buf, offset, dump);
4330 
4331 	*num_dumped_dwords = offset;
4332 
4333 	return DBG_STATUS_OK;
4334 }
4335 
4336 /* Protection Override dump */
4337 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4338 						    struct qed_ptt *p_ptt,
4339 						    u32 *dump_buf,
4340 						    bool dump,
4341 						    u32 *num_dumped_dwords)
4342 {
4343 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4344 
4345 	*num_dumped_dwords = 0;
4346 
4347 	/* Dump global params */
4348 	offset += qed_dump_common_global_params(p_hwfn,
4349 						p_ptt,
4350 						dump_buf + offset, dump, 1);
4351 	offset += qed_dump_str_param(dump_buf + offset,
4352 				     dump, "dump-type", "protection-override");
4353 
4354 	/* Dump data section header and param. The size param is 0 for now,
4355 	 * and is overwritten after reading the data.
4356 	 */
4357 	offset += qed_dump_section_hdr(dump_buf + offset,
4358 				       dump, "protection_override_data", 1);
4359 	size_param_offset = offset;
4360 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4361 
4362 	if (!dump) {
4363 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4364 		goto out;
4365 	}
4366 
4367 	/* Add override window info to buffer */
4368 	override_window_dwords =
4369 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4370 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4371 	if (override_window_dwords) {
4372 		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4373 		offset += qed_grc_dump_addr_range(p_hwfn,
4374 						  p_ptt,
4375 						  dump_buf + offset,
4376 						  true,
4377 						  addr,
4378 						  override_window_dwords,
4379 						  true, SPLIT_TYPE_NONE, 0);
4380 		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4381 				   override_window_dwords);
4382 	}
4383 out:
4384 	/* Dump last section */
4385 	offset += qed_dump_last_section(dump_buf, offset, dump);
4386 
4387 	*num_dumped_dwords = offset;
4388 
4389 	return DBG_STATUS_OK;
4390 }
4391 
/* Performs FW Asserts Dump to the specified buffer.
 * For each Storm that is not in reset, dumps a "fw_asserts" section holding
 * the most recent assert list element from the Storm's fast-memory INT RAM.
 * When dump is false, only computes the size.
 * Returns the dumped size in dwords.
 */
static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");

	/* Find Storm dump size */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 last_list_idx, addr;

		/* Storms in reset can't be read - skip them */
		if (dev_data->block_in_reset[storm->sem_block_id])
			continue;

		/* Read FW info for the current Storm */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += qed_dump_section_hdr(dump_buf + offset,
					       dump, "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "storm", storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset,
					     dump,
					     "size",
					     asserts->list_element_dword_size);

		/* Read and dump FW Asserts data */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* The asserts section lives in the Storm's SEM fast-memory
		 * INT RAM, at the RAM line given by the FW info.
		 */
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
			SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
		/* Dump the element preceding the next-write index; when the
		 * next index is 0, take the last element of the list instead.
		 */
		last_list_idx = (next_list_idx > 0 ?
				 next_list_idx :
				 asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
		       asserts->list_dword_offset +
		       last_list_idx * asserts->list_element_dword_size;
		offset +=
		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
					    dump_buf + offset,
					    dump, addr,
					    asserts->list_element_dword_size,
						  false, SPLIT_TYPE_NONE, 0);
	}

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4468 
4469 /* Dumps the specified ILT pages to the specified buffer.
4470  * Returns the dumped size in dwords.
4471  */
4472 static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
4473 				    bool dump,
4474 				    u32 start_page_id,
4475 				    u32 num_pages,
4476 				    struct phys_mem_desc *ilt_pages,
4477 				    bool dump_page_ids)
4478 {
4479 	u32 page_id, end_page_id, offset = 0;
4480 
4481 	if (num_pages == 0)
4482 		return offset;
4483 
4484 	end_page_id = start_page_id + num_pages - 1;
4485 
4486 	for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4487 		struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
4488 
4489 		/**
4490 		 *
4491 		 * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
4492 		 *     break;
4493 		 */
4494 
4495 		if (!ilt_pages[page_id].virt_addr)
4496 			continue;
4497 
4498 		if (dump_page_ids) {
4499 			/* Copy page ID to dump buffer */
4500 			if (dump)
4501 				*(dump_buf + offset) = page_id;
4502 			offset++;
4503 		} else {
4504 			/* Copy page memory to dump buffer */
4505 			if (dump)
4506 				memcpy(dump_buf + offset,
4507 				       mem_desc->virt_addr, mem_desc->size);
4508 			offset += BYTES_TO_DWORDS(mem_desc->size);
4509 		}
4510 	}
4511 
4512 	return offset;
4513 }
4514 
/* Dumps a section containing the dumped ILT pages.
 * Dumps the CDUC (connection context) pages, limited to the valid PF/VF
 * page counts, followed by the CDUT (task context) work pages, when the
 * corresponding GRC params are set. When dump_page_ids is true the section
 * holds page IDs, otherwise page contents.
 * Returns the dumped size in dwords.
 */
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
				      u32 *dump_buf,
				      bool dump,
				      u32 valid_conn_pf_pages,
				      u32 valid_conn_vf_pages,
				      struct phys_mem_desc *ilt_pages,
				      bool dump_page_ids)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 pf_start_line, start_page_id, offset = 0;
	u32 cdut_pf_init_pages, cdut_vf_init_pages;
	u32 cdut_pf_work_pages, cdut_vf_work_pages;
	u32 base_data_offset, size_param_offset;
	u32 cdut_pf_pages, cdut_vf_pages;
	const char *section_name;
	u8 i;

	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;

	offset +=
	    qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);

	/* Dump size parameter (0 for now, overwritten with real size later) */
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
	base_data_offset = offset;

	/* CDUC pages are ordered as follows:
	 * - PF pages - valid section (included in PF connection type mapping)
	 * - PF pages - invalid section (not dumped)
	 * - For each VF in the PF:
	 *   - VF pages - valid section (included in VF connection type mapping)
	 *   - VF pages - invalid section (not dumped)
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
		/* Dump connection PF pages */
		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   dump,
						   start_page_id,
						   valid_conn_pf_pages,
						   ilt_pages, dump_page_ids);

		/* Dump connection VF pages */
		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   dump,
							   start_page_id,
							   valid_conn_vf_pages,
							   ilt_pages,
							   dump_page_ids);
	}

	/* CDUT pages are ordered as follows:
	 * - PF init pages (not dumped)
	 * - PF work pages
	 * - For each VF in the PF:
	 *   - VF init pages (not dumped)
	 *   - VF work pages
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
		/* Dump task PF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_init_pages - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   dump,
						   start_page_id,
						   cdut_pf_work_pages,
						   ilt_pages, dump_page_ids);

		/* Dump task VF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += cdut_vf_pages)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   dump,
							   start_page_id,
							   cdut_vf_work_pages,
							   ilt_pages,
							   dump_page_ids);
	}

	/* Overwrite size param */
	if (dump)
		qed_dump_num_param(dump_buf + size_param_offset,
				   dump, "size", offset - base_data_offset);

	return offset;
}
4617 
/* Performs ILT Dump to the specified buffer.
 * Output layout: 22 global params describing the ILT geometry, per-connection
 * -type PF/VF CID count sections, physical memory descriptors for every ILT
 * shadow page, an ILT page-ID section, an ILT page-contents section, and a
 * terminating last section. When dump is false, only computes the size.
 * Returns the dumped size in dwords.
 */
static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
	u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
	u32 num_cids_per_page, conn_ctx_size;
	u32 cduc_page_size, cdut_page_size;
	struct phys_mem_desc *ilt_pages;
	u8 conn_type;

	/* Page sizes are encoded as a power-of-two exponent above the
	 * minimal ILT page size.
	 */
	cduc_page_size = 1 <<
	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	cdut_page_size = 1 <<
	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
	/* Number of connection contexts that fit in one CDUC page */
	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;

	/* Dump global params - 22 must match number of params below */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 22);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "ilt-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-page-size", cduc_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-first-page-id",
				     clients[ILT_CLI_CDUC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-last-page-id",
				     clients[ILT_CLI_CDUC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-pf-pages",
				     clients
				     [ILT_CLI_CDUC].pf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-vf-pages",
				     clients
				     [ILT_CLI_CDUC].vf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-conn-ctx-size",
				     conn_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-page-size", cdut_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-first-page-id",
				     clients[ILT_CLI_CDUT].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-last-page-id",
				     clients[ILT_CLI_CDUT].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-init-pages",
				     qed_get_cdut_num_pf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-init-pages",
				     qed_get_cdut_num_vf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-work-pages",
				     qed_get_cdut_num_pf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-work-pages",
				     qed_get_cdut_num_vf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-task-ctx-size",
				     p_hwfn->p_cxt_mngr->task_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "task-type-id",
				     p_hwfn->p_cxt_mngr->task_type_id);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "first-vf-id-in-pf",
				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
	offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
					      dump,
					      "num-vfs-in-pf",
					      p_hwfn->p_cxt_mngr->vf_count);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ptr-size-bytes", sizeof(void *));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "pf-start-line",
				     p_hwfn->p_cxt_mngr->pf_start_line);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "page-mem-desc-size-dwords",
				     PAGE_MEM_DESC_SIZE_DWORDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ilt-shadow-size",
				     p_hwfn->p_cxt_mngr->ilt_shadow_size);
	/* Additional/Less parameters require matching of number in call to
	 * dump_common_global_params()
	 */

	/* Dump section containing number of PF CIDs per connection type */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_pf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
	for (conn_type = 0, valid_conn_pf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
		u32 num_pf_cids =
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;

		if (dump)
			*(dump_buf + offset) = num_pf_cids;
		valid_conn_pf_cids += num_pf_cids;
	}

	/* Dump section containing number of VF CIDs per connection type */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_vf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
	for (conn_type = 0, valid_conn_vf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
		u32 num_vf_cids =
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;

		if (dump)
			*(dump_buf + offset) = num_vf_cids;
		valid_conn_vf_cids += num_vf_cids;
	}

	/* Dump section containing physical memory descs for each ILT page */
	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "ilt_page_desc", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "size",
				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);

	/* Copy memory descriptors to dump buffer */
	if (dump) {
		u32 page_id;

		for (page_id = 0; page_id < num_pages;
		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
			memcpy(dump_buf + offset,
			       &ilt_pages[page_id],
			       DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
	} else {
		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
	}

	/* Convert valid CID counts to page counts, rounding up */
	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
					   num_cids_per_page);
	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
					   num_cids_per_page);

	/* Dump ILT pages IDs */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     dump_buf + offset,
					     dump,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,
					     ilt_pages, true);

	/* Dump ILT pages memory */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     dump_buf + offset,
					     dump,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,
					     ilt_pages, false);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4810 
4811 /***************************** Public Functions *******************************/
4812 
4813 enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
4814 				    const u8 * const bin_ptr)
4815 {
4816 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
4817 	u8 buf_id;
4818 
4819 	/* Convert binary data to debug arrays */
4820 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
4821 		qed_set_dbg_bin_buf(p_hwfn,
4822 				    buf_id,
4823 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
4824 				    buf_hdrs[buf_id].length);
4825 
4826 	return DBG_STATUS_OK;
4827 }
4828 
4829 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
4830 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
4831 {
4832 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4833 	u8 storm_id;
4834 
4835 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4836 		struct storm_defs *storm = &s_storm_defs[storm_id];
4837 
4838 		/* Skip Storm if it's in reset */
4839 		if (dev_data->block_in_reset[storm->sem_block_id])
4840 			continue;
4841 
4842 		/* Read FW info for the current Storm */
4843 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
4844 
4845 		return true;
4846 	}
4847 
4848 	return false;
4849 }
4850 
/* Sets the value of a single GRC parameter, or - when the parameter is a
 * preset (EXCLUDE_ALL / CRASH) - applies the preset's value to all
 * non-persistent parameters.
 * Returns DBG_STATUS_INVALID_ARGS for an unknown parameter, an out-of-range
 * value, or an attempt to disable a preset (use
 * qed_dbg_grc_set_params_default for that instead).
 */
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
				   enum dbg_grc_params grc_param, u32 val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);

	status = qed_dbg_dev_init(p_hwfn);
	if (status != DBG_STATUS_OK)
		return status;

	/* Initializes the GRC parameters (if not initialized). Needed in order
	 * to set the default parameter values for the first time.
	 */
	qed_dbg_grc_init_params(p_hwfn);

	/* Validate the parameter ID and its value range */
	if (grc_param >= MAX_DBG_GRC_PARAMS)
		return DBG_STATUS_INVALID_ARGS;
	if (val < s_grc_param_defs[grc_param].min ||
	    val > s_grc_param_defs[grc_param].max)
		return DBG_STATUS_INVALID_ARGS;

	if (s_grc_param_defs[grc_param].is_preset) {
		/* Preset param */

		/* Disabling a preset is not allowed. Call
		 * dbg_grc_set_params_default instead.
		 */
		if (!val)
			return DBG_STATUS_INVALID_ARGS;

		/* Update all params with the preset values */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
			struct grc_param_defs *defs = &s_grc_param_defs[i];
			u32 preset_val;
			/* Skip persistent params */
			if (defs->is_persistent)
				continue;

			/* Find preset value */
			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
				preset_val =
				    defs->exclude_all_preset_val;
			else if (grc_param == DBG_GRC_PARAM_CRASH)
				preset_val =
				    defs->crash_preset_val[dev_data->chip_id];
			else
				return DBG_STATUS_INVALID_ARGS;

			qed_grc_set_param(p_hwfn, i, preset_val);
		}
	} else {
		/* Regular param - set its value */
		qed_grc_set_param(p_hwfn, grc_param, val);
	}

	return DBG_STATUS_OK;
}
4913 
4914 /* Assign default GRC param values */
4915 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4916 {
4917 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4918 	u32 i;
4919 
4920 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4921 		if (!s_grc_param_defs[i].is_persistent)
4922 			dev_data->grc.param_val[i] =
4923 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
4924 }
4925 
4926 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4927 					      struct qed_ptt *p_ptt,
4928 					      u32 *buf_size)
4929 {
4930 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
4931 
4932 	*buf_size = 0;
4933 
4934 	if (status != DBG_STATUS_OK)
4935 		return status;
4936 
4937 	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4938 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
4939 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
4940 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
4941 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
4942 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
4943 
4944 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
4945 }
4946 
4947 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4948 				 struct qed_ptt *p_ptt,
4949 				 u32 *dump_buf,
4950 				 u32 buf_size_in_dwords,
4951 				 u32 *num_dumped_dwords)
4952 {
4953 	u32 needed_buf_size_in_dwords;
4954 	enum dbg_status status;
4955 
4956 	*num_dumped_dwords = 0;
4957 
4958 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
4959 					       p_ptt,
4960 					       &needed_buf_size_in_dwords);
4961 	if (status != DBG_STATUS_OK)
4962 		return status;
4963 
4964 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
4965 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4966 
4967 	/* GRC Dump */
4968 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4969 
4970 	/* Revert GRC params to their default */
4971 	qed_dbg_grc_set_params_default(p_hwfn);
4972 
4973 	return status;
4974 }
4975 
4976 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4977 						   struct qed_ptt *p_ptt,
4978 						   u32 *buf_size)
4979 {
4980 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4981 	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
4982 	enum dbg_status status;
4983 
4984 	*buf_size = 0;
4985 
4986 	status = qed_dbg_dev_init(p_hwfn);
4987 	if (status != DBG_STATUS_OK)
4988 		return status;
4989 
4990 	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4991 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
4992 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
4993 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
4994 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
4995 
4996 	if (!idle_chk->buf_size_set) {
4997 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
4998 						       p_ptt, NULL, false);
4999 		idle_chk->buf_size_set = true;
5000 	}
5001 
5002 	*buf_size = idle_chk->buf_size;
5003 
5004 	return DBG_STATUS_OK;
5005 }
5006 
5007 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5008 				      struct qed_ptt *p_ptt,
5009 				      u32 *dump_buf,
5010 				      u32 buf_size_in_dwords,
5011 				      u32 *num_dumped_dwords)
5012 {
5013 	u32 needed_buf_size_in_dwords;
5014 	enum dbg_status status;
5015 
5016 	*num_dumped_dwords = 0;
5017 
5018 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5019 						    p_ptt,
5020 						    &needed_buf_size_in_dwords);
5021 	if (status != DBG_STATUS_OK)
5022 		return status;
5023 
5024 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5025 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5026 
5027 	/* Update reset state */
5028 	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5029 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5030 
5031 	/* Idle Check Dump */
5032 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5033 
5034 	/* Revert GRC params to their default */
5035 	qed_dbg_grc_set_params_default(p_hwfn);
5036 
5037 	return DBG_STATUS_OK;
5038 }
5039 
5040 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5041 						    struct qed_ptt *p_ptt,
5042 						    u32 *buf_size)
5043 {
5044 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5045 
5046 	*buf_size = 0;
5047 
5048 	if (status != DBG_STATUS_OK)
5049 		return status;
5050 
5051 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5052 }
5053 
5054 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5055 				       struct qed_ptt *p_ptt,
5056 				       u32 *dump_buf,
5057 				       u32 buf_size_in_dwords,
5058 				       u32 *num_dumped_dwords)
5059 {
5060 	u32 needed_buf_size_in_dwords;
5061 	enum dbg_status status;
5062 
5063 	status =
5064 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5065 						    p_ptt,
5066 						    &needed_buf_size_in_dwords);
5067 	if (status != DBG_STATUS_OK && status !=
5068 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5069 		return status;
5070 
5071 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5072 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5073 
5074 	/* Update reset state */
5075 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5076 
5077 	/* Perform dump */
5078 	status = qed_mcp_trace_dump(p_hwfn,
5079 				    p_ptt, dump_buf, true, num_dumped_dwords);
5080 
5081 	/* Revert GRC params to their default */
5082 	qed_dbg_grc_set_params_default(p_hwfn);
5083 
5084 	return status;
5085 }
5086 
5087 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5088 						   struct qed_ptt *p_ptt,
5089 						   u32 *buf_size)
5090 {
5091 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5092 
5093 	*buf_size = 0;
5094 
5095 	if (status != DBG_STATUS_OK)
5096 		return status;
5097 
5098 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5099 }
5100 
5101 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5102 				      struct qed_ptt *p_ptt,
5103 				      u32 *dump_buf,
5104 				      u32 buf_size_in_dwords,
5105 				      u32 *num_dumped_dwords)
5106 {
5107 	u32 needed_buf_size_in_dwords;
5108 	enum dbg_status status;
5109 
5110 	*num_dumped_dwords = 0;
5111 
5112 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5113 						    p_ptt,
5114 						    &needed_buf_size_in_dwords);
5115 	if (status != DBG_STATUS_OK)
5116 		return status;
5117 
5118 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5119 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5120 
5121 	/* Update reset state */
5122 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5123 
5124 	status = qed_reg_fifo_dump(p_hwfn,
5125 				   p_ptt, dump_buf, true, num_dumped_dwords);
5126 
5127 	/* Revert GRC params to their default */
5128 	qed_dbg_grc_set_params_default(p_hwfn);
5129 
5130 	return status;
5131 }
5132 
5133 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5134 						   struct qed_ptt *p_ptt,
5135 						   u32 *buf_size)
5136 {
5137 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5138 
5139 	*buf_size = 0;
5140 
5141 	if (status != DBG_STATUS_OK)
5142 		return status;
5143 
5144 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5145 }
5146 
5147 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5148 				      struct qed_ptt *p_ptt,
5149 				      u32 *dump_buf,
5150 				      u32 buf_size_in_dwords,
5151 				      u32 *num_dumped_dwords)
5152 {
5153 	u32 needed_buf_size_in_dwords;
5154 	enum dbg_status status;
5155 
5156 	*num_dumped_dwords = 0;
5157 
5158 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5159 						    p_ptt,
5160 						    &needed_buf_size_in_dwords);
5161 	if (status != DBG_STATUS_OK)
5162 		return status;
5163 
5164 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5165 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5166 
5167 	/* Update reset state */
5168 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5169 
5170 	status = qed_igu_fifo_dump(p_hwfn,
5171 				   p_ptt, dump_buf, true, num_dumped_dwords);
5172 	/* Revert GRC params to their default */
5173 	qed_dbg_grc_set_params_default(p_hwfn);
5174 
5175 	return status;
5176 }
5177 
5178 enum dbg_status
5179 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5180 					      struct qed_ptt *p_ptt,
5181 					      u32 *buf_size)
5182 {
5183 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5184 
5185 	*buf_size = 0;
5186 
5187 	if (status != DBG_STATUS_OK)
5188 		return status;
5189 
5190 	return qed_protection_override_dump(p_hwfn,
5191 					    p_ptt, NULL, false, buf_size);
5192 }
5193 
5194 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5195 						 struct qed_ptt *p_ptt,
5196 						 u32 *dump_buf,
5197 						 u32 buf_size_in_dwords,
5198 						 u32 *num_dumped_dwords)
5199 {
5200 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5201 	enum dbg_status status;
5202 
5203 	*num_dumped_dwords = 0;
5204 
5205 	status =
5206 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5207 							      p_ptt,
5208 							      p_size);
5209 	if (status != DBG_STATUS_OK)
5210 		return status;
5211 
5212 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5213 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5214 
5215 	/* Update reset state */
5216 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5217 
5218 	status = qed_protection_override_dump(p_hwfn,
5219 					      p_ptt,
5220 					      dump_buf,
5221 					      true, num_dumped_dwords);
5222 
5223 	/* Revert GRC params to their default */
5224 	qed_dbg_grc_set_params_default(p_hwfn);
5225 
5226 	return status;
5227 }
5228 
5229 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5230 						     struct qed_ptt *p_ptt,
5231 						     u32 *buf_size)
5232 {
5233 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5234 
5235 	*buf_size = 0;
5236 
5237 	if (status != DBG_STATUS_OK)
5238 		return status;
5239 
5240 	/* Update reset state */
5241 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5242 
5243 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5244 
5245 	return DBG_STATUS_OK;
5246 }
5247 
5248 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5249 					struct qed_ptt *p_ptt,
5250 					u32 *dump_buf,
5251 					u32 buf_size_in_dwords,
5252 					u32 *num_dumped_dwords)
5253 {
5254 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5255 	enum dbg_status status;
5256 
5257 	*num_dumped_dwords = 0;
5258 
5259 	status =
5260 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5261 						     p_ptt,
5262 						     p_size);
5263 	if (status != DBG_STATUS_OK)
5264 		return status;
5265 
5266 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5267 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5268 
5269 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5270 
5271 	/* Revert GRC params to their default */
5272 	qed_dbg_grc_set_params_default(p_hwfn);
5273 
5274 	return DBG_STATUS_OK;
5275 }
5276 
5277 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5278 						     struct qed_ptt *p_ptt,
5279 						     u32 *buf_size)
5280 {
5281 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5282 
5283 	*buf_size = 0;
5284 
5285 	if (status != DBG_STATUS_OK)
5286 		return status;
5287 
5288 	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
5289 
5290 	return DBG_STATUS_OK;
5291 }
5292 
/* Performs an ILT dump into the specified buffer.
 * Fails with DBG_STATUS_DUMP_BUF_TOO_SMALL if the buffer cannot hold the
 * dump; on success, *num_dumped_dwords holds the written size in dwords.
 */
static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
					       p_ptt,
					       &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)
		return status;

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5320 
/* Reads the attention registers of the specified block and attention type
 * into 'results'. Only registers whose mode matches the current chip mode
 * and whose status value is non-zero are recorded.
 * If 'clear_status' is set, the clear-on-read status address is read
 * instead of the regular status address.
 */
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  enum block_id block_id,
				  enum dbg_attn_type attn_type,
				  bool clear_status,
				  struct dbg_attn_block_result *results)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* All debug arrays used below must be present */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
					       block_id,
					       attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode */
		eval_mode = GET_FIELD(reg_data->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register */
		sts_addr = DWORDS_TO_BYTES(clear_status ?
					   reg_data->sts_clr_address :
					   GET_FIELD(reg_data->data,
						     DBG_ATTN_REG_STS_ADDRESS));
		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = qed_rd(p_hwfn,
					      p_ptt,
					      DWORDS_TO_BYTES
					      (reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill the result header: block, names offset, type and reg count */
	results->block_id = (u8)block_id;
	results->names_offset =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data,
		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
5393 
5394 /******************************* Data Types **********************************/
5395 
/* REG fifo element - bit-field layout of a single 64-bit REG FIFO entry,
 * described by the SHIFT/MASK pairs below.
 */
struct reg_fifo_element {
	u64 data;
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
#define REG_FIFO_ELEMENT_PF_SHIFT		24
#define REG_FIFO_ELEMENT_PF_MASK		0xf
#define REG_FIFO_ELEMENT_VF_SHIFT		28
#define REG_FIFO_ELEMENT_VF_MASK		0xff
#define REG_FIFO_ELEMENT_PORT_SHIFT		36
#define REG_FIFO_ELEMENT_PORT_MASK		0x3
#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
};
5418 
/* REG fifo error element - maps an error code to its description */
struct reg_fifo_err {
	u32 err_code;		/* raw error code from the FIFO element */
	const char *err_msg;	/* human-readable description */
};
5424 
/* IGU fifo element - a single IGU FIFO entry. dword0 identifies the command
 * and its origin; dword1/dword2 together hold a 64-bit value accessed via
 * the DWORD12 fields below.
 */
struct igu_fifo_element {
	u32 dword0;
#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
	u32 dword1;
	u32 dword2;
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
	u32 reserved;
};
5446 
/* IGU fifo write-command payload - layout of the WR_DATA field of an
 * IGU FIFO element.
 */
struct igu_fifo_wr_data {
	u32 data;
#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
};
5462 
/* IGU fifo cleanup write-command payload - alternative layout of the
 * WR_DATA field used for cleanup commands.
 */
struct igu_fifo_cleanup_wr_data {
	u32 data;
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
};
5474 
/* Protection override element - bit-field layout of a single 64-bit
 * protection override window record.
 */
struct protection_override_element {
	u64 data;
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
};
5491 
/* IGU FIFO command sources - kept in the same order as the labels in
 * s_igu_fifo_source_strs below.
 */
enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC
};
5505 
/* Classification of IGU command addresses (see s_igu_fifo_addr_data) */
enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};
5515 
/* Describes a range of IGU command addresses and its meaning */
struct igu_fifo_addr_data {
	u16 start_addr;	/* first address in the range (inclusive) */
	u16 end_addr;	/* last address in the range (inclusive) */
	char *desc;	/* description for PF accesses */
	char *vf_desc;	/* override description for VF accesses, or NULL */
	enum igu_fifo_addr_types type;
};
5523 
5524 /******************************** Constants **********************************/
5525 
#define MAX_MSG_LEN				1024	/* size of s_temp_buf */

#define MCP_TRACE_MAX_MODULE_LEN		8
#define MCP_TRACE_FORMAT_MAX_PARAMS		3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)

/* REG FIFO addresses are in dwords; factor to convert to bytes */
#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127

#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5537 
5538 /***************************** Constant Arrays *******************************/
5539 
/* Status string array. Indexed by enum dbg_status (see the per-entry
 * comments) - must be kept in sync with that enum.
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
	"The filter/trigger constraint dword offsets are not enabled for recording",


	/* DBG_STATUS_VFC_READ_ERROR */
	"Error reading from VFC",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
	"Filters and triggers are not allowed in E4 256-bit mode",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */
	"Unknown chip",

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED0 */
	"",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_RESERVED1 */
	"",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
	"Insufficient HW IDs. Try to record less Storms/blocks",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use",

	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
	"The storm debug mode is not supported in the current chip",

	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
	"Other engine is supported only in BB",

	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
	"The configured filter mode requires a single Storm/block input",

	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",

	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
	"When triggering on Storm data, the Storm to trigger on must be specified"
};
5717 
/* Idle check severity names array - maps a severity value to its label */
static const char * const s_idle_chk_severity_str[] = {
	"Error",
	"Error if no traffic",
	"Warning"
};
5724 
/* MCP Trace level names array - maps a trace level value to its label */
static const char * const s_mcp_trace_level_str[] = {
	"ERROR",
	"TRACE",
	"DEBUG"
};
5731 
/* Access type names array - index 0 = read, 1 = write */
static const char * const s_access_strs[] = {
	"read",
	"write"
};
5737 
/* Privilege type names array - maps a privilege value to its label */
static const char * const s_privilege_strs[] = {
	"VF",
	"PDA",
	"HV",
	"UA"
};
5745 
/* Protection type names array - values 0-3 are defaults, 4-7 are overrides */
static const char * const s_protection_strs[] = {
	"(default)",
	"(default)",
	"(default)",
	"(default)",
	"override VF",
	"override PDA",
	"override HV",
	"override UA"
};
5757 
/* Master type names array - unknown/reserved master IDs map to "???" */
static const char * const s_master_strs[] = {
	"???",
	"pxp",
	"mcp",
	"msdm",
	"psdm",
	"ysdm",
	"usdm",
	"tsdm",
	"xsdm",
	"dbu",
	"dmae",
	"jdap",
	"???",
	"???",
	"???",
	"???"
};
5777 
/* REG FIFO error messages array. Error codes are sparse, so each entry
 * carries its own code rather than being indexed directly.
 * NOTE(review): appears to be read-only - consider declaring it const like
 * the sibling string tables; confirm there are no writers.
 */
static struct reg_fifo_err s_reg_fifo_errors[] = {
	{1, "grc timeout"},
	{2, "address doesn't belong to any block"},
	{4, "reserved address in block or write to read-only address"},
	{8, "privilege/protection mismatch"},
	{16, "path isolation error"},
	{17, "RSL error"}
};
5787 
/* IGU FIFO sources array - labels for enum igu_fifo_sources, in order */
static const char * const s_igu_fifo_source_strs[] = {
	"TSTORM",
	"MSTORM",
	"USTORM",
	"XSTORM",
	"YSTORM",
	"PSTORM",
	"PCIE",
	"NIG_QM_PBF",
	"CAU",
	"ATTN",
	"GRC",
};
5802 
/* IGU FIFO error messages - presumably indexed by the element's DWORD0
 * ERR_TYPE field; confirm against the IGU FIFO parser.
 */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
5821 
/* IGU FIFO address data - inclusive command address ranges, listed in
 * ascending order.
 */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL,
	 IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL,
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL,
	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
5857 
5858 /******************************** Variables **********************************/
5859 
/* Temporary buffer, used for print size calculations. qed_get_buf_ptr()
 * returns it as a scratch write target whenever the caller passes a NULL
 * buffer; its contents are throwaway.
 */
static char s_temp_buf[MAX_MSG_LEN];
5862 
5863 /**************************** Private Functions ******************************/
5864 
5865 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5866 {
5867 	return (a + b) % size;
5868 }
5869 
5870 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5871 {
5872 	return (size + a - b) % size;
5873 }
5874 
5875 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5876  * bytes) and returns them as a dword value. the specified buffer offset is
5877  * updated.
5878  */
5879 static u32 qed_read_from_cyclic_buf(void *buf,
5880 				    u32 *offset,
5881 				    u32 buf_size, u8 num_bytes_to_read)
5882 {
5883 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
5884 	u32 val = 0;
5885 
5886 	val_ptr = (u8 *)&val;
5887 
5888 	/* Assume running on a LITTLE ENDIAN and the buffer is network order
5889 	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
5890 	 */
5891 	for (i = 0; i < num_bytes_to_read; i++) {
5892 		val_ptr[i] = bytes_buf[*offset];
5893 		*offset = qed_cyclic_add(*offset, 1, buf_size);
5894 	}
5895 
5896 	return val;
5897 }
5898 
5899 /* Reads and returns the next byte from the specified buffer.
5900  * The specified buffer offset is updated.
5901  */
5902 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5903 {
5904 	return ((u8 *)buf)[(*offset)++];
5905 }
5906 
/* Reads and returns the next dword from the specified buffer.
 * The specified buffer offset is updated.
 * NOTE(review): reads through a u32 cast, so 'buf + *offset' is assumed to
 * be dword-aligned - confirm with callers (qed_read_param pads its offsets
 * to 4 bytes before numeric reads).
 */
static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
{
	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];

	*offset += 4;

	return dword_val;
}
5918 
5919 /* Reads the next string from the specified buffer, and copies it to the
5920  * specified pointer. The specified buffer offset is updated.
5921  */
static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
	const char *source_str = &((const char *)buf)[*offset];

	/* Guard size == 0: the length comes from untrusted dump data, and
	 * the NUL-termination write below would otherwise land at dest[-1].
	 */
	if (size) {
		strncpy(dest, source_str, size);
		/* strncpy does not guarantee termination; force it. */
		dest[size - 1] = '\0';
	}
	*offset += size;
}
5930 
/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
 * If the specified buffer is NULL, a temporary buffer pointer is returned.
 */
5934 static char *qed_get_buf_ptr(void *buf, u32 offset)
5935 {
5936 	return buf ? (char *)buf + offset : s_temp_buf;
5937 }
5938 
/* Reads a param from the specified buffer. Returns the number of dwords read.
 * If the returned str_param is NULL, the param is numeric and its value is
 * returned in num_param.
 * Otherwise, the param is a string and its pointer is returned in str_param.
 */
static u32 qed_read_param(u32 *dump_buf,
			  const char **param_name,
			  const char **param_str_val, u32 *param_num_val)
{
	char *bytes = (char *)dump_buf;
	size_t pos = 0;

	/* The param name is a NUL-terminated string at the start */
	*param_name = bytes;
	pos += strlen(*param_name) + 1;

	/* A single type byte follows: non-zero means string param */
	if (bytes[pos++]) {
		/* String param: NUL-terminated value, then pad to dword */
		*param_str_val = bytes + pos;
		*param_num_val = 0;
		pos += strlen(*param_str_val) + 1;
		pos = (pos + 3) & ~(size_t)0x3;
	} else {
		/* Numeric param: pad to dword, then a raw u32 value */
		*param_str_val = NULL;
		pos = (pos + 3) & ~(size_t)0x3;
		*param_num_val = *(u32 *)(bytes + pos);
		pos += 4;
	}

	return (u32)pos / 4;
}
5974 
5975 /* Reads a section header from the specified buffer.
5976  * Returns the number of dwords read.
5977  */
5978 static u32 qed_read_section_hdr(u32 *dump_buf,
5979 				const char **section_name,
5980 				u32 *num_section_params)
5981 {
5982 	const char *param_str_val;
5983 
5984 	return qed_read_param(dump_buf,
5985 			      section_name, &param_str_val, num_section_params);
5986 }
5987 
5988 /* Reads section params from the specified buffer and prints them to the results
5989  * buffer. Returns the number of dwords read.
5990  */
/* Note: results_buf may be NULL, in which case qed_get_buf_ptr redirects all
 * prints to a scratch buffer and only the byte count is produced.
 */
static u32 qed_print_section_params(u32 *dump_buf,
				    u32 num_section_params,
				    char *results_buf, u32 *num_chars_printed)
{
	u32 i, dump_offset = 0, results_offset = 0;

	for (i = 0; i < num_section_params; i++) {
		const char *param_name, *param_str_val;
		u32 param_num_val = 0;

		dump_offset += qed_read_param(dump_buf + dump_offset,
					      &param_name,
					      &param_str_val, &param_num_val);

		if (param_str_val)
			results_offset +=
				sprintf(qed_get_buf_ptr(results_buf,
							results_offset),
					"%s: %s\n", param_name, param_str_val);
		else if (strcmp(param_name, "fw-timestamp"))
			/* Use %u: param_num_val is an unsigned 32-bit value,
			 * so %d would misprint values above INT_MAX.
			 */
			results_offset +=
				sprintf(qed_get_buf_ptr(results_buf,
							results_offset),
					"%s: %u\n", param_name, param_num_val);
	}

	/* Blank line separating the params from the section body */
	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
				  "\n");

	*num_chars_printed = results_offset;

	return dump_offset;
}
6024 
6025 /* Returns the block name that matches the specified block ID,
6026  * or NULL if not found.
6027  */
6028 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6029 					  enum block_id block_id)
6030 {
6031 	const struct dbg_block_user *block =
6032 	    (const struct dbg_block_user *)
6033 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6034 
6035 	return (const char *)block->name;
6036 }
6037 
6038 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6039 							 *p_hwfn)
6040 {
6041 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6042 }
6043 
6044 /* Parses the idle check rules and returns the number of characters printed.
6045  * In case of parsing error, returns 0.
6046  */
static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf,
					 u32 *dump_buf_end,
					 u32 num_rules,
					 bool print_fw_idle_chk,
					 char *results_buf,
					 u32 *num_errors, u32 *num_warnings)
{
	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	u32 rule_idx;
	u16 i, j;

	*num_errors = 0;
	*num_warnings = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
	     rule_idx++) {
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str, *lsi_msg;
		u32 parsing_str_offset;
		bool has_fw_msg;
		u8 curr_reg_id;

		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
		/* Look up this rule's parsing metadata in the
		 * IDLE_CHK_PARSING_DATA binary array, indexed by rule ID.
		 */
		rule_parsing_data =
		    (const struct dbg_idle_chk_rule_parsing_data *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
		    hdr->rule_id;
		parsing_str_offset =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		has_fw_msg =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		/* parsing_str points into the PARSING_STRINGS blob: a run of
		 * consecutive NUL-terminated strings — an optional FW message
		 * first, then the LSI message, then one name per register.
		 */
		parsing_str = (const char *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
		    parsing_str_offset;
		lsi_msg = parsing_str;
		curr_reg_id = 0;

		/* Abort parsing on an out-of-range severity value */
		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
			return 0;

		/* Skip rule header */
		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
			(*num_errors)++;
		else
			(*num_warnings)++;

		/* Print rule severity */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s: ",
			    s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message. When a FW message exists it precedes the
		 * LSI message in the string blob; skip over it to reach the
		 * LSI message, then pick whichever the caller requested.
		 */
		if (has_fw_msg)
			parsing_str += strlen(parsing_str) + 1;
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s.",
			    has_fw_msg &&
			    print_fw_idle_chk ? parsing_str : lsi_msg);
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), " Registers:");
		for (i = 0;
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
		     i++) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool is_mem;
			u8 reg_id;

			reg_hdr =
				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
			is_mem = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			reg_id = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));

			/* Skip register names until the required reg_id is
			 * reached.
			 */
			for (; reg_id > curr_reg_id;
			     curr_reg_id++,
			     parsing_str += strlen(parsing_str) + 1);

			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), " %s",
				    parsing_str);
			/* For memory entries, print the dumped entry index */
			if (i < hdr->num_dumped_cond_regs && is_mem)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "[%d]", hdr->mem_entry_id +
					    reg_hdr->start_entry);
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "=");
			/* Print the register's dumped dwords, comma-separated */
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "0x%x", *dump_buf);
				if (j < reg_hdr->size - 1)
					results_offset +=
					    sprintf(qed_get_buf_ptr
						    (results_buf,
						     results_offset), ",");
			}
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)
		return 0;

	return results_offset;
}
6184 
6185 /* Parses an idle check dump buffer.
6186  * If result_buf is not NULL, the idle check results are printed to it.
6187  * In any case, the required results buffer size is assigned to
6188  * parsed_results_bytes.
6189  * The parsing status is returned.
6190  */
static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes,
					       u32 *num_errors,
					       u32 *num_warnings)
{
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
	u32 num_section_params = 0, num_rules;

	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	*parsed_results_bytes = 0;
	*num_errors = 0;
	*num_warnings = 0;

	/* Parsing requires the parsing-strings and idle-check parsing-data
	 * binary arrays to have been registered beforehand.
	 */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read idle_chk section: its single param is the rule count */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	if (num_rules) {
		u32 rules_print_size;

		/* Print FW output. The same dump data is parsed twice: once
		 * preferring FW messages, once with LSI messages only. A zero
		 * print size signals a parsing failure.
		 */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "FW_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      true,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nLSI_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      false,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	}

	/* Print errors/warnings count */
	if (*num_errors)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			    *num_errors, *num_warnings);
	else if (*num_warnings)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully (with %d warnings)\n",
			    *num_warnings);
	else
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully\n");

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6305 
6306 /* Allocates and fills MCP Trace meta data based on the specified meta data
6307  * dump buffer.
6308  * Returns debug status code.
6309  */
6310 static enum dbg_status
6311 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6312 			      const u32 *meta_buf)
6313 {
6314 	struct dbg_tools_user_data *dev_user_data;
6315 	u32 offset = 0, signature, i;
6316 	struct mcp_trace_meta *meta;
6317 	u8 *meta_buf_bytes;
6318 
6319 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6320 	meta = &dev_user_data->mcp_trace_meta;
6321 	meta_buf_bytes = (u8 *)meta_buf;
6322 
6323 	/* Free the previous meta before loading a new one. */
6324 	if (meta->is_allocated)
6325 		qed_mcp_trace_free_meta_data(p_hwfn);
6326 
6327 	memset(meta, 0, sizeof(*meta));
6328 
6329 	/* Read first signature */
6330 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6331 	if (signature != NVM_MAGIC_VALUE)
6332 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6333 
6334 	/* Read no. of modules and allocate memory for their pointers */
6335 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6336 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6337 				GFP_KERNEL);
6338 	if (!meta->modules)
6339 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6340 
6341 	/* Allocate and read all module strings */
6342 	for (i = 0; i < meta->modules_num; i++) {
6343 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6344 
6345 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6346 		if (!(*(meta->modules + i))) {
6347 			/* Update number of modules to be released */
6348 			meta->modules_num = i ? i - 1 : 0;
6349 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6350 		}
6351 
6352 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6353 				      *(meta->modules + i));
6354 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6355 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6356 	}
6357 
6358 	/* Read second signature */
6359 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6360 	if (signature != NVM_MAGIC_VALUE)
6361 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6362 
6363 	/* Read number of formats and allocate memory for all formats */
6364 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6365 	meta->formats = kcalloc(meta->formats_num,
6366 				sizeof(struct mcp_trace_format),
6367 				GFP_KERNEL);
6368 	if (!meta->formats)
6369 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6370 
6371 	/* Allocate and read all strings */
6372 	for (i = 0; i < meta->formats_num; i++) {
6373 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6374 		u8 format_len;
6375 
6376 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6377 							   &offset);
6378 		format_len = GET_MFW_FIELD(format_ptr->data,
6379 					   MCP_TRACE_FORMAT_LEN);
6380 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6381 		if (!format_ptr->format_str) {
6382 			/* Update number of modules to be released */
6383 			meta->formats_num = i ? i - 1 : 0;
6384 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6385 		}
6386 
6387 		qed_read_str_from_buf(meta_buf_bytes,
6388 				      &offset,
6389 				      format_len, format_ptr->format_str);
6390 	}
6391 
6392 	meta->is_allocated = true;
6393 	return DBG_STATUS_OK;
6394 }
6395 
6396 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6397  * are printed to it. The parsing status is returned.
6398  * Arguments:
6399  * trace_buf - MCP trace cyclic buffer
6400  * trace_buf_size - MCP trace cyclic buffer size in bytes
6401  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6402  *		 buffer.
6403  * data_size - size in bytes of data to parse.
6404  * parsed_buf - destination buffer for parsed data.
6405  * parsed_results_bytes - size of parsed data in bytes.
6406  */
6407 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6408 					       u8 *trace_buf,
6409 					       u32 trace_buf_size,
6410 					       u32 data_offset,
6411 					       u32 data_size,
6412 					       char *parsed_buf,
6413 					       u32 *parsed_results_bytes)
6414 {
6415 	struct dbg_tools_user_data *dev_user_data;
6416 	struct mcp_trace_meta *meta;
6417 	u32 param_mask, param_shift;
6418 	enum dbg_status status;
6419 
6420 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6421 	meta = &dev_user_data->mcp_trace_meta;
6422 	*parsed_results_bytes = 0;
6423 
6424 	if (!meta->is_allocated)
6425 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6426 
6427 	status = DBG_STATUS_OK;
6428 
6429 	while (data_size) {
6430 		struct mcp_trace_format *format_ptr;
6431 		u8 format_level, format_module;
6432 		u32 params[3] = { 0, 0, 0 };
6433 		u32 header, format_idx, i;
6434 
6435 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6436 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6437 
6438 		header = qed_read_from_cyclic_buf(trace_buf,
6439 						  &data_offset,
6440 						  trace_buf_size,
6441 						  MFW_TRACE_ENTRY_SIZE);
6442 		data_size -= MFW_TRACE_ENTRY_SIZE;
6443 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6444 
6445 		/* Skip message if its index doesn't exist in the meta data */
6446 		if (format_idx >= meta->formats_num) {
6447 			u8 format_size = (u8)GET_MFW_FIELD(header,
6448 							   MFW_TRACE_PRM_SIZE);
6449 
6450 			if (data_size < format_size)
6451 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6452 
6453 			data_offset = qed_cyclic_add(data_offset,
6454 						     format_size,
6455 						     trace_buf_size);
6456 			data_size -= format_size;
6457 			continue;
6458 		}
6459 
6460 		format_ptr = &meta->formats[format_idx];
6461 
6462 		for (i = 0,
6463 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6464 		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6465 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6466 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6467 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6468 			/* Extract param size (0..3) */
6469 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6470 					     param_shift);
6471 
6472 			/* If the param size is zero, there are no other
6473 			 * parameters.
6474 			 */
6475 			if (!param_size)
6476 				break;
6477 
6478 			/* Size is encoded using 2 bits, where 3 is used to
6479 			 * encode 4.
6480 			 */
6481 			if (param_size == 3)
6482 				param_size = 4;
6483 
6484 			if (data_size < param_size)
6485 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6486 
6487 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6488 							     &data_offset,
6489 							     trace_buf_size,
6490 							     param_size);
6491 			data_size -= param_size;
6492 		}
6493 
6494 		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6495 						 MCP_TRACE_FORMAT_LEVEL);
6496 		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6497 						  MCP_TRACE_FORMAT_MODULE);
6498 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6499 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6500 
6501 		/* Print current message to results buffer */
6502 		*parsed_results_bytes +=
6503 			sprintf(qed_get_buf_ptr(parsed_buf,
6504 						*parsed_results_bytes),
6505 				"%s %-8s: ",
6506 				s_mcp_trace_level_str[format_level],
6507 				meta->modules[format_module]);
6508 		*parsed_results_bytes +=
6509 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6510 			    format_ptr->format_str,
6511 			    params[0], params[1], params[2]);
6512 	}
6513 
6514 	/* Add string NULL terminator */
6515 	(*parsed_results_bytes)++;
6516 
6517 	return status;
6518 }
6519 
6520 /* Parses an MCP Trace dump buffer.
6521  * If result_buf is not NULL, the MCP Trace results are printed to it.
6522  * In any case, the required results buffer size is assigned to
6523  * parsed_results_bytes.
6524  * The parsing status is returned.
6525  */
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
						u32 *dump_buf,
						char *results_buf,
						u32 *parsed_results_bytes,
						bool free_meta_data)
{
	const char *section_name, *param_name, *param_str_val;
	u32 data_size, trace_data_dwords, trace_meta_dwords;
	u32 offset, results_offset, results_buf_bytes;
	u32 param_num_val, num_section_params;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;
	u8 *trace_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params (also initializes results_offset) */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read trace_data section; its single "size" param is the trace
	 * data length in dwords.
	 */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info: an mcp_trace header followed by the cyclic
	 * trace byte buffer.
	 */
	trace = (struct mcp_trace *)dump_buf;
	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	trace_buf = (u8 *)dump_buf + sizeof(*trace);
	offset = trace->trace_oldest;
	/* Bytes pending between the oldest entry and the producer */
	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer: prefer the meta embedded in the dump,
	 * fall back to a user-provided meta image.
	 */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data */
		struct dbg_tools_user_data *dev_user_data =
			qed_dbg_get_user_data(p_hwfn);

		if (!dev_user_data->mcp_trace_user_meta_buf)
			return DBG_STATUS_MCP_TRACE_NO_META;

		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
	} else {
		/* Dump includes meta data */
		meta_buf = dump_buf;
	}

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
	if (status != DBG_STATUS_OK)
		return status;

	status = qed_parse_mcp_trace_buf(p_hwfn,
					 trace_buf,
					 trace->size,
					 offset,
					 data_size,
					 results_buf ?
					 results_buf + results_offset :
					 NULL,
					 &results_buf_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	if (free_meta_data)
		qed_mcp_trace_free_meta_data(p_hwfn);

	*parsed_results_bytes = results_offset + results_buf_bytes;

	return DBG_STATUS_OK;
}
6625 
6626 /* Parses a Reg FIFO dump buffer.
6627  * If result_buf is not NULL, the Reg FIFO results are printed to it.
6628  * In any case, the required results buffer size is assigned to
6629  * parsed_results_bytes.
6630  * The parsing status is returned.
6631  */
6632 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6633 					       char *results_buf,
6634 					       u32 *parsed_results_bytes)
6635 {
6636 	const char *section_name, *param_name, *param_str_val;
6637 	u32 param_num_val, num_section_params, num_elements;
6638 	struct reg_fifo_element *elements;
6639 	u8 i, j, err_code, vf_val;
6640 	u32 results_offset = 0;
6641 	char vf_str[4];
6642 
6643 	/* Read global_params section */
6644 	dump_buf += qed_read_section_hdr(dump_buf,
6645 					 &section_name, &num_section_params);
6646 	if (strcmp(section_name, "global_params"))
6647 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6648 
6649 	/* Print global params */
6650 	dump_buf += qed_print_section_params(dump_buf,
6651 					     num_section_params,
6652 					     results_buf, &results_offset);
6653 
6654 	/* Read reg_fifo_data section */
6655 	dump_buf += qed_read_section_hdr(dump_buf,
6656 					 &section_name, &num_section_params);
6657 	if (strcmp(section_name, "reg_fifo_data"))
6658 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6659 	dump_buf += qed_read_param(dump_buf,
6660 				   &param_name, &param_str_val, &param_num_val);
6661 	if (strcmp(param_name, "size"))
6662 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6663 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6664 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6665 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6666 	elements = (struct reg_fifo_element *)dump_buf;
6667 
6668 	/* Decode elements */
6669 	for (i = 0; i < num_elements; i++) {
6670 		const char *err_msg = NULL;
6671 
6672 		/* Discover if element belongs to a VF or a PF */
6673 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6674 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6675 			sprintf(vf_str, "%s", "N/A");
6676 		else
6677 			sprintf(vf_str, "%d", vf_val);
6678 
6679 		/* Find error message */
6680 		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
6681 		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
6682 			if (err_code == s_reg_fifo_errors[j].err_code)
6683 				err_msg = s_reg_fifo_errors[j].err_msg;
6684 
6685 		/* Add parsed element to parsed buffer */
6686 		results_offset +=
6687 		    sprintf(qed_get_buf_ptr(results_buf,
6688 					    results_offset),
6689 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
6690 			    elements[i].data,
6691 			    (u32)GET_FIELD(elements[i].data,
6692 					   REG_FIFO_ELEMENT_ADDRESS) *
6693 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6694 			    s_access_strs[GET_FIELD(elements[i].data,
6695 						    REG_FIFO_ELEMENT_ACCESS)],
6696 			    (u32)GET_FIELD(elements[i].data,
6697 					   REG_FIFO_ELEMENT_PF),
6698 			    vf_str,
6699 			    (u32)GET_FIELD(elements[i].data,
6700 					   REG_FIFO_ELEMENT_PORT),
6701 			    s_privilege_strs[GET_FIELD(elements[i].data,
6702 						REG_FIFO_ELEMENT_PRIVILEGE)],
6703 			    s_protection_strs[GET_FIELD(elements[i].data,
6704 						REG_FIFO_ELEMENT_PROTECTION)],
6705 			    s_master_strs[GET_FIELD(elements[i].data,
6706 						    REG_FIFO_ELEMENT_MASTER)],
6707 			    err_msg ? err_msg : "unknown error code");
6708 	}
6709 
6710 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6711 						  results_offset),
6712 				  "fifo contained %d elements", num_elements);
6713 
6714 	/* Add 1 for string NULL termination */
6715 	*parsed_results_bytes = results_offset + 1;
6716 
6717 	return DBG_STATUS_OK;
6718 }
6719 
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  *element, char
						  *results_buf,
						  u32 *results_offset)
{
	const struct igu_fifo_addr_data *found_addr = NULL;
	u8 source, err_type, i, is_cleanup;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u32 wr_data, prod_cons;
	bool is_wr_cmd, is_pf;
	u16 cmd_addr;
	u64 dword12;

	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
	 * FIFO element.
	 */
	dword12 = ((u64)element->dword2 << 32) | element->dword1;
	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);

	/* Reject out-of-range source/error indices before using them to
	 * index the string tables below.
	 */
	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Find address data: locate the address-range descriptor that
	 * contains cmd_addr.
	 */
	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
		const struct igu_fifo_addr_data *curr_addr =
			&s_igu_fifo_addr_data[i];

		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
		    curr_addr->end_addr)
			found_addr = curr_addr;
	}

	if (!found_addr)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Prepare parsed address data (extra detail per address type) */
	switch (found_addr->type) {
	case IGU_ADDR_TYPE_MSIX_MEM:
		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
		break;
	case IGU_ADDR_TYPE_WRITE_INT_ACK:
	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
		sprintf(parsed_addr_data,
			" SB = 0x%x", cmd_addr - found_addr->start_addr);
		break;
	default:
		parsed_addr_data[0] = '\0';
	}

	/* Read commands carry no write data */
	if (!is_wr_cmd) {
		parsed_wr_data[0] = '\0';
		goto out;
	}

	/* Prepare parsed write data */
	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);

	if (source == IGU_SRC_ATTN) {
		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
	} else {
		if (is_cleanup) {
			u8 cleanup_val, cleanup_type;

			cleanup_val =
				GET_FIELD(wr_data,
					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
			cleanup_type =
			    GET_FIELD(wr_data,
				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

			sprintf(parsed_wr_data,
				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
				cleanup_val ? "set" : "clear",
				cleanup_type);
		} else {
			u8 update_flag, en_dis_int_for_sb, segment;
			u8 timer_mask;

			update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
			en_dis_int_for_sb =
				GET_FIELD(wr_data,
					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
			segment = GET_FIELD(wr_data,
					    IGU_FIFO_WR_DATA_SEGMENT);
			timer_mask = GET_FIELD(wr_data,
					       IGU_FIFO_WR_DATA_TIMER_MASK);

			sprintf(parsed_wr_data,
				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
				prod_cons,
				update_flag ? "update" : "nop",
				en_dis_int_for_sb ?
				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
				"enable",
				segment ? "attn" : "regular",
				timer_mask);
		}
	}
out:
	/* Add parsed element to parsed buffer */
	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
						   *results_offset),
				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
				   element->dword2, element->dword1,
				   element->dword0,
				   is_pf ? "pf" : "vf",
				   GET_FIELD(element->dword0,
					     IGU_FIFO_ELEMENT_DWORD0_FID),
				   s_igu_fifo_source_strs[source],
				   is_wr_cmd ? "wr" : "rd",
				   cmd_addr,
				   (!is_pf && found_addr->vf_desc)
				   ? found_addr->vf_desc
				   : found_addr->desc,
				   parsed_addr_data,
				   parsed_wr_data,
				   s_igu_fifo_error_strs[err_type]);

	return DBG_STATUS_OK;
}
6850 
6851 /* Parses an IGU FIFO dump buffer.
 * If results_buf is not NULL, the IGU FIFO results are printed to it.
6853  * In any case, the required results buffer size is assigned to
6854  * parsed_results_bytes.
6855  * The parsing status is returned.
6856  */
6857 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
6858 					       char *results_buf,
6859 					       u32 *parsed_results_bytes)
6860 {
6861 	const char *section_name, *param_name, *param_str_val;
6862 	u32 param_num_val, num_section_params, num_elements;
6863 	struct igu_fifo_element *elements;
6864 	enum dbg_status status;
6865 	u32 results_offset = 0;
6866 	u8 i;
6867 
6868 	/* Read global_params section */
6869 	dump_buf += qed_read_section_hdr(dump_buf,
6870 					 &section_name, &num_section_params);
6871 	if (strcmp(section_name, "global_params"))
6872 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6873 
6874 	/* Print global params */
6875 	dump_buf += qed_print_section_params(dump_buf,
6876 					     num_section_params,
6877 					     results_buf, &results_offset);
6878 
6879 	/* Read igu_fifo_data section */
6880 	dump_buf += qed_read_section_hdr(dump_buf,
6881 					 &section_name, &num_section_params);
6882 	if (strcmp(section_name, "igu_fifo_data"))
6883 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6884 	dump_buf += qed_read_param(dump_buf,
6885 				   &param_name, &param_str_val, &param_num_val);
6886 	if (strcmp(param_name, "size"))
6887 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6888 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
6889 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6890 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
6891 	elements = (struct igu_fifo_element *)dump_buf;
6892 
6893 	/* Decode elements */
6894 	for (i = 0; i < num_elements; i++) {
6895 		status = qed_parse_igu_fifo_element(&elements[i],
6896 						    results_buf,
6897 						    &results_offset);
6898 		if (status != DBG_STATUS_OK)
6899 			return status;
6900 	}
6901 
6902 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6903 						  results_offset),
6904 				  "fifo contained %d elements", num_elements);
6905 
6906 	/* Add 1 for string NULL termination */
6907 	*parsed_results_bytes = results_offset + 1;
6908 
6909 	return DBG_STATUS_OK;
6910 }
6911 
6912 static enum dbg_status
6913 qed_parse_protection_override_dump(u32 *dump_buf,
6914 				   char *results_buf,
6915 				   u32 *parsed_results_bytes)
6916 {
6917 	const char *section_name, *param_name, *param_str_val;
6918 	u32 param_num_val, num_section_params, num_elements;
6919 	struct protection_override_element *elements;
6920 	u32 results_offset = 0;
6921 	u8 i;
6922 
6923 	/* Read global_params section */
6924 	dump_buf += qed_read_section_hdr(dump_buf,
6925 					 &section_name, &num_section_params);
6926 	if (strcmp(section_name, "global_params"))
6927 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6928 
6929 	/* Print global params */
6930 	dump_buf += qed_print_section_params(dump_buf,
6931 					     num_section_params,
6932 					     results_buf, &results_offset);
6933 
6934 	/* Read protection_override_data section */
6935 	dump_buf += qed_read_section_hdr(dump_buf,
6936 					 &section_name, &num_section_params);
6937 	if (strcmp(section_name, "protection_override_data"))
6938 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6939 	dump_buf += qed_read_param(dump_buf,
6940 				   &param_name, &param_str_val, &param_num_val);
6941 	if (strcmp(param_name, "size"))
6942 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6943 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
6944 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6945 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6946 	elements = (struct protection_override_element *)dump_buf;
6947 
6948 	/* Decode elements */
6949 	for (i = 0; i < num_elements; i++) {
6950 		u32 address = GET_FIELD(elements[i].data,
6951 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6952 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6953 
6954 		results_offset +=
6955 		    sprintf(qed_get_buf_ptr(results_buf,
6956 					    results_offset),
6957 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6958 			    i, address,
6959 			    (u32)GET_FIELD(elements[i].data,
6960 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6961 			    (u32)GET_FIELD(elements[i].data,
6962 				      PROTECTION_OVERRIDE_ELEMENT_READ),
6963 			    (u32)GET_FIELD(elements[i].data,
6964 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
6965 			    s_protection_strs[GET_FIELD(elements[i].data,
6966 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6967 			    s_protection_strs[GET_FIELD(elements[i].data,
6968 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6969 	}
6970 
6971 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6972 						  results_offset),
6973 				  "protection override contained %d elements",
6974 				  num_elements);
6975 
6976 	/* Add 1 for string NULL termination */
6977 	*parsed_results_bytes = results_offset + 1;
6978 
6979 	return DBG_STATUS_OK;
6980 }
6981 
6982 /* Parses a FW Asserts dump buffer.
 * If results_buf is not NULL, the FW Asserts results are printed to it.
6984  * In any case, the required results buffer size is assigned to
6985  * parsed_results_bytes.
6986  * The parsing status is returned.
6987  */
static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
						 char *results_buf,
						 u32 *parsed_results_bytes)
{
	u32 num_section_params, param_num_val, i, results_offset = 0;
	const char *param_name, *param_str_val, *section_name;
	bool last_section_found = false;

	/* Default to 0 so early-exit error paths report an empty result */
	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Walk per-storm "fw_asserts" sections until the "last" marker
	 * section is reached. Any other section name is malformed data.
	 */
	while (!last_section_found) {
		dump_buf += qed_read_section_hdr(dump_buf,
						 &section_name,
						 &num_section_params);
		if (!strcmp(section_name, "fw_asserts")) {
			/* Extract params */
			const char *storm_letter = NULL;
			u32 storm_dump_size = 0;

			for (i = 0; i < num_section_params; i++) {
				dump_buf += qed_read_param(dump_buf,
							   &param_name,
							   &param_str_val,
							   &param_num_val);
				if (!strcmp(param_name, "storm"))
					storm_letter = param_str_val;
				else if (!strcmp(param_name, "size"))
					storm_dump_size = param_num_val;
				else
					return
					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
			}

			/* Both "storm" and a non-zero "size" are mandatory */
			if (!storm_letter || !storm_dump_size)
				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

			/* Print data */
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "\n%sSTORM_ASSERT: size=%d\n",
				    storm_letter, storm_dump_size);
			/* Dump the raw assert dwords; dump_buf advances with
			 * the loop, so parsing resumes after this section.
			 */
			for (i = 0; i < storm_dump_size; i++, dump_buf++)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "%08x\n", *dump_buf);
		} else if (!strcmp(section_name, "last")) {
			last_section_found = true;
		} else {
			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
		}
	}

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
7058 
7059 /***************************** Public Functions *******************************/
7060 
7061 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7062 					 const u8 * const bin_ptr)
7063 {
7064 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7065 	u8 buf_id;
7066 
7067 	/* Convert binary data to debug arrays */
7068 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7069 		qed_set_dbg_bin_buf(p_hwfn,
7070 				    (enum bin_dbg_buffer_type)buf_id,
7071 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7072 				    buf_hdrs[buf_id].length);
7073 
7074 	return DBG_STATUS_OK;
7075 }
7076 
7077 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7078 					void **user_data_ptr)
7079 {
7080 	*user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7081 				 GFP_KERNEL);
7082 	if (!(*user_data_ptr))
7083 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7084 
7085 	return DBG_STATUS_OK;
7086 }
7087 
7088 const char *qed_dbg_get_status_str(enum dbg_status status)
7089 {
7090 	return (status <
7091 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7092 }
7093 
7094 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7095 						  u32 *dump_buf,
7096 						  u32 num_dumped_dwords,
7097 						  u32 *results_buf_size)
7098 {
7099 	u32 num_errors, num_warnings;
7100 
7101 	return qed_parse_idle_chk_dump(p_hwfn,
7102 				       dump_buf,
7103 				       num_dumped_dwords,
7104 				       NULL,
7105 				       results_buf_size,
7106 				       &num_errors, &num_warnings);
7107 }
7108 
7109 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7110 					   u32 *dump_buf,
7111 					   u32 num_dumped_dwords,
7112 					   char *results_buf,
7113 					   u32 *num_errors,
7114 					   u32 *num_warnings)
7115 {
7116 	u32 parsed_buf_size;
7117 
7118 	return qed_parse_idle_chk_dump(p_hwfn,
7119 				       dump_buf,
7120 				       num_dumped_dwords,
7121 				       results_buf,
7122 				       &parsed_buf_size,
7123 				       num_errors, num_warnings);
7124 }
7125 
7126 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7127 				     const u32 *meta_buf)
7128 {
7129 	struct dbg_tools_user_data *dev_user_data =
7130 		qed_dbg_get_user_data(p_hwfn);
7131 
7132 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7133 }
7134 
7135 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7136 						   u32 *dump_buf,
7137 						   u32 num_dumped_dwords,
7138 						   u32 *results_buf_size)
7139 {
7140 	return qed_parse_mcp_trace_dump(p_hwfn,
7141 					dump_buf, NULL, results_buf_size, true);
7142 }
7143 
7144 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7145 					    u32 *dump_buf,
7146 					    u32 num_dumped_dwords,
7147 					    char *results_buf)
7148 {
7149 	u32 parsed_buf_size;
7150 
7151 	return qed_parse_mcp_trace_dump(p_hwfn,
7152 					dump_buf,
7153 					results_buf, &parsed_buf_size, true);
7154 }
7155 
7156 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7157 						 u32 *dump_buf,
7158 						 char *results_buf)
7159 {
7160 	u32 parsed_buf_size;
7161 
7162 	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7163 					&parsed_buf_size, false);
7164 }
7165 
7166 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7167 					 u8 *dump_buf,
7168 					 u32 num_dumped_bytes,
7169 					 char *results_buf)
7170 {
7171 	u32 parsed_results_bytes;
7172 
7173 	return qed_parse_mcp_trace_buf(p_hwfn,
7174 				       dump_buf,
7175 				       num_dumped_bytes,
7176 				       0,
7177 				       num_dumped_bytes,
7178 				       results_buf, &parsed_results_bytes);
7179 }
7180 
7181 /* Frees the specified MCP Trace meta data */
7182 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7183 {
7184 	struct dbg_tools_user_data *dev_user_data;
7185 	struct mcp_trace_meta *meta;
7186 	u32 i;
7187 
7188 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7189 	meta = &dev_user_data->mcp_trace_meta;
7190 	if (!meta->is_allocated)
7191 		return;
7192 
7193 	/* Release modules */
7194 	if (meta->modules) {
7195 		for (i = 0; i < meta->modules_num; i++)
7196 			kfree(meta->modules[i]);
7197 		kfree(meta->modules);
7198 	}
7199 
7200 	/* Release formats */
7201 	if (meta->formats) {
7202 		for (i = 0; i < meta->formats_num; i++)
7203 			kfree(meta->formats[i].format_str);
7204 		kfree(meta->formats);
7205 	}
7206 
7207 	meta->is_allocated = false;
7208 }
7209 
7210 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7211 						  u32 *dump_buf,
7212 						  u32 num_dumped_dwords,
7213 						  u32 *results_buf_size)
7214 {
7215 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7216 }
7217 
7218 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7219 					   u32 *dump_buf,
7220 					   u32 num_dumped_dwords,
7221 					   char *results_buf)
7222 {
7223 	u32 parsed_buf_size;
7224 
7225 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7226 }
7227 
7228 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7229 						  u32 *dump_buf,
7230 						  u32 num_dumped_dwords,
7231 						  u32 *results_buf_size)
7232 {
7233 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7234 }
7235 
7236 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7237 					   u32 *dump_buf,
7238 					   u32 num_dumped_dwords,
7239 					   char *results_buf)
7240 {
7241 	u32 parsed_buf_size;
7242 
7243 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7244 }
7245 
7246 enum dbg_status
7247 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7248 					     u32 *dump_buf,
7249 					     u32 num_dumped_dwords,
7250 					     u32 *results_buf_size)
7251 {
7252 	return qed_parse_protection_override_dump(dump_buf,
7253 						  NULL, results_buf_size);
7254 }
7255 
7256 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7257 						      u32 *dump_buf,
7258 						      u32 num_dumped_dwords,
7259 						      char *results_buf)
7260 {
7261 	u32 parsed_buf_size;
7262 
7263 	return qed_parse_protection_override_dump(dump_buf,
7264 						  results_buf,
7265 						  &parsed_buf_size);
7266 }
7267 
7268 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7269 						    u32 *dump_buf,
7270 						    u32 num_dumped_dwords,
7271 						    u32 *results_buf_size)
7272 {
7273 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7274 }
7275 
7276 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7277 					     u32 *dump_buf,
7278 					     u32 num_dumped_dwords,
7279 					     char *results_buf)
7280 {
7281 	u32 parsed_buf_size;
7282 
7283 	return qed_parse_fw_asserts_dump(dump_buf,
7284 					 results_buf, &parsed_buf_size);
7285 }
7286 
/* Parses an attention block result: for every asserted attention status bit,
 * resolves its name from the parsing-strings debug array and logs it via
 * DP_NOTICE (block name, interrupt/parity type, status register address,
 * bit index, and whether the bit is masked).
 */
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
				   struct dbg_attn_block_result *results)
{
	const u32 *block_attn_name_offsets;
	const char *attn_name_base;
	const char *block_name;
	enum dbg_attn_type attn_type;
	u8 num_regs, i, j;

	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
	if (!block_name)
		return DBG_STATUS_INVALID_ARGS;

	/* All three debug arrays are required to translate bits to names */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Per-block table of offsets into the parsing-strings buffer */
	block_attn_name_offsets =
	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
	    results->names_offset;

	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;

	/* Go over registers with a non-zero attention status */
	for (i = 0; i < num_regs; i++) {
		struct dbg_attn_bit_mapping *bit_mapping;
		struct dbg_attn_reg_result *reg_result;
		u8 num_reg_attn, bit_idx = 0;

		reg_result = &results->reg_results[i];
		num_reg_attn = GET_FIELD(reg_result->data,
					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
		bit_mapping = (struct dbg_attn_bit_mapping *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
		    reg_result->block_attn_offset;

		/* Go over attention status bits */
		for (j = 0; j < num_reg_attn; j++, bit_idx++) {
			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
						     DBG_ATTN_BIT_MAPPING_VAL);
			const char *attn_name, *attn_type_str, *masked_str;
			u32 attn_name_offset;
			u32 sts_addr;

			/* Check if bit mask should be advanced (due to unused
			 * bits).
			 */
			if (GET_FIELD(bit_mapping[j].data,
				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
				bit_idx += (u8)attn_idx_val;
				continue;
			}

			/* Check current bit index */
			if (!(reg_result->sts_val & BIT(bit_idx)))
				continue;

			/* An attention bit with value=1 was found
			 * Find attention name
			 */
			attn_name_offset =
				block_attn_name_offsets[attn_idx_val];
			attn_name = attn_name_base + attn_name_offset;
			attn_type_str =
				(attn_type ==
				 ATTN_TYPE_INTERRUPT ? "Interrupt" :
				 "Parity");
			masked_str = reg_result->mask_val & BIT(bit_idx) ?
				     " [masked]" : "";
			sts_addr = GET_FIELD(reg_result->data,
					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
			/* sts_addr is scaled by 4 for the log - presumably a
			 * dword address converted to bytes; confirm against
			 * the HSI definition.
			 */
			DP_NOTICE(p_hwfn,
				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
				  block_name, attn_type_str, attn_name,
				  sts_addr * 4, bit_idx, masked_str);
		}
	}

	return DBG_STATUS_OK;
}
7370 
/* Serializes debug data collection; held by qed_dbg_all_data() while it
 * switches the debug engine across hw functions.
 */
static DEFINE_MUTEX(qed_dbg_lock);
7372 
7373 /* Wrapper for unifying the idle_chk and mcp_trace api */
7374 static enum dbg_status
7375 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7376 				   u32 *dump_buf,
7377 				   u32 num_dumped_dwords,
7378 				   char *results_buf)
7379 {
7380 	u32 num_errors, num_warnnings;
7381 
7382 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7383 					  results_buf, &num_errors,
7384 					  &num_warnnings);
7385 }
7386 
7387 /* Feature meta data lookup table */
7388 static struct {
7389 	char *name;
7390 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7391 				    struct qed_ptt *p_ptt, u32 *size);
7392 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7393 					struct qed_ptt *p_ptt, u32 *dump_buf,
7394 					u32 buf_size, u32 *dumped_dwords);
7395 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7396 					 u32 *dump_buf, u32 num_dumped_dwords,
7397 					 char *results_buf);
7398 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7399 					    u32 *dump_buf,
7400 					    u32 num_dumped_dwords,
7401 					    u32 *results_buf_size);
7402 } qed_features_lookup[] = {
7403 	{
7404 	"grc", qed_dbg_grc_get_dump_buf_size,
7405 		    qed_dbg_grc_dump, NULL, NULL}, {
7406 	"idle_chk",
7407 		    qed_dbg_idle_chk_get_dump_buf_size,
7408 		    qed_dbg_idle_chk_dump,
7409 		    qed_print_idle_chk_results_wrapper,
7410 		    qed_get_idle_chk_results_buf_size}, {
7411 	"mcp_trace",
7412 		    qed_dbg_mcp_trace_get_dump_buf_size,
7413 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7414 		    qed_get_mcp_trace_results_buf_size}, {
7415 	"reg_fifo",
7416 		    qed_dbg_reg_fifo_get_dump_buf_size,
7417 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7418 		    qed_get_reg_fifo_results_buf_size}, {
7419 	"igu_fifo",
7420 		    qed_dbg_igu_fifo_get_dump_buf_size,
7421 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7422 		    qed_get_igu_fifo_results_buf_size}, {
7423 	"protection_override",
7424 		    qed_dbg_protection_override_get_dump_buf_size,
7425 		    qed_dbg_protection_override_dump,
7426 		    qed_print_protection_override_results,
7427 		    qed_get_protection_override_results_buf_size}, {
7428 	"fw_asserts",
7429 		    qed_dbg_fw_asserts_get_dump_buf_size,
7430 		    qed_dbg_fw_asserts_dump,
7431 		    qed_print_fw_asserts_results,
7432 		    qed_get_fw_asserts_results_buf_size}, {
7433 	"ilt",
7434 		    qed_dbg_ilt_get_dump_buf_size,
7435 		    qed_dbg_ilt_dump, NULL, NULL},};
7436 
7437 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7438 {
7439 	u32 i, precision = 80;
7440 
7441 	if (!p_text_buf)
7442 		return;
7443 
7444 	pr_notice("\n%.*s", precision, p_text_buf);
7445 	for (i = precision; i < text_size; i += precision)
7446 		pr_cont("%.*s", precision, p_text_buf + i);
7447 	pr_cont("\n");
7448 }
7449 
7450 #define QED_RESULTS_BUF_MIN_SIZE 16
7451 /* Generic function for decoding debug feature info */
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
				      enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 text_size_bytes, null_char_pos, i;
	enum dbg_status rc;
	char *text_buf;

	/* Check if feature supports formatting capability */
	if (!qed_features_lookup[feature_idx].results_buf_size)
		return DBG_STATUS_OK;

	/* Obtain size of formatted output */
	rc = qed_features_lookup[feature_idx].
		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
				 feature->dumped_dwords, &text_size_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Make sure that the allocated size is a multiple of dword (4 bytes).
	 * Remember where the reported NUL terminator falls so the padding
	 * below can start from it.
	 */
	null_char_pos = text_size_bytes - 1;
	text_size_bytes = (text_size_bytes + 3) & ~0x3;

	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
		DP_NOTICE(p_hwfn->cdev,
			  "formatted size of feature was too small %d. Aborting\n",
			  text_size_bytes);
		return DBG_STATUS_INVALID_ARGS;
	}

	/* Allocate temp text buf */
	text_buf = vzalloc(text_size_bytes);
	if (!text_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Decode feature opcodes to string on temp buf */
	rc = qed_features_lookup[feature_idx].
		print_results(p_hwfn, (u32 *)feature->dump_buf,
			      feature->dumped_dwords, text_buf);
	if (rc != DBG_STATUS_OK) {
		vfree(text_buf);
		return rc;
	}

	/* Replace the original null character with a '\n' character.
	 * The bytes that were added as a result of the dword alignment are also
	 * padded with '\n' characters.
	 */
	for (i = null_char_pos; i < text_size_bytes; i++)
		text_buf[i] = '\n';

	/* Dump printable feature to log */
	if (p_hwfn->cdev->print_dbg_data)
		qed_dbg_print_feature(text_buf, text_size_bytes);

	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer. Ownership of text_buf transfers to the
	 * feature; it is released on the next dump or by the feature teardown.
	 */
	vfree(feature->dump_buf);
	feature->dump_buf = text_buf;
	feature->buf_size = text_size_bytes;
	feature->dumped_dwords = text_size_bytes / 4;
	return rc;
}
7517 
7518 #define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF
7519 
7520 /* Generic function for performing the dump of a debug feature. */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 buf_size_dwords;
	enum dbg_status rc;

	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
		  qed_features_lookup[feature_idx].name);

	/* Dump_buf was already allocated need to free (this can happen if dump
	 * was called but file was never read).
	 * We can't use the buffer as is since size may have changed.
	 */
	if (feature->dump_buf) {
		vfree(feature->dump_buf);
		feature->dump_buf = NULL;
	}

	/* Get buffer size from hsi, allocate accordingly, and perform the
	 * dump. A NVRAM_GET_IMAGE failure is tolerated here and handled
	 * after the dump below.
	 */
	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return rc;

	/* An oversized feature is skipped (not an error): buf_size is zeroed
	 * and the function reports success with nothing collected.
	 */
	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
		feature->buf_size = 0;
		DP_NOTICE(p_hwfn->cdev,
			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
			  qed_features_lookup[feature_idx].name,
			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);

		return DBG_STATUS_OK;
	}

	feature->buf_size = buf_size_dwords * sizeof(u32);
	feature->dump_buf = vmalloc(feature->buf_size);
	if (!feature->dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = qed_features_lookup[feature_idx].
		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
			     feature->buf_size / sizeof(u32),
			     &feature->dumped_dwords);

	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
	 * In this case the buffer holds valid binary data, but we won't be
	 * able to parse it (since parsing relies on data in NVRAM which is
	 * only accessible when MFW is responsive). Skip the formatting but
	 * return success so that binary data is provided.
	 */
	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return DBG_STATUS_OK;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Format output */
	rc = format_feature(p_hwfn, feature_idx);
	return rc;
}
7586 
7587 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7588 {
7589 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7590 }
7591 
7592 int qed_dbg_grc_size(struct qed_dev *cdev)
7593 {
7594 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7595 }
7596 
7597 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7598 {
7599 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7600 			       num_dumped_bytes);
7601 }
7602 
7603 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7604 {
7605 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7606 }
7607 
7608 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7609 {
7610 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7611 			       num_dumped_bytes);
7612 }
7613 
7614 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7615 {
7616 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7617 }
7618 
7619 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7620 {
7621 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7622 			       num_dumped_bytes);
7623 }
7624 
7625 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7626 {
7627 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7628 }
7629 
7630 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7631 				    enum qed_nvm_images image_id, u32 *length)
7632 {
7633 	struct qed_nvm_image_att image_att;
7634 	int rc;
7635 
7636 	*length = 0;
7637 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7638 	if (rc)
7639 		return rc;
7640 
7641 	*length = image_att.length;
7642 
7643 	return rc;
7644 }
7645 
/* Reads an NVM image into the caller-provided buffer and reports the number
 * of bytes written (rounded up to a dword multiple). On failure
 * *num_dumped_bytes stays 0.
 */
static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
			     u32 *num_dumped_bytes,
			     enum qed_nvm_images image_id)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 len_rounded, i;
	__be32 val;
	int rc;

	*num_dumped_bytes = 0;
	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
	if (rc)
		return rc;

	DP_NOTICE(p_hwfn->cdev,
		  "Collecting a debug feature [\"nvram image %d\"]\n",
		  image_id);

	/* Round up so the byte-swap loop below always works on whole dwords */
	len_rounded = roundup(len_rounded, sizeof(u32));
	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
	if (rc)
		return rc;

	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
	if (image_id != QED_NVM_IMAGE_NVM_META)
		for (i = 0; i < len_rounded; i += 4) {
			/* Rewrite each dword in place as big-endian */
			val = cpu_to_be32(*(u32 *)(buffer + i));
			*(u32 *)(buffer + i) = val;
		}

	*num_dumped_bytes = len_rounded;

	return rc;
}
7681 
7682 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7683 				u32 *num_dumped_bytes)
7684 {
7685 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7686 			       num_dumped_bytes);
7687 }
7688 
7689 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7690 {
7691 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7692 }
7693 
7694 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7695 		       u32 *num_dumped_bytes)
7696 {
7697 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7698 			       num_dumped_bytes);
7699 }
7700 
7701 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7702 {
7703 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7704 }
7705 
7706 int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7707 {
7708 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
7709 }
7710 
7711 int qed_dbg_ilt_size(struct qed_dev *cdev)
7712 {
7713 	return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
7714 }
7715 
7716 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7717 		      u32 *num_dumped_bytes)
7718 {
7719 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7720 			       num_dumped_bytes);
7721 }
7722 
7723 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7724 {
7725 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7726 }
7727 
7728 /* Defines the amount of bytes allocated for recording the length of debugfs
7729  * feature buffer.
7730  */
7731 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7732 #define REGDUMP_HEADER_SIZE_SHIFT		0
7733 #define REGDUMP_HEADER_SIZE_MASK		0xffffff
7734 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7735 #define REGDUMP_HEADER_FEATURE_MASK		0x3f
7736 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7737 #define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
7738 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7739 #define REGDUMP_HEADER_ENGINE_MASK		0x1
7740 #define REGDUMP_MAX_SIZE			0x1000000
7741 #define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)
7742 
/* Feature IDs encoded into each regdump section header
 * (REGDUMP_HEADER_FEATURE field, see qed_calc_regdump_header()).
 * NOTE(review): these numeric values end up in dump files, so they are
 * presumably consumed by external parsing tools - confirm before
 * renumbering.
 */
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
	NVM_CFG1 = 9,
	DEFAULT_CFG = 10,
	NVM_META = 11,
	MDUMP = 12,
	ILT_DUMP = 13,
};
7759 
7760 static u32 qed_calc_regdump_header(struct qed_dev *cdev,
7761 				   enum debug_print_features feature,
7762 				   int engine, u32 feature_size, u8 omit_engine)
7763 {
7764 	u32 res = 0;
7765 
7766 	SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
7767 	if (res != feature_size)
7768 		DP_NOTICE(cdev,
7769 			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
7770 			  feature, feature_size);
7771 
7772 	SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
7773 	SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
7774 	SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
7775 
7776 	return res;
7777 }
7778 
/* Collect every debug feature from every engine into one contiguous
 * @buffer. For each hwfn: two idle_chk passes, reg_fifo, igu_fifo,
 * protection_override, fw_asserts, optionally ILT, and finally GRC.
 * Then the engine-common features: mcp_trace and the NVM images
 * (cfg1, default cfg, meta, mdump). Each feature is preceded by a
 * u32 header built by qed_calc_regdump_header().
 *
 * The caller must supply a buffer of at least qed_dbg_all_data_size()
 * bytes. Always returns 0; a failing feature is logged and skipped,
 * leaving the remaining features intact.
 */
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
	u8 cur_engine, omit_engine = 0, org_engine;
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	int grc_params[MAX_DBG_GRC_PARAMS], i;
	u32 offset = 0, feature_size;
	int rc;

	/* Snapshot the GRC parameters; the dumps below may change them and
	 * they are restored right before the final GRC dump.
	 */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		grc_params[i] = dev_data->grc.param_val[i];

	/* Single-engine (non-CMT) device: the engine bit in the headers
	 * carries no information, so mark it as omitted.
	 */
	if (!QED_IS_CMT(cdev))
		omit_engine = 1;

	mutex_lock(&qed_dbg_lock);

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);

		/* First idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* Second idle_chk (NOTE(review): presumably run twice so
		 * conditions that change between the passes stand out when
		 * the dump is analyzed — confirm with the tools team).
		 */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		}

		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, FW_ASSERTS,
						    cur_engine, feature_size,
						    omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* ILT dump — skipped when globally disabled or when its size
		 * exceeds ILT_DUMP_MAX_SIZE (a negative errno from
		 * qed_dbg_ilt_size() wraps to a huge u32 and also fails
		 * this check).
		 */
		feature_size = qed_dbg_ilt_size(cdev);
		if (!cdev->disable_ilt_dump &&
		    feature_size < ILT_DUMP_MAX_SIZE) {
			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
					 REGDUMP_HEADER_SIZE, &feature_size);
			if (!rc) {
				*(u32 *)((u8 *)buffer + offset) =
				    qed_calc_regdump_header(cdev, ILT_DUMP,
							    cur_engine,
							    feature_size,
							    omit_engine);
				offset += feature_size + REGDUMP_HEADER_SIZE;
			} else {
				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
				       rc);
			}
		}

		/* GRC dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
			dev_data->grc.param_val[i] = grc_params[i];

		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, GRC_DUMP,
						    cur_engine,
						    feature_size, omit_engine);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
		}
	}

	qed_set_debug_engine(cdev, org_engine);

	/* mcp_trace.
	 * NOTE(review): from this point cur_engine == cdev->num_hwfns (one
	 * past the last engine) and is still written into the headers below.
	 * This only matters when omit_engine == 0 — confirm dump readers
	 * ignore the engine bit for engine-common features.
	 */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	/* nvm cfg1 — -ENOENT means the image simply isn't present in NVM
	 * and is not an error (same for the images below).
	 */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_CFG1);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
	}

	/* nvm default */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
			       &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
					    feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
		       rc);
	}

	/* nvm meta */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
			       &feature_size, QED_NVM_IMAGE_NVM_META);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
			qed_calc_regdump_header(cdev, NVM_META, cur_engine,
						feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
	}

	/* nvm mdump */
	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_MDUMP);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
			qed_calc_regdump_header(cdev, MDUMP, cur_engine,
						feature_size, omit_engine);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
	}

	mutex_unlock(&qed_dbg_lock);

	return 0;
}
7999 
8000 int qed_dbg_all_data_size(struct qed_dev *cdev)
8001 {
8002 	struct qed_hwfn *p_hwfn =
8003 		&cdev->hwfns[cdev->engine_for_debug];
8004 	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8005 	u8 cur_engine, org_engine;
8006 
8007 	cdev->disable_ilt_dump = false;
8008 	org_engine = qed_get_debug_engine(cdev);
8009 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8010 		/* Engine specific */
8011 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8012 			   "calculating idle_chk and grcdump register length for current engine\n");
8013 		qed_set_debug_engine(cdev, cur_engine);
8014 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8015 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8016 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8017 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8018 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8019 			    REGDUMP_HEADER_SIZE +
8020 			    qed_dbg_protection_override_size(cdev) +
8021 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8022 
8023 		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8024 		if (ilt_len < ILT_DUMP_MAX_SIZE) {
8025 			total_ilt_len += ilt_len;
8026 			regs_len += ilt_len;
8027 		}
8028 	}
8029 
8030 	qed_set_debug_engine(cdev, org_engine);
8031 
8032 	/* Engine common */
8033 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8034 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8035 	if (image_len)
8036 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8037 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8038 	if (image_len)
8039 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8040 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8041 	if (image_len)
8042 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8043 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8044 	if (image_len)
8045 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8046 
8047 	if (regs_len > REGDUMP_MAX_SIZE) {
8048 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8049 			   "Dump exceeds max size 0x%x, disable ILT dump\n",
8050 			   REGDUMP_MAX_SIZE);
8051 		cdev->disable_ilt_dump = true;
8052 		regs_len -= total_ilt_len;
8053 	}
8054 
8055 	return regs_len;
8056 }
8057 
8058 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8059 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8060 {
8061 	struct qed_hwfn *p_hwfn =
8062 		&cdev->hwfns[cdev->engine_for_debug];
8063 	struct qed_dbg_feature *qed_feature =
8064 		&cdev->dbg_features[feature];
8065 	enum dbg_status dbg_rc;
8066 	struct qed_ptt *p_ptt;
8067 	int rc = 0;
8068 
8069 	/* Acquire ptt */
8070 	p_ptt = qed_ptt_acquire(p_hwfn);
8071 	if (!p_ptt)
8072 		return -EINVAL;
8073 
8074 	/* Get dump */
8075 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8076 	if (dbg_rc != DBG_STATUS_OK) {
8077 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8078 			   qed_dbg_get_status_str(dbg_rc));
8079 		*num_dumped_bytes = 0;
8080 		rc = -EINVAL;
8081 		goto out;
8082 	}
8083 
8084 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8085 		   "copying debugfs feature to external buffer\n");
8086 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8087 	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
8088 			    4;
8089 
8090 out:
8091 	qed_ptt_release(p_hwfn, p_ptt);
8092 	return rc;
8093 }
8094 
8095 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8096 {
8097 	struct qed_hwfn *p_hwfn =
8098 		&cdev->hwfns[cdev->engine_for_debug];
8099 	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8100 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8101 	u32 buf_size_dwords;
8102 	enum dbg_status rc;
8103 
8104 	if (!p_ptt)
8105 		return -EINVAL;
8106 
8107 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8108 						   &buf_size_dwords);
8109 	if (rc != DBG_STATUS_OK)
8110 		buf_size_dwords = 0;
8111 
8112 	/* Feature will not be dumped if it exceeds maximum size */
8113 	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8114 		buf_size_dwords = 0;
8115 
8116 	qed_ptt_release(p_hwfn, p_ptt);
8117 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8118 	return qed_feature->buf_size;
8119 }
8120 
/* Return the engine (hwfn index) currently selected for debug dumps. */
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
	return cdev->engine_for_debug;
}
8125 
/* Select the engine (hwfn index) that subsequent debug dumps operate on.
 * @engine_number is not range-checked here; callers pass indices below
 * cdev->num_hwfns.
 */
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
		   engine_number);
	cdev->engine_for_debug = engine_number;
}
8132 
8133 void qed_dbg_pf_init(struct qed_dev *cdev)
8134 {
8135 	const u8 *dbg_values = NULL;
8136 	int i;
8137 
8138 	/* Debug values are after init values.
8139 	 * The offset is the first dword of the file.
8140 	 */
8141 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8142 
8143 	for_each_hwfn(cdev, i) {
8144 		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8145 		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8146 	}
8147 
8148 	/* Set the hwfn to be 0 as default */
8149 	cdev->engine_for_debug = 0;
8150 }
8151 
8152 void qed_dbg_pf_exit(struct qed_dev *cdev)
8153 {
8154 	struct qed_dbg_feature *feature = NULL;
8155 	enum qed_dbg_features feature_idx;
8156 
8157 	/* debug features' buffers may be allocated if debug feature was used
8158 	 * but dump wasn't called
8159 	 */
8160 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8161 		feature = &cdev->dbg_features[feature_idx];
8162 		if (feature->dump_buf) {
8163 			vfree(feature->dump_buf);
8164 			feature->dump_buf = NULL;
8165 		}
8166 	}
8167 }
8168