1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015 QLogic Corporation
4  * Copyright (c) 2019-2021 Marvell International Ltd.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/vmalloc.h>
9 #include <linux/crc32.h>
10 #include "qed.h"
11 #include "qed_cxt.h"
12 #include "qed_hsi.h"
13 #include "qed_dbg_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
/* Memory groups enum.
 * NOTE: entries must stay in the exact same order as the
 * s_mem_group_names[] string table below.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM		/* Number of memory groups */
};
52 
/* Memory groups names.
 * Indexed by enum mem_groups; must contain exactly MEM_GROUPS_NUM entries,
 * in the same order as the enum.
 */
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"CAU_MEM_EXT",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
	"TM_MEM",
	"TASK_CFC_MEM",
};
86 
87 /* Idle check conditions */
88 
89 static u32 cond5(const u32 *r, const u32 *imm)
90 {
91 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
92 }
93 
94 static u32 cond7(const u32 *r, const u32 *imm)
95 {
96 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
97 }
98 
99 static u32 cond6(const u32 *r, const u32 *imm)
100 {
101 	return (r[0] & imm[0]) != imm[1];
102 }
103 
104 static u32 cond9(const u32 *r, const u32 *imm)
105 {
106 	return ((r[0] & imm[0]) >> imm[1]) !=
107 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
108 }
109 
110 static u32 cond10(const u32 *r, const u32 *imm)
111 {
112 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
113 }
114 
115 static u32 cond4(const u32 *r, const u32 *imm)
116 {
117 	return (r[0] & ~imm[0]) != imm[1];
118 }
119 
120 static u32 cond0(const u32 *r, const u32 *imm)
121 {
122 	return (r[0] & ~r[1]) != imm[0];
123 }
124 
125 static u32 cond14(const u32 *r, const u32 *imm)
126 {
127 	return (r[0] | imm[0]) != imm[1];
128 }
129 
130 static u32 cond1(const u32 *r, const u32 *imm)
131 {
132 	return r[0] != imm[0];
133 }
134 
135 static u32 cond11(const u32 *r, const u32 *imm)
136 {
137 	return r[0] != r[1] && r[2] == imm[0];
138 }
139 
140 static u32 cond12(const u32 *r, const u32 *imm)
141 {
142 	return r[0] != r[1] && r[2] > imm[0];
143 }
144 
145 static u32 cond3(const u32 *r, const u32 *imm)
146 {
147 	return r[0] != r[1];
148 }
149 
150 static u32 cond13(const u32 *r, const u32 *imm)
151 {
152 	return r[0] & imm[0];
153 }
154 
155 static u32 cond8(const u32 *r, const u32 *imm)
156 {
157 	return r[0] < (r[1] - imm[0]);
158 }
159 
160 static u32 cond2(const u32 *r, const u32 *imm)
161 {
162 	return r[0] > imm[0];
163 }
164 
/* Array of Idle Check conditions.
 * Indexed by the condition id stored in the idle-check rule data;
 * order (cond0..cond14) must not change.
 */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
	cond14,
};
183 
/* Number of physical HW blocks */
#define NUM_PHYS_BLOCKS 84

/* Number of debug reset registers */
#define NUM_DBG_RESET_REGS 8
187 
188 /******************************* Data Types **********************************/
189 
/* Supported HW platform types (only ASIC is used; the rest are reserved) */
enum hw_types {
	HW_TYPE_ASIC,
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};

/* CM context types */
enum cm_ctx_types {
	CM_CTX_CONN_AG,
	CM_CTX_CONN_ST,
	CM_CTX_TASK_AG,
	CM_CTX_TASK_ST,
	NUM_CM_CTX_TYPES
};

/* Debug bus frame modes */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dword, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
	DBG_BUS_NUM_FRAME_MODES
};

/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
	DBG_BUS_SEMI_FRAME_MODE_4FAST = 0,	/* 4 fast dw */
	DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
	DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3,	/* 4 slow dw */
	DBG_BUS_SEMI_NUM_FRAME_MODES
};

/* Debug bus filter types */
enum dbg_bus_filter_types {
	DBG_BUS_FILTER_TYPE_OFF,	/* Filter always off */
	DBG_BUS_FILTER_TYPE_PRE,	/* Filter before trigger only */
	DBG_BUS_FILTER_TYPE_POST,	/* Filter after trigger only */
	DBG_BUS_FILTER_TYPE_ON	/* Filter always on */
};

/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
	DBG_BUS_PRE_TRIGGER_FROM_ZERO,	/* Record from time 0 */
	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,	/* Record some chunks before trigger */
	DBG_BUS_PRE_TRIGGER_DROP	/* Drop data before trigger */
};

/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
	DBG_BUS_POST_TRIGGER_RECORD,	/* Start recording after trigger */
	DBG_BUS_POST_TRIGGER_DROP	/* Drop data after trigger */
};

/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
	DBG_BUS_OTHER_ENGINE_MODE_NONE,
	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
};
256 
/* DBG block Framing mode definitions */
struct framing_mode_defs {
	u8 id;			/* dbg_bus_frame_modes value */
	u8 blocks_dword_mask;	/* dword lanes carrying HW block data */
	u8 storms_dword_mask;	/* dword lanes carrying Storm data */
	u8 semi_framing_mode_id;	/* matching dbg_bus_semi_frame_modes id */
	u8 full_buf_thr;
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;
	u8 dwords_per_cycle;
	u8 num_framing_modes;
	u32 num_ilt_pages;
	/* Framing modes table, in descending preference order */
	struct framing_mode_defs *framing_modes;
};

/* HW type constant definitions */
struct hw_type_defs {
	const char *name;
	u32 delay_factor;
	u32 dmae_thresh;
	u32 log_thresh;
};

/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;
	u32 reset_val[MAX_CHIP_IDS];	/* per-chip reset value */
};

/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;		/* Storm letter, e.g. 'T' for Tstorm */
	enum block_id sem_block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
	u32 src_disable_reg_addr;
	u32 src_enable_val;
	bool exists[MAX_CHIP_IDS];	/* per-chip availability */
};

/* GRC dump parameter definitions */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];
	u32 min;
	u32 max;
	bool is_preset;
	bool is_persistent;
	u32 exclude_all_preset_val;
	u32 crash_preset_val[MAX_CHIP_IDS];
};

/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];
};

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

/* Big RAM (BRB/BTB/BMB) definitions */
struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};

struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/* Split type definitions */
struct split_type_defs {
	const char *name;
};
386 
387 /******************************** Constants **********************************/
388 
#define BYTES_IN_DWORD			sizeof(u32)
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	 ((int)(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Writes 'val' into the bit field of the dword array 'var' */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &=	\
		(~FIELD_BIT_MASK(type, field));	\
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* Writes an array to a single GRC address (relies on caller-scope 'i') */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr,	(arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)

/* extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE			true
#define PROTECT_WIDE_BUS		true

#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_NAME_LEN		3

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET	4

#define STALL_DELAY_MS			500

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	11

#define MAX_RECURSION_DEPTH		10

/* Firmware image identifiers */
#define FW_IMG_KUKU                     0
#define FW_IMG_MAIN			1
#define FW_IMG_L2B                      2

#define REG_FIFO_ELEMENT_DWORDS		2
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS		4
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

/* "PLTAFORM" misspelling is historical; kept for compatibility */
#define MAX_SW_PLTAFORM_STR_SIZE	64

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
524 
525 /***************************** Constant Arrays *******************************/
526 
/* DBG block framing mode definitions, in descending preference order.
 * Each entry: {id, blocks_dword_mask, storms_dword_mask,
 * semi_framing_mode_id, full_buf_thr}.
 */
static struct framing_mode_defs s_framing_mode_defs[4] = {
	{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
	 DBG_BUS_SEMI_FRAME_MODE_4FAST,
	 10},
	{DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
	 10},
	{DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
	 DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
	{DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
	 DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
};

/* Chip constant definitions array, indexed by chip id (CHIP_BB, CHIP_K2) */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
	 s_framing_mode_defs},
	{"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
	 s_framing_mode_defs}
};
547 
/* Storm constant definitions array. Field order follows struct storm_defs;
 * per-chip arrays are ordered {bb, k2}.
 */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
		true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
		TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
		TCM_REG_CTX_RBC_ACCS,
		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
		 TCM_REG_SM_TASK_CTX},
		{{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
	},

	/* Mstorm */
	{'M', BLOCK_MSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
		false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE,
		MSEM_REG_SLOW_DBG_ACTIVE,
		MSEM_REG_SLOW_DBG_MODE,
		MSEM_REG_DBG_MODE1_CFG,
		MSEM_REG_SYNC_DBG_EMPTY,
		MSEM_REG_DBG_GPRE_VECT,
		MCM_REG_CTX_RBC_ACCS,
		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
		 MCM_REG_SM_TASK_CTX },
		{{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
	},

	/* Ustorm */
	{'U', BLOCK_USEM,
		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
		false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE,
		USEM_REG_SLOW_DBG_ACTIVE,
		USEM_REG_SLOW_DBG_MODE,
		USEM_REG_DBG_MODE1_CFG,
		USEM_REG_SYNC_DBG_EMPTY,
		USEM_REG_DBG_GPRE_VECT,
		UCM_REG_CTX_RBC_ACCS,
		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
		 UCM_REG_SM_TASK_CTX},
		{{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
	},

	/* Xstorm */
	{'X', BLOCK_XSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
		false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE,
		XSEM_REG_SLOW_DBG_ACTIVE,
		XSEM_REG_SLOW_DBG_MODE,
		XSEM_REG_DBG_MODE1_CFG,
		XSEM_REG_SYNC_DBG_EMPTY,
		XSEM_REG_DBG_GPRE_VECT,
		XCM_REG_CTX_RBC_ACCS,
		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
		{{9, 15, 0, 0}, {9, 15,	0, 0}} /* {bb} {k2} */
	},

	/* Ystorm */
	{'Y', BLOCK_YSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
		false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE,
		YSEM_REG_SLOW_DBG_ACTIVE,
		YSEM_REG_SLOW_DBG_MODE,
		YSEM_REG_DBG_MODE1_CFG,
		YSEM_REG_SYNC_DBG_EMPTY,
		YSEM_REG_DBG_GPRE_VECT,
		YCM_REG_CTX_RBC_ACCS,
		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
		 YCM_REG_SM_TASK_CTX},
		{{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
	},

	/* Pstorm */
	{'P', BLOCK_PSEM,
		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
		true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE,
		PSEM_REG_SLOW_DBG_ACTIVE,
		PSEM_REG_SLOW_DBG_MODE,
		PSEM_REG_DBG_MODE1_CFG,
		PSEM_REG_SYNC_DBG_EMPTY,
		PSEM_REG_DBG_GPRE_VECT,
		PCM_REG_CTX_RBC_ACCS,
		{0, PCM_REG_SM_CON_CTX, 0, 0},
		{{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
	},
};
647 
/* HW type definitions, indexed by enum hw_types:
 * {name, delay_factor, dmae_thresh, log_thresh}
 */
static struct hw_type_defs s_hw_type_defs[] = {
	/* HW_TYPE_ASIC */
	{"asic", 1, 256, 32768},
	{"reserved", 0, 0, 0},
	{"reserved2", 0, 0, 0},
	{"reserved3", 0, 0, 0},
	{"reserved4", 0, 0, 0}
};
656 
/* GRC parameter definitions, indexed by enum dbg_grc_params. Each entry:
 * {{bb default, k2 default}, min, max, is_preset, is_persistent,
 *  exclude_all_preset_val, {bb crash preset, k2 crash preset}}
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_VFC */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DORQ */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IGU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BMB */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED1 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_UNSTALL */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED2 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_CRASH */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PHY */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_MCP */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_FW_VER */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED3 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
	{{0, 1}, 0, 1, false, false, 0, {0, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
796 
/* RSS memory definitions: {mem_name, type_name, addr (128b units),
 * entry_width (bits), {bb num_entries, k2 num_entries}}
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid", "rss_cid", 0, 32,
	 {256, 320}},

	{"rss_mem_key_msb", "rss_key", 1024, 256,
	 {128, 208}},

	{"rss_mem_key_lsb", "rss_key", 2048, 64,
	 {128, 208}},

	{"rss_mem_info", "rss_info", 3072, 16,
	 {128, 208}},

	{"rss_mem_ind", "rss_ind", 4096, 16,
	 {16384, 26624}}
};

/* VFC RAM definitions: {mem_name, type_name, base_row, num_rows} */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};

/* Big RAM definitions; per-chip arrays are ordered {bb, k2} */
static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224}},

	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760}},

	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864}}
};

/* RBC reset registers and their per-chip {bb, k2} reset values */
static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400}},
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040}},
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007}},
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2}},
};

/* PHY definitions (K2 only register addresses) */
static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
	{"sgmii_phy", MS_REG_MS_CMU_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};

/* Split type names, indexed by the split type enum */
static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng"},

	/* SPLIT_TYPE_PORT */
	{"port"},

	/* SPLIT_TYPE_PF */
	{"pf"},

	/* SPLIT_TYPE_PORT_PF */
	{"port"},

	/* SPLIT_TYPE_VF */
	{"vf"}
};
888 
889 /******************************** Variables **********************************/
890 
/* The version of the calling app; must be set to a non-zero value
 * before qed_dbg_dev_init() will succeed.
 */
static u32 s_app_ver;
893 
894 /**************************** Private Functions ******************************/
895 
/* Container for compile-time (static) assertions; intentionally empty here */
static void qed_static_asserts(void)
{
}
899 
900 /* Reads and returns a single dword from the specified unaligned buffer */
901 static u32 qed_read_unaligned_dword(u8 *buf)
902 {
903 	u32 dword;
904 
905 	memcpy((u8 *)&dword, buf, sizeof(dword));
906 	return dword;
907 }
908 
909 /* Sets the value of the specified GRC param */
910 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
911 			      enum dbg_grc_params grc_param, u32 val)
912 {
913 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
914 
915 	dev_data->grc.param_val[grc_param] = val;
916 }
917 
918 /* Returns the value of the specified GRC param */
919 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
920 			     enum dbg_grc_params grc_param)
921 {
922 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
923 
924 	return dev_data->grc.param_val[grc_param];
925 }
926 
927 /* Initializes the GRC parameters */
928 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
929 {
930 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
931 
932 	if (!dev_data->grc.params_initialized) {
933 		qed_dbg_grc_set_params_default(p_hwfn);
934 		dev_data->grc.params_initialized = 1;
935 	}
936 }
937 
938 /* Sets pointer and size for the specified binary buffer type */
939 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
940 				enum bin_dbg_buffer_type buf_type,
941 				const u32 *ptr, u32 size)
942 {
943 	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
944 
945 	buf->ptr = (void *)ptr;
946 	buf->size = size;
947 }
948 
949 /* Initializes debug data for the specified device */
950 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
951 {
952 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
953 	u8 num_pfs = 0, max_pfs_per_port = 0;
954 
955 	if (dev_data->initialized)
956 		return DBG_STATUS_OK;
957 
958 	if (!s_app_ver)
959 		return DBG_STATUS_APP_VERSION_NOT_SET;
960 
961 	/* Set chip */
962 	if (QED_IS_K2(p_hwfn->cdev)) {
963 		dev_data->chip_id = CHIP_K2;
964 		dev_data->mode_enable[MODE_K2] = 1;
965 		dev_data->num_vfs = MAX_NUM_VFS_K2;
966 		num_pfs = MAX_NUM_PFS_K2;
967 		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
968 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
969 		dev_data->chip_id = CHIP_BB;
970 		dev_data->mode_enable[MODE_BB] = 1;
971 		dev_data->num_vfs = MAX_NUM_VFS_BB;
972 		num_pfs = MAX_NUM_PFS_BB;
973 		max_pfs_per_port = MAX_NUM_PFS_BB;
974 	} else {
975 		return DBG_STATUS_UNKNOWN_CHIP;
976 	}
977 
978 	/* Set HW type */
979 	dev_data->hw_type = HW_TYPE_ASIC;
980 	dev_data->mode_enable[MODE_ASIC] = 1;
981 
982 	/* Set port mode */
983 	switch (p_hwfn->cdev->num_ports_in_engine) {
984 	case 1:
985 		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
986 		break;
987 	case 2:
988 		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
989 		break;
990 	case 4:
991 		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
992 		break;
993 	}
994 
995 	/* Set 100G mode */
996 	if (QED_IS_CMT(p_hwfn->cdev))
997 		dev_data->mode_enable[MODE_100G] = 1;
998 
999 	/* Set number of ports */
1000 	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1001 	    dev_data->mode_enable[MODE_100G])
1002 		dev_data->num_ports = 1;
1003 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1004 		dev_data->num_ports = 2;
1005 	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1006 		dev_data->num_ports = 4;
1007 
1008 	/* Set number of PFs per port */
1009 	dev_data->num_pfs_per_port = min_t(u32,
1010 					   num_pfs / dev_data->num_ports,
1011 					   max_pfs_per_port);
1012 
1013 	/* Initializes the GRC parameters */
1014 	qed_dbg_grc_init_params(p_hwfn);
1015 
1016 	dev_data->use_dmae = true;
1017 	dev_data->initialized = 1;
1018 
1019 	return DBG_STATUS_OK;
1020 }
1021 
1022 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
1023 					     enum block_id block_id)
1024 {
1025 	const struct dbg_block *dbg_block;
1026 
1027 	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
1028 	return dbg_block + block_id;
1029 }
1030 
1031 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
1032 							       *p_hwfn,
1033 							       enum block_id
1034 							       block_id)
1035 {
1036 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1037 
1038 	return (const struct dbg_block_chip *)
1039 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
1040 	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
1041 }
1042 
1043 static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
1044 							 *p_hwfn,
1045 							 u8 reset_reg_id)
1046 {
1047 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1048 
1049 	return (const struct dbg_reset_reg *)
1050 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
1051 	    reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
1052 }
1053 
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 * On any failure (bad size read from the chip), fw_info is left zeroed.
 */
static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u8 storm_id, struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof(fw_info_location));

	/* Copy the location struct dword-by-dword via GRC reads */
	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM */
	size = le32_to_cpu(fw_info_location.size);
	/* Sanity check the chip-reported size before copying into fw_info */
	if (!size || size > sizeof(*fw_info))
		return;

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
}
1093 
/* Copies the given NULL-terminated string into the dump buffer (only when
 * dump is set). Returns the number of bytes the string occupies, including
 * the terminating NULL.
 */
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
	u32 len_with_nul = (u32)strlen(str) + 1;

	if (dump)
		memcpy(dump_buf, str, len_with_nul);

	return len_with_nul;
}
1104 
/* Writes zero padding so the running byte offset becomes dword-aligned.
 * Returns the number of padding bytes (0-3); nothing is written when dump
 * is false or when the offset is already aligned.
 */
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
	u8 pad_size = (u8)((BYTES_IN_DWORD - byte_offset) & 0x3);

	if (dump && pad_size)
		memset(dump_buf, 0, pad_size);

	return pad_size;
}
1120 
1121 /* Writes the specified string param to the specified buffer.
1122  * Returns the dumped size in dwords.
1123  */
1124 static u32 qed_dump_str_param(u32 *dump_buf,
1125 			      bool dump,
1126 			      const char *param_name, const char *param_val)
1127 {
1128 	char *char_buf = (char *)dump_buf;
1129 	u32 offset = 0;
1130 
1131 	/* Dump param name */
1132 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1133 
1134 	/* Indicate a string param value */
1135 	if (dump)
1136 		*(char_buf + offset) = 1;
1137 	offset++;
1138 
1139 	/* Dump param value */
1140 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1141 
1142 	/* Align buffer to next dword */
1143 	offset += qed_dump_align(char_buf + offset, dump, offset);
1144 
1145 	return BYTES_TO_DWORDS(offset);
1146 }
1147 
1148 /* Writes the specified numeric param to the specified buffer.
1149  * Returns the dumped size in dwords.
1150  */
1151 static u32 qed_dump_num_param(u32 *dump_buf,
1152 			      bool dump, const char *param_name, u32 param_val)
1153 {
1154 	char *char_buf = (char *)dump_buf;
1155 	u32 offset = 0;
1156 
1157 	/* Dump param name */
1158 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1159 
1160 	/* Indicate a numeric param value */
1161 	if (dump)
1162 		*(char_buf + offset) = 0;
1163 	offset++;
1164 
1165 	/* Align buffer to next dword */
1166 	offset += qed_dump_align(char_buf + offset, dump, offset);
1167 
1168 	/* Dump param value (and change offset from bytes to dwords) */
1169 	offset = BYTES_TO_DWORDS(offset);
1170 	if (dump)
1171 		*(dump_buf + offset) = param_val;
1172 	offset++;
1173 
1174 	return offset;
1175 }
1176 
/* Reads the FW version and writes it as a param to the specified buffer.
 * Dumps three params: "fw-version" (string), "fw-image" (string) and
 * "fw-timestamp" (numeric). Returns the dumped size in dwords.
 */
static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };
	u32 offset = 0;

	/* Only touch the chip when actually dumping and FW version
	 * collection was not disabled via the NO_FW_VER GRC param;
	 * otherwise the "empty" placeholder strings are dumped.
	 */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW info from chip */
		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);

		/* Create FW version/image strings */
		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
			     "%d_%d_%d_%d", fw_info.ver.num.major,
			     fw_info.ver.num.minor, fw_info.ver.num.rev,
			     fw_info.ver.num.eng) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid FW version string\n");
		/* Map the image ID to a human-readable image name */
		switch (fw_info.ver.image_id) {
		case FW_IMG_KUKU:
			strcpy(fw_img_str, "kuku");
			break;
		case FW_IMG_MAIN:
			strcpy(fw_img_str, "main");
			break;
		case FW_IMG_L2B:
			strcpy(fw_img_str, "l2b");
			break;
		default:
			strcpy(fw_img_str, "unknown");
			break;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
				     le32_to_cpu(fw_info.ver.timestamp));

	return offset;
}
1226 
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords. When not dumping, or when FW version
 * collection is disabled, the "empty" placeholder string is dumped instead.
 */
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;

		/* Find MCP public data GRC address. Needs to be ORed with
		 * MCP_REG_SCRATCH due to a HW bug.
		 */
		public_data_addr = qed_rd(p_hwfn,
					  p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = qed_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		/* offsize is in dword units relative to the scratchpad */
		global_section_addr =
			MCP_REG_SCRATCH +
			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = qed_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Dump MFW version param - one version byte per dotted
		 * component, most significant byte first.
		 */
		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid MFW version string\n");
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
1275 
1276 /* Reads the chip revision from the chip and writes it as a param to the
1277  * specified buffer. Returns the dumped size in dwords.
1278  */
1279 static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1280 					struct qed_ptt *p_ptt,
1281 					u32 *dump_buf, bool dump)
1282 {
1283 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1284 	char param_str[3] = "??";
1285 
1286 	if (dev_data->hw_type == HW_TYPE_ASIC) {
1287 		u32 chip_rev, chip_metal;
1288 
1289 		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1290 		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1291 
1292 		param_str[0] = 'a' + (u8)chip_rev;
1293 		param_str[1] = '0' + (u8)chip_metal;
1294 	}
1295 
1296 	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1297 }
1298 
/* Writes a section header to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	/* A section header is simply a numeric param whose name is the
	 * section name and whose value is the section's param count.
	 */
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
1307 
1308 /* Writes the common global params to the specified buffer.
1309  * Returns the dumped size in dwords.
1310  */
1311 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1312 					 struct qed_ptt *p_ptt,
1313 					 u32 *dump_buf,
1314 					 bool dump,
1315 					 u8 num_specific_global_params)
1316 {
1317 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1318 	char sw_platform_str[MAX_SW_PLTAFORM_STR_SIZE];
1319 	u32 offset = 0;
1320 	u8 num_params;
1321 
1322 	/* Dump global params section header */
1323 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1324 		(dev_data->chip_id == CHIP_BB ? 1 : 0);
1325 	offset += qed_dump_section_hdr(dump_buf + offset,
1326 				       dump, "global_params", num_params);
1327 
1328 	/* Store params */
1329 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1330 	offset += qed_dump_mfw_ver_param(p_hwfn,
1331 					 p_ptt, dump_buf + offset, dump);
1332 	offset += qed_dump_chip_revision_param(p_hwfn,
1333 					       p_ptt, dump_buf + offset, dump);
1334 	offset += qed_dump_num_param(dump_buf + offset,
1335 				     dump, "tools-version", TOOLS_VERSION);
1336 	offset += qed_dump_str_param(dump_buf + offset,
1337 				     dump,
1338 				     "chip",
1339 				     s_chip_defs[dev_data->chip_id].name);
1340 	offset += qed_dump_str_param(dump_buf + offset,
1341 				     dump,
1342 				     "platform",
1343 				     s_hw_type_defs[dev_data->hw_type].name);
1344 	offset += qed_dump_str_param(dump_buf + offset,
1345 				     dump, "sw-platform", sw_platform_str);
1346 	offset += qed_dump_num_param(dump_buf + offset,
1347 				     dump, "pci-func", p_hwfn->abs_pf_id);
1348 	offset += qed_dump_num_param(dump_buf + offset,
1349 				     dump, "epoch", qed_get_epoch_time());
1350 	if (dev_data->chip_id == CHIP_BB)
1351 		offset += qed_dump_num_param(dump_buf + offset,
1352 					     dump, "path", QED_PATH_ID(p_hwfn));
1353 
1354 	return offset;
1355 }
1356 
1357 /* Writes the "last" section (including CRC) to the specified buffer at the
1358  * given offset. Returns the dumped size in dwords.
1359  */
1360 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1361 {
1362 	u32 start_offset = offset;
1363 
1364 	/* Dump CRC section header */
1365 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1366 
1367 	/* Calculate CRC32 and add it to the dword after the "last" section */
1368 	if (dump)
1369 		*(dump_buf + offset) = ~crc32(0xffffffff,
1370 					      (u8 *)dump_buf,
1371 					      DWORDS_TO_BYTES(offset));
1372 
1373 	offset++;
1374 
1375 	return offset - start_offset;
1376 }
1377 
/* Update blocks reset state: reads all reset registers once, then refreshes
 * dev_data->block_in_reset for every physical block that has a reset bit.
 */
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
	u8 rst_reg_id;
	u32 blk_id;

	/* Read reset registers (removed registers keep their zero value) */
	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
		const struct dbg_reset_reg *rst_reg;
		bool rst_reg_removed;
		u32 rst_reg_addr;

		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
		rst_reg_removed = GET_FIELD(rst_reg->data,
					    DBG_RESET_REG_IS_REMOVED);
		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
							 DBG_RESET_REG_ADDR));

		if (!rst_reg_removed)
			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
						     rst_reg_addr);
	}

	/* Check if blocks are in reset: a cleared bit in the reset register
	 * means the corresponding block is held in reset.
	 */
	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
		const struct dbg_block_chip *blk;
		bool has_rst_reg;
		bool is_removed;

		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		has_rst_reg = GET_FIELD(blk->flags,
					DBG_BLOCK_CHIP_HAS_RESET_REG);

		if (!is_removed && has_rst_reg)
			dev_data->block_in_reset[blk_id] =
			    !(reg_val[blk->reset_reg_id] &
			      BIT(blk->reset_reg_bit_offset));
	}
}
1421 
/* is_mode_match recursive function: evaluates the modes-tree expression
 * starting at *modes_buf_offset. Advances *modes_buf_offset past the whole
 * consumed sub-tree as a side effect.
 */
static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
				  u16 *modes_buf_offset, u8 rec_depth)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 *dbg_array;
	bool arg1, arg2;
	u8 tree_val;

	/* Guard against a corrupt tree causing unbounded recursion */
	if (rec_depth > MAX_RECURSION_DEPTH) {
		DP_NOTICE(p_hwfn,
			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
		return false;
	}

	/* Get next element from modes tree buffer */
	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	tree_val = dbg_array[(*modes_buf_offset)++];

	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return !qed_is_mode_match_rec(p_hwfn,
					      modes_buf_offset, rec_depth + 1);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		/* Both operands are evaluated unconditionally (no short
		 * circuit) so that *modes_buf_offset always advances past
		 * the entire sub-tree regardless of the result.
		 */
		arg1 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		arg2 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
							arg2) : (arg1 && arg2);
	default:
		/* Leaf node: values >= MAX_INIT_MODE_OPS index mode_enable */
		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
	}
}
1457 
/* Returns true if the mode (specified using modes_buf_offset) is enabled */
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	/* Start the recursive tree walk at depth 0. Note that
	 * *modes_buf_offset is advanced past the consumed tree nodes.
	 */
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}
1463 
1464 /* Enable / disable the Debug block */
1465 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1466 				     struct qed_ptt *p_ptt, bool enable)
1467 {
1468 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1469 }
1470 
1471 /* Resets the Debug block */
1472 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1473 				    struct qed_ptt *p_ptt)
1474 {
1475 	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1476 	const struct dbg_reset_reg *reset_reg;
1477 	const struct dbg_block_chip *block;
1478 
1479 	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1480 	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1481 	reset_reg_addr =
1482 	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1483 
1484 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
1485 	new_reset_reg_val =
1486 	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1487 
1488 	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1489 	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1490 }
1491 
/* Enable / disable Debug Bus clients according to the specified mask
 * (1 = enable, 0 = disable).
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	/* Each bit of client_mask controls one Debug Bus client */
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
1500 
/* Configures the debug line of the specified block: selects the debug line,
 * sets the dword enable and right-shift masks, and the force-valid /
 * force-frame masks. Register addresses come from the per-chip block data
 * and are given in dwords.
 */
static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum block_id block_id,
				    u8 line_id,
				    u8 enable_mask,
				    u8 right_shift,
				    u8 force_valid_mask, u8 force_frame_mask)
{
	const struct dbg_block_chip *block =
		qed_get_dbg_block_per_chip(p_hwfn, block_id);

	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
	       line_id);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
	       enable_mask);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
	       right_shift);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
	       force_valid_mask);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
	       force_frame_mask);
}
1523 
/* Disable debug bus in all blocks */
static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id;

	/* Disable all blocks. Removed blocks and blocks currently held in
	 * reset are skipped (their registers cannot be accessed).
	 */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn,
					       (enum block_id)block_id);

		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_IS_REMOVED) ||
		    dev_data->block_in_reset[block_id])
			continue;

		/* Disable debug bus */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
			u32 dbg_en_addr =
				block_per_chip->dbg_dword_enable_reg_addr;
			u16 modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			bool eval_mode =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;

			/* Clear the dword-enable register only if the block's
			 * debug bus is valid in the current mode (or needs no
			 * mode evaluation at all).
			 */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_wr(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(dbg_en_addr),
				       0);
		}
	}
}
1562 
1563 /* Returns true if the specified entity (indicated by GRC param) should be
1564  * included in the dump, false otherwise.
1565  */
1566 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1567 				enum dbg_grc_params grc_param)
1568 {
1569 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
1570 }
1571 
1572 /* Returns the storm_id that matches the specified Storm letter,
1573  * or MAX_DBG_STORMS if invalid storm letter.
1574  */
1575 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1576 {
1577 	u8 storm_id;
1578 
1579 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1580 		if (s_storm_defs[storm_id].letter == storm_letter)
1581 			return (enum dbg_storms)storm_id;
1582 
1583 	return MAX_DBG_STORMS;
1584 }
1585 
1586 /* Returns true of the specified Storm should be included in the dump, false
1587  * otherwise.
1588  */
1589 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1590 				      enum dbg_storms storm)
1591 {
1592 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1593 }
1594 
/* Returns true if the specified memory should be included in the dump, false
 * otherwise. The decision cascades: Storm association first, then Big-RAM
 * membership, then a per-memory-group GRC param. Unknown groups default to
 * being included.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	const struct dbg_block *block;
	u8 i;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
			return false;
	}

	/* Big-RAM memories are governed by their Big-RAM's GRC param */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	/* Map the remaining memory groups to their controlling GRC param */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		/* CFC memories are dumped when either CFC or CM contexts
		 * are requested.
		 */
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		return true;
	}
}
1671 
1672 /* Stalls all Storms */
1673 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1674 				 struct qed_ptt *p_ptt, bool stall)
1675 {
1676 	u32 reg_addr;
1677 	u8 storm_id;
1678 
1679 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1680 		if (!qed_grc_is_storm_included(p_hwfn,
1681 					       (enum dbg_storms)storm_id))
1682 			continue;
1683 
1684 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1685 		    SEM_FAST_REG_STALL_0;
1686 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
1687 	}
1688 
1689 	msleep(STALL_DELAY_MS);
1690 }
1691 
/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
 * taken out of reset.
 */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, bool rbc_only)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;
	u32 i;

	/* Take RBCs out of reset. Entries with a zero reset value for this
	 * chip are skipped.
	 */
	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
			qed_wr(p_hwfn,
			       p_ptt,
			       s_rbc_reset_defs[i].reset_reg_addr +
			       RESET_REG_UNRESET_OFFSET,
			       s_rbc_reset_defs[i].reset_val[chip_id]);

	if (!rbc_only) {
		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
		u8 reset_reg_id;
		u32 block_id;

		/* Fill reset regs values: accumulate the un-reset bits of all
		 * blocks that are flagged UNRESET_BEFORE_DUMP, so each reset
		 * register is written only once below.
		 */
		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
			bool is_removed, has_reset_reg, unreset_before_dump;
			const struct dbg_block_chip *block;

			block = qed_get_dbg_block_per_chip(p_hwfn,
							   (enum block_id)
							   block_id);
			is_removed =
			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
			has_reset_reg =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_HAS_RESET_REG);
			unreset_before_dump =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);

			if (!is_removed && has_reset_reg && unreset_before_dump)
				reg_val[block->reset_reg_id] |=
				    BIT(block->reset_reg_bit_offset);
		}

		/* Write reset registers (skipping removed ones and those with
		 * no bits to set)
		 */
		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		     reset_reg_id++) {
			const struct dbg_reset_reg *reset_reg;
			u32 reset_reg_addr;

			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

			if (GET_FIELD
			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
				continue;

			if (reg_val[reset_reg_id]) {
				reset_reg_addr =
				    GET_FIELD(reset_reg->data,
					      DBG_RESET_REG_ADDR);
				qed_wr(p_hwfn,
				       p_ptt,
				       DWORDS_TO_BYTES(reset_reg_addr) +
				       RESET_REG_UNRESET_OFFSET,
				       reg_val[reset_reg_id]);
			}
		}
	}
}
1763 
1764 /* Returns the attention block data of the specified block */
1765 static const struct dbg_attn_block_type_data *
1766 qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1767 			enum block_id block_id, enum dbg_attn_type attn_type)
1768 {
1769 	const struct dbg_attn_block *base_attn_block_arr =
1770 	    (const struct dbg_attn_block *)
1771 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1772 
1773 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
1774 }
1775 
1776 /* Returns the attention registers of the specified block */
1777 static const struct dbg_attn_reg *
1778 qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1779 			enum block_id block_id, enum dbg_attn_type attn_type,
1780 			u8 *num_attn_regs)
1781 {
1782 	const struct dbg_attn_block_type_data *block_type_data =
1783 	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1784 
1785 	*num_attn_regs = block_type_data->num_regs;
1786 
1787 	return (const struct dbg_attn_reg *)
1788 		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1789 		block_type_data->regs_offset;
1790 }
1791 
/* For each block, clear the status of all parities by reading each parity
 * status-clear register (the read itself clears the latched status).
 */
static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u32 block_id, sts_clr_address;
	u8 reg_idx, num_attn_regs;

	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Registers of blocks in reset cannot be accessed */
		if (dev_data->block_in_reset[block_id])
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);

			sts_clr_address = reg_data->sts_clr_address;
			/* If Mode match: clear parity status. The read's
			 * return value is intentionally discarded - reading
			 * the register is what clears the status.
			 */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_rd(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(sts_clr_address));
		}
	}
}
1832 
/* Finds the meta data image in NVRAM via the MCP GET_FILE_ATT mailbox
 * command. On success writes the image's NVRAM offset and size (both in
 * bytes) to the output parameters. The size must be dword-aligned.
 */
static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
					    struct qed_ptt *p_ptt,
					    u32 image_type,
					    u32 *nvram_offset_bytes,
					    u32 *nvram_size_bytes)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
	struct mcp_file_att file_att;
	int nvm_result;

	/* Call NVRAM get file command */
	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
					p_ptt,
					DRV_MSG_CODE_NVM_GET_FILE_ATT,
					image_type,
					&ret_mcp_resp,
					&ret_mcp_param,
					&ret_txn_size,
					(u32 *)&file_att, false);

	/* Check response: both the driver-level result and the MFW message
	 * code must indicate success.
	 */
	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
	    FW_MSG_CODE_NVM_OK)
		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;

	/* Update return values */
	*nvram_offset_bytes = file_att.nvm_start_addr;
	*nvram_size_bytes = file_att.len;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
		   image_type, *nvram_offset_bytes, *nvram_size_bytes);

	/* Check alignment: image size must be a whole number of dwords */
	if (*nvram_size_bytes & 0x3)
		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;

	return DBG_STATUS_OK;
}
1874 
/* Reads data from NVRAM into ret_buf, in chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes per MCP mailbox transaction.
 */
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 nvram_offset_bytes,
				      u32 nvram_size_bytes, u32 *ret_buf)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
	s32 bytes_left = nvram_size_bytes;
	u32 read_offset = 0, param = 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "nvram_read: reading image of size %d bytes from NVRAM\n",
		   nvram_size_bytes);

	do {
		/* Cap each transaction at the MCP mailbox buffer size */
		bytes_to_copy =
		    (bytes_left >
		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;

		/* Call NVRAM read command */
		SET_MFW_FIELD(param,
			      DRV_MB_PARAM_NVM_OFFSET,
			      nvram_offset_bytes + read_offset);
		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
				       &ret_mcp_resp,
				       &ret_mcp_param, &ret_read_size,
				       (u32 *)((u8 *)ret_buf + read_offset),
				       false))
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Check response */
		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Update read offset by what the MFW actually returned,
		 * which may be less than what was requested.
		 */
		read_offset += ret_read_size;
		bytes_left -= ret_read_size;
	} while (bytes_left > 0);

	return DBG_STATUS_OK;
}
1919 
1920 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1921  * the following parameters are dumped:
1922  * - count: no. of dumped entries
1923  * - split_type: split type
1924  * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1925  * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1926  */
1927 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1928 				 bool dump,
1929 				 u32 num_reg_entries,
1930 				 enum init_split_types split_type,
1931 				 u8 split_id, const char *reg_type_name)
1932 {
1933 	u8 num_params = 2 +
1934 	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1935 	u32 offset = 0;
1936 
1937 	offset += qed_dump_section_hdr(dump_buf + offset,
1938 				       dump, "grc_regs", num_params);
1939 	offset += qed_dump_num_param(dump_buf + offset,
1940 				     dump, "count", num_reg_entries);
1941 	offset += qed_dump_str_param(dump_buf + offset,
1942 				     dump, "split",
1943 				     s_split_type_defs[split_type].name);
1944 	if (split_type != SPLIT_TYPE_NONE)
1945 		offset += qed_dump_num_param(dump_buf + offset,
1946 					     dump, "id", split_id);
1947 	if (reg_type_name)
1948 		offset += qed_dump_str_param(dump_buf + offset,
1949 					     dump, "type", reg_type_name);
1950 
1951 	return offset;
1952 }
1953 
1954 /* Reads the specified registers into the specified buffer.
1955  * The addr and len arguments are specified in dwords.
1956  */
1957 void qed_read_regs(struct qed_hwfn *p_hwfn,
1958 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1959 {
1960 	u32 i;
1961 
1962 	for (i = 0; i < len; i++)
1963 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1964 }
1965 
1966 /* Dumps the GRC registers in the specified address range.
1967  * Returns the dumped size in dwords.
1968  * The addr and len arguments are specified in dwords.
1969  */
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
				   u8 split_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
	bool read_using_dmae = false;
	u32 thresh;

	/* Size-only pass: just report how many dwords would be dumped */
	if (!dump)
		return len;

	/* Translate (split_type, split_id) into the concrete port/PF/VF
	 * identifiers used below for DMAE params or GRC pretend.
	 */
	switch (split_type) {
	case SPLIT_TYPE_PORT:
		port_id = split_id;
		break;
	case SPLIT_TYPE_PF:
		pf_id = split_id;
		break;
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);
		break;
	case SPLIT_TYPE_VF:
		vf_id = split_id;
		break;
	default:
		break;
	}

	/* Try reading using DMAE - only for ranges at least as long as the
	 * HW-type DMAE threshold, or for wide-bus registers when
	 * PROTECT_WIDE_BUS is set. VF splits always use GRC.
	 */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct qed_dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			dmae_params.port_id = port_id;
			break;
		case SPLIT_TYPE_PF:
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pfid = pf_id;
			break;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pfid = pf_id;
			break;
		default:
			break;
		}

		/* Execute DMAE command */
		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
						     p_ptt,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
						     len, &dmae_params);
		if (!read_using_dmae) {
			/* DMAE failed - disable it for the remainder of the
			 * dump and fall through to GRC reads below.
			 */
			dev_data->use_dmae = 0;
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Failed reading from chip using DMAE, using GRC instead\n");
		}
	}

	if (read_using_dmae)
		goto print_log;

	/* If not read using DMAE, read using GRC */

	/* Set pretend. Cached: skipped when the currently-active pretend
	 * already matches the requested split, to avoid redundant writes.
	 */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			qed_port_pretend(p_hwfn, p_ptt, port_id);
			break;
		case SPLIT_TYPE_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			break;
		case SPLIT_TYPE_VF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
					  vf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		default:
			break;
		}

		/* Remember the active pretend for the caching check above */
		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;
	}

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

print_log:
	/* Emit a progress message whenever the running total of dumped
	 * registers crosses a multiple of the HW-type log threshold.
	 */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Dumped %d registers...\n", dev_data->num_regs_read);

	return len;
}
2101 
2102 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2103  * The addr and len arguments are specified in dwords.
2104  */
2105 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2106 				      bool dump, u32 addr, u32 len)
2107 {
2108 	if (dump)
2109 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2110 
2111 	return 1;
2112 }
2113 
2114 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2115  * The addr and len arguments are specified in dwords.
2116  */
2117 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2118 				  struct qed_ptt *p_ptt,
2119 				  u32 *dump_buf,
2120 				  bool dump, u32 addr, u32 len, bool wide_bus,
2121 				  enum init_split_types split_type, u8 split_id)
2122 {
2123 	u32 offset = 0;
2124 
2125 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2126 	offset += qed_grc_dump_addr_range(p_hwfn,
2127 					  p_ptt,
2128 					  dump_buf + offset,
2129 					  dump, addr, len, wide_bus,
2130 					  split_type, split_id);
2131 
2132 	return offset;
2133 }
2134 
2135 /* Dumps GRC registers sequence with skip cycle.
2136  * Returns the dumped size in dwords.
2137  * - addr:	start GRC address in dwords
2138  * - total_len:	total no. of dwords to dump
2139  * - read_len:	no. consecutive dwords to read
2140  * - skip_len:	no. of dwords to skip (and fill with zeros)
2141  */
2142 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2143 				       struct qed_ptt *p_ptt,
2144 				       u32 *dump_buf,
2145 				       bool dump,
2146 				       u32 addr,
2147 				       u32 total_len,
2148 				       u32 read_len, u32 skip_len)
2149 {
2150 	u32 offset = 0, reg_offset = 0;
2151 
2152 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2153 
2154 	if (!dump)
2155 		return offset + total_len;
2156 
2157 	while (reg_offset < total_len) {
2158 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2159 
2160 		offset += qed_grc_dump_addr_range(p_hwfn,
2161 						  p_ptt,
2162 						  dump_buf + offset,
2163 						  dump,  addr, curr_len, false,
2164 						  SPLIT_TYPE_NONE, 0);
2165 		reg_offset += curr_len;
2166 		addr += curr_len;
2167 
2168 		if (reg_offset < total_len) {
2169 			curr_len = min_t(u32, skip_len, total_len - skip_len);
2170 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2171 			offset += curr_len;
2172 			reg_offset += curr_len;
2173 			addr += curr_len;
2174 		}
2175 	}
2176 
2177 	return offset;
2178 }
2179 
2180 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     enum init_split_types split_type,
				     u8 split_id,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	/* Walk the binary input array: a condition header followed by
	 * data_size register entries, repeated until the array ends.
	 * NOTE(review): the pointer arithmetic below advances in units of
	 * the header struct while input_offset counts dwords - assumes the
	 * struct is exactly one dword; confirm against qed_dbg_hsi.h.
	 */
	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole entry group if its mode does not match or
		 * its block was not requested by the caller.
		 */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		/* Dump each register sequence under this condition header */
		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;
			u32 addr, len;
			bool wide_bus;

			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 len,
							 wide_bus,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
2243 
2244 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2245 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2246 				   struct qed_ptt *p_ptt,
2247 				   struct virt_mem_desc input_regs_arr,
2248 				   u32 *dump_buf,
2249 				   bool dump,
2250 				   bool block_enable[MAX_BLOCK_ID],
2251 				   enum init_split_types split_type,
2252 				   u8 split_id, const char *reg_type_name)
2253 {
2254 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2255 	enum init_split_types hdr_split_type = split_type;
2256 	u32 num_dumped_reg_entries, offset;
2257 	u8 hdr_split_id = split_id;
2258 
2259 	/* In PORT_PF split type, print a port split header */
2260 	if (split_type == SPLIT_TYPE_PORT_PF) {
2261 		hdr_split_type = SPLIT_TYPE_PORT;
2262 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2263 	}
2264 
2265 	/* Calculate register dump header size (and skip it for now) */
2266 	offset = qed_grc_dump_regs_hdr(dump_buf,
2267 				       false,
2268 				       0,
2269 				       hdr_split_type,
2270 				       hdr_split_id, reg_type_name);
2271 
2272 	/* Dump registers */
2273 	offset += qed_grc_dump_regs_entries(p_hwfn,
2274 					    p_ptt,
2275 					    input_regs_arr,
2276 					    dump_buf + offset,
2277 					    dump,
2278 					    split_type,
2279 					    split_id,
2280 					    block_enable,
2281 					    &num_dumped_reg_entries);
2282 
2283 	/* Write register dump header */
2284 	if (dump && num_dumped_reg_entries > 0)
2285 		qed_grc_dump_regs_hdr(dump_buf,
2286 				      dump,
2287 				      num_dumped_reg_entries,
2288 				      hdr_split_type,
2289 				      hdr_split_id, reg_type_name);
2290 
2291 	return num_dumped_reg_entries > 0 ? offset : 0;
2292 }
2293 
2294 /* Dumps registers according to the input registers array. Returns the dumped
2295  * size in dwords.
2296  */
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	/* The input array is a sequence of split headers, each followed by
	 * split_data_size dwords of register entries for that split type.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;
		u8 split_id;

		/* NOTE(review): struct-pointer arithmetic with a dword
		 * counter - assumes the split header is one dword; confirm
		 * against qed_dbg_hsi.h.
		 */
		split_hdr =
		    (const struct dbg_dump_split_hdr *)
		    dbg_buf->ptr + input_offset++;
		split_type =
		    GET_FIELD(split_hdr->hdr,
			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		    input_offset;
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Determine how many split instances to iterate over */
		switch (split_type) {
		case SPLIT_TYPE_NONE:
			split_count = 1;
			break;
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
			break;
		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			break;
		case SPLIT_TYPE_VF:
			split_count = dev_data->num_vfs;
			break;
		default:
			/* Unknown split type - abort the whole dump */
			return 0;
		}

		/* Dump the same entry group once per split instance */
		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump, block_enable,
							  split_type,
							  split_id,
							  reg_type_name);

		input_offset += split_data_size;
	}

	/* Cancel pretends (pretend to original PF) and reset the cached
	 * pretend state kept in dev_data.
	 */
	if (dump) {
		qed_fid_pretend(p_hwfn, p_ptt,
				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					    p_hwfn->rel_pf_id));
		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
		dev_data->pretend.split_id = 0;
	}

	return offset;
}
2372 
2373 /* Dump reset registers. Returns the dumped size in dwords. */
2374 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2375 				   struct qed_ptt *p_ptt,
2376 				   u32 *dump_buf, bool dump)
2377 {
2378 	u32 offset = 0, num_regs = 0;
2379 	u8 reset_reg_id;
2380 
2381 	/* Calculate header size */
2382 	offset += qed_grc_dump_regs_hdr(dump_buf,
2383 					false,
2384 					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2385 
2386 	/* Write reset registers */
2387 	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2388 	     reset_reg_id++) {
2389 		const struct dbg_reset_reg *reset_reg;
2390 		u32 reset_reg_addr;
2391 
2392 		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2393 
2394 		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2395 			continue;
2396 
2397 		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2398 		offset += qed_grc_dump_reg_entry(p_hwfn,
2399 						 p_ptt,
2400 						 dump_buf + offset,
2401 						 dump,
2402 						 reset_reg_addr,
2403 						 1, false, SPLIT_TYPE_NONE, 0);
2404 		num_regs++;
2405 	}
2406 
2407 	/* Write header */
2408 	if (dump)
2409 		qed_grc_dump_regs_hdr(dump_buf,
2410 				      true, num_regs, SPLIT_TYPE_NONE,
2411 				      0, "RESET_REGS");
2412 
2413 	return offset;
2414 }
2415 
2416 /* Dump registers that are modified during GRC Dump and therefore must be
2417  * dumped first. Returns the dumped size in dwords.
2418  */
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, stall_regs_offset;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;
	u32 num_reg_entries = 0;

	/* Write empty header for attention registers; it is overwritten
	 * below once the entry count is known.
	 */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write parity registers (mask + status per attention register) */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks held in reset cannot be read during a real dump */
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;
			u32 addr;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode &&
			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: read & dump registers - first the
			 * mask register, then the status register.
			 */
			addr = reg_data->mask_address;
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			addr = GET_FIELD(reg_data->data,
					 DBG_ATTN_REG_STS_ADDRESS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			num_reg_entries += 2;
		}
	}

	/* Overwrite header for attention registers with the final count */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write empty header for stall registers; overwritten below */
	stall_regs_offset = offset;
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, SPLIT_TYPE_NONE, 0, "REGS");

	/* Write Storm stall status registers (one per Storm, skipping
	 * Storms whose SEM block is in reset during a real dump).
	 */
	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
	     storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 addr;

		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
			continue;

		addr =
		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				    SEM_FAST_REG_STALLED);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 addr,
						 1,
						 false, SPLIT_TYPE_NONE, 0);
		num_reg_entries++;
	}

	/* Overwrite header for stall registers with the final count */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "REGS");

	return offset;
}
2526 
2527 /* Dumps registers that can't be represented in the debug arrays */
2528 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2529 				     struct qed_ptt *p_ptt,
2530 				     u32 *dump_buf, bool dump)
2531 {
2532 	u32 offset = 0, addr;
2533 
2534 	offset += qed_grc_dump_regs_hdr(dump_buf,
2535 					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2536 
2537 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2538 	 * skipped).
2539 	 */
2540 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2541 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2542 					      p_ptt,
2543 					      dump_buf + offset,
2544 					      dump,
2545 					      addr,
2546 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2547 					      7,
2548 					      1);
2549 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2550 	offset +=
2551 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2552 					p_ptt,
2553 					dump_buf + offset,
2554 					dump,
2555 					addr,
2556 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2557 					7,
2558 					1);
2559 
2560 	return offset;
2561 }
2562 
2563 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2564  * dwords. The following parameters are dumped:
2565  * - name:	   dumped only if it's not NULL.
2566  * - addr:	   in dwords, dumped only if name is NULL.
2567  * - len:	   in dwords, always dumped.
2568  * - width:	   dumped if it's not zero.
2569  * - packed:	   dumped only if it's not false.
2570  * - mem_group:	   always dumped.
2571  * - is_storm:	   true only if the memory is related to a Storm.
2572  * - storm_letter: valid only if is_storm is true.
2573  *
2574  */
2575 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2576 				u32 *dump_buf,
2577 				bool dump,
2578 				const char *name,
2579 				u32 addr,
2580 				u32 len,
2581 				u32 bit_width,
2582 				bool packed,
2583 				const char *mem_group, char storm_letter)
2584 {
2585 	u8 num_params = 3;
2586 	u32 offset = 0;
2587 	char buf[64];
2588 
2589 	if (!len)
2590 		DP_NOTICE(p_hwfn,
2591 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2592 
2593 	if (bit_width)
2594 		num_params++;
2595 	if (packed)
2596 		num_params++;
2597 
2598 	/* Dump section header */
2599 	offset += qed_dump_section_hdr(dump_buf + offset,
2600 				       dump, "grc_mem", num_params);
2601 
2602 	if (name) {
2603 		/* Dump name */
2604 		if (storm_letter) {
2605 			strcpy(buf, "?STORM_");
2606 			buf[0] = storm_letter;
2607 			strcpy(buf + strlen(buf), name);
2608 		} else {
2609 			strcpy(buf, name);
2610 		}
2611 
2612 		offset += qed_dump_str_param(dump_buf + offset,
2613 					     dump, "name", buf);
2614 	} else {
2615 		/* Dump address */
2616 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2617 
2618 		offset += qed_dump_num_param(dump_buf + offset,
2619 					     dump, "addr", addr_in_bytes);
2620 	}
2621 
2622 	/* Dump len */
2623 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2624 
2625 	/* Dump bit width */
2626 	if (bit_width)
2627 		offset += qed_dump_num_param(dump_buf + offset,
2628 					     dump, "width", bit_width);
2629 
2630 	/* Dump packed */
2631 	if (packed)
2632 		offset += qed_dump_num_param(dump_buf + offset,
2633 					     dump, "packed", 1);
2634 
2635 	/* Dump reg type */
2636 	if (storm_letter) {
2637 		strcpy(buf, "?STORM_");
2638 		buf[0] = storm_letter;
2639 		strcpy(buf + strlen(buf), mem_group);
2640 	} else {
2641 		strcpy(buf, mem_group);
2642 	}
2643 
2644 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2645 
2646 	return offset;
2647 }
2648 
2649 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2650  * Returns the dumped size in dwords.
2651  * The addr and len arguments are specified in dwords.
2652  */
2653 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2654 			    struct qed_ptt *p_ptt,
2655 			    u32 *dump_buf,
2656 			    bool dump,
2657 			    const char *name,
2658 			    u32 addr,
2659 			    u32 len,
2660 			    bool wide_bus,
2661 			    u32 bit_width,
2662 			    bool packed,
2663 			    const char *mem_group, char storm_letter)
2664 {
2665 	u32 offset = 0;
2666 
2667 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2668 				       dump_buf + offset,
2669 				       dump,
2670 				       name,
2671 				       addr,
2672 				       len,
2673 				       bit_width,
2674 				       packed, mem_group, storm_letter);
2675 	offset += qed_grc_dump_addr_range(p_hwfn,
2676 					  p_ptt,
2677 					  dump_buf + offset,
2678 					  dump, addr, len, wide_bus,
2679 					  SPLIT_TYPE_NONE, 0);
2680 
2681 	return offset;
2682 }
2683 
2684 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct virt_mem_desc input_mems_arr,
				    u32 *dump_buf, bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	/* Walk the binary input array: a condition header followed by
	 * fixed-size memory entries, repeated until the array ends.
	 */
	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		/* NOTE(review): struct-pointer arithmetic with a dword
		 * counter - assumes the header is one dword; confirm
		 * against qed_dbg_hsi.h.
		 */
		cond_hdr =
		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
		    input_offset++;
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip the whole group if its mode does not match */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries;
		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem =
			    (const struct dbg_dump_mem *)((u32 *)
							  input_mems_arr.ptr
							  + input_offset);
			const struct dbg_block *block;
			char storm_letter = 0;
			u32 mem_addr, mem_len;
			bool mem_wide_bus;
			u8 mem_group_id;

			/* Validate the group ID before indexing the
			 * s_mem_group_names table below.
			 */
			mem_group_id = GET_FIELD(mem->dword0,
						 DBG_DUMP_MEM_MEM_GROUP_ID);
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
				return 0;
			}

			/* Honor the per-block / per-group dump filters */
			if (!qed_grc_is_mem_included(p_hwfn,
						     (enum block_id)
						     cond_hdr->block_id,
						     mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1,
						 DBG_DUMP_MEM_WIDE_BUS);

			block = get_dbg_block(p_hwfn,
					      cond_hdr->block_id);

			/* If memory is associated with Storm,
			 * update storm details
			 */
			if (block->associated_storm_letter)
				storm_letter = block->associated_storm_letter;

			/* Dump memory (stored by address - name is NULL) */
			offset += qed_grc_dump_mem(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						NULL,
						mem_addr,
						mem_len,
						mem_wide_bus,
						0,
						false,
						s_mem_group_names[mem_group_id],
						storm_letter);
		}
	}

	return offset;
}
2777 
2778 /* Dumps GRC memories according to the input array dump_mem.
2779  * Returns the dumped size in dwords.
2780  */
static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
	u32 offset = 0, input_offset = 0;

	/* The input array is a sequence of split headers, each followed by
	 * split_data_size dwords of memory entries for that split type.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_mems_arr;
		enum init_split_types split_type;
		u32 split_data_size;

		/* NOTE(review): struct-pointer arithmetic with a dword
		 * counter - assumes the split header is one dword; confirm
		 * against qed_dbg_hsi.h.
		 */
		split_hdr =
		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
		    input_offset++;
		split_type = GET_FIELD(split_hdr->hdr,
				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Only non-split memory entries are supported */
		if (split_type == SPLIT_TYPE_NONE)
			offset += qed_grc_dump_mem_entries(p_hwfn,
							   p_ptt,
							   curr_input_mems_arr,
							   dump_buf + offset,
							   dump);
		else
			DP_NOTICE(p_hwfn,
				  "Dumping split memories is currently not supported\n");

		input_offset += split_data_size;
	}

	return offset;
}
2820 
2821 /* Dumps GRC context data for the specified Storm.
2822  * Returns the dumped size in dwords.
2823  * The lid_size argument is specified in quad-regs.
2824  */
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf,
				 bool dump,
				 const char *name,
				 u32 num_lids,
				 enum cm_ctx_types ctx_type, u8 storm_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, lid_size, total_size;
	u32 rd_reg_addr, offset = 0;

	/* Convert quad-regs to dwords */
	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;

	/* A zero lid size means this context type doesn't exist for this
	 * Storm/chip - nothing to dump.
	 */
	if (!lid_size)
		return 0;

	total_size = num_lids * lid_size;

	/* Bit width param is lid_size * 32 since lid_size is in dwords */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       name,
				       0,
				       total_size,
				       lid_size * 32,
				       false, name, storm->letter);

	if (!dump)
		return offset + total_size;

	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);

	/* Dump context data: for each lid and dword index, write a select
	 * value (dword index at bit 9, lid in the low bits) to the CM
	 * context write address, then read one dword back.
	 */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++) {
			qed_wr(p_hwfn,
			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  rd_reg_addr,
							  1,
							  false,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
2878 
2879 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2880 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2881 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2882 {
2883 	u32 offset = 0;
2884 	u8 storm_id;
2885 
2886 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2887 		if (!qed_grc_is_storm_included(p_hwfn,
2888 					       (enum dbg_storms)storm_id))
2889 			continue;
2890 
2891 		/* Dump Conn AG context size */
2892 		offset += qed_grc_dump_ctx_data(p_hwfn,
2893 						p_ptt,
2894 						dump_buf + offset,
2895 						dump,
2896 						"CONN_AG_CTX",
2897 						NUM_OF_LCIDS,
2898 						CM_CTX_CONN_AG, storm_id);
2899 
2900 		/* Dump Conn ST context size */
2901 		offset += qed_grc_dump_ctx_data(p_hwfn,
2902 						p_ptt,
2903 						dump_buf + offset,
2904 						dump,
2905 						"CONN_ST_CTX",
2906 						NUM_OF_LCIDS,
2907 						CM_CTX_CONN_ST, storm_id);
2908 
2909 		/* Dump Task AG context size */
2910 		offset += qed_grc_dump_ctx_data(p_hwfn,
2911 						p_ptt,
2912 						dump_buf + offset,
2913 						dump,
2914 						"TASK_AG_CTX",
2915 						NUM_OF_LTIDS,
2916 						CM_CTX_TASK_AG, storm_id);
2917 
2918 		/* Dump Task ST context size */
2919 		offset += qed_grc_dump_ctx_data(p_hwfn,
2920 						p_ptt,
2921 						dump_buf + offset,
2922 						dump,
2923 						"TASK_ST_CTX",
2924 						NUM_OF_LTIDS,
2925 						CM_CTX_TASK_ST, storm_id);
2926 	}
2927 
2928 	return offset;
2929 }
2930 
2931 #define VFC_STATUS_RESP_READY_BIT	0
2932 #define VFC_STATUS_BUSY_BIT		1
2933 #define VFC_STATUS_SENDING_CMD_BIT	2
2934 
2935 #define VFC_POLLING_DELAY_MS	1
2936 #define VFC_POLLING_COUNT		20
2937 
2938 /* Reads data from VFC. Returns the number of dwords read (0 on error).
2939  * Sizes are specified in dwords.
2940  */
static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct storm_defs *storm,
				      u32 *cmd_data,
				      u32 cmd_size,
				      u32 *addr_data,
				      u32 addr_size,
				      u32 resp_size, u32 *dump_buf)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 vfc_status, polling_ms, polling_count = 0, i;
	u32 reg_addr, sem_base;
	bool is_ready = false;

	sem_base = storm->sem_fast_mem_addr;
	/* Scale the polling delay by the HW-type delay factor (e.g. for
	 * emulation platforms).
	 */
	polling_ms = VFC_POLLING_DELAY_MS *
	    s_hw_type_defs[dev_data->hw_type].delay_factor;

	/* Write VFC command */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
		   cmd_data, cmd_size);

	/* Write VFC address (writing the address triggers the operation) */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_ADDR,
		   addr_data, addr_size);

	/* Read response, one dword at a time */
	for (i = 0; i < resp_size; i++) {
		/* Poll until ready.
		 * NOTE(review): polling_count is not reset per response
		 * dword, so VFC_POLLING_COUNT is a budget shared across the
		 * whole response - confirm this is intended.
		 */
		do {
			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
			qed_grc_dump_addr_range(p_hwfn,
						p_ptt,
						&vfc_status,
						true,
						BYTES_TO_DWORDS(reg_addr),
						1,
						false, SPLIT_TYPE_NONE, 0);
			is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);

			if (!is_ready) {
				/* Give up after the polling budget expires */
				if (polling_count++ == VFC_POLLING_COUNT)
					return 0;

				msleep(polling_ms);
			}
		} while (!is_ready);

		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
		qed_grc_dump_addr_range(p_hwfn,
					p_ptt,
					dump_buf + i,
					true,
					BYTES_TO_DWORDS(reg_addr),
					1, false, SPLIT_TYPE_NONE, 0);
	}

	return resp_size;
}
3004 
3005 /* Dump VFC CAM. Returns the dumped size in dwords. */
3006 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3007 				struct qed_ptt *p_ptt,
3008 				u32 *dump_buf, bool dump, u8 storm_id)
3009 {
3010 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3011 	struct storm_defs *storm = &s_storm_defs[storm_id];
3012 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3013 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3014 	u32 row, offset = 0;
3015 
3016 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3017 				       dump_buf + offset,
3018 				       dump,
3019 				       "vfc_cam",
3020 				       0,
3021 				       total_size,
3022 				       256,
3023 				       false, "vfc_cam", storm->letter);
3024 
3025 	if (!dump)
3026 		return offset + total_size;
3027 
3028 	/* Prepare CAM address */
3029 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3030 
3031 	/* Read VFC CAM data */
3032 	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
3033 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3034 		offset += qed_grc_dump_read_from_vfc(p_hwfn,
3035 						     p_ptt,
3036 						     storm,
3037 						     cam_cmd,
3038 						     VFC_CAM_CMD_DWORDS,
3039 						     cam_addr,
3040 						     VFC_CAM_ADDR_DWORDS,
3041 						     VFC_CAM_RESP_DWORDS,
3042 						     dump_buf + offset);
3043 	}
3044 
3045 	return offset;
3046 }
3047 
3048 /* Dump VFC RAM. Returns the dumped size in dwords. */
3049 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3050 				struct qed_ptt *p_ptt,
3051 				u32 *dump_buf,
3052 				bool dump,
3053 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3054 {
3055 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3056 	struct storm_defs *storm = &s_storm_defs[storm_id];
3057 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3058 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3059 	u32 row, offset = 0;
3060 
3061 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3062 				       dump_buf + offset,
3063 				       dump,
3064 				       ram_defs->mem_name,
3065 				       0,
3066 				       total_size,
3067 				       256,
3068 				       false,
3069 				       ram_defs->type_name,
3070 				       storm->letter);
3071 
3072 	if (!dump)
3073 		return offset + total_size;
3074 
3075 	/* Prepare RAM address */
3076 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3077 
3078 	/* Read VFC RAM data */
3079 	for (row = ram_defs->base_row;
3080 	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
3081 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3082 		offset += qed_grc_dump_read_from_vfc(p_hwfn,
3083 						     p_ptt,
3084 						     storm,
3085 						     ram_cmd,
3086 						     VFC_RAM_CMD_DWORDS,
3087 						     ram_addr,
3088 						     VFC_RAM_ADDR_DWORDS,
3089 						     VFC_RAM_RESP_DWORDS,
3090 						     dump_buf + offset);
3091 	}
3092 
3093 	return offset;
3094 }
3095 
3096 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3097 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3098 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3099 {
3100 	u8 storm_id, i;
3101 	u32 offset = 0;
3102 
3103 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3104 		if (!qed_grc_is_storm_included(p_hwfn,
3105 					       (enum dbg_storms)storm_id) ||
3106 		    !s_storm_defs[storm_id].has_vfc)
3107 			continue;
3108 
3109 		/* Read CAM */
3110 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3111 					       p_ptt,
3112 					       dump_buf + offset,
3113 					       dump, storm_id);
3114 
3115 		/* Read RAM */
3116 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3117 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3118 						       p_ptt,
3119 						       dump_buf + offset,
3120 						       dump,
3121 						       storm_id,
3122 						       &s_vfc_ram_defs[i]);
3123 	}
3124 
3125 	return offset;
3126 }
3127 
3128 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3129 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3130 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3131 {
3132 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3133 	u32 offset = 0;
3134 	u8 rss_mem_id;
3135 
3136 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3137 		u32 rss_addr, num_entries, total_dwords;
3138 		struct rss_mem_defs *rss_defs;
3139 		u32 addr, num_dwords_to_read;
3140 		bool packed;
3141 
3142 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3143 		rss_addr = rss_defs->addr;
3144 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3145 		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3146 		packed = (rss_defs->entry_width == 16);
3147 
3148 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3149 					       dump_buf + offset,
3150 					       dump,
3151 					       rss_defs->mem_name,
3152 					       0,
3153 					       total_dwords,
3154 					       rss_defs->entry_width,
3155 					       packed,
3156 					       rss_defs->type_name, 0);
3157 
3158 		/* Dump RSS data */
3159 		if (!dump) {
3160 			offset += total_dwords;
3161 			continue;
3162 		}
3163 
3164 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3165 		while (total_dwords) {
3166 			num_dwords_to_read = min_t(u32,
3167 						   RSS_REG_RSS_RAM_DATA_SIZE,
3168 						   total_dwords);
3169 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3170 			offset += qed_grc_dump_addr_range(p_hwfn,
3171 							  p_ptt,
3172 							  dump_buf + offset,
3173 							  dump,
3174 							  addr,
3175 							  num_dwords_to_read,
3176 							  false,
3177 							  SPLIT_TYPE_NONE, 0);
3178 			total_dwords -= num_dwords_to_read;
3179 			rss_addr++;
3180 		}
3181 	}
3182 
3183 	return offset;
3184 }
3185 
3186 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3187 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3188 				struct qed_ptt *p_ptt,
3189 				u32 *dump_buf, bool dump, u8 big_ram_id)
3190 {
3191 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3192 	u32 block_size, ram_size, offset = 0, reg_val, i;
3193 	char mem_name[12] = "???_BIG_RAM";
3194 	char type_name[8] = "???_RAM";
3195 	struct big_ram_defs *big_ram;
3196 
3197 	big_ram = &s_big_ram_defs[big_ram_id];
3198 	ram_size = big_ram->ram_size[dev_data->chip_id];
3199 
3200 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3201 	block_size = reg_val &
3202 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3203 									 : 128;
3204 
3205 	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3206 	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3207 
3208 	/* Dump memory header */
3209 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3210 				       dump_buf + offset,
3211 				       dump,
3212 				       mem_name,
3213 				       0,
3214 				       ram_size,
3215 				       block_size * 8,
3216 				       false, type_name, 0);
3217 
3218 	/* Read and dump Big RAM data */
3219 	if (!dump)
3220 		return offset + ram_size;
3221 
3222 	/* Dump Big RAM */
3223 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3224 	     i++) {
3225 		u32 addr, len;
3226 
3227 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3228 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3229 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3230 		offset += qed_grc_dump_addr_range(p_hwfn,
3231 						  p_ptt,
3232 						  dump_buf + offset,
3233 						  dump,
3234 						  addr,
3235 						  len,
3236 						  false, SPLIT_TYPE_NONE, 0);
3237 	}
3238 
3239 	return offset;
3240 }
3241 
/* Dumps MCP scratchpad, cpu_reg_file, MCP block registers and a few
 * MCP-related registers outside the MCP block.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	u32 offset = 0, addr;
	bool halted = false;

	/* Halt MCP while its memories are read, so the dumped data is
	 * consistent. Skipped when the NO_MCP debug parameter is set.
	 */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
				   MCP_REG_SCRATCH_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP cpu_reg_file */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
				   MCP_REG_CPU_REG_FILE_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP registers (only the MCP block is enabled) */
	block_enable[BLOCK_MCP] = true;
	offset += qed_grc_dump_registers(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump, block_enable, "MCP");

	/* Dump required non-MCP registers (single register entry) */
	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
					dump, 1, SPLIT_TYPE_NONE, 0,
					"MCP");
	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
	offset += qed_grc_dump_reg_entry(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump,
					 addr,
					 1,
					 false, SPLIT_TYPE_NONE, 0);

	/* Resume MCP only if this function halted it */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	return offset;
}
3303 
3304 /* Dumps the tbus indirect memory for all PHYs.
3305  * Returns the dumped size in dwords.
3306  */
3307 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3308 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3309 {
3310 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3311 	char mem_name[32];
3312 	u8 phy_id;
3313 
3314 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3315 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3316 		struct phy_defs *phy_defs;
3317 		u8 *bytes_buf;
3318 
3319 		phy_defs = &s_phy_defs[phy_id];
3320 		addr_lo_addr = phy_defs->base_addr +
3321 			       phy_defs->tbus_addr_lo_addr;
3322 		addr_hi_addr = phy_defs->base_addr +
3323 			       phy_defs->tbus_addr_hi_addr;
3324 		data_lo_addr = phy_defs->base_addr +
3325 			       phy_defs->tbus_data_lo_addr;
3326 		data_hi_addr = phy_defs->base_addr +
3327 			       phy_defs->tbus_data_hi_addr;
3328 
3329 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3330 			     phy_defs->phy_name) < 0)
3331 			DP_NOTICE(p_hwfn,
3332 				  "Unexpected debug error: invalid PHY memory name\n");
3333 
3334 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3335 					       dump_buf + offset,
3336 					       dump,
3337 					       mem_name,
3338 					       0,
3339 					       PHY_DUMP_SIZE_DWORDS,
3340 					       16, true, mem_name, 0);
3341 
3342 		if (!dump) {
3343 			offset += PHY_DUMP_SIZE_DWORDS;
3344 			continue;
3345 		}
3346 
3347 		bytes_buf = (u8 *)(dump_buf + offset);
3348 		for (tbus_hi_offset = 0;
3349 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3350 		     tbus_hi_offset++) {
3351 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3352 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3353 			     tbus_lo_offset++) {
3354 				qed_wr(p_hwfn,
3355 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3356 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3357 							    p_ptt,
3358 							    data_lo_addr);
3359 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3360 							    p_ptt,
3361 							    data_hi_addr);
3362 			}
3363 		}
3364 
3365 		offset += PHY_DUMP_SIZE_DWORDS;
3366 	}
3367 
3368 	return offset;
3369 }
3370 
3371 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
3372 static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3373 				    struct qed_ptt *p_ptt,
3374 				    u32 *dump_buf, bool dump)
3375 {
3376 	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3377 	u32 hw_dump_size_dwords = 0, offset = 0;
3378 	enum dbg_status status;
3379 
3380 	/* Read HW dump image from NVRAM */
3381 	status = qed_find_nvram_image(p_hwfn,
3382 				      p_ptt,
3383 				      NVM_TYPE_HW_DUMP_OUT,
3384 				      &hw_dump_offset_bytes,
3385 				      &hw_dump_size_bytes);
3386 	if (status != DBG_STATUS_OK)
3387 		return 0;
3388 
3389 	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3390 
3391 	/* Dump HW dump image section */
3392 	offset += qed_dump_section_hdr(dump_buf + offset,
3393 				       dump, "mcp_hw_dump", 1);
3394 	offset += qed_dump_num_param(dump_buf + offset,
3395 				     dump, "size", hw_dump_size_dwords);
3396 
3397 	/* Read MCP HW dump image into dump buffer */
3398 	if (dump && hw_dump_size_dwords) {
3399 		status = qed_nvram_read(p_hwfn,
3400 					p_ptt,
3401 					hw_dump_offset_bytes,
3402 					hw_dump_size_bytes, dump_buf + offset);
3403 		if (status != DBG_STATUS_OK) {
3404 			DP_NOTICE(p_hwfn,
3405 				  "Failed to read MCP HW Dump image from NVRAM\n");
3406 			return 0;
3407 		}
3408 	}
3409 	offset += hw_dump_size_dwords;
3410 
3411 	return offset;
3412 }
3413 
/* Dumps Static Debug data: all static debug lines of every block that has
 * a debug bus on the current chip. Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0, addr, len;

	/* Don't dump static debug if a debug bus recording is in progress */
	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		/* Disable debug bus in all blocks */
		qed_bus_disable_blocks(p_hwfn, p_ptt);

		/* Reset the DBG block and configure it for static-debug
		 * readout: 8-HW-dword framing into the internal buffer.
		 */
		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip;
		const struct dbg_block *block;
		bool is_removed, has_dbg_bus;
		u16 modes_buf_offset;
		u32 block_dwords;

		block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
		is_removed = GET_FIELD(block_per_chip->flags,
				       DBG_BLOCK_CHIP_IS_REMOVED);
		has_dbg_bus = GET_FIELD(block_per_chip->flags,
					DBG_BLOCK_CHIP_HAS_DBG_BUS);

		/* A block whose debug bus is mode-dependent is treated as
		 * having no debug bus when the current mode doesn't match.
		 */
		if (!is_removed && has_dbg_bus &&
		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
			      DBG_MODE_HDR_EVAL_MODE) > 0) {
			modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
				has_dbg_bus = false;
		}

		if (is_removed || !has_dbg_bus)
			continue;

		block_dwords = NUM_DBG_LINES(block_per_chip) *
			       STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       block->name,
					       0,
					       block_dwords,
					       32, false, "STATIC", 0);

		/* In size-calculation mode, only account for the data */
		if (!dump) {
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			memset(dump_buf + offset, 0,
			       DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		qed_bus_enable_clients(p_hwfn,
				       p_ptt,
				       BIT(block_per_chip->dbg_client_id));

		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
		len = STATIC_DEBUG_LINE_DWORDS;
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
		     line_id++) {
			/* Configure debug line ID */
			qed_bus_config_dbg_line(p_hwfn,
						p_ptt,
						(enum block_id)block_id,
						(u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  len,
							  true, SPLIT_TYPE_NONE,
							  0);
		}

		/* Disable block's client and debug output */
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
		qed_bus_config_dbg_line(p_hwfn, p_ptt,
					(enum block_id)block_id, 0, 0, 0, 0, 0);
	}

	/* Restore DBG block / client state after a real dump */
	if (dump) {
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
3532 
3533 /* Performs GRC Dump to the specified buffer.
3534  * Returns the dumped size in dwords.
3535  */
3536 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3537 				    struct qed_ptt *p_ptt,
3538 				    u32 *dump_buf,
3539 				    bool dump, u32 *num_dumped_dwords)
3540 {
3541 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3542 	bool parities_masked = false;
3543 	u32 dwords_read, offset = 0;
3544 	u8 i;
3545 
3546 	*num_dumped_dwords = 0;
3547 	dev_data->num_regs_read = 0;
3548 
3549 	/* Update reset state */
3550 	if (dump)
3551 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3552 
3553 	/* Dump global params */
3554 	offset += qed_dump_common_global_params(p_hwfn,
3555 						p_ptt,
3556 						dump_buf + offset, dump, 4);
3557 	offset += qed_dump_str_param(dump_buf + offset,
3558 				     dump, "dump-type", "grc-dump");
3559 	offset += qed_dump_num_param(dump_buf + offset,
3560 				     dump,
3561 				     "num-lcids",
3562 				     NUM_OF_LCIDS);
3563 	offset += qed_dump_num_param(dump_buf + offset,
3564 				     dump,
3565 				     "num-ltids",
3566 				     NUM_OF_LTIDS);
3567 	offset += qed_dump_num_param(dump_buf + offset,
3568 				     dump, "num-ports", dev_data->num_ports);
3569 
3570 	/* Dump reset registers (dumped before taking blocks out of reset ) */
3571 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3572 		offset += qed_grc_dump_reset_regs(p_hwfn,
3573 						  p_ptt,
3574 						  dump_buf + offset, dump);
3575 
3576 	/* Take all blocks out of reset (using reset registers) */
3577 	if (dump) {
3578 		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3579 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3580 	}
3581 
3582 	/* Disable all parities using MFW command */
3583 	if (dump &&
3584 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3585 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3586 		if (!parities_masked) {
3587 			DP_NOTICE(p_hwfn,
3588 				  "Failed to mask parities using MFW\n");
3589 			if (qed_grc_get_param
3590 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3591 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3592 		}
3593 	}
3594 
3595 	/* Dump modified registers (dumped before modifying them) */
3596 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3597 		offset += qed_grc_dump_modified_regs(p_hwfn,
3598 						     p_ptt,
3599 						     dump_buf + offset, dump);
3600 
3601 	/* Stall storms */
3602 	if (dump &&
3603 	    (qed_grc_is_included(p_hwfn,
3604 				 DBG_GRC_PARAM_DUMP_IOR) ||
3605 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3606 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3607 
3608 	/* Dump all regs  */
3609 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3610 		bool block_enable[MAX_BLOCK_ID];
3611 
3612 		/* Dump all blocks except MCP */
3613 		for (i = 0; i < MAX_BLOCK_ID; i++)
3614 			block_enable[i] = true;
3615 		block_enable[BLOCK_MCP] = false;
3616 		offset += qed_grc_dump_registers(p_hwfn,
3617 						 p_ptt,
3618 						 dump_buf +
3619 						 offset,
3620 						 dump,
3621 						 block_enable, NULL);
3622 
3623 		/* Dump special registers */
3624 		offset += qed_grc_dump_special_regs(p_hwfn,
3625 						    p_ptt,
3626 						    dump_buf + offset, dump);
3627 	}
3628 
3629 	/* Dump memories */
3630 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3631 
3632 	/* Dump MCP */
3633 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3634 		offset += qed_grc_dump_mcp(p_hwfn,
3635 					   p_ptt, dump_buf + offset, dump);
3636 
3637 	/* Dump context */
3638 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3639 		offset += qed_grc_dump_ctx(p_hwfn,
3640 					   p_ptt, dump_buf + offset, dump);
3641 
3642 	/* Dump RSS memories */
3643 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3644 		offset += qed_grc_dump_rss(p_hwfn,
3645 					   p_ptt, dump_buf + offset, dump);
3646 
3647 	/* Dump Big RAM */
3648 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3649 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3650 			offset += qed_grc_dump_big_ram(p_hwfn,
3651 						       p_ptt,
3652 						       dump_buf + offset,
3653 						       dump, i);
3654 
3655 	/* Dump VFC */
3656 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3657 		dwords_read = qed_grc_dump_vfc(p_hwfn,
3658 					       p_ptt, dump_buf + offset, dump);
3659 		offset += dwords_read;
3660 		if (!dwords_read)
3661 			return DBG_STATUS_VFC_READ_ERROR;
3662 	}
3663 
3664 	/* Dump PHY tbus */
3665 	if (qed_grc_is_included(p_hwfn,
3666 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3667 	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3668 		offset += qed_grc_dump_phy(p_hwfn,
3669 					   p_ptt, dump_buf + offset, dump);
3670 
3671 	/* Dump MCP HW Dump */
3672 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3673 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
3674 		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3675 						   p_ptt,
3676 						   dump_buf + offset, dump);
3677 
3678 	/* Dump static debug data (only if not during debug bus recording) */
3679 	if (qed_grc_is_included(p_hwfn,
3680 				DBG_GRC_PARAM_DUMP_STATIC) &&
3681 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3682 		offset += qed_grc_dump_static_debug(p_hwfn,
3683 						    p_ptt,
3684 						    dump_buf + offset, dump);
3685 
3686 	/* Dump last section */
3687 	offset += qed_dump_last_section(dump_buf, offset, dump);
3688 
3689 	if (dump) {
3690 		/* Unstall storms */
3691 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3692 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3693 
3694 		/* Clear parity status */
3695 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3696 
3697 		/* Enable all parities using MFW command */
3698 		if (parities_masked)
3699 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3700 	}
3701 
3702 	*num_dumped_dwords = offset;
3703 
3704 	return DBG_STATUS_OK;
3705 }
3706 
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * rule_id/fail_entry_id identify the failing rule and the failing entry
 * within it; cond_reg_values holds the condition register values that were
 * read by the caller (may be NULL when dump is false, since they are not
 * accessed in that case).
 */
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf,
				     bool dump,
				     u16 rule_id,
				     const struct dbg_idle_chk_rule *rule,
				     u16 fail_entry_id, u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	/* The rule's register array holds num_cond_regs condition registers
	 * followed by the info registers.
	 */
	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
	regs = (const union dbg_idle_chk_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
		rule->reg_offset;
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		memset(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values (already read by the caller into
	 * cond_reg_values; this loop only copies them out).
	 */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr =
		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

		/* In size-calculation mode, only advance the offset */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
			    reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		memset(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* A register is a memory when it has several entries or a
		 * non-zero start entry.
		 */
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values (read from HW here, unlike the
	 * condition registers above).
	 */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* In size-calculation mode, only advance the offset */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, "Invalid block_id\n");
			return 0;
		}

		/* Skip registers whose block is in reset */
		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
				  (dump_buf + offset);

			/* Check mode */
			eval_mode = GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset =
				    GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match =
					qed_is_mode_match(p_hwfn,
							  &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data,
					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data,
					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			memset(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			/* Info register IDs follow the condition register
			 * IDs in the result.
			 */
			SET_FIELD(reg_hdr->data,
				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
				  rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  reg->size, wide_bus,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
3842 
/* Dumps idle check rule entries. Returns the dumped size in dwords. */
static u32
qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u32 *dump_buf, bool dump,
			       const struct dbg_idle_chk_rule *input_rules,
			       u32 num_input_rules, u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		/* Resolve the rule's condition registers and immediate
		 * values from the debug binary arrays.
		 */
		rule = &input_rules[i];
		regs = (const union dbg_idle_chk_reg *)
			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
			rule->reg_offset;
		cond_regs = &regs[0].cond_reg;
		imm_values =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
		    rule->imm_offset;

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
		     reg_id++) {
			u32 block_id =
				GET_FIELD(cond_regs[reg_id].data,
					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* Skip rules with in-reset blocks (only in dump mode; size
		 * calculation must assume the worst case).
		 */
		if (!check_rule && dump)
			continue;

		/* Size-calculation mode: assume every entry of every rule
		 * fails, which is the maximum possible dump size.
		 */
		if (!dump) {
			u32 entry_dump_size =
				qed_idle_chk_dump_failure(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  false,
							  rule->rule_id,
							  rule,
							  0,
							  NULL);

			offset += num_reg_entries * entry_dump_size;
			(*num_failing_rules) += num_reg_entries;
			continue;
		}

		/* Go over all register entries (number of entries is the same
		 * for all condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs;
			     reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg =
					&cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the
				 * address of the specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data,
						 DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus =
				    GET_FIELD(reg->data,
					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 ||
				    reg->start_entry > 0) {
					/* Entries are padded to a power of
					 * two in the address space.
					 */
					padded_entry_size =
					   reg->entry_size > 1 ?
					   roundup_pow_of_two(reg->entry_size) :
					   1;
					addr += (reg->start_entry + entry_id) *
						padded_entry_size;
				}

				/* Bound-check before reading into the local
				 * values buffer.
				 * NOTE(review): '>=' also rejects an
				 * exactly-full buffer; looks like deliberate
				 * headroom - confirm before changing.
				 */
				if (next_reg_offset + reg->entry_size >=
				    IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn,
						  "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset +=
				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
							    cond_reg_values +
							    next_reg_offset,
							    dump, addr,
							    reg->entry_size,
							    wide_bus,
							    SPLIT_TYPE_NONE, 0);
			}

			/* Call rule condition function.
			 * If returns true, it's a failure.
			 */
			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
							imm_values)) {
				offset += qed_idle_chk_dump_failure(p_hwfn,
							p_ptt,
							dump_buf + offset,
							dump,
							rule->rule_id,
							rule,
							entry_id,
							cond_reg_values);
				(*num_failing_rules)++;
			}
		}
	}

	return offset;
}
3984 
/* Performs Idle Check Dump to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
	u32 num_failing_rules_offset, offset = 0,
	    input_offset = 0, num_failing_rules = 0;

	/* Dump global params  - 1 must match below amount of params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter.
	 * The num_rules parameter is written as 0 here and overwritten
	 * with the real count at the end (its offset is remembered).
	 */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	num_failing_rules_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	/* Walk the rules binary buffer: each condition header is followed
	 * by data_size dwords of rule entries.
	 */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr =
		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
		    input_offset++;
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Dump the rules under this header only when the chip mode
		 * matches (or when the header has no mode condition).
		 */
		if (mode_match) {
			const struct dbg_idle_chk_rule *rule =
			    (const struct dbg_idle_chk_rule *)((u32 *)
							       dbg_buf->ptr
							       + input_offset);
			u32 num_input_rules =
				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
			offset +=
			    qed_idle_chk_dump_rule_entries(p_hwfn,
							   p_ptt,
							   dump_buf +
							   offset,
							   dump,
							   rule,
							   num_input_rules,
							   &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter */
	if (dump)
		qed_dump_num_param(dump_buf + num_failing_rules_offset,
				   dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4059 
4060 /* Get info on the MCP Trace data in the scratchpad:
4061  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4062  * - trace_data_size (OUT): trace data size in bytes (without the header)
4063  */
4064 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4065 						   struct qed_ptt *p_ptt,
4066 						   u32 *trace_data_grc_addr,
4067 						   u32 *trace_data_size)
4068 {
4069 	u32 spad_trace_offsize, signature;
4070 
4071 	/* Read trace section offsize structure from MCP scratchpad */
4072 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4073 
4074 	/* Extract trace section address from offsize (in scratchpad) */
4075 	*trace_data_grc_addr =
4076 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4077 
4078 	/* Read signature from MCP trace section */
4079 	signature = qed_rd(p_hwfn, p_ptt,
4080 			   *trace_data_grc_addr +
4081 			   offsetof(struct mcp_trace, signature));
4082 
4083 	if (signature != MFW_TRACE_SIGNATURE)
4084 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4085 
4086 	/* Read trace size from MCP trace section */
4087 	*trace_data_size = qed_rd(p_hwfn,
4088 				  p_ptt,
4089 				  *trace_data_grc_addr +
4090 				  offsetof(struct mcp_trace, size));
4091 
4092 	return DBG_STATUS_OK;
4093 }
4094 
4095 /* Reads MCP trace meta data image from NVRAM
4096  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4097  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4098  *			      loaded from file).
4099  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4100  */
4101 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4102 						   struct qed_ptt *p_ptt,
4103 						   u32 trace_data_size_bytes,
4104 						   u32 *running_bundle_id,
4105 						   u32 *trace_meta_offset,
4106 						   u32 *trace_meta_size)
4107 {
4108 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4109 
4110 	/* Read MCP trace section offsize structure from MCP scratchpad */
4111 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4112 
4113 	/* Find running bundle ID */
4114 	running_mfw_addr =
4115 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4116 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4117 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4118 	if (*running_bundle_id > 1)
4119 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4120 
4121 	/* Find image in NVRAM */
4122 	nvram_image_type =
4123 	    (*running_bundle_id ==
4124 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4125 	return qed_find_nvram_image(p_hwfn,
4126 				    p_ptt,
4127 				    nvram_image_type,
4128 				    trace_meta_offset, trace_meta_size);
4129 }
4130 
4131 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4132 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4133 					       struct qed_ptt *p_ptt,
4134 					       u32 nvram_offset_in_bytes,
4135 					       u32 size_in_bytes, u32 *buf)
4136 {
4137 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4138 	enum dbg_status status;
4139 	u32 signature;
4140 
4141 	/* Read meta data from NVRAM */
4142 	status = qed_nvram_read(p_hwfn,
4143 				p_ptt,
4144 				nvram_offset_in_bytes, size_in_bytes, buf);
4145 	if (status != DBG_STATUS_OK)
4146 		return status;
4147 
4148 	/* Extract and check first signature */
4149 	signature = qed_read_unaligned_dword(byte_buf);
4150 	byte_buf += sizeof(signature);
4151 	if (signature != NVM_MAGIC_VALUE)
4152 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4153 
4154 	/* Extract number of modules */
4155 	modules_num = *(byte_buf++);
4156 
4157 	/* Skip all modules */
4158 	for (i = 0; i < modules_num; i++) {
4159 		module_len = *(byte_buf++);
4160 		byte_buf += module_len;
4161 	}
4162 
4163 	/* Extract and check second signature */
4164 	signature = qed_read_unaligned_dword(byte_buf);
4165 	byte_buf += sizeof(signature);
4166 	if (signature != NVM_MAGIC_VALUE)
4167 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4168 
4169 	return DBG_STATUS_OK;
4170 }
4171 
/* Dump MCP Trace.
 * Dumps the MCP trace data from the scratchpad and, when MCP access is
 * allowed, the trace meta image from NVRAM. Works in two modes: size query
 * (dump == false) and actual dump (dump == true).
 * Returns a dbg_status; *num_dumped_dwords receives the dumped size.
 */
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 *dump_buf,
					  bool dump, u32 *num_dumped_dwords)
{
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
	enum dbg_status status;
	int halted = 0;
	bool use_mfw;

	*num_dumped_dwords = 0;

	/* MCP/MFW access may be disabled via the NO_MCP debug parameter;
	 * without it the NVRAM meta image cannot be fetched.
	 */
	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     p_ptt,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && use_mfw) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Find trace data size: payload plus the mcp_trace header, rounded
	 * up to whole dwords.
	 */
	trace_data_size_dwords =
	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
			 BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump,
					  BYTES_TO_DWORDS(trace_data_grc_addr),
					  trace_data_size_dwords, false,
					  SPLIT_TYPE_NONE, 0);

	/* Resume MCP (only if halt succeeded) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* If MCP Trace meta size parameter was set, use it.
	 * Otherwise, read trace meta.
	 * trace_meta_size_bytes is dword-aligned.
	 */
	trace_meta_size_bytes =
		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
	if ((!trace_meta_size_bytes || dump) && use_mfw)
		status = qed_mcp_trace_get_meta_info(p_hwfn,
						     p_ptt,
						     trace_data_size_bytes,
						     &running_bundle_id,
						     &trace_meta_offset_bytes,
						     &trace_meta_size_bytes);
	/* status is still OK here unless get_meta_info just failed */
	if (status == DBG_STATUS_OK)
		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);

	/* Dump trace meta size param (0 if meta is unavailable) */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump && trace_meta_size_dwords)
		status = qed_mcp_trace_read_meta(p_hwfn,
						 p_ptt,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
						 dump_buf + offset);
	/* Only account for the meta section if it was actually read */
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4282 
4283 /* Dump GRC FIFO */
4284 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4285 					 struct qed_ptt *p_ptt,
4286 					 u32 *dump_buf,
4287 					 bool dump, u32 *num_dumped_dwords)
4288 {
4289 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4290 	bool fifo_has_data;
4291 
4292 	*num_dumped_dwords = 0;
4293 
4294 	/* Dump global params */
4295 	offset += qed_dump_common_global_params(p_hwfn,
4296 						p_ptt,
4297 						dump_buf + offset, dump, 1);
4298 	offset += qed_dump_str_param(dump_buf + offset,
4299 				     dump, "dump-type", "reg-fifo");
4300 
4301 	/* Dump fifo data section header and param. The size param is 0 for
4302 	 * now, and is overwritten after reading the FIFO.
4303 	 */
4304 	offset += qed_dump_section_hdr(dump_buf + offset,
4305 				       dump, "reg_fifo_data", 1);
4306 	size_param_offset = offset;
4307 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4308 
4309 	if (!dump) {
4310 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4311 		 * test how much data is available, except for reading it.
4312 		 */
4313 		offset += REG_FIFO_DEPTH_DWORDS;
4314 		goto out;
4315 	}
4316 
4317 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4318 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4319 
4320 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4321 	 * and must be accessed atomically. Test for dwords_read not passing
4322 	 * buffer size since more entries could be added to the buffer as we are
4323 	 * emptying it.
4324 	 */
4325 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4326 	len = REG_FIFO_ELEMENT_DWORDS;
4327 	for (dwords_read = 0;
4328 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4329 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4330 		offset += qed_grc_dump_addr_range(p_hwfn,
4331 						  p_ptt,
4332 						  dump_buf + offset,
4333 						  true,
4334 						  addr,
4335 						  len,
4336 						  true, SPLIT_TYPE_NONE,
4337 						  0);
4338 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4339 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4340 	}
4341 
4342 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4343 			   dwords_read);
4344 out:
4345 	/* Dump last section */
4346 	offset += qed_dump_last_section(dump_buf, offset, dump);
4347 
4348 	*num_dumped_dwords = offset;
4349 
4350 	return DBG_STATUS_OK;
4351 }
4352 
4353 /* Dump IGU FIFO */
4354 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4355 					 struct qed_ptt *p_ptt,
4356 					 u32 *dump_buf,
4357 					 bool dump, u32 *num_dumped_dwords)
4358 {
4359 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4360 	bool fifo_has_data;
4361 
4362 	*num_dumped_dwords = 0;
4363 
4364 	/* Dump global params */
4365 	offset += qed_dump_common_global_params(p_hwfn,
4366 						p_ptt,
4367 						dump_buf + offset, dump, 1);
4368 	offset += qed_dump_str_param(dump_buf + offset,
4369 				     dump, "dump-type", "igu-fifo");
4370 
4371 	/* Dump fifo data section header and param. The size param is 0 for
4372 	 * now, and is overwritten after reading the FIFO.
4373 	 */
4374 	offset += qed_dump_section_hdr(dump_buf + offset,
4375 				       dump, "igu_fifo_data", 1);
4376 	size_param_offset = offset;
4377 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4378 
4379 	if (!dump) {
4380 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4381 		 * test how much data is available, except for reading it.
4382 		 */
4383 		offset += IGU_FIFO_DEPTH_DWORDS;
4384 		goto out;
4385 	}
4386 
4387 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4388 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4389 
4390 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4391 	 * and must be accessed atomically. Test for dwords_read not passing
4392 	 * buffer size since more entries could be added to the buffer as we are
4393 	 * emptying it.
4394 	 */
4395 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4396 	len = IGU_FIFO_ELEMENT_DWORDS;
4397 	for (dwords_read = 0;
4398 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4399 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4400 		offset += qed_grc_dump_addr_range(p_hwfn,
4401 						  p_ptt,
4402 						  dump_buf + offset,
4403 						  true,
4404 						  addr,
4405 						  len,
4406 						  true, SPLIT_TYPE_NONE,
4407 						  0);
4408 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4409 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4410 	}
4411 
4412 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4413 			   dwords_read);
4414 out:
4415 	/* Dump last section */
4416 	offset += qed_dump_last_section(dump_buf, offset, dump);
4417 
4418 	*num_dumped_dwords = offset;
4419 
4420 	return DBG_STATUS_OK;
4421 }
4422 
4423 /* Protection Override dump */
4424 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4425 						    struct qed_ptt *p_ptt,
4426 						    u32 *dump_buf,
4427 						    bool dump,
4428 						    u32 *num_dumped_dwords)
4429 {
4430 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4431 
4432 	*num_dumped_dwords = 0;
4433 
4434 	/* Dump global params */
4435 	offset += qed_dump_common_global_params(p_hwfn,
4436 						p_ptt,
4437 						dump_buf + offset, dump, 1);
4438 	offset += qed_dump_str_param(dump_buf + offset,
4439 				     dump, "dump-type", "protection-override");
4440 
4441 	/* Dump data section header and param. The size param is 0 for now,
4442 	 * and is overwritten after reading the data.
4443 	 */
4444 	offset += qed_dump_section_hdr(dump_buf + offset,
4445 				       dump, "protection_override_data", 1);
4446 	size_param_offset = offset;
4447 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4448 
4449 	if (!dump) {
4450 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4451 		goto out;
4452 	}
4453 
4454 	/* Add override window info to buffer */
4455 	override_window_dwords =
4456 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4457 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4458 	if (override_window_dwords) {
4459 		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4460 		offset += qed_grc_dump_addr_range(p_hwfn,
4461 						  p_ptt,
4462 						  dump_buf + offset,
4463 						  true,
4464 						  addr,
4465 						  override_window_dwords,
4466 						  true, SPLIT_TYPE_NONE, 0);
4467 		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4468 				   override_window_dwords);
4469 	}
4470 out:
4471 	/* Dump last section */
4472 	offset += qed_dump_last_section(dump_buf, offset, dump);
4473 
4474 	*num_dumped_dwords = offset;
4475 
4476 	return DBG_STATUS_OK;
4477 }
4478 
/* Performs FW Asserts Dump to the specified buffer.
 * For each Storm that is not in reset, reads the FW asserts section from the
 * Storm's fast memory and dumps the most recent assert list element.
 * Returns the dumped size in dwords.
 */
static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	/* Single-character Storm name, passed as a string param */
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");

	/* Find Storm dump size */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 last_list_idx, addr;

		/* A Storm in reset cannot be read */
		if (dev_data->block_in_reset[storm->sem_block_id])
			continue;

		/* Read FW info for the current Storm */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += qed_dump_section_hdr(dump_buf + offset,
					       dump, "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "storm", storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset,
					     dump,
					     "size",
					     asserts->list_element_dword_size);

		/* Read and dump FW Asserts data.
		 * In size-query mode, just account for one list element.
		 */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* Locate the asserts section inside the Storm's INT RAM */
		addr = le16_to_cpu(asserts->section_ram_line_offset);
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
					  SEM_FAST_REG_INT_RAM +
					  RAM_LINES_TO_BYTES(addr);

		/* next_list_idx points past the most recent element; index 0
		 * means the list wrapped, so the last element is at the end.
		 */
		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
		last_list_idx = (next_list_idx > 0 ?
				 next_list_idx :
				 asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
		       asserts->list_dword_offset +
		       last_list_idx * asserts->list_element_dword_size;
		offset +=
		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
					    dump_buf + offset,
					    dump, addr,
					    asserts->list_element_dword_size,
						  false, SPLIT_TYPE_NONE, 0);
	}

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4557 
4558 /* Dumps the specified ILT pages to the specified buffer.
4559  * Returns the dumped size in dwords.
4560  */
4561 static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
4562 				    bool *dump, u32 start_page_id,
4563 				    u32 num_pages,
4564 				    struct phys_mem_desc *ilt_pages,
4565 				    bool dump_page_ids, u32 buf_size_in_dwords,
4566 				    u32 *given_actual_dump_size_in_dwords)
4567 {
4568 	u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
4569 	u32 page_id, end_page_id, offset = *given_offset;
4570 	struct phys_mem_desc *mem_desc = NULL;
4571 	bool continue_dump = *dump;
4572 	u32 partial_page_size = 0;
4573 
4574 	if (num_pages == 0)
4575 		return offset;
4576 
4577 	end_page_id = start_page_id + num_pages - 1;
4578 
4579 	for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4580 		mem_desc = &ilt_pages[page_id];
4581 		if (!ilt_pages[page_id].virt_addr)
4582 			continue;
4583 
4584 		if (dump_page_ids) {
4585 			/* Copy page ID to dump buffer
4586 			 * (if dump is needed and buffer is not full)
4587 			 */
4588 			if ((continue_dump) &&
4589 			    (offset + 1 > buf_size_in_dwords)) {
4590 				continue_dump = false;
4591 				actual_dump_size_in_dwords = offset;
4592 			}
4593 			if (continue_dump)
4594 				*(dump_buf + offset) = page_id;
4595 			offset++;
4596 		} else {
4597 			/* Copy page memory to dump buffer */
4598 			if ((continue_dump) &&
4599 			    (offset + BYTES_TO_DWORDS(mem_desc->size) >
4600 			     buf_size_in_dwords)) {
4601 				if (offset + BYTES_TO_DWORDS(mem_desc->size) >
4602 				    buf_size_in_dwords) {
4603 					partial_page_size =
4604 					    buf_size_in_dwords - offset;
4605 					memcpy(dump_buf + offset,
4606 					       mem_desc->virt_addr,
4607 					       partial_page_size);
4608 					continue_dump = false;
4609 					actual_dump_size_in_dwords =
4610 					    offset + partial_page_size;
4611 				}
4612 			}
4613 
4614 			if (continue_dump)
4615 				memcpy(dump_buf + offset,
4616 				       mem_desc->virt_addr, mem_desc->size);
4617 			offset += BYTES_TO_DWORDS(mem_desc->size);
4618 		}
4619 	}
4620 
4621 	*dump = continue_dump;
4622 	*given_offset = offset;
4623 	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
4624 
4625 	return offset;
4626 }
4627 
/* Dumps a section containing the dumped ILT pages.
 * Emits one section (either "ilt_page_ids" or "ilt_page_mem") covering the
 * CDUC connection pages, the CDUT task pages and the Searcher pages, per the
 * relevant GRC parameters. Tracks both the full required size (offset) and
 * the actual size written before the buffer filled.
 * Returns the dumped size in dwords.
 */
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
				      u32 *dump_buf,
				      u32 *given_offset,
				      bool *dump,
				      u32 valid_conn_pf_pages,
				      u32 valid_conn_vf_pages,
				      struct phys_mem_desc *ilt_pages,
				      bool dump_page_ids,
				      u32 buf_size_in_dwords,
				      u32 *given_actual_dump_size_in_dwords)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 pf_start_line, start_page_id, offset = *given_offset;
	u32 cdut_pf_init_pages, cdut_vf_init_pages;
	u32 cdut_pf_work_pages, cdut_vf_work_pages;
	u32 base_data_offset, size_param_offset;
	u32 src_pages;
	u32 section_header_and_param_size;
	u32 cdut_pf_pages, cdut_vf_pages;
	u32 actual_dump_size_in_dwords;
	/* continue_dump: still writing data; update_size: the size param was
	 * written and must be patched with the real size at the end.
	 */
	bool continue_dump = *dump;
	bool update_size = *dump;
	const char *section_name;
	u32 i;

	actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
	/* Size of the section header + its single "size" param, computed by
	 * dry-running the writers with a NULL buffer.
	 */
	section_header_and_param_size = qed_dump_section_hdr(NULL,
							     false,
							     section_name,
							     1) +
	qed_dump_num_param(NULL, false, "size", 0);

	/* If even the header doesn't fit, stop writing before it */
	if ((continue_dump) &&
	    (offset + section_header_and_param_size > buf_size_in_dwords)) {
		continue_dump = false;
		update_size = false;
		actual_dump_size_in_dwords = offset;
	}

	offset += qed_dump_section_hdr(dump_buf + offset,
				       continue_dump, section_name, 1);

	/* Dump size parameter (0 for now, overwritten with real size later) */
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset,
				     continue_dump, "size", 0);
	base_data_offset = offset;

	/* CDUC pages are ordered as follows:
	 * - PF pages - valid section (included in PF connection type mapping)
	 * - PF pages - invalid section (not dumped)
	 * - For each VF in the PF:
	 *   - VF pages - valid section (included in VF connection type mapping)
	 *   - VF pages - invalid section (not dumped)
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
		/* Dump connection PF pages */
		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
					 start_page_id, valid_conn_pf_pages,
					 ilt_pages, dump_page_ids,
					 buf_size_in_dwords,
					 &actual_dump_size_in_dwords);

		/* Dump connection VF pages */
		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
			qed_ilt_dump_pages_range(dump_buf, &offset,
						 &continue_dump, start_page_id,
						 valid_conn_vf_pages,
						 ilt_pages, dump_page_ids,
						 buf_size_in_dwords,
						 &actual_dump_size_in_dwords);
	}

	/* CDUT pages are ordered as follows:
	 * - PF init pages (not dumped)
	 * - PF work pages
	 * - For each VF in the PF:
	 *   - VF init pages (not dumped)
	 *   - VF work pages
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
		/* Dump task PF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_init_pages - pf_start_line;
		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
					 start_page_id, cdut_pf_work_pages,
					 ilt_pages, dump_page_ids,
					 buf_size_in_dwords,
					 &actual_dump_size_in_dwords);

		/* Dump task VF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += cdut_vf_pages)
			qed_ilt_dump_pages_range(dump_buf, &offset,
						 &continue_dump, start_page_id,
						 cdut_vf_work_pages, ilt_pages,
						 dump_page_ids,
						 buf_size_in_dwords,
						 &actual_dump_size_in_dwords);
	}

	/*Dump Searcher pages */
	if (clients[ILT_CLI_SRC].active) {
		start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
		src_pages = clients[ILT_CLI_SRC].last.val -
		    clients[ILT_CLI_SRC].first.val + 1;
		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
					 start_page_id, src_pages, ilt_pages,
					 dump_page_ids, buf_size_in_dwords,
					 &actual_dump_size_in_dwords);
	}

	/* Overwrite size param. If the buffer filled mid-section
	 * (*dump != continue_dump), the actual size is used instead of the
	 * full offset; an empty truncated section is dropped entirely by
	 * rolling back its header size.
	 */
	if (update_size) {
		u32 section_size = (*dump == continue_dump) ?
		    offset - base_data_offset :
		    actual_dump_size_in_dwords - base_data_offset;
		if (section_size > 0)
			qed_dump_num_param(dump_buf + size_param_offset,
					   *dump, "size", section_size);
		else if ((section_size == 0) && (*dump != continue_dump))
			actual_dump_size_in_dwords -=
			    section_header_and_param_size;
	}

	*dump = continue_dump;
	*given_offset = offset;
	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;

	return offset;
}
4775 
/* Dumps a section containing the global parameters.
 * Part of ilt dump process
 * Returns the dumped size in dwords.
 * NOTE: the 30 passed to qed_dump_common_global_params() must equal the
 * number of qed_dump_*_param() calls below; keep them in sync when adding
 * or removing parameters.
 */
static u32
qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump,
				       u32 cduc_page_size,
				       u32 conn_ctx_size,
				       u32 cdut_page_size,
				       u32 *full_dump_size_param_offset,
				       u32 *actual_dump_size_param_offset)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 offset = 0;

	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset,
						dump, 30);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "dump-type", "ilt-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-page-size",
				     cduc_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-first-page-id",
				     clients[ILT_CLI_CDUC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-last-page-id",
				     clients[ILT_CLI_CDUC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-pf-pages",
				     clients[ILT_CLI_CDUC].pf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-vf-pages",
				     clients[ILT_CLI_CDUC].vf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-conn-ctx-size",
				     conn_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-page-size",
				     cdut_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-first-page-id",
				     clients[ILT_CLI_CDUT].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-last-page-id",
				     clients[ILT_CLI_CDUT].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-init-pages",
				     qed_get_cdut_num_pf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-init-pages",
				     qed_get_cdut_num_vf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-work-pages",
				     qed_get_cdut_num_pf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-work-pages",
				     qed_get_cdut_num_vf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-task-ctx-size",
				     p_hwfn->p_cxt_mngr->task_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "first-vf-id-in-pf",
				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "num-vfs-in-pf",
				     p_hwfn->p_cxt_mngr->vf_count);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ptr-size-bytes",
				     sizeof(void *));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "pf-start-line",
				     p_hwfn->p_cxt_mngr->pf_start_line);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "page-mem-desc-size-dwords",
				     PAGE_MEM_DESC_SIZE_DWORDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ilt-shadow-size",
				     p_hwfn->p_cxt_mngr->ilt_shadow_size);

	/* Record the offsets of the two size params so the caller can patch
	 * them with the real sizes once the dump is complete.
	 */
	*full_dump_size_param_offset = offset;

	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "dump-size-full", 0);

	*actual_dump_size_param_offset = offset;

	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "dump-size-actual", 0);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "iscsi_task_pages",
				     p_hwfn->p_cxt_mngr->iscsi_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "fcoe_task_pages",
				     p_hwfn->p_cxt_mngr->fcoe_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "roce_task_pages",
				     p_hwfn->p_cxt_mngr->roce_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "eth_task_pages",
				     p_hwfn->p_cxt_mngr->eth_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				      dump,
				      "src-first-page-id",
				      clients[ILT_CLI_SRC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "src-last-page-id",
				     clients[ILT_CLI_SRC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "src-is-active",
				     clients[ILT_CLI_SRC].active);

	/* Additional/Less parameters require matching of number in call to
	 * dump_common_global_params()
	 */

	return offset;
}
4926 
4927 /* Dump section containing number of PF CIDs per connection type.
4928  * Part of ilt dump process.
4929  * Returns the dumped size in dwords.
4930  */
4931 static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
4932 					 u32 *dump_buf,
4933 					 bool dump, u32 *valid_conn_pf_cids)
4934 {
4935 	u32 num_pf_cids = 0;
4936 	u32 offset = 0;
4937 	u8 conn_type;
4938 
4939 	offset += qed_dump_section_hdr(dump_buf + offset,
4940 				       dump, "num_pf_cids_per_conn_type", 1);
4941 	offset += qed_dump_num_param(dump_buf + offset,
4942 				     dump, "size", NUM_OF_CONNECTION_TYPES);
4943 	for (conn_type = 0, *valid_conn_pf_cids = 0;
4944 	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4945 		num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
4946 		if (dump)
4947 			*(dump_buf + offset) = num_pf_cids;
4948 		*valid_conn_pf_cids += num_pf_cids;
4949 	}
4950 
4951 	return offset;
4952 }
4953 
4954 /* Dump section containing number of VF CIDs per connection type
4955  * Part of ilt dump process.
4956  * Returns the dumped size in dwords.
4957  */
4958 static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
4959 					 u32 *dump_buf,
4960 					 bool dump, u32 *valid_conn_vf_cids)
4961 {
4962 	u32 num_vf_cids = 0;
4963 	u32 offset = 0;
4964 	u8 conn_type;
4965 
4966 	offset += qed_dump_section_hdr(dump_buf + offset, dump,
4967 				       "num_vf_cids_per_conn_type", 1);
4968 	offset += qed_dump_num_param(dump_buf + offset,
4969 				     dump, "size", NUM_OF_CONNECTION_TYPES);
4970 	for (conn_type = 0, *valid_conn_vf_cids = 0;
4971 	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4972 		num_vf_cids =
4973 		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
4974 		if (dump)
4975 			*(dump_buf + offset) = num_vf_cids;
4976 		*valid_conn_vf_cids += num_vf_cids;
4977 	}
4978 
4979 	return offset;
4980 }
4981 
/* Performs ILT Dump to the specified buffer.
 * buf_size_in_dwords - The dumped buffer size.
 * Returns the dumped size in dwords.
 *
 * Dump layout: global params, PF/VF CID counts per connection type, physical
 * memory descriptors of all ILT shadow pages, the ILT page IDs and page
 * memory sections, and a trailing section (CRC). If the buffer is too small,
 * dumping stops and the "actual-dump-size" param reflects the truncated size
 * while "full-dump-size" reflects the size a complete dump would need.
 */
static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
{
#if ((!defined VMWARE) && (!defined UEFI))
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
#endif
	u32 valid_conn_vf_cids = 0,
	    valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
	u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
	u32 num_cids_per_page, conn_ctx_size;
	u32 cduc_page_size, cdut_page_size;
	u32 actual_dump_size_in_dwords = 0;
	struct phys_mem_desc *ilt_pages;
	u32 actul_dump_off = 0;
	u32 last_section_size;
	u32 full_dump_off = 0;
	u32 section_size = 0;
	bool continue_dump;
	u32 page_id;

	last_section_size = qed_dump_last_section(NULL, 0, false);
	/* Page sizes are 2^(p_size + min_bits), per the PXP ILT encoding */
	cduc_page_size = 1 <<
	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	cdut_page_size = 1 <<
	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
	continue_dump = dump;

	/* if need to dump then save memory for the last section
	 * (last section calculates CRC of dumped data)
	 */
	if (dump) {
		if (buf_size_in_dwords >= last_section_size) {
			buf_size_in_dwords -= last_section_size;
		} else {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	/* Dump global params */

	/* if need to dump then first check that there is enough memory
	 * in dumped buffer for this section calculate the size of this
	 * section without dumping. if there is not enough memory - then
	 * stop the dumping.
	 */
	if (continue_dump) {
		section_size =
			qed_ilt_dump_dump_common_global_params(p_hwfn,
							       p_ptt,
							       NULL,
							       false,
							       cduc_page_size,
							       conn_ctx_size,
							       cdut_page_size,
							       &full_dump_off,
							       &actul_dump_off);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	/* Even when continue_dump is false this is called (with dump=false)
	 * so that 'offset' keeps tracking the full (untruncated) dump size.
	 * The same pattern repeats for every section below.
	 */
	offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 continue_dump,
							 cduc_page_size,
							 conn_ctx_size,
							 cdut_page_size,
							 &full_dump_off,
							 &actul_dump_off);

	/* Dump section containing number of PF CIDs per connection type
	 * If need to dump then first check that there is enough memory in
	 * dumped buffer for this section.
	 */
	if (continue_dump) {
		section_size =
			qed_ilt_dump_dump_num_pf_cids(p_hwfn,
						      NULL,
						      false,
						      &valid_conn_pf_cids);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
						dump_buf + offset,
						continue_dump,
						&valid_conn_pf_cids);

	/* Dump section containing number of VF CIDs per connection type
	 * If need to dump then first check that there is enough memory in
	 * dumped buffer for this section.
	 */
	if (continue_dump) {
		section_size =
			qed_ilt_dump_dump_num_vf_cids(p_hwfn,
						      NULL,
						      false,
						      &valid_conn_vf_cids);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
						dump_buf + offset,
						continue_dump,
						&valid_conn_vf_cids);

	/* Dump section containing physical memory descriptors for each
	 * ILT page.
	 */
	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;

	/* If need to dump then first check that there is enough memory
	 * in dumped buffer for the section header.
	 */
	if (continue_dump) {
		section_size = qed_dump_section_hdr(NULL,
						    false,
						    "ilt_page_desc",
						    1) +
		    qed_dump_num_param(NULL,
				       false,
				       "size",
				       num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_dump_section_hdr(dump_buf + offset,
				       continue_dump, "ilt_page_desc", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     continue_dump,
				     "size",
				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);

	/* Copy memory descriptors to dump buffer
	 * If need to dump then dump till the dump buffer size
	 */
	if (continue_dump) {
		for (page_id = 0; page_id < num_pages;
		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
			if (continue_dump &&
			    (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
			     buf_size_in_dwords)) {
				memcpy(dump_buf + offset,
				       &ilt_pages[page_id],
				       DWORDS_TO_BYTES
				       (PAGE_MEM_DESC_SIZE_DWORDS));
			} else {
				/* Out of buffer - record the truncation point
				 * once; offset keeps advancing to measure the
				 * full size.
				 */
				if (continue_dump) {
					continue_dump = false;
					actual_dump_size_in_dwords = offset;
				}
			}
		}
	} else {
		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
	}

	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
					   num_cids_per_page);
	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
					   num_cids_per_page);

	/* Dump ILT pages IDs */
	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
				   valid_conn_pf_pages, valid_conn_vf_pages,
				   ilt_pages, true, buf_size_in_dwords,
				   &actual_dump_size_in_dwords);

	/* Dump ILT pages memory */
	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
				   valid_conn_pf_pages, valid_conn_vf_pages,
				   ilt_pages, false, buf_size_in_dwords,
				   &actual_dump_size_in_dwords);

	/* If the dump was never truncated, the actual size equals the full
	 * size (offset); otherwise use the recorded truncation point.
	 */
	real_dumped_size =
	    (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
	qed_dump_num_param(dump_buf + full_dump_off, dump,
			   "full-dump-size", offset + last_section_size);
	qed_dump_num_param(dump_buf + actul_dump_off,
			   dump,
			   "actual-dump-size",
			   real_dumped_size + last_section_size);

	/* Dump last section */
	real_dumped_size += qed_dump_last_section(dump_buf,
						  real_dumped_size, dump);

	return real_dumped_size;
}
5191 
5192 /***************************** Public Functions *******************************/
5193 
5194 enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
5195 				    const u8 * const bin_ptr)
5196 {
5197 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
5198 	u8 buf_id;
5199 
5200 	/* Convert binary data to debug arrays */
5201 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
5202 		qed_set_dbg_bin_buf(p_hwfn,
5203 				    buf_id,
5204 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
5205 				    buf_hdrs[buf_id].length);
5206 
5207 	return DBG_STATUS_OK;
5208 }
5209 
5210 static enum dbg_status qed_dbg_set_app_ver(u32 ver)
5211 {
5212 	if (ver < TOOLS_VERSION)
5213 		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
5214 
5215 	s_app_ver = ver;
5216 
5217 	return DBG_STATUS_OK;
5218 }
5219 
5220 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5221 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5222 {
5223 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5224 	u8 storm_id;
5225 
5226 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5227 		struct storm_defs *storm = &s_storm_defs[storm_id];
5228 
5229 		/* Skip Storm if it's in reset */
5230 		if (dev_data->block_in_reset[storm->sem_block_id])
5231 			continue;
5232 
5233 		/* Read FW info for the current Storm */
5234 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5235 
5236 		return true;
5237 	}
5238 
5239 	return false;
5240 }
5241 
/* Sets the value of a single GRC parameter, or - when the parameter is a
 * preset - applies its preset value to all non-persistent parameters.
 * Returns DBG_STATUS_INVALID_ARGS on an unknown parameter, out-of-range
 * value, or an attempt to disable a preset.
 */
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
				   enum dbg_grc_params grc_param, u32 val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);

	status = qed_dbg_dev_init(p_hwfn);
	if (status != DBG_STATUS_OK)
		return status;

	/* Initializes the GRC parameters (if not initialized). Needed in order
	 * to set the default parameter values for the first time.
	 */
	qed_dbg_grc_init_params(p_hwfn);

	/* Validate the parameter ID and the requested value range */
	if (grc_param >= MAX_DBG_GRC_PARAMS || grc_param < 0)
		return DBG_STATUS_INVALID_ARGS;
	if (val < s_grc_param_defs[grc_param].min ||
	    val > s_grc_param_defs[grc_param].max)
		return DBG_STATUS_INVALID_ARGS;

	if (s_grc_param_defs[grc_param].is_preset) {
		/* Preset param */

		/* Disabling a preset is not allowed. Call
		 * dbg_grc_set_params_default instead.
		 */
		if (!val)
			return DBG_STATUS_INVALID_ARGS;

		/* Update all params with the preset values */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
			struct grc_param_defs *defs = &s_grc_param_defs[i];
			u32 preset_val;
			/* Skip persistent params */
			if (defs->is_persistent)
				continue;

			/* Find preset value */
			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
				preset_val =
				    defs->exclude_all_preset_val;
			else if (grc_param == DBG_GRC_PARAM_CRASH)
				preset_val =
				    defs->crash_preset_val[dev_data->chip_id];
			else
				return DBG_STATUS_INVALID_ARGS;

			qed_grc_set_param(p_hwfn, i, preset_val);
		}
	} else {
		/* Regular param - set its value */
		qed_grc_set_param(p_hwfn, grc_param, val);
	}

	return DBG_STATUS_OK;
}
5304 
5305 /* Assign default GRC param values */
5306 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5307 {
5308 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5309 	u32 i;
5310 
5311 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5312 		if (!s_grc_param_defs[i].is_persistent)
5313 			dev_data->grc.param_val[i] =
5314 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5315 }
5316 
5317 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5318 					      struct qed_ptt *p_ptt,
5319 					      u32 *buf_size)
5320 {
5321 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5322 
5323 	*buf_size = 0;
5324 
5325 	if (status != DBG_STATUS_OK)
5326 		return status;
5327 
5328 	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5329 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5330 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5331 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5332 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5333 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5334 
5335 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5336 }
5337 
5338 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5339 				 struct qed_ptt *p_ptt,
5340 				 u32 *dump_buf,
5341 				 u32 buf_size_in_dwords,
5342 				 u32 *num_dumped_dwords)
5343 {
5344 	u32 needed_buf_size_in_dwords;
5345 	enum dbg_status status;
5346 
5347 	*num_dumped_dwords = 0;
5348 
5349 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5350 					       p_ptt,
5351 					       &needed_buf_size_in_dwords);
5352 	if (status != DBG_STATUS_OK)
5353 		return status;
5354 
5355 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5356 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5357 
5358 	/* Doesn't do anything, needed for compile time asserts */
5359 	qed_static_asserts();
5360 
5361 	/* GRC Dump */
5362 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5363 
5364 	/* Revert GRC params to their default */
5365 	qed_dbg_grc_set_params_default(p_hwfn);
5366 
5367 	return status;
5368 }
5369 
5370 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5371 						   struct qed_ptt *p_ptt,
5372 						   u32 *buf_size)
5373 {
5374 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5375 	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
5376 	enum dbg_status status;
5377 
5378 	*buf_size = 0;
5379 
5380 	status = qed_dbg_dev_init(p_hwfn);
5381 	if (status != DBG_STATUS_OK)
5382 		return status;
5383 
5384 	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5385 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5386 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5387 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5388 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5389 
5390 	if (!idle_chk->buf_size_set) {
5391 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5392 						       p_ptt, NULL, false);
5393 		idle_chk->buf_size_set = true;
5394 	}
5395 
5396 	*buf_size = idle_chk->buf_size;
5397 
5398 	return DBG_STATUS_OK;
5399 }
5400 
5401 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5402 				      struct qed_ptt *p_ptt,
5403 				      u32 *dump_buf,
5404 				      u32 buf_size_in_dwords,
5405 				      u32 *num_dumped_dwords)
5406 {
5407 	u32 needed_buf_size_in_dwords;
5408 	enum dbg_status status;
5409 
5410 	*num_dumped_dwords = 0;
5411 
5412 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5413 						    p_ptt,
5414 						    &needed_buf_size_in_dwords);
5415 	if (status != DBG_STATUS_OK)
5416 		return status;
5417 
5418 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5419 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5420 
5421 	/* Update reset state */
5422 	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5423 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5424 
5425 	/* Idle Check Dump */
5426 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5427 
5428 	/* Revert GRC params to their default */
5429 	qed_dbg_grc_set_params_default(p_hwfn);
5430 
5431 	return DBG_STATUS_OK;
5432 }
5433 
5434 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5435 						    struct qed_ptt *p_ptt,
5436 						    u32 *buf_size)
5437 {
5438 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5439 
5440 	*buf_size = 0;
5441 
5442 	if (status != DBG_STATUS_OK)
5443 		return status;
5444 
5445 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5446 }
5447 
5448 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5449 				       struct qed_ptt *p_ptt,
5450 				       u32 *dump_buf,
5451 				       u32 buf_size_in_dwords,
5452 				       u32 *num_dumped_dwords)
5453 {
5454 	u32 needed_buf_size_in_dwords;
5455 	enum dbg_status status;
5456 
5457 	status =
5458 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5459 						    p_ptt,
5460 						    &needed_buf_size_in_dwords);
5461 	if (status != DBG_STATUS_OK && status !=
5462 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5463 		return status;
5464 
5465 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5466 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5467 
5468 	/* Update reset state */
5469 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5470 
5471 	/* Perform dump */
5472 	status = qed_mcp_trace_dump(p_hwfn,
5473 				    p_ptt, dump_buf, true, num_dumped_dwords);
5474 
5475 	/* Revert GRC params to their default */
5476 	qed_dbg_grc_set_params_default(p_hwfn);
5477 
5478 	return status;
5479 }
5480 
5481 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5482 						   struct qed_ptt *p_ptt,
5483 						   u32 *buf_size)
5484 {
5485 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5486 
5487 	*buf_size = 0;
5488 
5489 	if (status != DBG_STATUS_OK)
5490 		return status;
5491 
5492 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5493 }
5494 
5495 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5496 				      struct qed_ptt *p_ptt,
5497 				      u32 *dump_buf,
5498 				      u32 buf_size_in_dwords,
5499 				      u32 *num_dumped_dwords)
5500 {
5501 	u32 needed_buf_size_in_dwords;
5502 	enum dbg_status status;
5503 
5504 	*num_dumped_dwords = 0;
5505 
5506 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5507 						    p_ptt,
5508 						    &needed_buf_size_in_dwords);
5509 	if (status != DBG_STATUS_OK)
5510 		return status;
5511 
5512 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5513 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5514 
5515 	/* Update reset state */
5516 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5517 
5518 	status = qed_reg_fifo_dump(p_hwfn,
5519 				   p_ptt, dump_buf, true, num_dumped_dwords);
5520 
5521 	/* Revert GRC params to their default */
5522 	qed_dbg_grc_set_params_default(p_hwfn);
5523 
5524 	return status;
5525 }
5526 
5527 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5528 						   struct qed_ptt *p_ptt,
5529 						   u32 *buf_size)
5530 {
5531 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5532 
5533 	*buf_size = 0;
5534 
5535 	if (status != DBG_STATUS_OK)
5536 		return status;
5537 
5538 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5539 }
5540 
5541 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5542 				      struct qed_ptt *p_ptt,
5543 				      u32 *dump_buf,
5544 				      u32 buf_size_in_dwords,
5545 				      u32 *num_dumped_dwords)
5546 {
5547 	u32 needed_buf_size_in_dwords;
5548 	enum dbg_status status;
5549 
5550 	*num_dumped_dwords = 0;
5551 
5552 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5553 						    p_ptt,
5554 						    &needed_buf_size_in_dwords);
5555 	if (status != DBG_STATUS_OK)
5556 		return status;
5557 
5558 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5559 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5560 
5561 	/* Update reset state */
5562 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5563 
5564 	status = qed_igu_fifo_dump(p_hwfn,
5565 				   p_ptt, dump_buf, true, num_dumped_dwords);
5566 	/* Revert GRC params to their default */
5567 	qed_dbg_grc_set_params_default(p_hwfn);
5568 
5569 	return status;
5570 }
5571 
5572 enum dbg_status
5573 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5574 					      struct qed_ptt *p_ptt,
5575 					      u32 *buf_size)
5576 {
5577 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5578 
5579 	*buf_size = 0;
5580 
5581 	if (status != DBG_STATUS_OK)
5582 		return status;
5583 
5584 	return qed_protection_override_dump(p_hwfn,
5585 					    p_ptt, NULL, false, buf_size);
5586 }
5587 
5588 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5589 						 struct qed_ptt *p_ptt,
5590 						 u32 *dump_buf,
5591 						 u32 buf_size_in_dwords,
5592 						 u32 *num_dumped_dwords)
5593 {
5594 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5595 	enum dbg_status status;
5596 
5597 	*num_dumped_dwords = 0;
5598 
5599 	status =
5600 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5601 							      p_ptt,
5602 							      p_size);
5603 	if (status != DBG_STATUS_OK)
5604 		return status;
5605 
5606 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5607 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5608 
5609 	/* Update reset state */
5610 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5611 
5612 	status = qed_protection_override_dump(p_hwfn,
5613 					      p_ptt,
5614 					      dump_buf,
5615 					      true, num_dumped_dwords);
5616 
5617 	/* Revert GRC params to their default */
5618 	qed_dbg_grc_set_params_default(p_hwfn);
5619 
5620 	return status;
5621 }
5622 
5623 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5624 						     struct qed_ptt *p_ptt,
5625 						     u32 *buf_size)
5626 {
5627 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5628 
5629 	*buf_size = 0;
5630 
5631 	if (status != DBG_STATUS_OK)
5632 		return status;
5633 
5634 	/* Update reset state */
5635 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5636 
5637 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5638 
5639 	return DBG_STATUS_OK;
5640 }
5641 
5642 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5643 					struct qed_ptt *p_ptt,
5644 					u32 *dump_buf,
5645 					u32 buf_size_in_dwords,
5646 					u32 *num_dumped_dwords)
5647 {
5648 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5649 	enum dbg_status status;
5650 
5651 	*num_dumped_dwords = 0;
5652 
5653 	status =
5654 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5655 						     p_ptt,
5656 						     p_size);
5657 	if (status != DBG_STATUS_OK)
5658 		return status;
5659 
5660 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5661 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5662 
5663 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5664 
5665 	/* Revert GRC params to their default */
5666 	qed_dbg_grc_set_params_default(p_hwfn);
5667 
5668 	return DBG_STATUS_OK;
5669 }
5670 
5671 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5672 						     struct qed_ptt *p_ptt,
5673 						     u32 *buf_size)
5674 {
5675 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5676 
5677 	*buf_size = 0;
5678 
5679 	if (status != DBG_STATUS_OK)
5680 		return status;
5681 
5682 	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
5683 
5684 	return DBG_STATUS_OK;
5685 }
5686 
/* Performs an ILT dump into the specified buffer.
 * Unlike the other dump APIs there is no buffer-size pre-check here:
 * qed_ilt_dump() itself truncates the dump to fit buf_size_in_dwords.
 */
static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	*num_dumped_dwords = qed_ilt_dump(p_hwfn,
					  p_ptt,
					  dump_buf, buf_size_in_dwords, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5702 
/* Reads the attention registers of the specified block and attention type,
 * and fills 'results' with every register whose status value is non-zero.
 * When 'clear_status' is set, the clear-on-read status address is used, so
 * the read also clears the attention bits.
 */
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  enum block_id block_id,
				  enum dbg_attn_type attn_type,
				  bool clear_status,
				  struct dbg_attn_block_result *results)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	/* The attention-related binary debug arrays must be present */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
					       block_id,
					       attn_type, &num_attn_regs);

	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode */
		eval_mode = GET_FIELD(reg_data->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register */
		sts_addr = DWORDS_TO_BYTES(clear_status ?
					   reg_data->sts_clr_address :
					   GET_FIELD(reg_data->data,
						     DBG_ATTN_REG_STS_ADDRESS));
		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		/* Also capture the current mask register value */
		reg_result->mask_val = qed_rd(p_hwfn,
					      p_ptt,
					      DWORDS_TO_BYTES
					      (reg_data->mask_address));
		num_result_regs++;
	}

	results->block_id = (u8)block_id;
	results->names_offset =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data,
		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
5775 
5776 /******************************* Data Types **********************************/
5777 
/* REG fifo element - a 64-bit entry whose bit-fields are given by the
 * SHIFT/MASK pairs below (bit ranges follow from the shift and mask values).
 */
struct reg_fifo_element {
	u64 data;
/* Bits 0-22: accessed address */
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
/* Bit 23: access */
#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
/* Bits 24-27: PF */
#define REG_FIFO_ELEMENT_PF_SHIFT		24
#define REG_FIFO_ELEMENT_PF_MASK		0xf
/* Bits 28-35: VF */
#define REG_FIFO_ELEMENT_VF_SHIFT		28
#define REG_FIFO_ELEMENT_VF_MASK		0xff
/* Bits 36-37: port */
#define REG_FIFO_ELEMENT_PORT_SHIFT		36
#define REG_FIFO_ELEMENT_PORT_MASK		0x3
/* Bits 38-39: privilege */
#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
/* Bits 40-42: protection */
#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
/* Bits 43-46: master */
#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
/* Bits 47-51: error */
#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
};
5800 
/* REG fifo error element - maps an error code to its textual description */
struct reg_fifo_err {
	u32 err_code;		/* error code value */
	const char *err_msg;	/* human-readable error message */
};
5806 
/* IGU fifo element. DWORD0 fields are decoded from dword0; the DWORD12
 * fields are presumably decoded from the dword1/dword2 pair (WR_DATA is
 * 32 bits starting at bit 1) - confirm against the parsing code.
 */
struct igu_fifo_element {
	u32 dword0;
/* Bits 0-7: FID */
#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
/* Bit 8: is-PF */
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
/* Bits 9-12: source */
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
/* Bits 13-16: error type */
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
/* Bits 17-31: command address */
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
	u32 dword1;
	u32 dword2;
/* Bit 0: is-write-command */
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
/* Bits 1-32: write data */
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
	u32 reserved;
};
5828 
/* Decoded IGU fifo write data (regular, non-cleanup command) */
struct igu_fifo_wr_data {
	u32 data;
/* Bits 0-23: producer/consumer value */
#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
/* Bit 24: update flag */
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
/* Bits 25-26: enable/disable interrupt for SB */
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
/* Bit 27: segment */
#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
/* Bit 28: timer mask */
#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
/* Bit 31: command type (bits 29-30 are unused) */
#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
};
5844 
/* Decoded IGU fifo write data for a cleanup command */
struct igu_fifo_cleanup_wr_data {
	u32 data;
/* Bits 0-26: reserved */
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
/* Bit 27: cleanup value */
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
/* Bits 28-30: cleanup type */
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
/* Bit 31: command type */
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
};
5856 
/* Protection override element - a 64-bit entry whose bit-fields are given
 * by the SHIFT/MASK pairs below.
 */
struct protection_override_element {
	u64 data;
/* Bits 0-22: address */
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
/* Bits 23-46: window size */
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
/* Bit 47: read */
#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
/* Bit 48: write */
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
/* Bits 49-51: read protection */
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
/* Bits 52-54: write protection */
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
};
5873 
/* IGU FIFO command source IDs. One display string per value exists in
 * s_igu_fifo_source_strs; keep the two in sync.
 */
enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC
};
5887 
/* Classification of IGU command addresses; used by the entries of
 * s_igu_fifo_addr_data to describe each address range.
 */
enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};
5897 
/* Describes a single IGU address range and how to present it */
struct igu_fifo_addr_data {
	u16 start_addr;	/* first address in the range (inclusive) */
	u16 end_addr;	/* last address in the range (inclusive) */
	char *desc;	/* description of the range */
	char *vf_desc;	/* VF-specific description; NULL when not applicable -
			 * TODO(review): confirm NULL means "use desc"
			 */
	enum igu_fifo_addr_types type;
};
5905 
5906 /******************************** Constants **********************************/
5907 
5908 #define MAX_MSG_LEN				1024
5909 
5910 #define MCP_TRACE_MAX_MODULE_LEN		8
5911 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5912 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5913 	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
5914 
5915 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5916 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5917 
5918 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5919 
5920 /***************************** Constant Arrays *******************************/
5921 
/* Status string array - one human-readable message per debug status code.
 * Indexed by enum dbg_status; each entry's comment names the matching enum
 * value, and the order here must be kept in sync with that enum.
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
	"The filter/trigger constraint dword offsets are not enabled for recording",

	/* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
	"No matching framing mode",

	/* DBG_STATUS_VFC_READ_ERROR */
	"Error reading from VFC",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
	"Filters and triggers are not allowed in E4 256-bit mode",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */
	"Unknown chip",

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED0 */
	"",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_RESERVED1 */
	"",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
	"Insufficient HW IDs. Try to record less Storms/blocks",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use",

	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
	"The storm debug mode is not supported in the current chip",

	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
	"Other engine is supported only in BB",

	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
	"The configured filter mode requires a single Storm/block input",

	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",

	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
	"When triggering on Storm data, the Storm to trigger on must be specified",

	/* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
	"Failed to request MDUMP2 Offsize",

	/* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
	"Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",

	/* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
	"Invalid Signature found at start of MDUMP2",

	/* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
	"Invalid Log Size of MDUMP2",

	/* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
	"Invalid Log Header of MDUMP2",

	/* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
	"Invalid Log Data of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
	"Could not extract number of ports from regval buf of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
	"Could not extract MFW (link) status from regval buf of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
	"Could not display linkdump of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
	"Could not read PHY CFG of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
	"Could not read PLL Mode of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
	"Could not read TSCF/TSCE Lane Regs of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
	"Could not allocate MDUMP2 reg-val internal buffer"
};
6139 
/* Idle check severity names array. Indexed by the dumped rule header's
 * severity field (see qed_parse_idle_chk_dump_rules); order must match the
 * IDLE_CHK_SEVERITY_* enum values.
 */
static const char * const s_idle_chk_severity_str[] = {
	"Error",
	"Error if no traffic",
	"Warning"
};
6146 
/* MCP Trace level names array - presumably indexed by the trace entry's
 * level field (consumer not visible here).
 */
static const char * const s_mcp_trace_level_str[] = {
	"ERROR",
	"TRACE",
	"DEBUG"
};
6153 
/* Access type names array (0 = read, 1 = write) */
static const char * const s_access_strs[] = {
	"read",
	"write"
};
6159 
/* Privilege type names array - presumably indexed by a 2-bit privilege
 * field of the reg FIFO element (consumer not visible here).
 */
static const char * const s_privilege_strs[] = {
	"VF",
	"PDA",
	"HV",
	"UA"
};
6167 
/* Protection type names array - first four values print as "(default)",
 * the upper four as explicit overrides; presumably indexed by a 3-bit
 * protection field (consumer not visible here).
 */
static const char * const s_protection_strs[] = {
	"(default)",
	"(default)",
	"(default)",
	"(default)",
	"override VF",
	"override PDA",
	"override HV",
	"override UA"
};
6179 
/* Master type names array - 16 entries, presumably indexed by a 4-bit
 * master ID field; "???" marks IDs with no known master.
 */
static const char * const s_master_strs[] = {
	"???",
	"pxp",
	"mcp",
	"msdm",
	"psdm",
	"ysdm",
	"usdm",
	"tsdm",
	"xsdm",
	"dbu",
	"dmae",
	"jdap",
	"???",
	"???",
	"???",
	"???"
};
6199 
/* REG FIFO error messages array: {error value, message} pairs.
 * NOTE(review): values 1/2/4/8/16 look like single-bit flags while 17 does
 * not fit that pattern - the lookup logic lives in the reg FIFO parser
 * (not shown here); confirm the matching scheme before changing values.
 */
static struct reg_fifo_err s_reg_fifo_errors[] = {
	{1, "grc timeout"},
	{2, "address doesn't belong to any block"},
	{4, "reserved address in block or write to read-only address"},
	{8, "privilege/protection mismatch"},
	{16, "path isolation error"},
	{17, "RSL error"}
};
6209 
/* IGU FIFO sources array - one display string per enum igu_fifo_sources
 * value (IGU_SRC_PXP0..IGU_SRC_GRC); keep the order in sync with the enum.
 */
static const char * const s_igu_fifo_source_strs[] = {
	"TSTORM",
	"MSTORM",
	"USTORM",
	"XSTORM",
	"YSTORM",
	"PSTORM",
	"PCIE",
	"NIG_QM_PBF",
	"CAU",
	"ATTN",
	"GRC",
};
6224 
/* IGU FIFO error messages - presumably indexed by the IGU FIFO element's
 * error-type field (consumer not visible here).
 */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
6243 
/* IGU FIFO address data - maps IGU command address ranges (inclusive on
 * both ends, sorted and contiguous from 0x0 to 0x7ff) to descriptions and
 * address types. A non-NULL vf_desc overrides desc for VF commands.
 */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL,
	 IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL,
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL,
	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
6279 
6280 /******************************** Variables **********************************/
6281 
6282 /* Temporary buffer, used for print size calculations */
6283 static char s_temp_buf[MAX_MSG_LEN];
6284 
6285 /**************************** Private Functions ******************************/
6286 
6287 static void qed_user_static_asserts(void)
6288 {
6289 }
6290 
/* Advances position 'a' by 'b' slots within a cyclic buffer of the given
 * size, wrapping around the end.
 */
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
	u32 advanced = a + b;

	return advanced % size;
}
6295 
/* Moves position 'a' backwards by 'b' slots within a cyclic buffer of the
 * given size; 'size' is added first so the subtraction never goes negative
 * (assumes a, b < size).
 */
static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
{
	u32 shifted_up = size + a;

	return (shifted_up - b) % size;
}
6300 
/* Reads the specified number of bytes from the specified cyclic buffer (up
 * to 4 bytes) and returns them as a dword value. The specified buffer
 * offset is updated, wrapping around the end of the buffer.
 */
static u32 qed_read_from_cyclic_buf(void *buf,
				    u32 *offset,
				    u32 buf_size, u8 num_bytes_to_read)
{
	u8 *src_bytes = (u8 *)buf;
	u32 val = 0;
	u8 *dst_bytes;
	u8 byte_idx;

	dst_bytes = (u8 *)&val;

	/* Copy byte by byte. Assume running on a LITTLE ENDIAN host while
	 * the buffer is network order (BIG ENDIAN), as high order bytes are
	 * placed in lower memory addresses.
	 */
	for (byte_idx = 0; byte_idx < num_bytes_to_read; byte_idx++) {
		dst_bytes[byte_idx] = src_bytes[*offset];
		*offset = (*offset + 1) % buf_size;
	}

	return val;
}
6324 
/* Reads and returns the next byte from the specified buffer.
 * The specified buffer offset is advanced by one.
 */
static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
{
	u8 *bytes = (u8 *)buf;
	u8 byte_val = bytes[*offset];

	*offset += 1;

	return byte_val;
}
6332 
/* Reads and returns the next dword from the specified buffer.
 * The specified buffer offset is advanced by 4 bytes.
 *
 * The dword is copied with memcpy rather than read through a cast u32
 * pointer: the buffer is a byte stream, so the dword at *offset may be
 * misaligned, and dereferencing a misaligned u32 pointer is undefined
 * behavior (unaligned access / strict aliasing). The returned value is
 * identical on all platforms where the old code happened to work.
 */
static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
{
	u32 dword_val;

	memcpy(&dword_val, (u8 *)buf + *offset, sizeof(dword_val));
	*offset += 4;

	return dword_val;
}
6344 
/* Reads the next string from the specified buffer, and copies it to the
 * specified pointer. The specified buffer offset is advanced by 'size'.
 *
 * A zero size is rejected up front: with size == 0 the termination write
 * dest[size - 1] would index dest[0xffffffff] (u32 wrap-around) and corrupt
 * memory. 'size' originates from externally-read dump/NVRAM data (see
 * qed_mcp_trace_alloc_meta_data), so 0 is reachable.
 */
static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
	const char *source_str = &((const char *)buf)[*offset];

	if (size) {
		strncpy(dest, source_str, size);
		/* strncpy does not terminate when the source is longer than
		 * 'size', so force NUL-termination.
		 */
		dest[size - 1] = '\0';
	}
	*offset += size;
}
6356 
6357 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6358  * If the specified buffer in NULL, a temporary buffer pointer is returned.
6359  */
6360 static char *qed_get_buf_ptr(void *buf, u32 offset)
6361 {
6362 	return buf ? (char *)buf + offset : s_temp_buf;
6363 }
6364 
/* Reads a param from the specified buffer. Returns the number of dwords
 * read. If the returned param_str_val is NULL, the param is numeric and its
 * value is returned in param_num_val. Otherwise, the param is a string and
 * its pointer is returned in param_str_val.
 *
 * Buffer layout: NUL-terminated name, one type byte, then either a
 * NUL-terminated string value or a dword-aligned numeric value; the total
 * size consumed is rounded up to a whole number of dwords.
 */
static u32 qed_read_param(u32 *dump_buf,
			  const char **param_name,
			  const char **param_str_val, u32 *param_num_val)
{
	char *buf_bytes = (char *)dump_buf;
	size_t pos;

	/* The param name is the NUL-terminated string at the start */
	*param_name = buf_bytes;
	pos = strlen(buf_bytes) + 1;

	/* The byte following the name selects the param type */
	if (buf_bytes[pos++]) {
		/* String param: value is the next NUL-terminated string */
		*param_num_val = 0;
		*param_str_val = buf_bytes + pos;
		pos += strlen(buf_bytes + pos) + 1;
		pos = (pos + 3) & ~(size_t)0x3;
	} else {
		/* Numeric param: value is the next dword-aligned u32 */
		*param_str_val = NULL;
		pos = (pos + 3) & ~(size_t)0x3;
		*param_num_val = *(u32 *)(buf_bytes + pos);
		pos += 4;
	}

	return (u32)(pos / 4);
}
6400 
/* Reads a section header from the specified buffer.
 * Returns the number of dwords read.
 *
 * A section header is simply a param whose string value is discarded; the
 * param name is the section name and its numeric value is the number of
 * section params that follow.
 */
static u32 qed_read_section_hdr(u32 *dump_buf,
				const char **section_name,
				u32 *num_section_params)
{
	const char *unused_str_val;

	return qed_read_param(dump_buf,
			      section_name,
			      &unused_str_val, num_section_params);
}
6413 
/* Reads section params from the specified buffer and prints them to the
 * results buffer. Returns the number of dwords read.
 * The number of characters printed is stored in *num_chars_printed.
 */
static u32 qed_print_section_params(u32 *dump_buf,
				    u32 num_section_params,
				    char *results_buf, u32 *num_chars_printed)
{
	u32 dump_offset = 0, results_offset = 0, i;

	for (i = 0; i < num_section_params; i++) {
		const char *param_name, *param_str_val;
		u32 param_num_val = 0;

		dump_offset += qed_read_param(dump_buf + dump_offset,
					      &param_name,
					      &param_str_val, &param_num_val);

		if (param_str_val) {
			/* String param */
			results_offset +=
				sprintf(qed_get_buf_ptr(results_buf,
							results_offset),
					"%s: %s\n", param_name, param_str_val);
		} else if (strcmp(param_name, "fw-timestamp")) {
			/* Numeric param; fw-timestamp is intentionally not
			 * printed.
			 */
			results_offset +=
				sprintf(qed_get_buf_ptr(results_buf,
							results_offset),
					"%s: %d\n", param_name, param_num_val);
		}
	}

	/* Terminate the section with an empty line */
	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
				  "\n");

	*num_chars_printed = results_offset;

	return dump_offset;
}
6450 
6451 /* Returns the block name that matches the specified block ID,
6452  * or NULL if not found.
6453  */
6454 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6455 					  enum block_id block_id)
6456 {
6457 	const struct dbg_block_user *block =
6458 	    (const struct dbg_block_user *)
6459 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6460 
6461 	return (const char *)block->name;
6462 }
6463 
6464 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6465 							 *p_hwfn)
6466 {
6467 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6468 }
6469 
/* Parses the idle check rules and returns the number of characters printed.
 * In case of parsing error, returns 0.
 *
 * dump_buf / dump_buf_end - bounds of the dumped rule results to parse.
 * num_rules - number of dumped rules to process.
 * print_fw_idle_chk - when a rule has both an LSI and a FW message, selects
 *		       which one to print.
 * results_buf - output text buffer; may be NULL (size calculation only).
 * num_errors / num_warnings - out counters, always written.
 */
static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf,
					 u32 *dump_buf_end,
					 u32 num_rules,
					 bool print_fw_idle_chk,
					 char *results_buf,
					 u32 *num_errors, u32 *num_warnings)
{
	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	u32 rule_idx;
	u16 i, j;

	*num_errors = 0;
	*num_warnings = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
	     rule_idx++) {
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str, *lsi_msg;
		u32 parsing_str_offset;
		bool has_fw_msg;
		u8 curr_reg_id;

		/* Look up this rule's parsing data by the dumped rule ID */
		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
		rule_parsing_data =
		    (const struct dbg_idle_chk_rule_parsing_data *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
		    hdr->rule_id;
		parsing_str_offset =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		has_fw_msg =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		/* parsing_str points at a sequence of NUL-terminated strings:
		 * the LSI message, then (if has_fw_msg) the FW message, then
		 * the register names used further below.
		 */
		parsing_str = (const char *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
		    parsing_str_offset;
		lsi_msg = parsing_str;
		curr_reg_id = 0;

		/* An out-of-range severity indicates corrupt dump data */
		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
			return 0;

		/* Skip rule header */
		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
			(*num_errors)++;
		else
			(*num_warnings)++;

		/* Print rule severity */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s: ",
			    s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message: the FW message (if present and
		 * requested), otherwise the LSI message.
		 */
		if (has_fw_msg)
			parsing_str += strlen(parsing_str) + 1;
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s.",
			    has_fw_msg &&
			    print_fw_idle_chk ? parsing_str : lsi_msg);
		/* Advance past the message to the register-name strings */
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), " Registers:");
		for (i = 0;
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
		     i++) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool is_mem;
			u8 reg_id;

			reg_hdr =
				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
			is_mem = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			reg_id = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));

			/* Skip register names until the required reg_id is
			 * reached.
			 */
			for (; reg_id > curr_reg_id; curr_reg_id++)
				parsing_str += strlen(parsing_str) + 1;

			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), " %s",
				    parsing_str);
			/* For memory entries, print the absolute entry index */
			if (i < hdr->num_dumped_cond_regs && is_mem)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "[%d]", hdr->mem_entry_id +
					    reg_hdr->start_entry);
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "=");
			/* Print the dumped dwords, comma-separated */
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "0x%x", *dump_buf);
				if (j < reg_hdr->size - 1)
					results_offset +=
					    sprintf(qed_get_buf_ptr
						    (results_buf,
						     results_offset), ",");
			}
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)
		return 0;

	return results_offset;
}
6609 
/* Parses an idle check dump buffer.
 * If results_buf is not NULL, the idle check results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 *
 * The dump contains a "global_params" section followed by an "idle_chk"
 * section whose rule results are printed twice: once with the FW messages
 * and once with the LSI messages.
 */
static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes,
					       u32 *num_errors,
					       u32 *num_warnings)
{
	u32 num_section_params = 0, num_rules, num_rules_not_dumped;
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;

	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	*parsed_results_bytes = 0;
	*num_errors = 0;
	*num_warnings = 0;

	/* Parsing requires both binary debug arrays to be loaded */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read idle_chk section
	 * There may be 1 or 2 idle_chk section parameters:
	 * - 1st is "num_rules"
	 * - 2nd is "num_rules_not_dumped" (optional)
	 */

	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") ||
	    (num_section_params != 2 && num_section_params != 1))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	if (num_section_params > 1) {
		dump_buf += qed_read_param(dump_buf,
					   &param_name,
					   &param_str_val,
					   &num_rules_not_dumped);
		if (strcmp(param_name, "num_rules_not_dumped"))
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	} else {
		num_rules_not_dumped = 0;
	}

	if (num_rules) {
		u32 rules_print_size;

		/* Print FW output. Note: both passes parse the same dumped
		 * rules; a zero return indicates a parse failure.
		 */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "FW_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      true,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nLSI_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      false,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	}

	/* Print errors/warnings count */
	if (*num_errors)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			    *num_errors, *num_warnings);
	else if (*num_warnings)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully (with %d warnings)\n",
			    *num_warnings);
	else
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully\n");

	if (num_rules_not_dumped)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
			    num_rules_not_dumped);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6753 
6754 /* Allocates and fills MCP Trace meta data based on the specified meta data
6755  * dump buffer.
6756  * Returns debug status code.
6757  */
6758 static enum dbg_status
6759 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6760 			      const u32 *meta_buf)
6761 {
6762 	struct dbg_tools_user_data *dev_user_data;
6763 	u32 offset = 0, signature, i;
6764 	struct mcp_trace_meta *meta;
6765 	u8 *meta_buf_bytes;
6766 
6767 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6768 	meta = &dev_user_data->mcp_trace_meta;
6769 	meta_buf_bytes = (u8 *)meta_buf;
6770 
6771 	/* Free the previous meta before loading a new one. */
6772 	if (meta->is_allocated)
6773 		qed_mcp_trace_free_meta_data(p_hwfn);
6774 
6775 	memset(meta, 0, sizeof(*meta));
6776 
6777 	/* Read first signature */
6778 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6779 	if (signature != NVM_MAGIC_VALUE)
6780 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6781 
6782 	/* Read no. of modules and allocate memory for their pointers */
6783 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6784 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6785 				GFP_KERNEL);
6786 	if (!meta->modules)
6787 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6788 
6789 	/* Allocate and read all module strings */
6790 	for (i = 0; i < meta->modules_num; i++) {
6791 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6792 
6793 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6794 		if (!(*(meta->modules + i))) {
6795 			/* Update number of modules to be released */
6796 			meta->modules_num = i ? i - 1 : 0;
6797 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6798 		}
6799 
6800 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6801 				      *(meta->modules + i));
6802 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6803 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6804 	}
6805 
6806 	/* Read second signature */
6807 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6808 	if (signature != NVM_MAGIC_VALUE)
6809 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6810 
6811 	/* Read number of formats and allocate memory for all formats */
6812 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6813 	meta->formats = kcalloc(meta->formats_num,
6814 				sizeof(struct mcp_trace_format),
6815 				GFP_KERNEL);
6816 	if (!meta->formats)
6817 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6818 
6819 	/* Allocate and read all strings */
6820 	for (i = 0; i < meta->formats_num; i++) {
6821 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6822 		u8 format_len;
6823 
6824 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6825 							   &offset);
6826 		format_len = GET_MFW_FIELD(format_ptr->data,
6827 					   MCP_TRACE_FORMAT_LEN);
6828 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6829 		if (!format_ptr->format_str) {
6830 			/* Update number of modules to be released */
6831 			meta->formats_num = i ? i - 1 : 0;
6832 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6833 		}
6834 
6835 		qed_read_str_from_buf(meta_buf_bytes,
6836 				      &offset,
6837 				      format_len, format_ptr->format_str);
6838 	}
6839 
6840 	meta->is_allocated = true;
6841 	return DBG_STATUS_OK;
6842 }
6843 
6844 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6845  * are printed to it. The parsing status is returned.
6846  * Arguments:
6847  * trace_buf - MCP trace cyclic buffer
6848  * trace_buf_size - MCP trace cyclic buffer size in bytes
6849  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6850  *		 buffer.
6851  * data_size - size in bytes of data to parse.
6852  * parsed_buf - destination buffer for parsed data.
6853  * parsed_results_bytes - size of parsed data in bytes.
6854  */
6855 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6856 					       u8 *trace_buf,
6857 					       u32 trace_buf_size,
6858 					       u32 data_offset,
6859 					       u32 data_size,
6860 					       char *parsed_buf,
6861 					       u32 *parsed_results_bytes)
6862 {
6863 	struct dbg_tools_user_data *dev_user_data;
6864 	struct mcp_trace_meta *meta;
6865 	u32 param_mask, param_shift;
6866 	enum dbg_status status;
6867 
6868 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6869 	meta = &dev_user_data->mcp_trace_meta;
6870 	*parsed_results_bytes = 0;
6871 
6872 	if (!meta->is_allocated)
6873 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6874 
6875 	status = DBG_STATUS_OK;
6876 
6877 	while (data_size) {
6878 		struct mcp_trace_format *format_ptr;
6879 		u8 format_level, format_module;
6880 		u32 params[3] = { 0, 0, 0 };
6881 		u32 header, format_idx, i;
6882 
6883 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6884 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6885 
6886 		header = qed_read_from_cyclic_buf(trace_buf,
6887 						  &data_offset,
6888 						  trace_buf_size,
6889 						  MFW_TRACE_ENTRY_SIZE);
6890 		data_size -= MFW_TRACE_ENTRY_SIZE;
6891 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6892 
6893 		/* Skip message if its index doesn't exist in the meta data */
6894 		if (format_idx >= meta->formats_num) {
6895 			u8 format_size = (u8)GET_MFW_FIELD(header,
6896 							   MFW_TRACE_PRM_SIZE);
6897 
6898 			if (data_size < format_size)
6899 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6900 
6901 			data_offset = qed_cyclic_add(data_offset,
6902 						     format_size,
6903 						     trace_buf_size);
6904 			data_size -= format_size;
6905 			continue;
6906 		}
6907 
6908 		format_ptr = &meta->formats[format_idx];
6909 
6910 		for (i = 0,
6911 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6912 		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6913 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6914 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6915 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6916 			/* Extract param size (0..3) */
6917 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6918 					     param_shift);
6919 
6920 			/* If the param size is zero, there are no other
6921 			 * parameters.
6922 			 */
6923 			if (!param_size)
6924 				break;
6925 
6926 			/* Size is encoded using 2 bits, where 3 is used to
6927 			 * encode 4.
6928 			 */
6929 			if (param_size == 3)
6930 				param_size = 4;
6931 
6932 			if (data_size < param_size)
6933 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6934 
6935 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6936 							     &data_offset,
6937 							     trace_buf_size,
6938 							     param_size);
6939 			data_size -= param_size;
6940 		}
6941 
6942 		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6943 						 MCP_TRACE_FORMAT_LEVEL);
6944 		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6945 						  MCP_TRACE_FORMAT_MODULE);
6946 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6947 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6948 
6949 		/* Print current message to results buffer */
6950 		*parsed_results_bytes +=
6951 			sprintf(qed_get_buf_ptr(parsed_buf,
6952 						*parsed_results_bytes),
6953 				"%s %-8s: ",
6954 				s_mcp_trace_level_str[format_level],
6955 				meta->modules[format_module]);
6956 		*parsed_results_bytes +=
6957 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6958 			    format_ptr->format_str,
6959 			    params[0], params[1], params[2]);
6960 	}
6961 
6962 	/* Add string NULL terminator */
6963 	(*parsed_results_bytes)++;
6964 
6965 	return status;
6966 }
6967 
6968 /* Parses an MCP Trace dump buffer.
6969  * If result_buf is not NULL, the MCP Trace results are printed to it.
6970  * In any case, the required results buffer size is assigned to
6971  * parsed_results_bytes.
6972  * The parsing status is returned.
6973  */
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
						u32 *dump_buf,
						char *results_buf,
						u32 *parsed_results_bytes,
						bool free_meta_data)
{
	const char *section_name, *param_name, *param_str_val;
	u32 data_size, trace_data_dwords, trace_meta_dwords;
	/* results_offset is written by qed_print_section_params below -
	 * presumably it is always assigned there before being read; verify
	 * against that helper (NOTE(review)).
	 */
	u32 offset, results_offset, results_buf_bytes;
	u32 param_num_val, num_section_params;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;
	u8 *trace_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read trace_data section. It must carry exactly one param: the
	 * trace data size in dwords.
	 */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info: the section payload starts with a struct
	 * mcp_trace header followed by the cyclic trace buffer itself.
	 */
	trace = (struct mcp_trace *)dump_buf;
	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	trace_buf = (u8 *)dump_buf + sizeof(*trace);
	offset = trace->trace_oldest;
	/* Number of valid bytes between oldest and producer in the cyclic
	 * buffer.
	 */
	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer: embedded in the dump if present,
	 * otherwise fall back to the user-supplied meta image.
	 */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data */
		struct dbg_tools_user_data *dev_user_data =
			qed_dbg_get_user_data(p_hwfn);

		if (!dev_user_data->mcp_trace_user_meta_buf)
			return DBG_STATUS_MCP_TRACE_NO_META;

		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
	} else {
		/* Dump includes meta data */
		meta_buf = dump_buf;
	}

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
	if (status != DBG_STATUS_OK)
		return status;

	/* Decode the cyclic buffer; results (if requested) are appended
	 * after the already-printed global params.
	 */
	status = qed_parse_mcp_trace_buf(p_hwfn,
					 trace_buf,
					 trace->size,
					 offset,
					 data_size,
					 results_buf ?
					 results_buf + results_offset :
					 NULL,
					 &results_buf_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	if (free_meta_data)
		qed_mcp_trace_free_meta_data(p_hwfn);

	*parsed_results_bytes = results_offset + results_buf_bytes;

	return DBG_STATUS_OK;
}
7073 
7074 /* Parses a Reg FIFO dump buffer.
7075  * If result_buf is not NULL, the Reg FIFO results are printed to it.
7076  * In any case, the required results buffer size is assigned to
7077  * parsed_results_bytes.
7078  * The parsing status is returned.
7079  */
7080 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
7081 					       char *results_buf,
7082 					       u32 *parsed_results_bytes)
7083 {
7084 	const char *section_name, *param_name, *param_str_val;
7085 	u32 param_num_val, num_section_params, num_elements;
7086 	struct reg_fifo_element *elements;
7087 	u8 i, j, err_code, vf_val;
7088 	u32 results_offset = 0;
7089 	char vf_str[4];
7090 
7091 	/* Read global_params section */
7092 	dump_buf += qed_read_section_hdr(dump_buf,
7093 					 &section_name, &num_section_params);
7094 	if (strcmp(section_name, "global_params"))
7095 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7096 
7097 	/* Print global params */
7098 	dump_buf += qed_print_section_params(dump_buf,
7099 					     num_section_params,
7100 					     results_buf, &results_offset);
7101 
7102 	/* Read reg_fifo_data section */
7103 	dump_buf += qed_read_section_hdr(dump_buf,
7104 					 &section_name, &num_section_params);
7105 	if (strcmp(section_name, "reg_fifo_data"))
7106 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7107 	dump_buf += qed_read_param(dump_buf,
7108 				   &param_name, &param_str_val, &param_num_val);
7109 	if (strcmp(param_name, "size"))
7110 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7111 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
7112 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7113 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
7114 	elements = (struct reg_fifo_element *)dump_buf;
7115 
7116 	/* Decode elements */
7117 	for (i = 0; i < num_elements; i++) {
7118 		const char *err_msg = NULL;
7119 
7120 		/* Discover if element belongs to a VF or a PF */
7121 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
7122 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
7123 			sprintf(vf_str, "%s", "N/A");
7124 		else
7125 			sprintf(vf_str, "%d", vf_val);
7126 
7127 		/* Find error message */
7128 		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
7129 		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
7130 			if (err_code == s_reg_fifo_errors[j].err_code)
7131 				err_msg = s_reg_fifo_errors[j].err_msg;
7132 
7133 		/* Add parsed element to parsed buffer */
7134 		results_offset +=
7135 		    sprintf(qed_get_buf_ptr(results_buf,
7136 					    results_offset),
7137 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
7138 			    elements[i].data,
7139 			    (u32)GET_FIELD(elements[i].data,
7140 					   REG_FIFO_ELEMENT_ADDRESS) *
7141 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
7142 			    s_access_strs[GET_FIELD(elements[i].data,
7143 						    REG_FIFO_ELEMENT_ACCESS)],
7144 			    (u32)GET_FIELD(elements[i].data,
7145 					   REG_FIFO_ELEMENT_PF),
7146 			    vf_str,
7147 			    (u32)GET_FIELD(elements[i].data,
7148 					   REG_FIFO_ELEMENT_PORT),
7149 			    s_privilege_strs[GET_FIELD(elements[i].data,
7150 						REG_FIFO_ELEMENT_PRIVILEGE)],
7151 			    s_protection_strs[GET_FIELD(elements[i].data,
7152 						REG_FIFO_ELEMENT_PROTECTION)],
7153 			    s_master_strs[GET_FIELD(elements[i].data,
7154 						    REG_FIFO_ELEMENT_MASTER)],
7155 			    err_msg ? err_msg : "unknown error code");
7156 	}
7157 
7158 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7159 						  results_offset),
7160 				  "fifo contained %d elements", num_elements);
7161 
7162 	/* Add 1 for string NULL termination */
7163 	*parsed_results_bytes = results_offset + 1;
7164 
7165 	return DBG_STATUS_OK;
7166 }
7167 
/* Parses a single IGU FIFO element into a human-readable line appended to
 * results_buf at *results_offset. Returns DBG_STATUS_OK or
 * DBG_STATUS_IGU_FIFO_BAD_DATA when any decoded field is out of range.
 */
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  *element, char
						  *results_buf,
						  u32 *results_offset)
{
	const struct igu_fifo_addr_data *found_addr = NULL;
	u8 source, err_type, i, is_cleanup;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u32 wr_data, prod_cons;
	bool is_wr_cmd, is_pf;
	u16 cmd_addr;
	u64 dword12;

	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
	 * FIFO element.
	 */
	dword12 = ((u64)element->dword2 << 32) | element->dword1;
	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);

	/* Reject indices that would overrun the lookup string tables */
	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Find address data: the entry whose [start_addr, end_addr] range
	 * contains cmd_addr.
	 */
	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
		const struct igu_fifo_addr_data *curr_addr =
			&s_igu_fifo_addr_data[i];

		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
		    curr_addr->end_addr)
			found_addr = curr_addr;
	}

	if (!found_addr)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Prepare parsed address data */
	switch (found_addr->type) {
	case IGU_ADDR_TYPE_MSIX_MEM:
		/* MSI-X memory: two address units per vector */
		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
		break;
	case IGU_ADDR_TYPE_WRITE_INT_ACK:
	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
		/* SB index is the offset within the matched address range */
		sprintf(parsed_addr_data,
			" SB = 0x%x", cmd_addr - found_addr->start_addr);
		break;
	default:
		parsed_addr_data[0] = '\0';
	}

	/* Read commands carry no write data to decode */
	if (!is_wr_cmd) {
		parsed_wr_data[0] = '\0';
		goto out;
	}

	/* Prepare parsed write data */
	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);

	if (source == IGU_SRC_ATTN) {
		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
	} else {
		if (is_cleanup) {
			u8 cleanup_val, cleanup_type;

			cleanup_val =
				GET_FIELD(wr_data,
					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
			cleanup_type =
			    GET_FIELD(wr_data,
				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

			sprintf(parsed_wr_data,
				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
				cleanup_val ? "set" : "clear",
				cleanup_type);
		} else {
			u8 update_flag, en_dis_int_for_sb, segment;
			u8 timer_mask;

			update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
			en_dis_int_for_sb =
				GET_FIELD(wr_data,
					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
			segment = GET_FIELD(wr_data,
					    IGU_FIFO_WR_DATA_SEGMENT);
			timer_mask = GET_FIELD(wr_data,
					       IGU_FIFO_WR_DATA_TIMER_MASK);

			sprintf(parsed_wr_data,
				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
				prod_cons,
				update_flag ? "update" : "nop",
				en_dis_int_for_sb ?
				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
				"enable",
				segment ? "attn" : "regular",
				timer_mask);
		}
	}
out:
	/* Add parsed element to parsed buffer */
	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
						   *results_offset),
				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
				   element->dword2, element->dword1,
				   element->dword0,
				   is_pf ? "pf" : "vf",
				   GET_FIELD(element->dword0,
					     IGU_FIFO_ELEMENT_DWORD0_FID),
				   s_igu_fifo_source_strs[source],
				   is_wr_cmd ? "wr" : "rd",
				   cmd_addr,
				   (!is_pf && found_addr->vf_desc)
				   ? found_addr->vf_desc
				   : found_addr->desc,
				   parsed_addr_data,
				   parsed_wr_data,
				   s_igu_fifo_error_strs[err_type]);

	return DBG_STATUS_OK;
}
7298 
7299 /* Parses an IGU FIFO dump buffer.
7300  * If result_buf is not NULL, the IGU FIFO results are printed to it.
7301  * In any case, the required results buffer size is assigned to
7302  * parsed_results_bytes.
7303  * The parsing status is returned.
7304  */
7305 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7306 					       char *results_buf,
7307 					       u32 *parsed_results_bytes)
7308 {
7309 	const char *section_name, *param_name, *param_str_val;
7310 	u32 param_num_val, num_section_params, num_elements;
7311 	struct igu_fifo_element *elements;
7312 	enum dbg_status status;
7313 	u32 results_offset = 0;
7314 	u8 i;
7315 
7316 	/* Read global_params section */
7317 	dump_buf += qed_read_section_hdr(dump_buf,
7318 					 &section_name, &num_section_params);
7319 	if (strcmp(section_name, "global_params"))
7320 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7321 
7322 	/* Print global params */
7323 	dump_buf += qed_print_section_params(dump_buf,
7324 					     num_section_params,
7325 					     results_buf, &results_offset);
7326 
7327 	/* Read igu_fifo_data section */
7328 	dump_buf += qed_read_section_hdr(dump_buf,
7329 					 &section_name, &num_section_params);
7330 	if (strcmp(section_name, "igu_fifo_data"))
7331 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7332 	dump_buf += qed_read_param(dump_buf,
7333 				   &param_name, &param_str_val, &param_num_val);
7334 	if (strcmp(param_name, "size"))
7335 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7336 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7337 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7338 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7339 	elements = (struct igu_fifo_element *)dump_buf;
7340 
7341 	/* Decode elements */
7342 	for (i = 0; i < num_elements; i++) {
7343 		status = qed_parse_igu_fifo_element(&elements[i],
7344 						    results_buf,
7345 						    &results_offset);
7346 		if (status != DBG_STATUS_OK)
7347 			return status;
7348 	}
7349 
7350 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7351 						  results_offset),
7352 				  "fifo contained %d elements", num_elements);
7353 
7354 	/* Add 1 for string NULL termination */
7355 	*parsed_results_bytes = results_offset + 1;
7356 
7357 	return DBG_STATUS_OK;
7358 }
7359 
7360 static enum dbg_status
7361 qed_parse_protection_override_dump(u32 *dump_buf,
7362 				   char *results_buf,
7363 				   u32 *parsed_results_bytes)
7364 {
7365 	const char *section_name, *param_name, *param_str_val;
7366 	u32 param_num_val, num_section_params, num_elements;
7367 	struct protection_override_element *elements;
7368 	u32 results_offset = 0;
7369 	u8 i;
7370 
7371 	/* Read global_params section */
7372 	dump_buf += qed_read_section_hdr(dump_buf,
7373 					 &section_name, &num_section_params);
7374 	if (strcmp(section_name, "global_params"))
7375 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7376 
7377 	/* Print global params */
7378 	dump_buf += qed_print_section_params(dump_buf,
7379 					     num_section_params,
7380 					     results_buf, &results_offset);
7381 
7382 	/* Read protection_override_data section */
7383 	dump_buf += qed_read_section_hdr(dump_buf,
7384 					 &section_name, &num_section_params);
7385 	if (strcmp(section_name, "protection_override_data"))
7386 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7387 	dump_buf += qed_read_param(dump_buf,
7388 				   &param_name, &param_str_val, &param_num_val);
7389 	if (strcmp(param_name, "size"))
7390 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7391 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7392 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7393 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7394 	elements = (struct protection_override_element *)dump_buf;
7395 
7396 	/* Decode elements */
7397 	for (i = 0; i < num_elements; i++) {
7398 		u32 address = GET_FIELD(elements[i].data,
7399 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7400 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7401 
7402 		results_offset +=
7403 		    sprintf(qed_get_buf_ptr(results_buf,
7404 					    results_offset),
7405 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7406 			    i, address,
7407 			    (u32)GET_FIELD(elements[i].data,
7408 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7409 			    (u32)GET_FIELD(elements[i].data,
7410 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7411 			    (u32)GET_FIELD(elements[i].data,
7412 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7413 			    s_protection_strs[GET_FIELD(elements[i].data,
7414 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7415 			    s_protection_strs[GET_FIELD(elements[i].data,
7416 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7417 	}
7418 
7419 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7420 						  results_offset),
7421 				  "protection override contained %d elements",
7422 				  num_elements);
7423 
7424 	/* Add 1 for string NULL termination */
7425 	*parsed_results_bytes = results_offset + 1;
7426 
7427 	return DBG_STATUS_OK;
7428 }
7429 
7430 /* Parses a FW Asserts dump buffer.
7431  * If result_buf is not NULL, the FW Asserts results are printed to it.
7432  * In any case, the required results buffer size is assigned to
7433  * parsed_results_bytes.
7434  * The parsing status is returned.
7435  */
7436 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7437 						 char *results_buf,
7438 						 u32 *parsed_results_bytes)
7439 {
7440 	u32 num_section_params, param_num_val, i, results_offset = 0;
7441 	const char *param_name, *param_str_val, *section_name;
7442 	bool last_section_found = false;
7443 
7444 	*parsed_results_bytes = 0;
7445 
7446 	/* Read global_params section */
7447 	dump_buf += qed_read_section_hdr(dump_buf,
7448 					 &section_name, &num_section_params);
7449 	if (strcmp(section_name, "global_params"))
7450 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7451 
7452 	/* Print global params */
7453 	dump_buf += qed_print_section_params(dump_buf,
7454 					     num_section_params,
7455 					     results_buf, &results_offset);
7456 
7457 	while (!last_section_found) {
7458 		dump_buf += qed_read_section_hdr(dump_buf,
7459 						 &section_name,
7460 						 &num_section_params);
7461 		if (!strcmp(section_name, "fw_asserts")) {
7462 			/* Extract params */
7463 			const char *storm_letter = NULL;
7464 			u32 storm_dump_size = 0;
7465 
7466 			for (i = 0; i < num_section_params; i++) {
7467 				dump_buf += qed_read_param(dump_buf,
7468 							   &param_name,
7469 							   &param_str_val,
7470 							   &param_num_val);
7471 				if (!strcmp(param_name, "storm"))
7472 					storm_letter = param_str_val;
7473 				else if (!strcmp(param_name, "size"))
7474 					storm_dump_size = param_num_val;
7475 				else
7476 					return
7477 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7478 			}
7479 
7480 			if (!storm_letter || !storm_dump_size)
7481 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7482 
7483 			/* Print data */
7484 			results_offset +=
7485 			    sprintf(qed_get_buf_ptr(results_buf,
7486 						    results_offset),
7487 				    "\n%sSTORM_ASSERT: size=%d\n",
7488 				    storm_letter, storm_dump_size);
7489 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7490 				results_offset +=
7491 				    sprintf(qed_get_buf_ptr(results_buf,
7492 							    results_offset),
7493 					    "%08x\n", *dump_buf);
7494 		} else if (!strcmp(section_name, "last")) {
7495 			last_section_found = true;
7496 		} else {
7497 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7498 		}
7499 	}
7500 
7501 	/* Add 1 for string NULL termination */
7502 	*parsed_results_bytes = results_offset + 1;
7503 
7504 	return DBG_STATUS_OK;
7505 }
7506 
7507 /***************************** Public Functions *******************************/
7508 
7509 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7510 					 const u8 * const bin_ptr)
7511 {
7512 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7513 	u8 buf_id;
7514 
7515 	/* Convert binary data to debug arrays */
7516 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7517 		qed_set_dbg_bin_buf(p_hwfn,
7518 				    (enum bin_dbg_buffer_type)buf_id,
7519 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7520 				    buf_hdrs[buf_id].length);
7521 
7522 	return DBG_STATUS_OK;
7523 }
7524 
7525 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7526 					void **user_data_ptr)
7527 {
7528 	*user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7529 				 GFP_KERNEL);
7530 	if (!(*user_data_ptr))
7531 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7532 
7533 	return DBG_STATUS_OK;
7534 }
7535 
7536 const char *qed_dbg_get_status_str(enum dbg_status status)
7537 {
7538 	return (status <
7539 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7540 }
7541 
7542 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7543 						  u32 *dump_buf,
7544 						  u32 num_dumped_dwords,
7545 						  u32 *results_buf_size)
7546 {
7547 	u32 num_errors, num_warnings;
7548 
7549 	return qed_parse_idle_chk_dump(p_hwfn,
7550 				       dump_buf,
7551 				       num_dumped_dwords,
7552 				       NULL,
7553 				       results_buf_size,
7554 				       &num_errors, &num_warnings);
7555 }
7556 
7557 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7558 					   u32 *dump_buf,
7559 					   u32 num_dumped_dwords,
7560 					   char *results_buf,
7561 					   u32 *num_errors,
7562 					   u32 *num_warnings)
7563 {
7564 	u32 parsed_buf_size;
7565 
7566 	return qed_parse_idle_chk_dump(p_hwfn,
7567 				       dump_buf,
7568 				       num_dumped_dwords,
7569 				       results_buf,
7570 				       &parsed_buf_size,
7571 				       num_errors, num_warnings);
7572 }
7573 
7574 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7575 				     const u32 *meta_buf)
7576 {
7577 	struct dbg_tools_user_data *dev_user_data =
7578 		qed_dbg_get_user_data(p_hwfn);
7579 
7580 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7581 }
7582 
7583 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7584 						   u32 *dump_buf,
7585 						   u32 num_dumped_dwords,
7586 						   u32 *results_buf_size)
7587 {
7588 	return qed_parse_mcp_trace_dump(p_hwfn,
7589 					dump_buf, NULL, results_buf_size, true);
7590 }
7591 
7592 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7593 					    u32 *dump_buf,
7594 					    u32 num_dumped_dwords,
7595 					    char *results_buf)
7596 {
7597 	u32 parsed_buf_size;
7598 
7599 	/* Doesn't do anything, needed for compile time asserts */
7600 	qed_user_static_asserts();
7601 
7602 	return qed_parse_mcp_trace_dump(p_hwfn,
7603 					dump_buf,
7604 					results_buf, &parsed_buf_size, true);
7605 }
7606 
7607 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7608 						 u32 *dump_buf,
7609 						 char *results_buf)
7610 {
7611 	u32 parsed_buf_size;
7612 
7613 	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7614 					&parsed_buf_size, false);
7615 }
7616 
7617 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7618 					 u8 *dump_buf,
7619 					 u32 num_dumped_bytes,
7620 					 char *results_buf)
7621 {
7622 	u32 parsed_results_bytes;
7623 
7624 	return qed_parse_mcp_trace_buf(p_hwfn,
7625 				       dump_buf,
7626 				       num_dumped_bytes,
7627 				       0,
7628 				       num_dumped_bytes,
7629 				       results_buf, &parsed_results_bytes);
7630 }
7631 
7632 /* Frees the specified MCP Trace meta data */
7633 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7634 {
7635 	struct dbg_tools_user_data *dev_user_data;
7636 	struct mcp_trace_meta *meta;
7637 	u32 i;
7638 
7639 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7640 	meta = &dev_user_data->mcp_trace_meta;
7641 	if (!meta->is_allocated)
7642 		return;
7643 
7644 	/* Release modules */
7645 	if (meta->modules) {
7646 		for (i = 0; i < meta->modules_num; i++)
7647 			kfree(meta->modules[i]);
7648 		kfree(meta->modules);
7649 	}
7650 
7651 	/* Release formats */
7652 	if (meta->formats) {
7653 		for (i = 0; i < meta->formats_num; i++)
7654 			kfree(meta->formats[i].format_str);
7655 		kfree(meta->formats);
7656 	}
7657 
7658 	meta->is_allocated = false;
7659 }
7660 
7661 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7662 						  u32 *dump_buf,
7663 						  u32 num_dumped_dwords,
7664 						  u32 *results_buf_size)
7665 {
7666 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7667 }
7668 
7669 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7670 					   u32 *dump_buf,
7671 					   u32 num_dumped_dwords,
7672 					   char *results_buf)
7673 {
7674 	u32 parsed_buf_size;
7675 
7676 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7677 }
7678 
7679 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7680 						  u32 *dump_buf,
7681 						  u32 num_dumped_dwords,
7682 						  u32 *results_buf_size)
7683 {
7684 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7685 }
7686 
7687 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7688 					   u32 *dump_buf,
7689 					   u32 num_dumped_dwords,
7690 					   char *results_buf)
7691 {
7692 	u32 parsed_buf_size;
7693 
7694 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7695 }
7696 
7697 enum dbg_status
7698 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7699 					     u32 *dump_buf,
7700 					     u32 num_dumped_dwords,
7701 					     u32 *results_buf_size)
7702 {
7703 	return qed_parse_protection_override_dump(dump_buf,
7704 						  NULL, results_buf_size);
7705 }
7706 
7707 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7708 						      u32 *dump_buf,
7709 						      u32 num_dumped_dwords,
7710 						      char *results_buf)
7711 {
7712 	u32 parsed_buf_size;
7713 
7714 	return qed_parse_protection_override_dump(dump_buf,
7715 						  results_buf,
7716 						  &parsed_buf_size);
7717 }
7718 
7719 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7720 						    u32 *dump_buf,
7721 						    u32 num_dumped_dwords,
7722 						    u32 *results_buf_size)
7723 {
7724 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7725 }
7726 
7727 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7728 					     u32 *dump_buf,
7729 					     u32 num_dumped_dwords,
7730 					     char *results_buf)
7731 {
7732 	u32 parsed_buf_size;
7733 
7734 	return qed_parse_fw_asserts_dump(dump_buf,
7735 					 results_buf, &parsed_buf_size);
7736 }
7737 
/**
 * qed_dbg_parse_attn(): Log a textual description of every set attention
 * status bit in @results.
 *
 * @p_hwfn: HW device data.
 * @results: Attention register results collected for a single block.
 *
 * Return: DBG_STATUS_OK on success, DBG_STATUS_INVALID_ARGS when the block
 *         id is unknown, DBG_STATUS_DBG_ARRAY_NOT_SET when the parsing
 *         binary arrays were not loaded.
 */
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
				   struct dbg_attn_block_result *results)
{
	const u32 *block_attn_name_offsets;
	const char *attn_name_base;
	const char *block_name;
	enum dbg_attn_type attn_type;
	u8 num_regs, i, j;

	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
	if (!block_name)
		return DBG_STATUS_INVALID_ARGS;

	/* All three binary arrays are needed to translate ids to strings */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Offsets of this block's attention names within the strings blob */
	block_attn_name_offsets =
	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
	    results->names_offset;

	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;

	/* Go over registers with a non-zero attention status */
	for (i = 0; i < num_regs; i++) {
		struct dbg_attn_bit_mapping *bit_mapping;
		struct dbg_attn_reg_result *reg_result;
		u8 num_reg_attn, bit_idx = 0;

		reg_result = &results->reg_results[i];
		num_reg_attn = GET_FIELD(reg_result->data,
					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
		bit_mapping = (struct dbg_attn_bit_mapping *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
		    reg_result->block_attn_offset;

		/* Go over attention status bits */
		for (j = 0; j < num_reg_attn; j++) {
			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
						     DBG_ATTN_BIT_MAPPING_VAL);
			const char *attn_name, *attn_type_str, *masked_str;
			u32 attn_name_offset;
			u32 sts_addr;

			/* Check if bit mask should be advanced (due to unused
			 * bits). In that case the mapping value holds the
			 * number of bits to skip, not a name index.
			 */
			if (GET_FIELD(bit_mapping[j].data,
				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
				bit_idx += (u8)attn_idx_val;
				continue;
			}

			/* Check current bit index */
			if (reg_result->sts_val & BIT(bit_idx)) {
				/* An attention bit with value=1 was found
				 * Find attention name
				 */
				attn_name_offset =
					block_attn_name_offsets[attn_idx_val];
				attn_name = attn_name_base + attn_name_offset;
				attn_type_str =
					(attn_type ==
					 ATTN_TYPE_INTERRUPT ? "Interrupt" :
					 "Parity");
				masked_str = reg_result->mask_val &
					     BIT(bit_idx) ?
					     " [masked]" : "";
				sts_addr =
				GET_FIELD(reg_result->data,
					  DBG_ATTN_REG_RESULT_STS_ADDRESS);
				/* sts_addr is a dword address, hence the *4 */
				DP_NOTICE(p_hwfn,
					  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
					  block_name, attn_type_str, attn_name,
					  sts_addr * 4, bit_idx, masked_str);
			}

			bit_idx++;
		}
	}

	return DBG_STATUS_OK;
}
7824 
7825 /* Wrapper for unifying the idle_chk and mcp_trace api */
7826 static enum dbg_status
7827 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7828 				   u32 *dump_buf,
7829 				   u32 num_dumped_dwords,
7830 				   char *results_buf)
7831 {
7832 	u32 num_errors, num_warnnings;
7833 
7834 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7835 					  results_buf, &num_errors,
7836 					  &num_warnnings);
7837 }
7838 
/* Serializes debug data collection (see qed_dbg_all_data()) */
static DEFINE_MUTEX(qed_dbg_lock);

/* Max size (bytes) of a parsed PHY result buffer (see qed_dbg_phy_size()) */
#define MAX_PHY_RESULT_BUFFER 9000

/******************************** Feature Meta data section ******************/

/* Number of entries each feature contributes to the HSI string-function
 * lookup (the num_funcs field below) - presumably matching the per-feature
 * hsi_func_lookup tables; verify when wiring a new feature.
 */
#define GRC_NUM_STR_FUNCS 2
#define IDLE_CHK_NUM_STR_FUNCS 1
#define MCP_TRACE_NUM_STR_FUNCS 1
#define REG_FIFO_NUM_STR_FUNCS 1
#define IGU_FIFO_NUM_STR_FUNCS 1
#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
#define FW_ASSERTS_NUM_STR_FUNCS 1
#define ILT_NUM_STR_FUNCS 1
#define PHY_NUM_STR_FUNCS 20
7854 
/* Feature meta data lookup table.
 * A NULL print_results/results_buf_size callback means the feature's dump
 * has no textual parser and is exposed as raw binary only (see
 * format_feature()).
 */
static struct {
	char *name;		/* feature name used in log messages */
	u32 num_funcs;		/* entry count for hsi_func_lookup */
	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, u32 *size);
	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt, u32 *dump_buf,
					u32 buf_size, u32 *dumped_dwords);
	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf, u32 num_dumped_dwords,
					 char *results_buf);
	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
					    u32 *dump_buf,
					    u32 num_dumped_dwords,
					    u32 *results_buf_size);
	const struct qed_func_lookup *hsi_func_lookup;
} qed_features_lookup[] = {
	{
	"grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
		    qed_dbg_grc_dump, NULL, NULL, NULL}, {
	"idle_chk", IDLE_CHK_NUM_STR_FUNCS,
		    qed_dbg_idle_chk_get_dump_buf_size,
		    qed_dbg_idle_chk_dump,
		    qed_print_idle_chk_results_wrapper,
		    qed_get_idle_chk_results_buf_size,
		    NULL}, {
	"mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
		    qed_dbg_mcp_trace_get_dump_buf_size,
		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
		    qed_get_mcp_trace_results_buf_size,
		    NULL}, {
	"reg_fifo", REG_FIFO_NUM_STR_FUNCS,
		    qed_dbg_reg_fifo_get_dump_buf_size,
		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
		    qed_get_reg_fifo_results_buf_size,
		    NULL}, {
	"igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
		    qed_dbg_igu_fifo_get_dump_buf_size,
		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
		    qed_get_igu_fifo_results_buf_size,
		    NULL}, {
	"protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
		    qed_dbg_protection_override_get_dump_buf_size,
		    qed_dbg_protection_override_dump,
		    qed_print_protection_override_results,
		    qed_get_protection_override_results_buf_size,
		    NULL}, {
	"fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
		    qed_dbg_fw_asserts_get_dump_buf_size,
		    qed_dbg_fw_asserts_dump,
		    qed_print_fw_asserts_results,
		    qed_get_fw_asserts_results_buf_size,
		    NULL}, {
	"ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
		    qed_dbg_ilt_dump, NULL, NULL, NULL},};
7911 
7912 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7913 {
7914 	u32 i, precision = 80;
7915 
7916 	if (!p_text_buf)
7917 		return;
7918 
7919 	pr_notice("\n%.*s", precision, p_text_buf);
7920 	for (i = precision; i < text_size; i += precision)
7921 		pr_cont("%.*s", precision, p_text_buf + i);
7922 	pr_cont("\n");
7923 }
7924 
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info.
 * Formats the binary dump of @feature_idx into text. On success - and when
 * not in binary-dump mode - ownership of the feature's dump_buf moves to the
 * newly formatted text buffer and the old binary buffer is freed.
 */
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
				      enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 txt_size_bytes, null_char_pos, i;
	u32 *dbuf, dwords;
	enum dbg_status rc;
	char *text_buf;

	/* Check if feature supports formatting capability */
	if (!qed_features_lookup[feature_idx].results_buf_size)
		return DBG_STATUS_OK;

	dbuf = (u32 *)feature->dump_buf;
	dwords = feature->dumped_dwords;

	/* Obtain size of formatted output */
	rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
							       dbuf,
							       dwords,
							       &txt_size_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Make sure that the allocated size is a multiple of dword
	 * (4 bytes).
	 */
	null_char_pos = txt_size_bytes - 1;
	txt_size_bytes = (txt_size_bytes + 3) & ~0x3;

	if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
		DP_NOTICE(p_hwfn->cdev,
			  "formatted size of feature was too small %d. Aborting\n",
			  txt_size_bytes);
		return DBG_STATUS_INVALID_ARGS;
	}

	/* allocate temp text buf */
	text_buf = vzalloc(txt_size_bytes);
	if (!text_buf) {
		DP_NOTICE(p_hwfn->cdev,
			  "failed to allocate text buffer. Aborting\n");
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
	}

	/* Decode feature opcodes to string on temp buf */
	rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
							    dbuf,
							    dwords,
							    text_buf);
	if (rc != DBG_STATUS_OK) {
		vfree(text_buf);
		return rc;
	}

	/* Replace the original null character with a '\n' character.
	 * The bytes that were added as a result of the dword alignment are also
	 * padded with '\n' characters.
	 */
	for (i = null_char_pos; i < txt_size_bytes; i++)
		text_buf[i] = '\n';

	/* Dump printable feature to log */
	if (p_hwfn->cdev->print_dbg_data)
		qed_dbg_print_feature(text_buf, txt_size_bytes);

	/* Dump binary data as is to the output file; the text buffer was only
	 * needed for the optional log print above.
	 */
	if (p_hwfn->cdev->dbg_bin_dump) {
		vfree(text_buf);
		return rc;
	}

	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer.
	 */
	vfree(feature->dump_buf);
	feature->dump_buf = text_buf;
	feature->buf_size = txt_size_bytes;
	feature->dumped_dwords = txt_size_bytes / 4;

	return rc;
}
8010 
/* Upper bound on a single debug feature dump size, in dwords */
#define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF

/* Generic function for performing the dump of a debug feature.
 * Allocates feature->dump_buf (freeing any stale buffer first), performs the
 * HSI dump, then formats it via format_feature().
 */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 buf_size_dwords, *dbuf, *dwords;
	enum dbg_status rc;

	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
		  qed_features_lookup[feature_idx].name);

	/* Dump_buf was already allocated need to free (this can happen if dump
	 * was called but file was never read).
	 * We can't use the buffer as is since size may have changed.
	 */
	if (feature->dump_buf) {
		vfree(feature->dump_buf);
		feature->dump_buf = NULL;
	}

	/* Get buffer size from hsi, allocate accordingly, and perform the
	 * dump.
	 */
	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return rc;

	/* An oversized feature is skipped and reported as an empty dump */
	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
		feature->buf_size = 0;
		DP_NOTICE(p_hwfn->cdev,
			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
			  qed_features_lookup[feature_idx].name,
			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);

		return DBG_STATUS_OK;
	}

	feature->buf_size = buf_size_dwords * sizeof(u32);
	feature->dump_buf = vmalloc(feature->buf_size);
	if (!feature->dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	dbuf = (u32 *)feature->dump_buf;
	dwords = &feature->dumped_dwords;
	rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
							   dbuf,
							   feature->buf_size /
							   sizeof(u32),
							   dwords);

	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
	 * In this case the buffer holds valid binary data, but we won't able
	 * to parse it (since parsing relies on data in NVRAM which is only
	 * accessible when MFW is responsive). skip the formatting but return
	 * success so that binary data is provided.
	 */
	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return DBG_STATUS_OK;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Format output */
	rc = format_feature(p_hwfn, feature_idx);
	return rc;
}
8082 
/* Dump the GRC feature into @buffer; @num_dumped_bytes receives the length */
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
}

/* Buffer size (bytes) needed for a GRC dump */
int qed_dbg_grc_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
}

/* Dump the idle-check feature into @buffer */
int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
			       num_dumped_bytes);
}

/* Buffer size (bytes) needed for an idle-check dump */
int qed_dbg_idle_chk_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
}

/* Dump the reg-fifo feature into @buffer */
int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
			       num_dumped_bytes);
}

/* Buffer size (bytes) needed for a reg-fifo dump */
int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
}

/* Dump the IGU-fifo feature into @buffer */
int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
			       num_dumped_bytes);
}

/* Buffer size (bytes) needed for an IGU-fifo dump */
int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
8125 
8126 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
8127 				    enum qed_nvm_images image_id, u32 *length)
8128 {
8129 	struct qed_nvm_image_att image_att;
8130 	int rc;
8131 
8132 	*length = 0;
8133 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
8134 	if (rc)
8135 		return rc;
8136 
8137 	*length = image_att.length;
8138 
8139 	return rc;
8140 }
8141 
8142 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
8143 			     u32 *num_dumped_bytes,
8144 			     enum qed_nvm_images image_id)
8145 {
8146 	struct qed_hwfn *p_hwfn =
8147 		&cdev->hwfns[cdev->engine_for_debug];
8148 	u32 len_rounded;
8149 	int rc;
8150 
8151 	*num_dumped_bytes = 0;
8152 	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
8153 	if (rc)
8154 		return rc;
8155 
8156 	DP_NOTICE(p_hwfn->cdev,
8157 		  "Collecting a debug feature [\"nvram image %d\"]\n",
8158 		  image_id);
8159 
8160 	len_rounded = roundup(len_rounded, sizeof(u32));
8161 	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
8162 	if (rc)
8163 		return rc;
8164 
8165 	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
8166 	if (image_id != QED_NVM_IMAGE_NVM_META)
8167 		cpu_to_be32_array((__force __be32 *)buffer,
8168 				  (const u32 *)buffer,
8169 				  len_rounded / sizeof(u32));
8170 
8171 	*num_dumped_bytes = len_rounded;
8172 
8173 	return rc;
8174 }
8175 
/* Dump the protection-override feature into @buffer */
int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
				u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
			       num_dumped_bytes);
}

/* Buffer size (bytes) needed for a protection-override dump */
int qed_dbg_protection_override_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
}

/* Dump the fw-asserts feature into @buffer */
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
		       u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
			       num_dumped_bytes);
}

/* Buffer size (bytes) needed for a fw-asserts dump */
int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
}

/* Dump the ILT feature into @buffer */
int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
}

/* Buffer size (bytes) needed for an ILT dump */
int qed_dbg_ilt_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
}

/* Dump the MCP-trace feature into @buffer */
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
		      u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
			       num_dumped_bytes);
}

/* Buffer size (bytes) needed for an MCP-trace dump */
int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
}
8221 
/* Defines the amount of bytes allocated for recording the length of debugfs
 * feature buffer.
 */
#define REGDUMP_HEADER_SIZE			sizeof(u32)

/* Regdump header dword layout: bits 0-23 hold the feature size in bytes,
 * bits 24-28 the feature id, bit 29 the binary-dump flag, bit 30 the
 * omit-engine flag and bit 31 the engine id.
 */
#define REGDUMP_HEADER_SIZE_SHIFT		0
#define REGDUMP_HEADER_SIZE_MASK		0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT		24
#define REGDUMP_HEADER_FEATURE_MASK		0x1f
#define REGDUMP_HEADER_BIN_DUMP_SHIFT		29
#define REGDUMP_HEADER_BIN_DUMP_MASK		0x1
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
#define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
#define REGDUMP_HEADER_ENGINE_SHIFT		31
#define REGDUMP_HEADER_ENGINE_MASK		0x1

/* Total dump size above which the ILT dump is dropped (see
 * qed_dbg_all_data_size()), and the per-ILT-dump size cap.
 */
#define REGDUMP_MAX_SIZE			0x1000000
#define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)

/* Feature ids stored in the regdump header; presumably consumed by external
 * dump-parsing tools, so existing values should not be renumbered - verify
 * before changing.
 */
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
	NVM_CFG1 = 9,
	DEFAULT_CFG = 10,
	NVM_META = 11,
	MDUMP = 12,
	ILT_DUMP = 13,
};
8255 
8256 static u32 qed_calc_regdump_header(struct qed_dev *cdev,
8257 				   enum debug_print_features feature,
8258 				   int engine, u32 feature_size,
8259 				   u8 omit_engine, u8 dbg_bin_dump)
8260 {
8261 	u32 res = 0;
8262 
8263 	SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
8264 	if (res != feature_size)
8265 		DP_NOTICE(cdev,
8266 			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
8267 			  feature, feature_size);
8268 
8269 	SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
8270 	SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
8271 	SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
8272 	SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
8273 
8274 	return res;
8275 }
8276 
/* Collect all debug features (per-engine and engine-common) into one
 * contiguous @buffer, each preceded by a regdump header dword built by
 * qed_calc_regdump_header(). @buffer must be sized via
 * qed_dbg_all_data_size(). Always returns 0; individual feature failures
 * are logged and skipped.
 */
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
	u8 cur_engine, omit_engine = 0, org_engine;
	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
	u32 offset = 0, feature_size;

	/* Snapshot GRC params; they are restored before the grc dump below */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		grc_params[i] = dev_data->grc.param_val[i];

	if (!QED_IS_CMT(cdev))
		omit_engine = 1;

	/* NOTE(review): dbg_bin_dump is set before taking qed_dbg_lock and
	 * cleared after releasing it - confirm a second collector cannot
	 * clear it while the first still holds the lock.
	 */
	cdev->dbg_bin_dump = 1;
	mutex_lock(&qed_dbg_lock);

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);

		/* First idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* Second idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, REG_FIFO,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		}

		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IGU_FIFO,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev,
						    PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, FW_ASSERTS,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* ILT dump is skipped when disabled or oversized */
		feature_size = qed_dbg_ilt_size(cdev);
		if (!cdev->disable_ilt_dump && feature_size <
		    ILT_DUMP_MAX_SIZE) {
			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
					 REGDUMP_HEADER_SIZE, &feature_size);
			if (!rc) {
				*(u32 *)((u8 *)buffer + offset) =
				    qed_calc_regdump_header(cdev, ILT_DUMP,
							    cur_engine,
							    feature_size,
							    omit_engine,
							    cdev->dbg_bin_dump);
				offset += (feature_size + REGDUMP_HEADER_SIZE);
			} else {
				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
				       rc);
			}
		}

		/* Grc dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
			dev_data->grc.param_val[i] = grc_params[i];

		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, GRC_DUMP,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
		}
	}

	qed_set_debug_engine(cdev, org_engine);

	/* Engine-common features follow; cur_engine now equals num_hwfns */

	/* mcp_trace */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	/* nvm cfg1 */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_CFG1);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
		       rc);
	}

	/* nvm default */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_DEFAULT_CFG);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, DEFAULT_CFG,
					    cur_engine, feature_size,
					    omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_DEFAULT_CFG,
		       "QED_NVM_IMAGE_DEFAULT_CFG", rc);
	}

	/* nvm meta */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_META);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_META, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
		       rc);
	}

	/* nvm mdump */
	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_MDUMP);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MDUMP, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
	}

	mutex_unlock(&qed_dbg_lock);
	cdev->dbg_bin_dump = 0;

	return 0;
}
8527 
8528 int qed_dbg_all_data_size(struct qed_dev *cdev)
8529 {
8530 	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8531 	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8532 	u8 cur_engine, org_engine;
8533 
8534 	cdev->disable_ilt_dump = false;
8535 	org_engine = qed_get_debug_engine(cdev);
8536 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8537 		/* Engine specific */
8538 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8539 			   "calculating idle_chk and grcdump register length for current engine\n");
8540 		qed_set_debug_engine(cdev, cur_engine);
8541 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8542 		    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8543 		    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8544 		    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8545 		    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8546 		    REGDUMP_HEADER_SIZE +
8547 		    qed_dbg_protection_override_size(cdev) +
8548 		    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8549 		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8550 		if (ilt_len < ILT_DUMP_MAX_SIZE) {
8551 			total_ilt_len += ilt_len;
8552 			regs_len += ilt_len;
8553 		}
8554 	}
8555 
8556 	qed_set_debug_engine(cdev, org_engine);
8557 
8558 	/* Engine common */
8559 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
8560 	    REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
8561 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8562 	if (image_len)
8563 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8564 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8565 	if (image_len)
8566 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8567 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8568 	if (image_len)
8569 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8570 	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8571 	if (image_len)
8572 		regs_len += REGDUMP_HEADER_SIZE + image_len;
8573 
8574 	if (regs_len > REGDUMP_MAX_SIZE) {
8575 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8576 			   "Dump exceeds max size 0x%x, disable ILT dump\n",
8577 			   REGDUMP_MAX_SIZE);
8578 		cdev->disable_ilt_dump = true;
8579 		regs_len -= total_ilt_len;
8580 	}
8581 
8582 	return regs_len;
8583 }
8584 
/* Generic entry point for dumping a single debug feature into @buffer.
 * Returns 0 on success or a negative errno; *num_dumped_bytes receives the
 * dump length in bytes (0 on failure).
 */
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
	enum dbg_status dbg_rc;
	struct qed_ptt *p_ptt;
	int rc = 0;

	/* Acquire ptt */
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EINVAL;

	/* Get dump */
	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
	if (dbg_rc != DBG_STATUS_OK) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
			   qed_dbg_get_status_str(dbg_rc));
		*num_dumped_bytes = 0;
		rc = -EINVAL;
		goto out;
	}

	/* NOTE(review): assumes @buffer holds at least qed_feature->buf_size
	 * bytes - callers size it via qed_dbg_feature_size(); confirm.
	 */
	DP_VERBOSE(cdev, QED_MSG_DEBUG,
		   "copying debugfs feature to external buffer\n");
	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
			    4;

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
8619 
8620 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8621 {
8622 	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8623 	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8624 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8625 	u32 buf_size_dwords;
8626 	enum dbg_status rc;
8627 
8628 	if (!p_ptt)
8629 		return -EINVAL;
8630 
8631 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8632 						   &buf_size_dwords);
8633 	if (rc != DBG_STATUS_OK)
8634 		buf_size_dwords = 0;
8635 
8636 	/* Feature will not be dumped if it exceeds maximum size */
8637 	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8638 		buf_size_dwords = 0;
8639 
8640 	qed_ptt_release(p_hwfn, p_ptt);
8641 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8642 	return qed_feature->buf_size;
8643 }
8644 
8645 int qed_dbg_phy_size(struct qed_dev *cdev)
8646 {
8647 	/* return max size of phy info and
8648 	 * phy mac_stat multiplied by the number of ports
8649 	 */
8650 	return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
8651 }
8652 
/* Return the engine currently selected for debug data collection */
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
	return cdev->engine_for_debug;
}
8657 
/* Select the engine whose hwfn subsequent debug dumps will target */
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
		   engine_number);
	cdev->engine_for_debug = engine_number;
}
8664 
/* Per-PF debug init: sync the tools version and point both the dump and the
 * user (parsing) debug code at the debug values appended to the firmware
 * file.
 */
void qed_dbg_pf_init(struct qed_dev *cdev)
{
	const u8 *dbg_values = NULL;
	int i;

	/* Sync ver with debugbus qed code */
	qed_dbg_set_app_ver(TOOLS_VERSION);

	/* Debug values are after init values.
	 * The offset is the first dword of the file.
	 */
	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;

	for_each_hwfn(cdev, i) {
		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
	}

	/* Set the hwfn to be 0 as default */
	cdev->engine_for_debug = 0;
}
8686 
8687 void qed_dbg_pf_exit(struct qed_dev *cdev)
8688 {
8689 	struct qed_dbg_feature *feature = NULL;
8690 	enum qed_dbg_features feature_idx;
8691 
8692 	/* debug features' buffers may be allocated if debug feature was used
8693 	 * but dump wasn't called
8694 	 */
8695 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8696 		feature = &cdev->dbg_features[feature_idx];
8697 		if (feature->dump_buf) {
8698 			vfree(feature->dump_buf);
8699 			feature->dump_buf = NULL;
8700 		}
8701 	}
8702 }
8703