1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015 QLogic Corporation
4  * Copyright (c) 2019-2021 Marvell International Ltd.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/vmalloc.h>
9 #include <linux/crc32.h>
10 #include "qed.h"
11 #include "qed_cxt.h"
12 #include "qed_hsi.h"
13 #include "qed_dbg_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
/* Memory groups enum.
 * Must be kept in sync (same order and count) with s_mem_group_names[] below.
 */
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM		/* must be last */
};
52 
/* Memory groups names.
 * Each index must match the corresponding enum mem_groups value above.
 */
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"CAU_MEM_EXT",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
	"TM_MEM",
	"TASK_CFC_MEM",
};
86 
87 /* Idle check conditions */
88 
/* True when both masked registers differ from their expected values */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 masked0 = r[0] & imm[0];
	u32 masked1 = r[1] & imm[2];

	return masked0 != imm[1] && masked1 != imm[3];
}
93 
/* True when the shifted-and-masked register field differs from imm[2] */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return field != imm[2];
}
98 
/* True when the masked register differs from imm[1] */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked != imm[1];
}
103 
/* True when one register field differs from a value assembled from two
 * other fields (low part from r[0], high part from r[1]).
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 lhs = (r[0] & imm[0]) >> imm[1];
	u32 lo = (r[0] & imm[2]) >> imm[3];
	u32 hi = (r[1] & imm[4]) << imm[5];

	return lhs != (lo | hi);
}
109 
/* True when two fields of the same register disagree */
static u32 cond10(const u32 *r, const u32 *imm)
{
	u32 hi_field = (r[0] & imm[0]) >> imm[1];
	u32 lo_field = r[0] & imm[2];

	return hi_field != lo_field;
}
114 
/* True when the register bits outside the imm[0] mask differ from imm[1] */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 outside = r[0] & ~imm[0];

	return outside != imm[1];
}
119 
/* True when r[0], masked by the complement of r[1], differs from imm[0] */
static u32 cond0(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & ~r[1];

	return masked != imm[0];
}
124 
/* True when the register, with imm[0] bits forced on, differs from imm[1] */
static u32 cond14(const u32 *r, const u32 *imm)
{
	u32 set_bits = r[0] | imm[0];

	return set_bits != imm[1];
}
129 
/* True when the register differs from the expected value */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return imm[0] != r[0];
}
134 
/* True when r[0] and r[1] disagree while r[2] equals imm[0] */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] == imm[0];
}
139 
/* True when r[0] and r[1] disagree while r[2] exceeds imm[0] */
static u32 cond12(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return r[2] > imm[0];
}
144 
/* True when the two registers disagree */
static u32 cond3(const u32 *r, const u32 *imm)
{
	return r[0] == r[1] ? 0 : 1;
}
149 
/* Returns the masked register bits (non-zero iff any masked bit is set) */
static u32 cond13(const u32 *r, const u32 *imm)
{
	u32 hit_bits = r[0] & imm[0];

	return hit_bits;
}
154 
/* True when r[0] is below r[1] minus a margin */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];	/* unsigned arithmetic, may wrap */

	return r[0] < threshold;
}
159 
/* True when the register exceeds the immediate */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return imm[0] < r[0];
}
164 
/* Array of Idle Check conditions.
 * cond<i> must be stored at index i (cond0 at [0] ... cond14 at [14]);
 * presumably indexed by the condition id carried in the idle-check rule
 * data — verify against the users of cond_arr before reordering.
 */
static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
	cond14,
};
183 
#define NUM_PHYS_BLOCKS 84

#define NUM_DBG_RESET_REGS 8

/******************************* Data Types **********************************/

/* Supported HW/platform types; only HW_TYPE_ASIC is used here, the other
 * entries are reserved placeholders.
 */
enum hw_types {
	HW_TYPE_ASIC,
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};

/* CM context types.
 * Order matches the cm_ctx_rd_addr[] / cm_ctx_lid_sizes[][] entries in
 * struct storm_defs.
 */
enum cm_ctx_types {
	CM_CTX_CONN_AG,
	CM_CTX_CONN_ST,
	CM_CTX_TASK_AG,
	CM_CTX_TASK_ST,
	NUM_CM_CTX_TYPES
};

/* Debug bus frame modes */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dwords, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
	DBG_BUS_NUM_FRAME_MODES
};

/* Debug bus SEMI frame modes */
enum dbg_bus_semi_frame_modes {
	DBG_BUS_SEMI_FRAME_MODE_4FAST = 0,	/* 4 fast dw */
	DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */
	DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3,	/* 4 slow dw */
	DBG_BUS_SEMI_NUM_FRAME_MODES
};

/* Debug bus filter types */
enum dbg_bus_filter_types {
	DBG_BUS_FILTER_TYPE_OFF,	/* Filter always off */
	DBG_BUS_FILTER_TYPE_PRE,	/* Filter before trigger only */
	DBG_BUS_FILTER_TYPE_POST,	/* Filter after trigger only */
	DBG_BUS_FILTER_TYPE_ON	/* Filter always on */
};

/* Debug bus pre-trigger recording types */
enum dbg_bus_pre_trigger_types {
	DBG_BUS_PRE_TRIGGER_FROM_ZERO,	/* Record from time 0 */
	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,	/* Record some chunks before trigger */
	DBG_BUS_PRE_TRIGGER_DROP	/* Drop data before trigger */
};

/* Debug bus post-trigger recording types */
enum dbg_bus_post_trigger_types {
	DBG_BUS_POST_TRIGGER_RECORD,	/* Start recording after trigger */
	DBG_BUS_POST_TRIGGER_DROP	/* Drop data after trigger */
};

/* Debug bus other engine mode */
enum dbg_bus_other_engine_modes {
	DBG_BUS_OTHER_ENGINE_MODE_NONE,
	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
};
256 
/* DBG block Framing mode definitions */
struct framing_mode_defs {
	u8 id;			/* one of enum dbg_bus_frame_modes */
	u8 blocks_dword_mask;
	u8 storms_dword_mask;
	u8 semi_framing_mode_id; /* one of enum dbg_bus_semi_frame_modes */
	u8 full_buf_thr;
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;
	u8 dwords_per_cycle;
	u8 num_framing_modes;
	u32 num_ilt_pages;
	struct framing_mode_defs *framing_modes;
};

/* HW type constant definitions */
struct hw_type_defs {
	const char *name;
	u32 delay_factor;
	u32 dmae_thresh;
	u32 log_thresh;
};

/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;
	u32 reset_val[MAX_CHIP_IDS];	/* per-chip reset value */
};

/* Storm constant definitions.
 * Addresses are in bytes, sizes are in quad-regs.
 */
struct storm_defs {
	char letter;
	enum block_id sem_block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	/* Indexed by enum cm_ctx_types */
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	/* Indexed by [chip id][cm context type] */
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};

/* Debug Bus Constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;
	bool is_cyclic;
};

/* Storm Mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
	u32 src_disable_reg_addr;
	u32 src_enable_val;
	bool exists[MAX_CHIP_IDS];	/* per-chip availability */
};

struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];	/* per-chip default */
	u32 min;
	u32 max;
	bool is_preset;
	bool is_persistent;
	u32 exclude_all_preset_val;
	u32 crash_preset_val[MAX_CHIP_IDS];	/* per-chip crash preset */
};

/* Address is in 128b units. Width is in bits. */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];	/* per-chip entry count */
};

struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
};

struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/* Split type definitions */
struct split_type_defs {
	const char *name;
};
386 
387 /******************************** Constants **********************************/
388 
#define BYTES_IN_DWORD			sizeof(u32)
/* In the macros below, size and offset are specified in bits */
#define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	 ((int)(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

/* Clears the field in var and ORs in val. val is NOT masked here; the
 * caller must ensure it fits within the field's bit size.
 */
#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &=	\
		(~FIELD_BIT_MASK(type, field));	\
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

/* NOTE: relies on a loop variable 'i' declared at the call site */
#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr,	(arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)

/* extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE			true
#define PROTECT_WIDE_BUS		true

#define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT		24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE	32

/* The sizes and offsets below are specified in bits */
#define VFC_CAM_CMD_STRUCT_SIZE		64
#define VFC_CAM_CMD_ROW_OFFSET		48
#define VFC_CAM_CMD_ROW_SIZE		9
#define VFC_CAM_ADDR_STRUCT_SIZE	16
#define VFC_CAM_ADDR_OP_OFFSET		0
#define VFC_CAM_ADDR_OP_SIZE		4
#define VFC_CAM_RESP_STRUCT_SIZE	256
#define VFC_RAM_ADDR_STRUCT_SIZE	16
#define VFC_RAM_ADDR_OP_OFFSET		0
#define VFC_RAM_ADDR_OP_SIZE		2
#define VFC_RAM_ADDR_ROW_OFFSET		2
#define VFC_RAM_ADDR_ROW_SIZE		10
#define VFC_RAM_RESP_STRUCT_SIZE	256

#define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES		4

#define VFC_CAM_NUM_ROWS		512

#define VFC_OPCODE_CAM_RD		14
#define VFC_OPCODE_RAM_RD		0

#define NUM_RSS_MEM_TYPES		5

#define NUM_BIG_RAM_TYPES		3
#define BIG_RAM_NAME_LEN		3

#define NUM_PHY_TBUS_ADDRESSES		2048
#define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET	4

#define STALL_DELAY_MS			500

#define STATIC_DEBUG_LINE_DWORDS	9

#define NUM_COMMON_GLOBAL_PARAMS	10

#define MAX_RECURSION_DEPTH		10

#define FW_IMG_KUKU                     0
#define FW_IMG_MAIN			1
#define FW_IMG_L2B                      2

#define REG_FIFO_ELEMENT_DWORDS		2
#define REG_FIFO_DEPTH_ELEMENTS		32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS		4
#define IGU_FIFO_DEPTH_ELEMENTS		64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

/* NOTE(review): "PLTAFORM" looks like a typo of "PLATFORM", but the name
 * is kept as-is since code outside this chunk may reference it.
 */
#define MAX_SW_PLTAFORM_STR_SIZE	64

#define EMPTY_FW_VERSION_STR		"???_???_???_???"
#define EMPTY_FW_IMAGE_STR		"???????????????"
524 
525 /***************************** Constant Arrays *******************************/
526 
/* DBG block framing mode definitions, in descending preference order.
 * Entry fields (see struct framing_mode_defs): id, blocks_dword_mask,
 * storms_dword_mask, semi_framing_mode_id, full_buf_thr.
 */
static struct framing_mode_defs s_framing_mode_defs[4] = {
	{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
	 DBG_BUS_SEMI_FRAME_MODE_4FAST,
	 10},
	{DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
	 10},
	{DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
	 DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
	{DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
	 DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
};

/* Chip constant definitions array, indexed by chip id (bb, ah).
 * Entry fields (see struct chip_defs): name, dwords_per_cycle,
 * num_framing_modes, num_ilt_pages, framing_modes.
 */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
	 s_framing_mode_defs},
	{"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
	 s_framing_mode_defs}
};
547 
/* Storm constant definitions array.
 * Entry fields follow struct storm_defs: letter, sem_block_id,
 * dbg_client_id[bb, k2], has_vfc, SEM fast/frame/slow/sync/gpre register
 * addresses, cm_ctx_wr_addr, cm_ctx_rd_addr[4 CM context types], and
 * cm_ctx_lid_sizes[chip][CM context type].
 */
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
		true,
		TSEM_REG_FAST_MEMORY,
		TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
		TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
		TCM_REG_CTX_RBC_ACCS,
		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
		 TCM_REG_SM_TASK_CTX},
		{{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
	},

	/* Mstorm */
	{'M', BLOCK_MSEM,
		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
		false,
		MSEM_REG_FAST_MEMORY,
		MSEM_REG_DBG_FRAME_MODE,
		MSEM_REG_SLOW_DBG_ACTIVE,
		MSEM_REG_SLOW_DBG_MODE,
		MSEM_REG_DBG_MODE1_CFG,
		MSEM_REG_SYNC_DBG_EMPTY,
		MSEM_REG_DBG_GPRE_VECT,
		MCM_REG_CTX_RBC_ACCS,
		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
		 MCM_REG_SM_TASK_CTX },
		{{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/
	},

	/* Ustorm */
	{'U', BLOCK_USEM,
		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
		false,
		USEM_REG_FAST_MEMORY,
		USEM_REG_DBG_FRAME_MODE,
		USEM_REG_SLOW_DBG_ACTIVE,
		USEM_REG_SLOW_DBG_MODE,
		USEM_REG_DBG_MODE1_CFG,
		USEM_REG_SYNC_DBG_EMPTY,
		USEM_REG_DBG_GPRE_VECT,
		UCM_REG_CTX_RBC_ACCS,
		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
		 UCM_REG_SM_TASK_CTX},
		{{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
	},

	/* Xstorm */
	{'X', BLOCK_XSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
		false,
		XSEM_REG_FAST_MEMORY,
		XSEM_REG_DBG_FRAME_MODE,
		XSEM_REG_SLOW_DBG_ACTIVE,
		XSEM_REG_SLOW_DBG_MODE,
		XSEM_REG_DBG_MODE1_CFG,
		XSEM_REG_SYNC_DBG_EMPTY,
		XSEM_REG_DBG_GPRE_VECT,
		XCM_REG_CTX_RBC_ACCS,
		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
		{{9, 15, 0, 0}, {9, 15,	0, 0}} /* {bb} {k2} */
	},

	/* Ystorm */
	{'Y', BLOCK_YSEM,
		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
		false,
		YSEM_REG_FAST_MEMORY,
		YSEM_REG_DBG_FRAME_MODE,
		YSEM_REG_SLOW_DBG_ACTIVE,
		YSEM_REG_SLOW_DBG_MODE,
		YSEM_REG_DBG_MODE1_CFG,
		YSEM_REG_SYNC_DBG_EMPTY,
		YSEM_REG_DBG_GPRE_VECT,
		YCM_REG_CTX_RBC_ACCS,
		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
		 YCM_REG_SM_TASK_CTX},
		{{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
	},

	/* Pstorm */
	{'P', BLOCK_PSEM,
		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
		true,
		PSEM_REG_FAST_MEMORY,
		PSEM_REG_DBG_FRAME_MODE,
		PSEM_REG_SLOW_DBG_ACTIVE,
		PSEM_REG_SLOW_DBG_MODE,
		PSEM_REG_DBG_MODE1_CFG,
		PSEM_REG_SYNC_DBG_EMPTY,
		PSEM_REG_DBG_GPRE_VECT,
		PCM_REG_CTX_RBC_ACCS,
		{0, PCM_REG_SM_CON_CTX, 0, 0},
		{{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
	},
};
647 
/* HW type constants, indexed by enum hw_types.
 * Entry fields: name, delay_factor, dmae_thresh, log_thresh.
 */
static struct hw_type_defs s_hw_type_defs[] = {
	/* HW_TYPE_ASIC */
	{"asic", 1, 256, 32768},
	{"reserved", 0, 0, 0},
	{"reserved2", 0, 0, 0},
	{"reserved3", 0, 0, 0},
	{"reserved4", 0, 0, 0}
};
656 
/* GRC parameter constants, indexed by enum dbg_grc_params.
 * Entry fields (see struct grc_param_defs):
 * {default_val[bb, k2]}, min, max, is_preset, is_persistent,
 * exclude_all_preset_val, {crash_preset_val[bb, k2]}
 */
static struct grc_param_defs s_grc_param_defs[] = {
	/* DBG_GRC_PARAM_DUMP_TSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_USTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_XSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_YSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PSTORM */
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_REGS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RAM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PBUF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IOR */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_VFC */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CM_CTX */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_RSS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CAU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_QM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_MCP */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DORQ */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_CFC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_IGU */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BRB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BTB */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_BMB */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED1 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MULD */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PRS */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DMAE */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_TM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_SDM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_DIF */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_STATIC */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_UNSTALL */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED2 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},

	/* DBG_GRC_PARAM_EXCLUDE_ALL */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_CRASH */
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_PARITY_SAFE */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CM */
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},

	/* DBG_GRC_PARAM_DUMP_PHY */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_MCP */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_NO_FW_VER */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_RESERVED3 */
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
	{{0, 1}, 0, 1, false, false, 0, {0, 1}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},

	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
};
796 
/* RSS memory definitions.
 * Entry fields (see struct rss_mem_defs): mem_name, type_name,
 * addr (128b units), entry_width (bits), {num_entries[bb, k2]}.
 */
static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid", "rss_cid", 0, 32,
	 {256, 320}},

	{"rss_mem_key_msb", "rss_key", 1024, 256,
	 {128, 208}},

	{"rss_mem_key_lsb", "rss_key", 2048, 64,
	 {128, 208}},

	{"rss_mem_info", "rss_info", 3072, 16,
	 {128, 208}},

	{"rss_mem_ind", "rss_ind", 4096, 16,
	 {16384, 26624}}
};
813 
/* VFC RAM definitions: mem_name, type_name, base_row, num_rows */
static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};
820 
/* Big RAM definitions (see struct big_ram_defs for field order);
 * ram_size values are in dwords, per chip {bb, k2}.
 */
static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224}},

	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760}},

	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864}}
};
837 
/* RBC reset registers and their per-chip {bb, k2} reset values */
static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400}},
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040}},
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007}},
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2}},
};
848 
/* PHY definitions (K2 only, per the _K2 register suffixes):
 * name, base GRC address, TBUS addr lo/hi, TBUS data lo/hi.
 */
static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
	{"sgmii_phy", MS_REG_MS_CMU_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
871 
/* Split type names, indexed by split type.
 * Note: SPLIT_TYPE_PORT_PF reuses the "port" name (same as SPLIT_TYPE_PORT).
 */
static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng"},

	/* SPLIT_TYPE_PORT */
	{"port"},

	/* SPLIT_TYPE_PF */
	{"pf"},

	/* SPLIT_TYPE_PORT_PF */
	{"port"},

	/* SPLIT_TYPE_VF */
	{"vf"}
};
888 
889 /******************************** Variables **********************************/
890 
/* The version of the calling app; must be set to a non-zero value before
 * qed_dbg_dev_init() will succeed.
 */
static u32 s_app_ver;
893 
894 /**************************** Private Functions ******************************/
895 
/* Intentionally empty; presumably a placeholder for build-time (static)
 * assertions — confirm intended use before removing.
 */
static void qed_static_asserts(void)
{
}
899 
900 /* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
	u32 val;

	/* memcpy avoids undefined behavior from a misaligned u32 access */
	memcpy(&val, buf, sizeof(val));

	return val;
}
908 
909 /* Sets the value of the specified GRC param */
910 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
911 			      enum dbg_grc_params grc_param, u32 val)
912 {
913 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
914 
915 	dev_data->grc.param_val[grc_param] = val;
916 }
917 
918 /* Returns the value of the specified GRC param */
919 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
920 			     enum dbg_grc_params grc_param)
921 {
922 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
923 
924 	return dev_data->grc.param_val[grc_param];
925 }
926 
927 /* Initializes the GRC parameters */
928 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
929 {
930 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
931 
932 	if (!dev_data->grc.params_initialized) {
933 		qed_dbg_grc_set_params_default(p_hwfn);
934 		dev_data->grc.params_initialized = 1;
935 	}
936 }
937 
938 /* Sets pointer and size for the specified binary buffer type */
939 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
940 				enum bin_dbg_buffer_type buf_type,
941 				const u32 *ptr, u32 size)
942 {
943 	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
944 
945 	buf->ptr = (void *)ptr;
946 	buf->size = size;
947 }
948 
/* Initializes debug data for the specified device.
 *
 * Returns DBG_STATUS_OK on success (or if already initialized),
 * DBG_STATUS_APP_VERSION_NOT_SET if s_app_ver was never set, or
 * DBG_STATUS_UNKNOWN_CHIP for an unsupported chip.
 */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 num_pfs = 0, max_pfs_per_port = 0;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	if (!s_app_ver)
		return DBG_STATUS_APP_VERSION_NOT_SET;

	/* Set chip */
	if (QED_IS_K2(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_K2;
		num_pfs = MAX_NUM_PFS_K2;
		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_BB;
		num_pfs = MAX_NUM_PFS_BB;
		max_pfs_per_port = MAX_NUM_PFS_BB;
	} else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

	/* Set HW type */
	dev_data->hw_type = HW_TYPE_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;

	/* Set port mode */
	switch (p_hwfn->cdev->num_ports_in_engine) {
	case 1:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
		break;
	case 2:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
		break;
	case 4:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
		break;
	}

	/* Set 100G mode */
	if (QED_IS_CMT(p_hwfn->cdev))
		dev_data->mode_enable[MODE_100G] = 1;

	/* Set number of ports.
	 * NOTE(review): if num_ports_in_engine is none of 1/2/4, num_ports is
	 * left unchanged and the division below could divide by zero —
	 * presumably unreachable; confirm against callers.
	 */
	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
	    dev_data->mode_enable[MODE_100G])
		dev_data->num_ports = 1;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
		dev_data->num_ports = 2;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
		dev_data->num_ports = 4;

	/* Set number of PFs per port */
	dev_data->num_pfs_per_port = min_t(u32,
					   num_pfs / dev_data->num_ports,
					   max_pfs_per_port);

	/* Initializes the GRC parameters */
	qed_dbg_grc_init_params(p_hwfn);

	dev_data->use_dmae = true;
	dev_data->initialized = 1;

	return DBG_STATUS_OK;
}
1021 
1022 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
1023 					     enum block_id block_id)
1024 {
1025 	const struct dbg_block *dbg_block;
1026 
1027 	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
1028 	return dbg_block + block_id;
1029 }
1030 
1031 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
1032 							       *p_hwfn,
1033 							       enum block_id
1034 							       block_id)
1035 {
1036 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1037 
1038 	return (const struct dbg_block_chip *)
1039 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
1040 	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
1041 }
1042 
1043 static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
1044 							 *p_hwfn,
1045 							 u8 reset_reg_id)
1046 {
1047 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1048 
1049 	return (const struct dbg_reset_reg *)
1050 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
1051 	    reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
1052 }
1053 
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 * If the fw_info location read from the Storm RAM has a zero or oversized
 * size field, fw_info is left zeroed.
 */
static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u8 storm_id, struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof(fw_info_location));

	/* Dword-by-dword GRC read of the location descriptor */
	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* Read FW version info from Storm RAM */
	size = le32_to_cpu(fw_info_location.size);
	if (!size || size > sizeof(*fw_info))
		return;

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
}
1093 
1094 /* Dumps the specified string to the specified buffer.
1095  * Returns the dumped size in bytes.
1096  */
1097 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1098 {
1099 	if (dump)
1100 		strcpy(dump_buf, str);
1101 
1102 	return (u32)strlen(str) + 1;
1103 }
1104 
1105 /* Dumps zeros to align the specified buffer to dwords.
1106  * Returns the dumped size in bytes.
1107  */
1108 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1109 {
1110 	u8 offset_in_dword, align_size;
1111 
1112 	offset_in_dword = (u8)(byte_offset & 0x3);
1113 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1114 
1115 	if (dump && align_size)
1116 		memset(dump_buf, 0, align_size);
1117 
1118 	return align_size;
1119 }
1120 
1121 /* Writes the specified string param to the specified buffer.
1122  * Returns the dumped size in dwords.
1123  */
1124 static u32 qed_dump_str_param(u32 *dump_buf,
1125 			      bool dump,
1126 			      const char *param_name, const char *param_val)
1127 {
1128 	char *char_buf = (char *)dump_buf;
1129 	u32 offset = 0;
1130 
1131 	/* Dump param name */
1132 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1133 
1134 	/* Indicate a string param value */
1135 	if (dump)
1136 		*(char_buf + offset) = 1;
1137 	offset++;
1138 
1139 	/* Dump param value */
1140 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1141 
1142 	/* Align buffer to next dword */
1143 	offset += qed_dump_align(char_buf + offset, dump, offset);
1144 
1145 	return BYTES_TO_DWORDS(offset);
1146 }
1147 
1148 /* Writes the specified numeric param to the specified buffer.
1149  * Returns the dumped size in dwords.
1150  */
1151 static u32 qed_dump_num_param(u32 *dump_buf,
1152 			      bool dump, const char *param_name, u32 param_val)
1153 {
1154 	char *char_buf = (char *)dump_buf;
1155 	u32 offset = 0;
1156 
1157 	/* Dump param name */
1158 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1159 
1160 	/* Indicate a numeric param value */
1161 	if (dump)
1162 		*(char_buf + offset) = 0;
1163 	offset++;
1164 
1165 	/* Align buffer to next dword */
1166 	offset += qed_dump_align(char_buf + offset, dump, offset);
1167 
1168 	/* Dump param value (and change offset from bytes to dwords) */
1169 	offset = BYTES_TO_DWORDS(offset);
1170 	if (dump)
1171 		*(dump_buf + offset) = param_val;
1172 	offset++;
1173 
1174 	return offset;
1175 }
1176 
1177 /* Reads the FW version and writes it as a param to the specified buffer.
1178  * Returns the dumped size in dwords.
1179  */
1180 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1181 				 struct qed_ptt *p_ptt,
1182 				 u32 *dump_buf, bool dump)
1183 {
1184 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1185 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1186 	struct fw_info fw_info = { {0}, {0} };
1187 	u32 offset = 0;
1188 
1189 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1190 		/* Read FW info from chip */
1191 		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1192 
1193 		/* Create FW version/image strings */
1194 		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1195 			     "%d_%d_%d_%d", fw_info.ver.num.major,
1196 			     fw_info.ver.num.minor, fw_info.ver.num.rev,
1197 			     fw_info.ver.num.eng) < 0)
1198 			DP_NOTICE(p_hwfn,
1199 				  "Unexpected debug error: invalid FW version string\n");
1200 		switch (fw_info.ver.image_id) {
1201 		case FW_IMG_KUKU:
1202 			strcpy(fw_img_str, "kuku");
1203 			break;
1204 		case FW_IMG_MAIN:
1205 			strcpy(fw_img_str, "main");
1206 			break;
1207 		case FW_IMG_L2B:
1208 			strcpy(fw_img_str, "l2b");
1209 			break;
1210 		default:
1211 			strcpy(fw_img_str, "unknown");
1212 			break;
1213 		}
1214 	}
1215 
1216 	/* Dump FW version, image and timestamp */
1217 	offset += qed_dump_str_param(dump_buf + offset,
1218 				     dump, "fw-version", fw_ver_str);
1219 	offset += qed_dump_str_param(dump_buf + offset,
1220 				     dump, "fw-image", fw_img_str);
1221 	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
1222 				     le32_to_cpu(fw_info.ver.timestamp));
1223 
1224 	return offset;
1225 }
1226 
1227 /* Reads the MFW version and writes it as a param to the specified buffer.
1228  * Returns the dumped size in dwords.
1229  */
1230 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1231 				  struct qed_ptt *p_ptt,
1232 				  u32 *dump_buf, bool dump)
1233 {
1234 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1235 
1236 	if (dump &&
1237 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1238 		u32 global_section_offsize, global_section_addr, mfw_ver;
1239 		u32 public_data_addr, global_section_offsize_addr;
1240 
1241 		/* Find MCP public data GRC address. Needs to be ORed with
1242 		 * MCP_REG_SCRATCH due to a HW bug.
1243 		 */
1244 		public_data_addr = qed_rd(p_hwfn,
1245 					  p_ptt,
1246 					  MISC_REG_SHARED_MEM_ADDR) |
1247 				   MCP_REG_SCRATCH;
1248 
1249 		/* Find MCP public global section offset */
1250 		global_section_offsize_addr = public_data_addr +
1251 					      offsetof(struct mcp_public_data,
1252 						       sections) +
1253 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
1254 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
1255 						global_section_offsize_addr);
1256 		global_section_addr =
1257 			MCP_REG_SCRATCH +
1258 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
1259 
1260 		/* Read MFW version from MCP public global section */
1261 		mfw_ver = qed_rd(p_hwfn, p_ptt,
1262 				 global_section_addr +
1263 				 offsetof(struct public_global, mfw_ver));
1264 
1265 		/* Dump MFW version param */
1266 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
1267 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
1268 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
1269 			DP_NOTICE(p_hwfn,
1270 				  "Unexpected debug error: invalid MFW version string\n");
1271 	}
1272 
1273 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1274 }
1275 
1276 /* Reads the chip revision from the chip and writes it as a param to the
1277  * specified buffer. Returns the dumped size in dwords.
1278  */
1279 static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1280 					struct qed_ptt *p_ptt,
1281 					u32 *dump_buf, bool dump)
1282 {
1283 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1284 	char param_str[3] = "??";
1285 
1286 	if (dev_data->hw_type == HW_TYPE_ASIC) {
1287 		u32 chip_rev, chip_metal;
1288 
1289 		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1290 		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1291 
1292 		param_str[0] = 'a' + (u8)chip_rev;
1293 		param_str[1] = '0' + (u8)chip_metal;
1294 	}
1295 
1296 	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1297 }
1298 
/* Writes a section header to the specified buffer.
 * Returns the dumped size in dwords.
 *
 * A section header is encoded exactly like a numeric param: the param name
 * is the section name, and the value is the number of params that follow
 * in the section.
 */
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
1307 
1308 /* Writes the common global params to the specified buffer.
1309  * Returns the dumped size in dwords.
1310  */
1311 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1312 					 struct qed_ptt *p_ptt,
1313 					 u32 *dump_buf,
1314 					 bool dump,
1315 					 u8 num_specific_global_params)
1316 {
1317 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1318 	u32 offset = 0;
1319 	u8 num_params;
1320 
1321 	/* Dump global params section header */
1322 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1323 		(dev_data->chip_id == CHIP_BB ? 1 : 0);
1324 	offset += qed_dump_section_hdr(dump_buf + offset,
1325 				       dump, "global_params", num_params);
1326 
1327 	/* Store params */
1328 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1329 	offset += qed_dump_mfw_ver_param(p_hwfn,
1330 					 p_ptt, dump_buf + offset, dump);
1331 	offset += qed_dump_chip_revision_param(p_hwfn,
1332 					       p_ptt, dump_buf + offset, dump);
1333 	offset += qed_dump_num_param(dump_buf + offset,
1334 				     dump, "tools-version", TOOLS_VERSION);
1335 	offset += qed_dump_str_param(dump_buf + offset,
1336 				     dump,
1337 				     "chip",
1338 				     s_chip_defs[dev_data->chip_id].name);
1339 	offset += qed_dump_str_param(dump_buf + offset,
1340 				     dump,
1341 				     "platform",
1342 				     s_hw_type_defs[dev_data->hw_type].name);
1343 	offset += qed_dump_num_param(dump_buf + offset,
1344 				     dump, "pci-func", p_hwfn->abs_pf_id);
1345 	offset += qed_dump_num_param(dump_buf + offset,
1346 				     dump, "epoch", qed_get_epoch_time());
1347 	if (dev_data->chip_id == CHIP_BB)
1348 		offset += qed_dump_num_param(dump_buf + offset,
1349 					     dump, "path", QED_PATH_ID(p_hwfn));
1350 
1351 	return offset;
1352 }
1353 
1354 /* Writes the "last" section (including CRC) to the specified buffer at the
1355  * given offset. Returns the dumped size in dwords.
1356  */
1357 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1358 {
1359 	u32 start_offset = offset;
1360 
1361 	/* Dump CRC section header */
1362 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1363 
1364 	/* Calculate CRC32 and add it to the dword after the "last" section */
1365 	if (dump)
1366 		*(dump_buf + offset) = ~crc32(0xffffffff,
1367 					      (u8 *)dump_buf,
1368 					      DWORDS_TO_BYTES(offset));
1369 
1370 	offset++;
1371 
1372 	return offset - start_offset;
1373 }
1374 
1375 /* Update blocks reset state  */
1376 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1377 					  struct qed_ptt *p_ptt)
1378 {
1379 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1380 	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1381 	u8 rst_reg_id;
1382 	u32 blk_id;
1383 
1384 	/* Read reset registers */
1385 	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
1386 		const struct dbg_reset_reg *rst_reg;
1387 		bool rst_reg_removed;
1388 		u32 rst_reg_addr;
1389 
1390 		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
1391 		rst_reg_removed = GET_FIELD(rst_reg->data,
1392 					    DBG_RESET_REG_IS_REMOVED);
1393 		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
1394 							 DBG_RESET_REG_ADDR));
1395 
1396 		if (!rst_reg_removed)
1397 			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
1398 						     rst_reg_addr);
1399 	}
1400 
1401 	/* Check if blocks are in reset */
1402 	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
1403 		const struct dbg_block_chip *blk;
1404 		bool has_rst_reg;
1405 		bool is_removed;
1406 
1407 		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
1408 		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1409 		has_rst_reg = GET_FIELD(blk->flags,
1410 					DBG_BLOCK_CHIP_HAS_RESET_REG);
1411 
1412 		if (!is_removed && has_rst_reg)
1413 			dev_data->block_in_reset[blk_id] =
1414 			    !(reg_val[blk->reset_reg_id] &
1415 			      BIT(blk->reset_reg_bit_offset));
1416 	}
1417 }
1418 
1419 /* is_mode_match recursive function */
1420 static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
1421 				  u16 *modes_buf_offset, u8 rec_depth)
1422 {
1423 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1424 	u8 *dbg_array;
1425 	bool arg1, arg2;
1426 	u8 tree_val;
1427 
1428 	if (rec_depth > MAX_RECURSION_DEPTH) {
1429 		DP_NOTICE(p_hwfn,
1430 			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
1431 		return false;
1432 	}
1433 
1434 	/* Get next element from modes tree buffer */
1435 	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1436 	tree_val = dbg_array[(*modes_buf_offset)++];
1437 
1438 	switch (tree_val) {
1439 	case INIT_MODE_OP_NOT:
1440 		return !qed_is_mode_match_rec(p_hwfn,
1441 					      modes_buf_offset, rec_depth + 1);
1442 	case INIT_MODE_OP_OR:
1443 	case INIT_MODE_OP_AND:
1444 		arg1 = qed_is_mode_match_rec(p_hwfn,
1445 					     modes_buf_offset, rec_depth + 1);
1446 		arg2 = qed_is_mode_match_rec(p_hwfn,
1447 					     modes_buf_offset, rec_depth + 1);
1448 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1449 							arg2) : (arg1 && arg2);
1450 	default:
1451 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1452 	}
1453 }
1454 
/* Returns true if the mode (specified using modes_buf_offset) is enabled.
 * *modes_buf_offset is advanced past the evaluated mode expression.
 */
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}
1460 
1461 /* Enable / disable the Debug block */
1462 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1463 				     struct qed_ptt *p_ptt, bool enable)
1464 {
1465 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1466 }
1467 
1468 /* Resets the Debug block */
1469 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1470 				    struct qed_ptt *p_ptt)
1471 {
1472 	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1473 	const struct dbg_reset_reg *reset_reg;
1474 	const struct dbg_block_chip *block;
1475 
1476 	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1477 	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1478 	reset_reg_addr =
1479 	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1480 
1481 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
1482 	new_reset_reg_val =
1483 	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1484 
1485 	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1486 	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1487 }
1488 
/* Enable / disable Debug Bus clients according to the specified mask
 * (1 = enable, 0 = disable). The mask is written as-is to the client
 * enable register.
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}
1497 
1498 static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
1499 				    struct qed_ptt *p_ptt,
1500 				    enum block_id block_id,
1501 				    u8 line_id,
1502 				    u8 enable_mask,
1503 				    u8 right_shift,
1504 				    u8 force_valid_mask, u8 force_frame_mask)
1505 {
1506 	const struct dbg_block_chip *block =
1507 		qed_get_dbg_block_per_chip(p_hwfn, block_id);
1508 
1509 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
1510 	       line_id);
1511 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
1512 	       enable_mask);
1513 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
1514 	       right_shift);
1515 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
1516 	       force_valid_mask);
1517 	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1518 	       force_frame_mask);
1519 }
1520 
1521 /* Disable debug bus in all blocks */
1522 static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
1523 				   struct qed_ptt *p_ptt)
1524 {
1525 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1526 	u32 block_id;
1527 
1528 	/* Disable all blocks */
1529 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
1530 		const struct dbg_block_chip *block_per_chip =
1531 		    qed_get_dbg_block_per_chip(p_hwfn,
1532 					       (enum block_id)block_id);
1533 
1534 		if (GET_FIELD(block_per_chip->flags,
1535 			      DBG_BLOCK_CHIP_IS_REMOVED) ||
1536 		    dev_data->block_in_reset[block_id])
1537 			continue;
1538 
1539 		/* Disable debug bus */
1540 		if (GET_FIELD(block_per_chip->flags,
1541 			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
1542 			u32 dbg_en_addr =
1543 				block_per_chip->dbg_dword_enable_reg_addr;
1544 			u16 modes_buf_offset =
1545 			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1546 				      DBG_MODE_HDR_MODES_BUF_OFFSET);
1547 			bool eval_mode =
1548 			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1549 				      DBG_MODE_HDR_EVAL_MODE) > 0;
1550 
1551 			if (!eval_mode ||
1552 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1553 				qed_wr(p_hwfn, p_ptt,
1554 				       DWORDS_TO_BYTES(dbg_en_addr),
1555 				       0);
1556 		}
1557 	}
1558 }
1559 
/* Returns true if the specified entity (indicated by GRC param) should be
 * included in the dump, false otherwise.
 */
static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
{
	/* A non-zero GRC param value means "include in dump" */
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
}
1568 
1569 /* Returns the storm_id that matches the specified Storm letter,
1570  * or MAX_DBG_STORMS if invalid storm letter.
1571  */
1572 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1573 {
1574 	u8 storm_id;
1575 
1576 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1577 		if (s_storm_defs[storm_id].letter == storm_letter)
1578 			return (enum dbg_storms)storm_id;
1579 
1580 	return MAX_DBG_STORMS;
1581 }
1582 
/* Returns true if the specified Storm should be included in the dump, false
 * otherwise.
 */
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	/* Storm IDs double as GRC param indices for per-Storm inclusion */
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}
1591 
/* Returns true if the specified memory should be included in the dump, false
 * otherwise.
 *
 * Decision order: the associated Storm (if any) must be included, then
 * Big-RAM groups are decided by their own GRC params, then the remaining
 * memory groups map to dedicated GRC dump params. Unknown groups are
 * always included.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	const struct dbg_block *block;
	u8 i;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
			return false;
	}

	/* Big-RAM memory groups are controlled by per-RAM GRC params */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	/* Map each remaining memory group to its GRC dump param */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		return true;
	}
}
1668 
1669 /* Stalls all Storms */
1670 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1671 				 struct qed_ptt *p_ptt, bool stall)
1672 {
1673 	u32 reg_addr;
1674 	u8 storm_id;
1675 
1676 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1677 		if (!qed_grc_is_storm_included(p_hwfn,
1678 					       (enum dbg_storms)storm_id))
1679 			continue;
1680 
1681 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1682 		    SEM_FAST_REG_STALL_0;
1683 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
1684 	}
1685 
1686 	msleep(STALL_DELAY_MS);
1687 }
1688 
/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
 * taken out of reset.
 */
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, bool rbc_only)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;
	u32 i;

	/* Take RBCs out of reset */
	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
			qed_wr(p_hwfn,
			       p_ptt,
			       s_rbc_reset_defs[i].reset_reg_addr +
			       RESET_REG_UNRESET_OFFSET,
			       s_rbc_reset_defs[i].reset_val[chip_id]);

	if (!rbc_only) {
		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
		u8 reset_reg_id;
		u32 block_id;

		/* Fill reset regs values: accumulate, per reset register,
		 * the bits of all blocks that should be un-reset before
		 * dumping, so each register is written only once below.
		 */
		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
			bool is_removed, has_reset_reg, unreset_before_dump;
			const struct dbg_block_chip *block;

			block = qed_get_dbg_block_per_chip(p_hwfn,
							   (enum block_id)
							   block_id);
			is_removed =
			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
			has_reset_reg =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_HAS_RESET_REG);
			unreset_before_dump =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);

			if (!is_removed && has_reset_reg && unreset_before_dump)
				reg_val[block->reset_reg_id] |=
				    BIT(block->reset_reg_bit_offset);
		}

		/* Write reset registers: setting a bit at the register's
		 * unreset offset takes the corresponding block out of reset.
		 */
		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		     reset_reg_id++) {
			const struct dbg_reset_reg *reset_reg;
			u32 reset_reg_addr;

			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

			/* Skip reset registers that don't exist on this chip */
			if (GET_FIELD
			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
				continue;

			if (reg_val[reset_reg_id]) {
				reset_reg_addr =
				    GET_FIELD(reset_reg->data,
					      DBG_RESET_REG_ADDR);
				qed_wr(p_hwfn,
				       p_ptt,
				       DWORDS_TO_BYTES(reset_reg_addr) +
				       RESET_REG_UNRESET_OFFSET,
				       reg_val[reset_reg_id]);
			}
		}
	}
}
1760 
1761 /* Returns the attention block data of the specified block */
1762 static const struct dbg_attn_block_type_data *
1763 qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1764 			enum block_id block_id, enum dbg_attn_type attn_type)
1765 {
1766 	const struct dbg_attn_block *base_attn_block_arr =
1767 	    (const struct dbg_attn_block *)
1768 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1769 
1770 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
1771 }
1772 
1773 /* Returns the attention registers of the specified block */
1774 static const struct dbg_attn_reg *
1775 qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1776 			enum block_id block_id, enum dbg_attn_type attn_type,
1777 			u8 *num_attn_regs)
1778 {
1779 	const struct dbg_attn_block_type_data *block_type_data =
1780 	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1781 
1782 	*num_attn_regs = block_type_data->num_regs;
1783 
1784 	return (const struct dbg_attn_reg *)
1785 		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1786 		block_type_data->regs_offset;
1787 }
1788 
1789 /* For each block, clear the status of all parities */
1790 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
1791 				   struct qed_ptt *p_ptt)
1792 {
1793 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1794 	const struct dbg_attn_reg *attn_reg_arr;
1795 	u32 block_id, sts_clr_address;
1796 	u8 reg_idx, num_attn_regs;
1797 
1798 	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1799 		if (dev_data->block_in_reset[block_id])
1800 			continue;
1801 
1802 		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
1803 						       (enum block_id)block_id,
1804 						       ATTN_TYPE_PARITY,
1805 						       &num_attn_regs);
1806 
1807 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
1808 			const struct dbg_attn_reg *reg_data =
1809 				&attn_reg_arr[reg_idx];
1810 			u16 modes_buf_offset;
1811 			bool eval_mode;
1812 
1813 			/* Check mode */
1814 			eval_mode = GET_FIELD(reg_data->mode.data,
1815 					      DBG_MODE_HDR_EVAL_MODE) > 0;
1816 			modes_buf_offset =
1817 				GET_FIELD(reg_data->mode.data,
1818 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
1819 
1820 			sts_clr_address = reg_data->sts_clr_address;
1821 			/* If Mode match: clear parity status */
1822 			if (!eval_mode ||
1823 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1824 				qed_rd(p_hwfn, p_ptt,
1825 				       DWORDS_TO_BYTES(sts_clr_address));
1826 		}
1827 	}
1828 }
1829 
1830 /* Finds the meta data image in NVRAM */
1831 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
1832 					    struct qed_ptt *p_ptt,
1833 					    u32 image_type,
1834 					    u32 *nvram_offset_bytes,
1835 					    u32 *nvram_size_bytes)
1836 {
1837 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
1838 	struct mcp_file_att file_att;
1839 	int nvm_result;
1840 
1841 	/* Call NVRAM get file command */
1842 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
1843 					p_ptt,
1844 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
1845 					image_type,
1846 					&ret_mcp_resp,
1847 					&ret_mcp_param,
1848 					&ret_txn_size,
1849 					(u32 *)&file_att, false);
1850 
1851 	/* Check response */
1852 	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
1853 	    FW_MSG_CODE_NVM_OK)
1854 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
1855 
1856 	/* Update return values */
1857 	*nvram_offset_bytes = file_att.nvm_start_addr;
1858 	*nvram_size_bytes = file_att.len;
1859 
1860 	DP_VERBOSE(p_hwfn,
1861 		   QED_MSG_DEBUG,
1862 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
1863 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
1864 
1865 	/* Check alignment */
1866 	if (*nvram_size_bytes & 0x3)
1867 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
1868 
1869 	return DBG_STATUS_OK;
1870 }
1871 
/* Reads data from NVRAM into ret_buf, in chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes per MFW mailbox transaction.
 */
static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 nvram_offset_bytes,
				      u32 nvram_size_bytes, u32 *ret_buf)
{
	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
	s32 bytes_left = nvram_size_bytes;
	u32 read_offset = 0, param = 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_DEBUG,
		   "nvram_read: reading image of size %d bytes from NVRAM\n",
		   nvram_size_bytes);

	do {
		/* Cap each transaction at the MFW buffer size */
		bytes_to_copy =
		    (bytes_left >
		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;

		/* Call NVRAM read command */
		SET_MFW_FIELD(param,
			      DRV_MB_PARAM_NVM_OFFSET,
			      nvram_offset_bytes + read_offset);
		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
				       &ret_mcp_resp,
				       &ret_mcp_param, &ret_read_size,
				       (u32 *)((u8 *)ret_buf + read_offset),
				       false))
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Check response */
		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
			return DBG_STATUS_NVRAM_READ_FAILED;

		/* Update read offset. The MFW reports the number of bytes it
		 * actually read, which may be less than requested.
		 */
		read_offset += ret_read_size;
		bytes_left -= ret_read_size;
	} while (bytes_left > 0);

	return DBG_STATUS_OK;
}
1916 
1917 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1918  * the following parameters are dumped:
1919  * - count: no. of dumped entries
1920  * - split_type: split type
1921  * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1922  * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1923  */
1924 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1925 				 bool dump,
1926 				 u32 num_reg_entries,
1927 				 enum init_split_types split_type,
1928 				 u8 split_id, const char *reg_type_name)
1929 {
1930 	u8 num_params = 2 +
1931 	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1932 	u32 offset = 0;
1933 
1934 	offset += qed_dump_section_hdr(dump_buf + offset,
1935 				       dump, "grc_regs", num_params);
1936 	offset += qed_dump_num_param(dump_buf + offset,
1937 				     dump, "count", num_reg_entries);
1938 	offset += qed_dump_str_param(dump_buf + offset,
1939 				     dump, "split",
1940 				     s_split_type_defs[split_type].name);
1941 	if (split_type != SPLIT_TYPE_NONE)
1942 		offset += qed_dump_num_param(dump_buf + offset,
1943 					     dump, "id", split_id);
1944 	if (reg_type_name)
1945 		offset += qed_dump_str_param(dump_buf + offset,
1946 					     dump, "type", reg_type_name);
1947 
1948 	return offset;
1949 }
1950 
1951 /* Reads the specified registers into the specified buffer.
1952  * The addr and len arguments are specified in dwords.
1953  */
1954 void qed_read_regs(struct qed_hwfn *p_hwfn,
1955 		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1956 {
1957 	u32 i;
1958 
1959 	for (i = 0; i < len; i++)
1960 		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1961 }
1962 
/* Dumps the GRC registers in the specified address range.
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 * Reads via DMAE when enabled and worthwhile (or when the register is on a
 * wide bus and PROTECT_WIDE_BUS is set); otherwise reads via GRC under a
 * pretend matching the requested split (port/PF/VF).
 */
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
				   u8 split_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0, vf_id = 0;
	bool read_using_dmae = false;
	u32 thresh;
	u16 fid;

	/* Size-calculation mode: no HW access, just report the length */
	if (!dump)
		return len;

	/* Resolve the port/PF/VF identity implied by the split */
	switch (split_type) {
	case SPLIT_TYPE_PORT:
		port_id = split_id;
		break;
	case SPLIT_TYPE_PF:
		pf_id = split_id;
		break;
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);
		break;
	case SPLIT_TYPE_VF:
		vf_id = split_id;
		break;
	default:
		break;
	}

	/* Try reading using DMAE (not supported for VF splits) */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct qed_dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			dmae_params.port_id = port_id;
			break;
		case SPLIT_TYPE_PF:
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pfid = pf_id;
			break;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pfid = pf_id;
			break;
		default:
			break;
		}

		/* Execute DMAE command */
		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
						     p_ptt,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
						     len, &dmae_params);
		/* On DMAE failure, disable it for the rest of this dump and
		 * fall back to GRC reads below.
		 */
		if (!read_using_dmae) {
			dev_data->use_dmae = 0;
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Failed reading from chip using DMAE, using GRC instead\n");
		}
	}

	if (read_using_dmae)
		goto print_log;

	/* If not read using DMAE, read using GRC */

	/* Set pretend - skipped when the currently cached pretend config
	 * already matches the requested split, to avoid redundant writes.
	 */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			qed_port_pretend(p_hwfn, p_ptt, port_id);
			break;
		case SPLIT_TYPE_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			break;
		case SPLIT_TYPE_VF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
					  vf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		default:
			break;
		}

		/* Cache the new pretend config */
		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;
	}

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

print_log:
	/* Print a progress message every 'log_thresh' dwords read */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Dumped %d registers...\n", dev_data->num_regs_read);

	return len;
}
2099 
2100 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2101  * The addr and len arguments are specified in dwords.
2102  */
2103 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2104 				      bool dump, u32 addr, u32 len)
2105 {
2106 	if (dump)
2107 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2108 
2109 	return 1;
2110 }
2111 
2112 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2113  * The addr and len arguments are specified in dwords.
2114  */
2115 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2116 				  struct qed_ptt *p_ptt,
2117 				  u32 *dump_buf,
2118 				  bool dump, u32 addr, u32 len, bool wide_bus,
2119 				  enum init_split_types split_type, u8 split_id)
2120 {
2121 	u32 offset = 0;
2122 
2123 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2124 	offset += qed_grc_dump_addr_range(p_hwfn,
2125 					  p_ptt,
2126 					  dump_buf + offset,
2127 					  dump, addr, len, wide_bus,
2128 					  split_type, split_id);
2129 
2130 	return offset;
2131 }
2132 
2133 /* Dumps GRC registers sequence with skip cycle.
2134  * Returns the dumped size in dwords.
2135  * - addr:	start GRC address in dwords
2136  * - total_len:	total no. of dwords to dump
2137  * - read_len:	no. consecutive dwords to read
2138  * - skip_len:	no. of dwords to skip (and fill with zeros)
2139  */
2140 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2141 				       struct qed_ptt *p_ptt,
2142 				       u32 *dump_buf,
2143 				       bool dump,
2144 				       u32 addr,
2145 				       u32 total_len,
2146 				       u32 read_len, u32 skip_len)
2147 {
2148 	u32 offset = 0, reg_offset = 0;
2149 
2150 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2151 
2152 	if (!dump)
2153 		return offset + total_len;
2154 
2155 	while (reg_offset < total_len) {
2156 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2157 
2158 		offset += qed_grc_dump_addr_range(p_hwfn,
2159 						  p_ptt,
2160 						  dump_buf + offset,
2161 						  dump,  addr, curr_len, false,
2162 						  SPLIT_TYPE_NONE, 0);
2163 		reg_offset += curr_len;
2164 		addr += curr_len;
2165 
2166 		if (reg_offset < total_len) {
2167 			curr_len = min_t(u32, skip_len, total_len - skip_len);
2168 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2169 			offset += curr_len;
2170 			reg_offset += curr_len;
2171 			addr += curr_len;
2172 		}
2173 	}
2174 
2175 	return offset;
2176 }
2177 
/* Dumps GRC registers entries. Returns the dumped size in dwords.
 * Walks the input buffer, which is a sequence of conditional headers, each
 * followed by 'data_size' register entries. Entries whose mode doesn't match
 * or whose block is disabled are skipped. The number of entries actually
 * dumped is returned via num_dumped_reg_entries.
 */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     enum init_split_types split_type,
				     u8 split_id,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	/* input_offset counts dwords; the pointer arithmetic below relies on
	 * dbg_dump_cond_hdr / dbg_dump_reg each occupying exactly one dword
	 * (NOTE(review): assumption from usage - confirm against qed_hsi.h).
	 */
	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip this header's entries if mode mismatches or the block
		 * is excluded from the dump.
		 */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		/* Dump each register entry under this conditional header */
		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;
			u32 addr, len;
			bool wide_bus;

			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 len,
							 wide_bus,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
2241 
2242 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2243 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2244 				   struct qed_ptt *p_ptt,
2245 				   struct virt_mem_desc input_regs_arr,
2246 				   u32 *dump_buf,
2247 				   bool dump,
2248 				   bool block_enable[MAX_BLOCK_ID],
2249 				   enum init_split_types split_type,
2250 				   u8 split_id, const char *reg_type_name)
2251 {
2252 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2253 	enum init_split_types hdr_split_type = split_type;
2254 	u32 num_dumped_reg_entries, offset;
2255 	u8 hdr_split_id = split_id;
2256 
2257 	/* In PORT_PF split type, print a port split header */
2258 	if (split_type == SPLIT_TYPE_PORT_PF) {
2259 		hdr_split_type = SPLIT_TYPE_PORT;
2260 		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2261 	}
2262 
2263 	/* Calculate register dump header size (and skip it for now) */
2264 	offset = qed_grc_dump_regs_hdr(dump_buf,
2265 				       false,
2266 				       0,
2267 				       hdr_split_type,
2268 				       hdr_split_id, reg_type_name);
2269 
2270 	/* Dump registers */
2271 	offset += qed_grc_dump_regs_entries(p_hwfn,
2272 					    p_ptt,
2273 					    input_regs_arr,
2274 					    dump_buf + offset,
2275 					    dump,
2276 					    split_type,
2277 					    split_id,
2278 					    block_enable,
2279 					    &num_dumped_reg_entries);
2280 
2281 	/* Write register dump header */
2282 	if (dump && num_dumped_reg_entries > 0)
2283 		qed_grc_dump_regs_hdr(dump_buf,
2284 				      dump,
2285 				      num_dumped_reg_entries,
2286 				      hdr_split_type,
2287 				      hdr_split_id, reg_type_name);
2288 
2289 	return num_dumped_reg_entries > 0 ? offset : 0;
2290 }
2291 
/* Dumps registers according to the input registers array. Returns the dumped
 * size in dwords.
 * The input buffer is a sequence of split headers, each followed by
 * 'split_data_size' dwords of register entries that are dumped once per
 * split instance (port/PF/VF). Any pretend set while dumping is cancelled
 * at the end.
 */
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;
		u8 split_id;

		/* Parse split header (pointer arithmetic is in split-hdr
		 * units; input_offset tracks dwords - NOTE(review): assumes
		 * the header is one dword, confirm against qed_hsi.h).
		 */
		split_hdr =
		    (const struct dbg_dump_split_hdr *)
		    dbg_buf->ptr + input_offset++;
		split_type =
		    GET_FIELD(split_hdr->hdr,
			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		    input_offset;
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Derive the number of split instances to dump */
		switch (split_type) {
		case SPLIT_TYPE_NONE:
			split_count = 1;
			break;
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
			break;
		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			break;
		case SPLIT_TYPE_VF:
			split_count = dev_data->num_vfs;
			break;
		default:
			/* Unknown split type - abort the dump */
			return 0;
		}

		/* Dump the same register list once per split instance */
		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump, block_enable,
							  split_type,
							  split_id,
							  reg_type_name);

		input_offset += split_data_size;
	}

	/* Cancel pretends (pretend to original PF) */
	if (dump) {
		qed_fid_pretend(p_hwfn, p_ptt,
				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					    p_hwfn->rel_pf_id));
		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
		dev_data->pretend.split_id = 0;
	}

	return offset;
}
2370 
2371 /* Dump reset registers. Returns the dumped size in dwords. */
2372 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2373 				   struct qed_ptt *p_ptt,
2374 				   u32 *dump_buf, bool dump)
2375 {
2376 	u32 offset = 0, num_regs = 0;
2377 	u8 reset_reg_id;
2378 
2379 	/* Calculate header size */
2380 	offset += qed_grc_dump_regs_hdr(dump_buf,
2381 					false,
2382 					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2383 
2384 	/* Write reset registers */
2385 	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2386 	     reset_reg_id++) {
2387 		const struct dbg_reset_reg *reset_reg;
2388 		u32 reset_reg_addr;
2389 
2390 		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2391 
2392 		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2393 			continue;
2394 
2395 		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2396 		offset += qed_grc_dump_reg_entry(p_hwfn,
2397 						 p_ptt,
2398 						 dump_buf + offset,
2399 						 dump,
2400 						 reset_reg_addr,
2401 						 1, false, SPLIT_TYPE_NONE, 0);
2402 		num_regs++;
2403 	}
2404 
2405 	/* Write header */
2406 	if (dump)
2407 		qed_grc_dump_regs_hdr(dump_buf,
2408 				      true, num_regs, SPLIT_TYPE_NONE,
2409 				      0, "RESET_REGS");
2410 
2411 	return offset;
2412 }
2413 
/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 * Produces two sections: parity attention mask/status registers per physical
 * block ("ATTN_REGS"), then per-Storm stall status registers ("REGS"). Each
 * section header is written twice: an empty placeholder first, then
 * overwritten with the real entry count once known.
 */
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, stall_regs_offset;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;
	u32 num_reg_entries = 0;

	/* Write empty header for attention registers */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write parity registers */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks in reset can't be read (skipped only when actually
		 * dumping; size calculation still counts them)
		 */
		if (dev_data->block_in_reset[block_id] && dump)
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;
			u32 addr;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (eval_mode &&
			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
				continue;

			/* Mode match: dump the mask register, then the
			 * status register (two entries per attention reg)
			 */
			addr = reg_data->mask_address;
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			addr = GET_FIELD(reg_data->data,
					 DBG_ATTN_REG_STS_ADDRESS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 1, false,
							 SPLIT_TYPE_NONE, 0);
			num_reg_entries += 2;
		}
	}

	/* Overwrite header for attention registers */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write empty header for stall registers */
	stall_regs_offset = offset;
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, SPLIT_TYPE_NONE, 0, "REGS");

	/* Write Storm stall status registers */
	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
	     storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 addr;

		/* Skip Storms whose SEM block is in reset (only when dumping) */
		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
			continue;

		addr =
		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				    SEM_FAST_REG_STALLED);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 addr,
						 1,
						 false, SPLIT_TYPE_NONE, 0);
		num_reg_entries++;
	}

	/* Overwrite header for stall registers */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
				      true,
				      num_reg_entries,
				      SPLIT_TYPE_NONE, 0, "REGS");

	return offset;
}
2524 
2525 /* Dumps registers that can't be represented in the debug arrays */
2526 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2527 				     struct qed_ptt *p_ptt,
2528 				     u32 *dump_buf, bool dump)
2529 {
2530 	u32 offset = 0, addr;
2531 
2532 	offset += qed_grc_dump_regs_hdr(dump_buf,
2533 					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2534 
2535 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2536 	 * skipped).
2537 	 */
2538 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2539 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2540 					      p_ptt,
2541 					      dump_buf + offset,
2542 					      dump,
2543 					      addr,
2544 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2545 					      7,
2546 					      1);
2547 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2548 	offset +=
2549 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2550 					p_ptt,
2551 					dump_buf + offset,
2552 					dump,
2553 					addr,
2554 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2555 					7,
2556 					1);
2557 
2558 	return offset;
2559 }
2560 
2561 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2562  * dwords. The following parameters are dumped:
2563  * - name:	   dumped only if it's not NULL.
2564  * - addr:	   in dwords, dumped only if name is NULL.
2565  * - len:	   in dwords, always dumped.
2566  * - width:	   dumped if it's not zero.
2567  * - packed:	   dumped only if it's not false.
2568  * - mem_group:	   always dumped.
2569  * - is_storm:	   true only if the memory is related to a Storm.
2570  * - storm_letter: valid only if is_storm is true.
2571  *
2572  */
2573 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2574 				u32 *dump_buf,
2575 				bool dump,
2576 				const char *name,
2577 				u32 addr,
2578 				u32 len,
2579 				u32 bit_width,
2580 				bool packed,
2581 				const char *mem_group, char storm_letter)
2582 {
2583 	u8 num_params = 3;
2584 	u32 offset = 0;
2585 	char buf[64];
2586 
2587 	if (!len)
2588 		DP_NOTICE(p_hwfn,
2589 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2590 
2591 	if (bit_width)
2592 		num_params++;
2593 	if (packed)
2594 		num_params++;
2595 
2596 	/* Dump section header */
2597 	offset += qed_dump_section_hdr(dump_buf + offset,
2598 				       dump, "grc_mem", num_params);
2599 
2600 	if (name) {
2601 		/* Dump name */
2602 		if (storm_letter) {
2603 			strcpy(buf, "?STORM_");
2604 			buf[0] = storm_letter;
2605 			strcpy(buf + strlen(buf), name);
2606 		} else {
2607 			strcpy(buf, name);
2608 		}
2609 
2610 		offset += qed_dump_str_param(dump_buf + offset,
2611 					     dump, "name", buf);
2612 	} else {
2613 		/* Dump address */
2614 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2615 
2616 		offset += qed_dump_num_param(dump_buf + offset,
2617 					     dump, "addr", addr_in_bytes);
2618 	}
2619 
2620 	/* Dump len */
2621 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2622 
2623 	/* Dump bit width */
2624 	if (bit_width)
2625 		offset += qed_dump_num_param(dump_buf + offset,
2626 					     dump, "width", bit_width);
2627 
2628 	/* Dump packed */
2629 	if (packed)
2630 		offset += qed_dump_num_param(dump_buf + offset,
2631 					     dump, "packed", 1);
2632 
2633 	/* Dump reg type */
2634 	if (storm_letter) {
2635 		strcpy(buf, "?STORM_");
2636 		buf[0] = storm_letter;
2637 		strcpy(buf + strlen(buf), mem_group);
2638 	} else {
2639 		strcpy(buf, mem_group);
2640 	}
2641 
2642 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2643 
2644 	return offset;
2645 }
2646 
2647 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2648  * Returns the dumped size in dwords.
2649  * The addr and len arguments are specified in dwords.
2650  */
2651 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2652 			    struct qed_ptt *p_ptt,
2653 			    u32 *dump_buf,
2654 			    bool dump,
2655 			    const char *name,
2656 			    u32 addr,
2657 			    u32 len,
2658 			    bool wide_bus,
2659 			    u32 bit_width,
2660 			    bool packed,
2661 			    const char *mem_group, char storm_letter)
2662 {
2663 	u32 offset = 0;
2664 
2665 	offset += qed_grc_dump_mem_hdr(p_hwfn,
2666 				       dump_buf + offset,
2667 				       dump,
2668 				       name,
2669 				       addr,
2670 				       len,
2671 				       bit_width,
2672 				       packed, mem_group, storm_letter);
2673 	offset += qed_grc_dump_addr_range(p_hwfn,
2674 					  p_ptt,
2675 					  dump_buf + offset,
2676 					  dump, addr, len, wide_bus,
2677 					  SPLIT_TYPE_NONE, 0);
2678 
2679 	return offset;
2680 }
2681 
/* Dumps GRC memories entries. Returns the dumped size in dwords.
 * The input buffer is a sequence of conditional headers, each followed by
 * memory entries. Entries whose mode doesn't match, or whose block/memory
 * group is excluded from the dump, are skipped.
 */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct virt_mem_desc input_mems_arr,
				    u32 *dump_buf, bool dump)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	/* input_offset counts dwords; cond-hdr pointer arithmetic relies on
	 * dbg_dump_cond_hdr being one dword (NOTE(review): assumption from
	 * usage - confirm against qed_hsi.h).
	 */
	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;
		u32 num_entries;
		bool eval_mode;

		cond_hdr =
		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
		    input_offset++;
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		/* Skip this header's entries on mode mismatch */
		if (!mode_match) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < num_entries;
		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem =
			    (const struct dbg_dump_mem *)((u32 *)
							  input_mems_arr.ptr
							  + input_offset);
			const struct dbg_block *block;
			char storm_letter = 0;
			u32 mem_addr, mem_len;
			bool mem_wide_bus;
			u8 mem_group_id;

			/* Validate the memory group before indexing the
			 * names array
			 */
			mem_group_id = GET_FIELD(mem->dword0,
						 DBG_DUMP_MEM_MEM_GROUP_ID);
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
				return 0;
			}

			/* Skip memories excluded from this dump */
			if (!qed_grc_is_mem_included(p_hwfn,
						     (enum block_id)
						     cond_hdr->block_id,
						     mem_group_id))
				continue;

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1,
						 DBG_DUMP_MEM_WIDE_BUS);

			block = get_dbg_block(p_hwfn,
					      cond_hdr->block_id);

			/* If memory is associated with Storm,
			 * update storm details
			 */
			if (block->associated_storm_letter)
				storm_letter = block->associated_storm_letter;

			/* Dump memory (stored by address, hence NULL name) */
			offset += qed_grc_dump_mem(p_hwfn,
						p_ptt,
						dump_buf + offset,
						dump,
						NULL,
						mem_addr,
						mem_len,
						mem_wide_bus,
						0,
						false,
						s_mem_group_names[mem_group_id],
						storm_letter);
		}
	}

	return offset;
}
2775 
/* Dumps GRC memories according to the input array dump_mem.
 * Returns the dumped size in dwords.
 * Walks the split headers in the binary debug buffer; only SPLIT_TYPE_NONE
 * sections are supported for memories.
 */
static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_mems_arr;
		enum init_split_types split_type;
		u32 split_data_size;

		/* Parse split header (input_offset tracks dwords -
		 * NOTE(review): assumes the header is one dword, confirm
		 * against qed_hsi.h)
		 */
		split_hdr =
		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
		    input_offset++;
		split_type = GET_FIELD(split_hdr->hdr,
				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);

		if (split_type == SPLIT_TYPE_NONE)
			offset += qed_grc_dump_mem_entries(p_hwfn,
							   p_ptt,
							   curr_input_mems_arr,
							   dump_buf + offset,
							   dump);
		else
			DP_NOTICE(p_hwfn,
				  "Dumping split memories is currently not supported\n");

		input_offset += split_data_size;
	}

	return offset;
}
2818 
/* Dumps GRC context data for the specified Storm.
 * Returns the dumped size in dwords.
 * The lid_size argument is specified in quad-regs.
 * For each LID, each context dword is read indirectly: the (dword, lid)
 * selector is written to the Storm's CM context write address, then one
 * dword is read back from the context read address.
 */
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf,
				 bool dump,
				 const char *name,
				 u32 num_lids,
				 enum cm_ctx_types ctx_type, u8 storm_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, lid_size, total_size;
	u32 rd_reg_addr, offset = 0;

	/* Convert quad-regs to dwords */
	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;

	/* This Storm has no context of the requested type */
	if (!lid_size)
		return 0;

	total_size = num_lids * lid_size;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       name,
				       0,
				       total_size,
				       lid_size * 32,
				       false, name, storm->letter);

	/* Size-calculation mode: no HW access */
	if (!dump)
		return offset + total_size;

	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);

	/* Dump context data: select (dword index << 9 | lid), then read */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++) {
			qed_wr(p_hwfn,
			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  rd_reg_addr,
							  1,
							  false,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
2876 
2877 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2878 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2879 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2880 {
2881 	u32 offset = 0;
2882 	u8 storm_id;
2883 
2884 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2885 		if (!qed_grc_is_storm_included(p_hwfn,
2886 					       (enum dbg_storms)storm_id))
2887 			continue;
2888 
2889 		/* Dump Conn AG context size */
2890 		offset += qed_grc_dump_ctx_data(p_hwfn,
2891 						p_ptt,
2892 						dump_buf + offset,
2893 						dump,
2894 						"CONN_AG_CTX",
2895 						NUM_OF_LCIDS,
2896 						CM_CTX_CONN_AG, storm_id);
2897 
2898 		/* Dump Conn ST context size */
2899 		offset += qed_grc_dump_ctx_data(p_hwfn,
2900 						p_ptt,
2901 						dump_buf + offset,
2902 						dump,
2903 						"CONN_ST_CTX",
2904 						NUM_OF_LCIDS,
2905 						CM_CTX_CONN_ST, storm_id);
2906 
2907 		/* Dump Task AG context size */
2908 		offset += qed_grc_dump_ctx_data(p_hwfn,
2909 						p_ptt,
2910 						dump_buf + offset,
2911 						dump,
2912 						"TASK_AG_CTX",
2913 						NUM_OF_LTIDS,
2914 						CM_CTX_TASK_AG, storm_id);
2915 
2916 		/* Dump Task ST context size */
2917 		offset += qed_grc_dump_ctx_data(p_hwfn,
2918 						p_ptt,
2919 						dump_buf + offset,
2920 						dump,
2921 						"TASK_ST_CTX",
2922 						NUM_OF_LTIDS,
2923 						CM_CTX_TASK_ST, storm_id);
2924 	}
2925 
2926 	return offset;
2927 }
2928 
2929 #define VFC_STATUS_RESP_READY_BIT	0
2930 #define VFC_STATUS_BUSY_BIT		1
2931 #define VFC_STATUS_SENDING_CMD_BIT	2
2932 
2933 #define VFC_POLLING_DELAY_MS	1
2934 #define VFC_POLLING_COUNT		20
2935 
/* Reads data from VFC. Returns the number of dwords read (0 on error).
 * Sizes are specified in dwords.
 * Protocol: write the command and address words into the Storm's SEM-fast
 * VFC registers, then for each response dword poll the VFC status register
 * until the response-ready bit is set and read one dword.
 */
static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct storm_defs *storm,
				      u32 *cmd_data,
				      u32 cmd_size,
				      u32 *addr_data,
				      u32 addr_size,
				      u32 resp_size, u32 *dump_buf)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 vfc_status, polling_ms, polling_count = 0, i;
	u32 reg_addr, sem_base;
	bool is_ready = false;

	sem_base = storm->sem_fast_mem_addr;
	/* Scale the polling delay by the HW-type factor (e.g. emulation) */
	polling_ms = VFC_POLLING_DELAY_MS *
	    s_hw_type_defs[dev_data->hw_type].delay_factor;

	/* Write VFC command */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
		   cmd_data, cmd_size);

	/* Write VFC address */
	ARR_REG_WR(p_hwfn,
		   p_ptt,
		   sem_base + SEM_FAST_REG_VFC_ADDR,
		   addr_data, addr_size);

	/* Read response */
	for (i = 0; i < resp_size; i++) {
		/* Poll until ready. Note: polling_count is shared across all
		 * response dwords, so VFC_POLLING_COUNT bounds the total
		 * number of not-ready polls for the whole response.
		 */
		do {
			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
			qed_grc_dump_addr_range(p_hwfn,
						p_ptt,
						&vfc_status,
						true,
						BYTES_TO_DWORDS(reg_addr),
						1,
						false, SPLIT_TYPE_NONE, 0);
			is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);

			if (!is_ready) {
				/* Timed out waiting for the response */
				if (polling_count++ == VFC_POLLING_COUNT)
					return 0;

				msleep(polling_ms);
			}
		} while (!is_ready);

		/* Read one response dword */
		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
		qed_grc_dump_addr_range(p_hwfn,
					p_ptt,
					dump_buf + i,
					true,
					BYTES_TO_DWORDS(reg_addr),
					1, false, SPLIT_TYPE_NONE, 0);
	}

	return resp_size;
}
3002 
/* Dumps the VFC CAM of the specified storm. Returns the dumped size in
 * dwords. When dump is false, only the required buffer size (header +
 * data) is calculated and returned without touching the hardware.
 */
static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 *dump_buf, bool dump, u8 storm_id)
{
	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
	u32 row, offset = 0;

	/* Dump memory header */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       "vfc_cam",
				       0,
				       total_size,
				       256,
				       false, "vfc_cam", storm->letter);

	if (!dump)
		return offset + total_size;

	/* Prepare CAM address: the opcode is fixed for all rows */
	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);

	/* Read VFC CAM data: one read command per CAM row, each response is
	 * VFC_CAM_RESP_DWORDS dwords appended to the dump buffer.
	 */
	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
		offset += qed_grc_dump_read_from_vfc(p_hwfn,
						     p_ptt,
						     storm,
						     cam_cmd,
						     VFC_CAM_CMD_DWORDS,
						     cam_addr,
						     VFC_CAM_ADDR_DWORDS,
						     VFC_CAM_RESP_DWORDS,
						     dump_buf + offset);
	}

	return offset;
}
3045 
/* Dumps one VFC RAM (described by ram_defs) of the specified storm.
 * Returns the dumped size in dwords. When dump is false, only the
 * required buffer size is calculated and returned.
 */
static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 *dump_buf,
				bool dump,
				u8 storm_id, struct vfc_ram_defs *ram_defs)
{
	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
	u32 row, offset = 0;

	/* Dump memory header */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       dump_buf + offset,
				       dump,
				       ram_defs->mem_name,
				       0,
				       total_size,
				       256,
				       false,
				       ram_defs->type_name,
				       storm->letter);

	if (!dump)
		return offset + total_size;

	/* Prepare RAM address: the opcode is fixed, the row is set per
	 * iteration below.
	 */
	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);

	/* Read VFC RAM data: one read command per row in the RAM's
	 * [base_row, base_row + num_rows) range.
	 */
	for (row = ram_defs->base_row;
	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
		offset += qed_grc_dump_read_from_vfc(p_hwfn,
						     p_ptt,
						     storm,
						     ram_cmd,
						     VFC_RAM_CMD_DWORDS,
						     ram_addr,
						     VFC_RAM_ADDR_DWORDS,
						     VFC_RAM_RESP_DWORDS,
						     dump_buf + offset);
	}

	return offset;
}
3093 
3094 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3095 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3096 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3097 {
3098 	u8 storm_id, i;
3099 	u32 offset = 0;
3100 
3101 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3102 		if (!qed_grc_is_storm_included(p_hwfn,
3103 					       (enum dbg_storms)storm_id) ||
3104 		    !s_storm_defs[storm_id].has_vfc)
3105 			continue;
3106 
3107 		/* Read CAM */
3108 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3109 					       p_ptt,
3110 					       dump_buf + offset,
3111 					       dump, storm_id);
3112 
3113 		/* Read RAM */
3114 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3115 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3116 						       p_ptt,
3117 						       dump_buf + offset,
3118 						       dump,
3119 						       storm_id,
3120 						       &s_vfc_ram_defs[i]);
3121 	}
3122 
3123 	return offset;
3124 }
3125 
/* Dumps GRC RSS data. Returns the dumped size in dwords. When dump is
 * false, only the required buffer size is calculated.
 */
static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 rss_mem_id;

	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
		u32 rss_addr, num_entries, total_dwords;
		struct rss_mem_defs *rss_defs;
		u32 addr, num_dwords_to_read;
		bool packed;

		rss_defs = &s_rss_mem_defs[rss_mem_id];
		rss_addr = rss_defs->addr;
		num_entries = rss_defs->num_entries[dev_data->chip_id];
		/* entry_width is in bits, hence the division by 32 */
		total_dwords = (num_entries * rss_defs->entry_width) / 32;
		/* 16-bit entries are packed two per dword */
		packed = (rss_defs->entry_width == 16);

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       rss_defs->mem_name,
					       0,
					       total_dwords,
					       rss_defs->entry_width,
					       packed,
					       rss_defs->type_name, 0);

		/* Dump RSS data */
		if (!dump) {
			offset += total_dwords;
			continue;
		}

		/* Read the RAM through the RSS_RAM_ADDR/RSS_RAM_DATA window:
		 * write the row address, then read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords from the data register.
		 */
		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
		while (total_dwords) {
			num_dwords_to_read = min_t(u32,
						   RSS_REG_RSS_RAM_DATA_SIZE,
						   total_dwords);
			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  num_dwords_to_read,
							  false,
							  SPLIT_TYPE_NONE, 0);
			total_dwords -= num_dwords_to_read;
			rss_addr++;
		}
	}

	return offset;
}
3183 
3184 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3185 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3186 				struct qed_ptt *p_ptt,
3187 				u32 *dump_buf, bool dump, u8 big_ram_id)
3188 {
3189 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3190 	u32 block_size, ram_size, offset = 0, reg_val, i;
3191 	char mem_name[12] = "???_BIG_RAM";
3192 	char type_name[8] = "???_RAM";
3193 	struct big_ram_defs *big_ram;
3194 
3195 	big_ram = &s_big_ram_defs[big_ram_id];
3196 	ram_size = big_ram->ram_size[dev_data->chip_id];
3197 
3198 	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3199 	block_size = reg_val &
3200 		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3201 									 : 128;
3202 
3203 	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3204 	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3205 
3206 	/* Dump memory header */
3207 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3208 				       dump_buf + offset,
3209 				       dump,
3210 				       mem_name,
3211 				       0,
3212 				       ram_size,
3213 				       block_size * 8,
3214 				       false, type_name, 0);
3215 
3216 	/* Read and dump Big RAM data */
3217 	if (!dump)
3218 		return offset + ram_size;
3219 
3220 	/* Dump Big RAM */
3221 	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3222 	     i++) {
3223 		u32 addr, len;
3224 
3225 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3226 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3227 		len = BRB_REG_BIG_RAM_DATA_SIZE;
3228 		offset += qed_grc_dump_addr_range(p_hwfn,
3229 						  p_ptt,
3230 						  dump_buf + offset,
3231 						  dump,
3232 						  addr,
3233 						  len,
3234 						  false, SPLIT_TYPE_NONE, 0);
3235 	}
3236 
3237 	return offset;
3238 }
3239 
/* Dumps MCP-related data: scratchpad, cpu_reg_file, the MCP block's
 * registers, and a few required non-MCP registers. The MCP is halted
 * around the dump (unless NO_MCP is set) and resumed afterwards.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	bool block_enable[MAX_BLOCK_ID] = { 0 };
	u32 offset = 0, addr;
	bool halted = false;

	/* Halt MCP so it doesn't modify its memories while they are read.
	 * A failed halt is reported but the dump still proceeds.
	 */
	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Dump MCP scratchpad */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
				   MCP_REG_SCRATCH_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP cpu_reg_file */
	offset += qed_grc_dump_mem(p_hwfn,
				   p_ptt,
				   dump_buf + offset,
				   dump,
				   NULL,
				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
				   MCP_REG_CPU_REG_FILE_SIZE,
				   false, 0, false, "MCP", 0);

	/* Dump MCP registers (only the MCP block is enabled) */
	block_enable[BLOCK_MCP] = true;
	offset += qed_grc_dump_registers(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump, block_enable, "MCP");

	/* Dump required non-MCP registers: a 1-register section containing
	 * MISC_REG_SHARED_MEM_ADDR.
	 */
	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
					dump, 1, SPLIT_TYPE_NONE, 0,
					"MCP");
	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
	offset += qed_grc_dump_reg_entry(p_hwfn,
					 p_ptt,
					 dump_buf + offset,
					 dump,
					 addr,
					 1,
					 false, SPLIT_TYPE_NONE, 0);

	/* Release MCP (only if we successfully halted it above) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	return offset;
}
3301 
/* Dumps the tbus indirect memory for all PHYs.
 * Returns the dumped size in dwords.
 */
static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
	char mem_name[32];
	u8 phy_id;

	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
		struct phy_defs *phy_defs;
		u8 *bytes_buf;

		/* Compute the per-PHY tbus address/data register addresses */
		phy_defs = &s_phy_defs[phy_id];
		addr_lo_addr = phy_defs->base_addr +
			       phy_defs->tbus_addr_lo_addr;
		addr_hi_addr = phy_defs->base_addr +
			       phy_defs->tbus_addr_hi_addr;
		data_lo_addr = phy_defs->base_addr +
			       phy_defs->tbus_data_lo_addr;
		data_hi_addr = phy_defs->base_addr +
			       phy_defs->tbus_data_hi_addr;

		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
			     phy_defs->phy_name) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid PHY memory name\n");

		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       mem_name,
					       0,
					       PHY_DUMP_SIZE_DWORDS,
					       16, true, mem_name, 0);

		if (!dump) {
			offset += PHY_DUMP_SIZE_DWORDS;
			continue;
		}

		/* Read the indirect tbus memory byte by byte: the address is
		 * split into a hi part (written once per 256 reads) and a lo
		 * part, and each address yields two bytes (lo + hi data).
		 */
		bytes_buf = (u8 *)(dump_buf + offset);
		for (tbus_hi_offset = 0;
		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
		     tbus_hi_offset++) {
			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
			     tbus_lo_offset++) {
				qed_wr(p_hwfn,
				       p_ptt, addr_lo_addr, tbus_lo_offset);
				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
							    p_ptt,
							    data_lo_addr);
				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
							    p_ptt,
							    data_hi_addr);
			}
		}

		offset += PHY_DUMP_SIZE_DWORDS;
	}

	return offset;
}
3368 
/* Dumps the MCP HW dump image from NVRAM. Returns the dumped size in
 * dwords, or 0 if the image could not be found or read.
 */
static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *dump_buf, bool dump)
{
	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
	u32 hw_dump_size_dwords = 0, offset = 0;
	enum dbg_status status;

	/* Locate the HW dump image in NVRAM (offset + size in bytes).
	 * If absent, the section is skipped entirely.
	 */
	status = qed_find_nvram_image(p_hwfn,
				      p_ptt,
				      NVM_TYPE_HW_DUMP_OUT,
				      &hw_dump_offset_bytes,
				      &hw_dump_size_bytes);
	if (status != DBG_STATUS_OK)
		return 0;

	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);

	/* Dump HW dump image section header with a single "size" param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_hw_dump", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", hw_dump_size_dwords);

	/* Read MCP HW dump image into dump buffer */
	if (dump && hw_dump_size_dwords) {
		status = qed_nvram_read(p_hwfn,
					p_ptt,
					hw_dump_offset_bytes,
					hw_dump_size_bytes, dump_buf + offset);
		if (status != DBG_STATUS_OK) {
			DP_NOTICE(p_hwfn,
				  "Failed to read MCP HW Dump image from NVRAM\n");
			return 0;
		}
	}
	offset += hw_dump_size_dwords;

	return offset;
}
3411 
/* Dumps Static Debug data: all static debug lines of every block that has
 * a debug bus (and matches the current mode). Returns the dumped size in
 * dwords, or 0 if a debug bus recording is in progress.
 */
static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, line_id, offset = 0, addr, len;

	/* Don't dump static debug if a debug bus recording is in progress */
	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
		return 0;

	if (dump) {
		/* Disable debug bus in all blocks */
		qed_bus_disable_blocks(p_hwfn, p_ptt);

		/* Reset and configure the DBG block for internal-buffer
		 * 8-HW-dword framing before reading the calendar output.
		 */
		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
		qed_wr(p_hwfn,
		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
	}

	/* Dump all static debug lines for each relevant block */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip;
		const struct dbg_block *block;
		bool is_removed, has_dbg_bus;
		u16 modes_buf_offset;
		u32 block_dwords;

		block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
		is_removed = GET_FIELD(block_per_chip->flags,
				       DBG_BLOCK_CHIP_IS_REMOVED);
		has_dbg_bus = GET_FIELD(block_per_chip->flags,
					DBG_BLOCK_CHIP_HAS_DBG_BUS);

		/* If the block's debug bus is mode-dependent, treat it as
		 * absent when the current mode doesn't match.
		 */
		if (!is_removed && has_dbg_bus &&
		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
			      DBG_MODE_HDR_EVAL_MODE) > 0) {
			modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
				has_dbg_bus = false;
		}

		if (is_removed || !has_dbg_bus)
			continue;

		block_dwords = NUM_DBG_LINES(block_per_chip) *
			       STATIC_DEBUG_LINE_DWORDS;

		/* Dump static section params */
		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
		offset += qed_grc_dump_mem_hdr(p_hwfn,
					       dump_buf + offset,
					       dump,
					       block->name,
					       0,
					       block_dwords,
					       32, false, "STATIC", 0);

		if (!dump) {
			offset += block_dwords;
			continue;
		}

		/* If all lines are invalid - dump zeros */
		if (dev_data->block_in_reset[block_id]) {
			memset(dump_buf + offset, 0,
			       DWORDS_TO_BYTES(block_dwords));
			offset += block_dwords;
			continue;
		}

		/* Enable block's client */
		qed_bus_enable_clients(p_hwfn,
				       p_ptt,
				       BIT(block_per_chip->dbg_client_id));

		/* For each line: select it on the block's debug bus, then
		 * read its data from the DBG calendar output register.
		 */
		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
		len = STATIC_DEBUG_LINE_DWORDS;
		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
		     line_id++) {
			/* Configure debug line ID */
			qed_bus_config_dbg_line(p_hwfn,
						p_ptt,
						(enum block_id)block_id,
						(u8)line_id, 0xf, 0, 0, 0);

			/* Read debug line info */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  len,
							  true, SPLIT_TYPE_NONE,
							  0);
		}

		/* Disable block's client and debug output */
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
		qed_bus_config_dbg_line(p_hwfn, p_ptt,
					(enum block_id)block_id, 0, 0, 0, 0, 0);
	}

	if (dump) {
		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
	}

	return offset;
}
3530 
3531 /* Performs GRC Dump to the specified buffer.
3532  * Returns the dumped size in dwords.
3533  */
3534 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3535 				    struct qed_ptt *p_ptt,
3536 				    u32 *dump_buf,
3537 				    bool dump, u32 *num_dumped_dwords)
3538 {
3539 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3540 	bool parities_masked = false;
3541 	u32 dwords_read, offset = 0;
3542 	u8 i;
3543 
3544 	*num_dumped_dwords = 0;
3545 	dev_data->num_regs_read = 0;
3546 
3547 	/* Update reset state */
3548 	if (dump)
3549 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3550 
3551 	/* Dump global params */
3552 	offset += qed_dump_common_global_params(p_hwfn,
3553 						p_ptt,
3554 						dump_buf + offset, dump, 4);
3555 	offset += qed_dump_str_param(dump_buf + offset,
3556 				     dump, "dump-type", "grc-dump");
3557 	offset += qed_dump_num_param(dump_buf + offset,
3558 				     dump,
3559 				     "num-lcids",
3560 				     NUM_OF_LCIDS);
3561 	offset += qed_dump_num_param(dump_buf + offset,
3562 				     dump,
3563 				     "num-ltids",
3564 				     NUM_OF_LTIDS);
3565 	offset += qed_dump_num_param(dump_buf + offset,
3566 				     dump, "num-ports", dev_data->num_ports);
3567 
3568 	/* Dump reset registers (dumped before taking blocks out of reset ) */
3569 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3570 		offset += qed_grc_dump_reset_regs(p_hwfn,
3571 						  p_ptt,
3572 						  dump_buf + offset, dump);
3573 
3574 	/* Take all blocks out of reset (using reset registers) */
3575 	if (dump) {
3576 		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3577 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3578 	}
3579 
3580 	/* Disable all parities using MFW command */
3581 	if (dump &&
3582 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3583 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3584 		if (!parities_masked) {
3585 			DP_NOTICE(p_hwfn,
3586 				  "Failed to mask parities using MFW\n");
3587 			if (qed_grc_get_param
3588 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3589 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3590 		}
3591 	}
3592 
3593 	/* Dump modified registers (dumped before modifying them) */
3594 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3595 		offset += qed_grc_dump_modified_regs(p_hwfn,
3596 						     p_ptt,
3597 						     dump_buf + offset, dump);
3598 
3599 	/* Stall storms */
3600 	if (dump &&
3601 	    (qed_grc_is_included(p_hwfn,
3602 				 DBG_GRC_PARAM_DUMP_IOR) ||
3603 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3604 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3605 
3606 	/* Dump all regs  */
3607 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3608 		bool block_enable[MAX_BLOCK_ID];
3609 
3610 		/* Dump all blocks except MCP */
3611 		for (i = 0; i < MAX_BLOCK_ID; i++)
3612 			block_enable[i] = true;
3613 		block_enable[BLOCK_MCP] = false;
3614 		offset += qed_grc_dump_registers(p_hwfn,
3615 						 p_ptt,
3616 						 dump_buf +
3617 						 offset,
3618 						 dump,
3619 						 block_enable, NULL);
3620 
3621 		/* Dump special registers */
3622 		offset += qed_grc_dump_special_regs(p_hwfn,
3623 						    p_ptt,
3624 						    dump_buf + offset, dump);
3625 	}
3626 
3627 	/* Dump memories */
3628 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3629 
3630 	/* Dump MCP */
3631 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3632 		offset += qed_grc_dump_mcp(p_hwfn,
3633 					   p_ptt, dump_buf + offset, dump);
3634 
3635 	/* Dump context */
3636 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3637 		offset += qed_grc_dump_ctx(p_hwfn,
3638 					   p_ptt, dump_buf + offset, dump);
3639 
3640 	/* Dump RSS memories */
3641 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3642 		offset += qed_grc_dump_rss(p_hwfn,
3643 					   p_ptt, dump_buf + offset, dump);
3644 
3645 	/* Dump Big RAM */
3646 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3647 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3648 			offset += qed_grc_dump_big_ram(p_hwfn,
3649 						       p_ptt,
3650 						       dump_buf + offset,
3651 						       dump, i);
3652 
3653 	/* Dump VFC */
3654 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3655 		dwords_read = qed_grc_dump_vfc(p_hwfn,
3656 					       p_ptt, dump_buf + offset, dump);
3657 		offset += dwords_read;
3658 		if (!dwords_read)
3659 			return DBG_STATUS_VFC_READ_ERROR;
3660 	}
3661 
3662 	/* Dump PHY tbus */
3663 	if (qed_grc_is_included(p_hwfn,
3664 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3665 	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3666 		offset += qed_grc_dump_phy(p_hwfn,
3667 					   p_ptt, dump_buf + offset, dump);
3668 
3669 	/* Dump MCP HW Dump */
3670 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3671 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
3672 		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3673 						   p_ptt,
3674 						   dump_buf + offset, dump);
3675 
3676 	/* Dump static debug data (only if not during debug bus recording) */
3677 	if (qed_grc_is_included(p_hwfn,
3678 				DBG_GRC_PARAM_DUMP_STATIC) &&
3679 	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3680 		offset += qed_grc_dump_static_debug(p_hwfn,
3681 						    p_ptt,
3682 						    dump_buf + offset, dump);
3683 
3684 	/* Dump last section */
3685 	offset += qed_dump_last_section(dump_buf, offset, dump);
3686 
3687 	if (dump) {
3688 		/* Unstall storms */
3689 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3690 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3691 
3692 		/* Clear parity status */
3693 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3694 
3695 		/* Enable all parities using MFW command */
3696 		if (parities_masked)
3697 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3698 	}
3699 
3700 	*num_dumped_dwords = offset;
3701 
3702 	return DBG_STATUS_OK;
3703 }
3704 
/* Writes the specified failing Idle Check rule to the specified buffer.
 * Returns the dumped size in dwords. When dump is false, only the
 * required size (header + all condition/info register entries) is
 * calculated; cond_reg_values may then be NULL.
 */
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf,
				     bool dump,
				     u16 rule_id,
				     const struct dbg_idle_chk_rule *rule,
				     u16 fail_entry_id, u32 *cond_reg_values)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_idle_chk_cond_reg *cond_regs;
	const struct dbg_idle_chk_info_reg *info_regs;
	u32 i, next_reg_offset = 0, offset = 0;
	struct dbg_idle_chk_result_hdr *hdr;
	const union dbg_idle_chk_reg *regs;
	u8 reg_id;

	/* The rule's registers live in the IDLE_CHK_REGS binary array:
	 * condition registers first, then info registers.
	 */
	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
	regs = (const union dbg_idle_chk_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
		rule->reg_offset;
	cond_regs = &regs[0].cond_reg;
	info_regs = &regs[rule->num_cond_regs].info_reg;

	/* Dump rule data */
	if (dump) {
		memset(hdr, 0, sizeof(*hdr));
		hdr->rule_id = rule_id;
		hdr->mem_entry_id = fail_entry_id;
		hdr->severity = rule->severity;
		hdr->num_dumped_cond_regs = rule->num_cond_regs;
	}

	offset += IDLE_CHK_RESULT_HDR_DWORDS;

	/* Dump condition register values (already read by the caller and
	 * passed in via cond_reg_values).
	 */
	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
		struct dbg_idle_chk_result_reg_hdr *reg_hdr;

		reg_hdr =
		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);

		/* Write register header */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
			    reg->entry_size;
			continue;
		}

		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
		memset(reg_hdr, 0, sizeof(*reg_hdr));
		reg_hdr->start_entry = reg->start_entry;
		reg_hdr->size = reg->entry_size;
		/* A register is flagged as a memory when it has multiple
		 * entries or a non-zero start entry.
		 */
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
		SET_FIELD(reg_hdr->data,
			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);

		/* Write register values */
		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
			dump_buf[offset] = cond_reg_values[next_reg_offset];
	}

	/* Dump info register values (read from HW here, unlike the
	 * condition registers above).
	 */
	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
		u32 block_id;

		/* Check if register's block is in reset */
		if (!dump) {
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
			continue;
		}

		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
		if (block_id >= MAX_BLOCK_ID) {
			DP_NOTICE(p_hwfn, "Invalid block_id\n");
			return 0;
		}

		if (!dev_data->block_in_reset[block_id]) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool wide_bus, eval_mode, mode_match = true;
			u16 modes_buf_offset;
			u32 addr;

			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
				  (dump_buf + offset);

			/* Check mode - skip the register if the current mode
			 * doesn't match its mode expression.
			 */
			eval_mode = GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			if (eval_mode) {
				modes_buf_offset =
				    GET_FIELD(reg->mode.data,
					      DBG_MODE_HDR_MODES_BUF_OFFSET);
				mode_match =
					qed_is_mode_match(p_hwfn,
							  &modes_buf_offset);
			}

			if (!mode_match)
				continue;

			addr = GET_FIELD(reg->data,
					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
			wide_bus = GET_FIELD(reg->data,
					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);

			/* Write register header */
			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
			hdr->num_dumped_info_regs++;
			memset(reg_hdr, 0, sizeof(*reg_hdr));
			reg_hdr->size = reg->size;
			/* Info register IDs follow the condition registers */
			SET_FIELD(reg_hdr->data,
				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
				  rule->num_cond_regs + reg_id);

			/* Write register values */
			offset += qed_grc_dump_addr_range(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  dump,
							  addr,
							  reg->size, wide_bus,
							  SPLIT_TYPE_NONE, 0);
		}
	}

	return offset;
}
3840 
/* Dumps idle check rule entries: evaluates each input rule against the
 * current register values and dumps every failing entry. Returns the
 * dumped size in dwords; the number of failing rules is returned via
 * num_failing_rules. When dump is false, a worst-case size (all entries
 * failing) is calculated instead.
 */
static u32
qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			       u32 *dump_buf, bool dump,
			       const struct dbg_idle_chk_rule *input_rules,
			       u32 num_input_rules, u32 *num_failing_rules)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
	u32 i, offset = 0;
	u16 entry_id;
	u8 reg_id;

	*num_failing_rules = 0;

	for (i = 0; i < num_input_rules; i++) {
		const struct dbg_idle_chk_cond_reg *cond_regs;
		const struct dbg_idle_chk_rule *rule;
		const union dbg_idle_chk_reg *regs;
		u16 num_reg_entries = 1;
		bool check_rule = true;
		const u32 *imm_values;

		/* Locate the rule's condition registers and immediate values
		 * in the corresponding binary arrays.
		 */
		rule = &input_rules[i];
		regs = (const union dbg_idle_chk_reg *)
			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
			rule->reg_offset;
		cond_regs = &regs[0].cond_reg;
		imm_values =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
		    rule->imm_offset;

		/* Check if all condition register blocks are out of reset, and
		 * find maximal number of entries (all condition registers that
		 * are memories must have the same size, which is > 1).
		 */
		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
		     reg_id++) {
			u32 block_id =
				GET_FIELD(cond_regs[reg_id].data,
					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);

			if (block_id >= MAX_BLOCK_ID) {
				DP_NOTICE(p_hwfn, "Invalid block_id\n");
				return 0;
			}

			check_rule = !dev_data->block_in_reset[block_id];
			if (cond_regs[reg_id].num_entries > num_reg_entries)
				num_reg_entries = cond_regs[reg_id].num_entries;
		}

		/* A rule whose block is in reset can't be evaluated */
		if (!check_rule && dump)
			continue;

		/* Size-only pass: assume the worst case, i.e. every entry of
		 * every rule fails.
		 */
		if (!dump) {
			u32 entry_dump_size =
				qed_idle_chk_dump_failure(p_hwfn,
							  p_ptt,
							  dump_buf + offset,
							  false,
							  rule->rule_id,
							  rule,
							  0,
							  NULL);

			offset += num_reg_entries * entry_dump_size;
			(*num_failing_rules) += num_reg_entries;
			continue;
		}

		/* Go over all register entries (number of entries is the same
		 * for all condition registers).
		 */
		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
			u32 next_reg_offset = 0;

			/* Read current entry of all condition registers */
			for (reg_id = 0; reg_id < rule->num_cond_regs;
			     reg_id++) {
				const struct dbg_idle_chk_cond_reg *reg =
					&cond_regs[reg_id];
				u32 padded_entry_size, addr;
				bool wide_bus;

				/* Find GRC address (if it's a memory, the
				 * address of the specific entry is calculated).
				 */
				addr = GET_FIELD(reg->data,
						 DBG_IDLE_CHK_COND_REG_ADDRESS);
				wide_bus =
				    GET_FIELD(reg->data,
					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
				if (reg->num_entries > 1 ||
				    reg->start_entry > 0) {
					/* Memory entries are padded to a
					 * power-of-two stride.
					 */
					padded_entry_size =
					   reg->entry_size > 1 ?
					   roundup_pow_of_two(reg->entry_size) :
					   1;
					addr += (reg->start_entry + entry_id) *
						padded_entry_size;
				}

				/* Read registers, guarding against overflowing
				 * the on-stack values buffer.
				 */
				if (next_reg_offset + reg->entry_size >=
				    IDLE_CHK_MAX_ENTRIES_SIZE) {
					DP_NOTICE(p_hwfn,
						  "idle check registers entry is too large\n");
					return 0;
				}

				next_reg_offset +=
				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
							    cond_reg_values +
							    next_reg_offset,
							    dump, addr,
							    reg->entry_size,
							    wide_bus,
							    SPLIT_TYPE_NONE, 0);
			}

			/* Call rule condition function.
			 * If returns true, it's a failure.
			 */
			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
							imm_values)) {
				offset += qed_idle_chk_dump_failure(p_hwfn,
							p_ptt,
							dump_buf + offset,
							dump,
							rule->rule_id,
							rule,
							entry_id,
							cond_reg_values);
				(*num_failing_rules)++;
			}
		}
	}

	return offset;
}
3982 
/* Performs Idle Check Dump to the specified buffer.
 * Walks the IDLE_CHK_RULES binary buffer (a sequence of condition headers,
 * each followed by its rules), evaluates mode-matching rule groups, and
 * records the number of failing rules. Returns the dumped size in dwords.
 */
static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
	u32 num_failing_rules_offset, offset = 0,
	    input_offset = 0, num_failing_rules = 0;

	/* Dump global params  - 1 must match below amount of params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "idle-chk");

	/* Dump idle check section header with a single parameter. The
	 * num_rules value is a placeholder, overwritten at the end once the
	 * real count is known.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
	num_failing_rules_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_idle_chk_cond_hdr *cond_hdr =
		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
		    input_offset++;
		bool eval_mode, mode_match = true;
		u32 curr_failing_rules;
		u16 modes_buf_offset;

		/* Check mode - skip the whole rule group if the current mode
		 * doesn't match the header's mode expression.
		 */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		if (mode_match) {
			const struct dbg_idle_chk_rule *rule =
			    (const struct dbg_idle_chk_rule *)((u32 *)
							       dbg_buf->ptr
							       + input_offset);
			u32 num_input_rules =
				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
			offset +=
			    qed_idle_chk_dump_rule_entries(p_hwfn,
							   p_ptt,
							   dump_buf +
							   offset,
							   dump,
							   rule,
							   num_input_rules,
							   &curr_failing_rules);
			num_failing_rules += curr_failing_rules;
		}

		/* Advance past this group's rules (dwords) */
		input_offset += cond_hdr->data_size;
	}

	/* Overwrite num_rules parameter */
	if (dump)
		qed_dump_num_param(dump_buf + num_failing_rules_offset,
				   dump, "num_rules", num_failing_rules);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4057 
4058 /* Get info on the MCP Trace data in the scratchpad:
4059  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4060  * - trace_data_size (OUT): trace data size in bytes (without the header)
4061  */
4062 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4063 						   struct qed_ptt *p_ptt,
4064 						   u32 *trace_data_grc_addr,
4065 						   u32 *trace_data_size)
4066 {
4067 	u32 spad_trace_offsize, signature;
4068 
4069 	/* Read trace section offsize structure from MCP scratchpad */
4070 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4071 
4072 	/* Extract trace section address from offsize (in scratchpad) */
4073 	*trace_data_grc_addr =
4074 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4075 
4076 	/* Read signature from MCP trace section */
4077 	signature = qed_rd(p_hwfn, p_ptt,
4078 			   *trace_data_grc_addr +
4079 			   offsetof(struct mcp_trace, signature));
4080 
4081 	if (signature != MFW_TRACE_SIGNATURE)
4082 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4083 
4084 	/* Read trace size from MCP trace section */
4085 	*trace_data_size = qed_rd(p_hwfn,
4086 				  p_ptt,
4087 				  *trace_data_grc_addr +
4088 				  offsetof(struct mcp_trace, size));
4089 
4090 	return DBG_STATUS_OK;
4091 }
4092 
4093 /* Reads MCP trace meta data image from NVRAM
4094  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4095  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4096  *			      loaded from file).
4097  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4098  */
4099 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4100 						   struct qed_ptt *p_ptt,
4101 						   u32 trace_data_size_bytes,
4102 						   u32 *running_bundle_id,
4103 						   u32 *trace_meta_offset,
4104 						   u32 *trace_meta_size)
4105 {
4106 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4107 
4108 	/* Read MCP trace section offsize structure from MCP scratchpad */
4109 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4110 
4111 	/* Find running bundle ID */
4112 	running_mfw_addr =
4113 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4114 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4115 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4116 	if (*running_bundle_id > 1)
4117 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4118 
4119 	/* Find image in NVRAM */
4120 	nvram_image_type =
4121 	    (*running_bundle_id ==
4122 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4123 	return qed_find_nvram_image(p_hwfn,
4124 				    p_ptt,
4125 				    nvram_image_type,
4126 				    trace_meta_offset, trace_meta_size);
4127 }
4128 
4129 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4130 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4131 					       struct qed_ptt *p_ptt,
4132 					       u32 nvram_offset_in_bytes,
4133 					       u32 size_in_bytes, u32 *buf)
4134 {
4135 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4136 	enum dbg_status status;
4137 	u32 signature;
4138 
4139 	/* Read meta data from NVRAM */
4140 	status = qed_nvram_read(p_hwfn,
4141 				p_ptt,
4142 				nvram_offset_in_bytes, size_in_bytes, buf);
4143 	if (status != DBG_STATUS_OK)
4144 		return status;
4145 
4146 	/* Extract and check first signature */
4147 	signature = qed_read_unaligned_dword(byte_buf);
4148 	byte_buf += sizeof(signature);
4149 	if (signature != NVM_MAGIC_VALUE)
4150 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4151 
4152 	/* Extract number of modules */
4153 	modules_num = *(byte_buf++);
4154 
4155 	/* Skip all modules */
4156 	for (i = 0; i < modules_num; i++) {
4157 		module_len = *(byte_buf++);
4158 		byte_buf += module_len;
4159 	}
4160 
4161 	/* Extract and check second signature */
4162 	signature = qed_read_unaligned_dword(byte_buf);
4163 	byte_buf += sizeof(signature);
4164 	if (signature != NVM_MAGIC_VALUE)
4165 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4166 
4167 	return DBG_STATUS_OK;
4168 }
4169 
/* Dump MCP Trace.
 * Dumps the MCP scratchpad trace data, and (when MFW access is allowed)
 * the matching trace meta image from NVRAM. Returns the dump status;
 * *num_dumped_dwords receives the dumped size in dwords.
 */
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u32 *dump_buf,
					  bool dump, u32 *num_dumped_dwords)
{
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
	enum dbg_status status;
	int halted = 0;
	bool use_mfw;

	*num_dumped_dwords = 0;

	/* NO_MCP debug param disables all MFW interaction (halt/NVRAM) */
	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     p_ptt,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	 */
	if (dump && use_mfw) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
		if (!halted)
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
	}

	/* Find trace data size, including the mcp_trace header */
	trace_data_size_dwords =
	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
			 BYTES_IN_DWORD);

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump,
					  BYTES_TO_DWORDS(trace_data_grc_addr),
					  trace_data_size_dwords, false,
					  SPLIT_TYPE_NONE, 0);

	/* Resume MCP (only if halt succeeded) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* If MCP Trace meta size parameter was set, use it.
	 * Otherwise, read trace meta.
	 * trace_meta_size_bytes is dword-aligned.
	 */
	trace_meta_size_bytes =
		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
	/* NOTE: when this step is skipped, 'status' still holds the OK
	 * result of qed_mcp_trace_get_data_info() above, so the checks
	 * below fall through as intended.
	 */
	if ((!trace_meta_size_bytes || dump) && use_mfw)
		status = qed_mcp_trace_get_meta_info(p_hwfn,
						     p_ptt,
						     trace_data_size_bytes,
						     &running_bundle_id,
						     &trace_meta_offset_bytes,
						     &trace_meta_size_bytes);
	if (status == DBG_STATUS_OK)
		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);

	/* Dump trace meta size param */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump && trace_meta_size_dwords)
		status = qed_mcp_trace_read_meta(p_hwfn,
						 p_ptt,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
						 dump_buf + offset);
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data from NVRAM.
	 */
	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
4280 
4281 /* Dump GRC FIFO */
4282 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4283 					 struct qed_ptt *p_ptt,
4284 					 u32 *dump_buf,
4285 					 bool dump, u32 *num_dumped_dwords)
4286 {
4287 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4288 	bool fifo_has_data;
4289 
4290 	*num_dumped_dwords = 0;
4291 
4292 	/* Dump global params */
4293 	offset += qed_dump_common_global_params(p_hwfn,
4294 						p_ptt,
4295 						dump_buf + offset, dump, 1);
4296 	offset += qed_dump_str_param(dump_buf + offset,
4297 				     dump, "dump-type", "reg-fifo");
4298 
4299 	/* Dump fifo data section header and param. The size param is 0 for
4300 	 * now, and is overwritten after reading the FIFO.
4301 	 */
4302 	offset += qed_dump_section_hdr(dump_buf + offset,
4303 				       dump, "reg_fifo_data", 1);
4304 	size_param_offset = offset;
4305 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4306 
4307 	if (!dump) {
4308 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4309 		 * test how much data is available, except for reading it.
4310 		 */
4311 		offset += REG_FIFO_DEPTH_DWORDS;
4312 		goto out;
4313 	}
4314 
4315 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4316 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4317 
4318 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4319 	 * and must be accessed atomically. Test for dwords_read not passing
4320 	 * buffer size since more entries could be added to the buffer as we are
4321 	 * emptying it.
4322 	 */
4323 	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4324 	len = REG_FIFO_ELEMENT_DWORDS;
4325 	for (dwords_read = 0;
4326 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4327 	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4328 		offset += qed_grc_dump_addr_range(p_hwfn,
4329 						  p_ptt,
4330 						  dump_buf + offset,
4331 						  true,
4332 						  addr,
4333 						  len,
4334 						  true, SPLIT_TYPE_NONE,
4335 						  0);
4336 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4337 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4338 	}
4339 
4340 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4341 			   dwords_read);
4342 out:
4343 	/* Dump last section */
4344 	offset += qed_dump_last_section(dump_buf, offset, dump);
4345 
4346 	*num_dumped_dwords = offset;
4347 
4348 	return DBG_STATUS_OK;
4349 }
4350 
4351 /* Dump IGU FIFO */
4352 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4353 					 struct qed_ptt *p_ptt,
4354 					 u32 *dump_buf,
4355 					 bool dump, u32 *num_dumped_dwords)
4356 {
4357 	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4358 	bool fifo_has_data;
4359 
4360 	*num_dumped_dwords = 0;
4361 
4362 	/* Dump global params */
4363 	offset += qed_dump_common_global_params(p_hwfn,
4364 						p_ptt,
4365 						dump_buf + offset, dump, 1);
4366 	offset += qed_dump_str_param(dump_buf + offset,
4367 				     dump, "dump-type", "igu-fifo");
4368 
4369 	/* Dump fifo data section header and param. The size param is 0 for
4370 	 * now, and is overwritten after reading the FIFO.
4371 	 */
4372 	offset += qed_dump_section_hdr(dump_buf + offset,
4373 				       dump, "igu_fifo_data", 1);
4374 	size_param_offset = offset;
4375 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4376 
4377 	if (!dump) {
4378 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4379 		 * test how much data is available, except for reading it.
4380 		 */
4381 		offset += IGU_FIFO_DEPTH_DWORDS;
4382 		goto out;
4383 	}
4384 
4385 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4386 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4387 
4388 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4389 	 * and must be accessed atomically. Test for dwords_read not passing
4390 	 * buffer size since more entries could be added to the buffer as we are
4391 	 * emptying it.
4392 	 */
4393 	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4394 	len = IGU_FIFO_ELEMENT_DWORDS;
4395 	for (dwords_read = 0;
4396 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4397 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4398 		offset += qed_grc_dump_addr_range(p_hwfn,
4399 						  p_ptt,
4400 						  dump_buf + offset,
4401 						  true,
4402 						  addr,
4403 						  len,
4404 						  true, SPLIT_TYPE_NONE,
4405 						  0);
4406 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4407 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4408 	}
4409 
4410 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4411 			   dwords_read);
4412 out:
4413 	/* Dump last section */
4414 	offset += qed_dump_last_section(dump_buf, offset, dump);
4415 
4416 	*num_dumped_dwords = offset;
4417 
4418 	return DBG_STATUS_OK;
4419 }
4420 
4421 /* Protection Override dump */
4422 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4423 						    struct qed_ptt *p_ptt,
4424 						    u32 *dump_buf,
4425 						    bool dump,
4426 						    u32 *num_dumped_dwords)
4427 {
4428 	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4429 
4430 	*num_dumped_dwords = 0;
4431 
4432 	/* Dump global params */
4433 	offset += qed_dump_common_global_params(p_hwfn,
4434 						p_ptt,
4435 						dump_buf + offset, dump, 1);
4436 	offset += qed_dump_str_param(dump_buf + offset,
4437 				     dump, "dump-type", "protection-override");
4438 
4439 	/* Dump data section header and param. The size param is 0 for now,
4440 	 * and is overwritten after reading the data.
4441 	 */
4442 	offset += qed_dump_section_hdr(dump_buf + offset,
4443 				       dump, "protection_override_data", 1);
4444 	size_param_offset = offset;
4445 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4446 
4447 	if (!dump) {
4448 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4449 		goto out;
4450 	}
4451 
4452 	/* Add override window info to buffer */
4453 	override_window_dwords =
4454 		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4455 		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4456 	if (override_window_dwords) {
4457 		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4458 		offset += qed_grc_dump_addr_range(p_hwfn,
4459 						  p_ptt,
4460 						  dump_buf + offset,
4461 						  true,
4462 						  addr,
4463 						  override_window_dwords,
4464 						  true, SPLIT_TYPE_NONE, 0);
4465 		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4466 				   override_window_dwords);
4467 	}
4468 out:
4469 	/* Dump last section */
4470 	offset += qed_dump_last_section(dump_buf, offset, dump);
4471 
4472 	*num_dumped_dwords = offset;
4473 
4474 	return DBG_STATUS_OK;
4475 }
4476 
/* Performs FW Asserts Dump to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;
	u32 offset = 0;
	u8 storm_id;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						p_ptt,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");

	/* Dump one "fw_asserts" section per Storm */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 last_list_idx, addr;

		/* Skip Storms whose SEM block is held in reset - their
		 * RAM cannot be read.
		 */
		if (dev_data->block_in_reset[storm->sem_block_id])
			continue;

		/* Read FW info for the current Storm */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += qed_dump_section_hdr(dump_buf + offset,
					       dump, "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "storm", storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset,
					     dump,
					     "size",
					     asserts->list_element_dword_size);

		/* Read and dump FW Asserts data. In size-query mode only
		 * account for one list element per Storm.
		 */
		if (!dump) {
			offset += asserts->list_element_dword_size;
			continue;
		}

		/* Locate the asserts section inside the Storm's INT RAM */
		addr = le16_to_cpu(asserts->section_ram_line_offset);
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
					  SEM_FAST_REG_INT_RAM +
					  RAM_LINES_TO_BYTES(addr);

		/* next_list_idx points at the slot the FW would write next;
		 * the most recent assert is therefore the previous slot
		 * (wrapping to the last element when next_list_idx is 0).
		 */
		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
		last_list_idx = (next_list_idx > 0 ?
				 next_list_idx :
				 asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
		       asserts->list_dword_offset +
		       last_list_idx * asserts->list_element_dword_size;
		offset +=
		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
					    dump_buf + offset,
					    dump, addr,
					    asserts->list_element_dword_size,
						  false, SPLIT_TYPE_NONE, 0);
	}

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	return offset;
}
4555 
4556 /* Dumps the specified ILT pages to the specified buffer.
4557  * Returns the dumped size in dwords.
4558  */
4559 static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
4560 				    bool *dump, u32 start_page_id,
4561 				    u32 num_pages,
4562 				    struct phys_mem_desc *ilt_pages,
4563 				    bool dump_page_ids, u32 buf_size_in_dwords,
4564 				    u32 *given_actual_dump_size_in_dwords)
4565 {
4566 	u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
4567 	u32 page_id, end_page_id, offset = *given_offset;
4568 	struct phys_mem_desc *mem_desc = NULL;
4569 	bool continue_dump = *dump;
4570 	u32 partial_page_size = 0;
4571 
4572 	if (num_pages == 0)
4573 		return offset;
4574 
4575 	end_page_id = start_page_id + num_pages - 1;
4576 
4577 	for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4578 		mem_desc = &ilt_pages[page_id];
4579 		if (!ilt_pages[page_id].virt_addr)
4580 			continue;
4581 
4582 		if (dump_page_ids) {
4583 			/* Copy page ID to dump buffer
4584 			 * (if dump is needed and buffer is not full)
4585 			 */
4586 			if ((continue_dump) &&
4587 			    (offset + 1 > buf_size_in_dwords)) {
4588 				continue_dump = false;
4589 				actual_dump_size_in_dwords = offset;
4590 			}
4591 			if (continue_dump)
4592 				*(dump_buf + offset) = page_id;
4593 			offset++;
4594 		} else {
4595 			/* Copy page memory to dump buffer */
4596 			if ((continue_dump) &&
4597 			    (offset + BYTES_TO_DWORDS(mem_desc->size) >
4598 			     buf_size_in_dwords)) {
4599 				if (offset + BYTES_TO_DWORDS(mem_desc->size) >
4600 				    buf_size_in_dwords) {
4601 					partial_page_size =
4602 					    buf_size_in_dwords - offset;
4603 					memcpy(dump_buf + offset,
4604 					       mem_desc->virt_addr,
4605 					       partial_page_size);
4606 					continue_dump = false;
4607 					actual_dump_size_in_dwords =
4608 					    offset + partial_page_size;
4609 				}
4610 			}
4611 
4612 			if (continue_dump)
4613 				memcpy(dump_buf + offset,
4614 				       mem_desc->virt_addr, mem_desc->size);
4615 			offset += BYTES_TO_DWORDS(mem_desc->size);
4616 		}
4617 	}
4618 
4619 	*dump = continue_dump;
4620 	*given_offset = offset;
4621 	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
4622 
4623 	return offset;
4624 }
4625 
/* Dumps a section containing the dumped ILT pages.
 * Returns the dumped size in dwords. On buffer exhaustion, clears *dump
 * and maintains *given_actual_dump_size_in_dwords, while *given_offset
 * keeps advancing by the full required size.
 */
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
				      u32 *dump_buf,
				      u32 *given_offset,
				      bool *dump,
				      u32 valid_conn_pf_pages,
				      u32 valid_conn_vf_pages,
				      struct phys_mem_desc *ilt_pages,
				      bool dump_page_ids,
				      u32 buf_size_in_dwords,
				      u32 *given_actual_dump_size_in_dwords)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 pf_start_line, start_page_id, offset = *given_offset;
	u32 cdut_pf_init_pages, cdut_vf_init_pages;
	u32 cdut_pf_work_pages, cdut_vf_work_pages;
	u32 base_data_offset, size_param_offset;
	u32 src_pages;
	u32 section_header_and_param_size;
	u32 cdut_pf_pages, cdut_vf_pages;
	u32 actual_dump_size_in_dwords;
	bool continue_dump = *dump;
	bool update_size = *dump;
	const char *section_name;
	u32 i;

	actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
	/* Compute the header+param size (NULL buffer = size-only query) so
	 * buffer space can be checked before writing them.
	 */
	section_header_and_param_size = qed_dump_section_hdr(NULL,
							     false,
							     section_name,
							     1) +
	qed_dump_num_param(NULL, false, "size", 0);

	/* Stop dumping (but keep sizing) if the header itself doesn't fit */
	if ((continue_dump) &&
	    (offset + section_header_and_param_size > buf_size_in_dwords)) {
		continue_dump = false;
		update_size = false;
		actual_dump_size_in_dwords = offset;
	}

	offset += qed_dump_section_hdr(dump_buf + offset,
				       continue_dump, section_name, 1);

	/* Dump size parameter (0 for now, overwritten with real size later) */
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset,
				     continue_dump, "size", 0);
	base_data_offset = offset;

	/* CDUC pages are ordered as follows:
	 * - PF pages - valid section (included in PF connection type mapping)
	 * - PF pages - invalid section (not dumped)
	 * - For each VF in the PF:
	 *   - VF pages - valid section (included in VF connection type mapping)
	 *   - VF pages - invalid section (not dumped)
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
		/* Dump connection PF pages */
		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
					 start_page_id, valid_conn_pf_pages,
					 ilt_pages, dump_page_ids,
					 buf_size_in_dwords,
					 &actual_dump_size_in_dwords);

		/* Dump connection VF pages */
		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
			qed_ilt_dump_pages_range(dump_buf, &offset,
						 &continue_dump, start_page_id,
						 valid_conn_vf_pages,
						 ilt_pages, dump_page_ids,
						 buf_size_in_dwords,
						 &actual_dump_size_in_dwords);
	}

	/* CDUT pages are ordered as follows:
	 * - PF init pages (not dumped)
	 * - PF work pages
	 * - For each VF in the PF:
	 *   - VF init pages (not dumped)
	 *   - VF work pages
	 */
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
		/* Dump task PF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_init_pages - pf_start_line;
		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
					 start_page_id, cdut_pf_work_pages,
					 ilt_pages, dump_page_ids,
					 buf_size_in_dwords,
					 &actual_dump_size_in_dwords);

		/* Dump task VF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += cdut_vf_pages)
			qed_ilt_dump_pages_range(dump_buf, &offset,
						 &continue_dump, start_page_id,
						 cdut_vf_work_pages, ilt_pages,
						 dump_page_ids,
						 buf_size_in_dwords,
						 &actual_dump_size_in_dwords);
	}

	/*Dump Searcher pages */
	if (clients[ILT_CLI_SRC].active) {
		start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
		src_pages = clients[ILT_CLI_SRC].last.val -
		    clients[ILT_CLI_SRC].first.val + 1;
		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
					 start_page_id, src_pages, ilt_pages,
					 dump_page_ids, buf_size_in_dwords,
					 &actual_dump_size_in_dwords);
	}

	/* Overwrite size param. (*dump == continue_dump) means the buffer
	 * did not run out inside this section, so the full span from
	 * base_data_offset is valid; otherwise only the truncated
	 * actual_dump_size_in_dwords is.
	 */
	if (update_size) {
		u32 section_size = (*dump == continue_dump) ?
		    offset - base_data_offset :
		    actual_dump_size_in_dwords - base_data_offset;
		if (section_size > 0)
			qed_dump_num_param(dump_buf + size_param_offset,
					   *dump, "size", section_size);
		else if ((section_size == 0) && (*dump != continue_dump))
			/* Nothing of this section fit - drop its header from
			 * the actual size as well.
			 */
			actual_dump_size_in_dwords -=
			    section_header_and_param_size;
	}

	*dump = continue_dump;
	*given_offset = offset;
	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;

	return offset;
}
4773 
/* Dumps a section containing the global parameters.
 * Part of ilt dump process
 * Returns the dumped size in dwords.
 * The offsets of the "dump-size-full" / "dump-size-actual" params are
 * returned via the two OUT pointers so the caller can overwrite their
 * values once the final sizes are known.
 */
static u32
qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump,
				       u32 cduc_page_size,
				       u32 conn_ctx_size,
				       u32 cdut_page_size,
				       u32 *full_dump_size_param_offset,
				       u32 *actual_dump_size_param_offset)
{
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 offset = 0;

	/* The '30' must equal the number of qed_dump_*_param() calls below -
	 * keep them in sync when adding/removing parameters.
	 */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset,
						dump, 30);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "dump-type", "ilt-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-page-size",
				     cduc_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-first-page-id",
				     clients[ILT_CLI_CDUC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-last-page-id",
				     clients[ILT_CLI_CDUC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-pf-pages",
				     clients[ILT_CLI_CDUC].pf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cduc-num-vf-pages",
				     clients[ILT_CLI_CDUC].vf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-conn-ctx-size",
				     conn_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-page-size",
				     cdut_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-first-page-id",
				     clients[ILT_CLI_CDUT].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-last-page-id",
				     clients[ILT_CLI_CDUT].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-init-pages",
				     qed_get_cdut_num_pf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-init-pages",
				     qed_get_cdut_num_vf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-pf-work-pages",
				     qed_get_cdut_num_pf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "cdut-num-vf-work-pages",
				     qed_get_cdut_num_vf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "max-task-ctx-size",
				     p_hwfn->p_cxt_mngr->task_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "first-vf-id-in-pf",
				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "num-vfs-in-pf",
				     p_hwfn->p_cxt_mngr->vf_count);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ptr-size-bytes",
				     sizeof(void *));
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "pf-start-line",
				     p_hwfn->p_cxt_mngr->pf_start_line);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "page-mem-desc-size-dwords",
				     PAGE_MEM_DESC_SIZE_DWORDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "ilt-shadow-size",
				     p_hwfn->p_cxt_mngr->ilt_shadow_size);

	/* Record the offsets of the two size params; they are written as 0
	 * here and patched by the caller when the final sizes are known.
	 */
	*full_dump_size_param_offset = offset;

	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "dump-size-full", 0);

	*actual_dump_size_param_offset = offset;

	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "dump-size-actual", 0);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "iscsi_task_pages",
				     p_hwfn->p_cxt_mngr->iscsi_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "fcoe_task_pages",
				     p_hwfn->p_cxt_mngr->fcoe_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "roce_task_pages",
				     p_hwfn->p_cxt_mngr->roce_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "eth_task_pages",
				     p_hwfn->p_cxt_mngr->eth_task_pages);
	offset += qed_dump_num_param(dump_buf + offset,
				      dump,
				      "src-first-page-id",
				      clients[ILT_CLI_SRC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "src-last-page-id",
				     clients[ILT_CLI_SRC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "src-is-active",
				     clients[ILT_CLI_SRC].active);

	/* Additional/Less parameters require matching of number in call to
	 * dump_common_global_params()
	 */

	return offset;
}
4924 
4925 /* Dump section containing number of PF CIDs per connection type.
4926  * Part of ilt dump process.
4927  * Returns the dumped size in dwords.
4928  */
4929 static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
4930 					 u32 *dump_buf,
4931 					 bool dump, u32 *valid_conn_pf_cids)
4932 {
4933 	u32 num_pf_cids = 0;
4934 	u32 offset = 0;
4935 	u8 conn_type;
4936 
4937 	offset += qed_dump_section_hdr(dump_buf + offset,
4938 				       dump, "num_pf_cids_per_conn_type", 1);
4939 	offset += qed_dump_num_param(dump_buf + offset,
4940 				     dump, "size", NUM_OF_CONNECTION_TYPES);
4941 	for (conn_type = 0, *valid_conn_pf_cids = 0;
4942 	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4943 		num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
4944 		if (dump)
4945 			*(dump_buf + offset) = num_pf_cids;
4946 		*valid_conn_pf_cids += num_pf_cids;
4947 	}
4948 
4949 	return offset;
4950 }
4951 
4952 /* Dump section containing number of VF CIDs per connection type
4953  * Part of ilt dump process.
4954  * Returns the dumped size in dwords.
4955  */
4956 static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
4957 					 u32 *dump_buf,
4958 					 bool dump, u32 *valid_conn_vf_cids)
4959 {
4960 	u32 num_vf_cids = 0;
4961 	u32 offset = 0;
4962 	u8 conn_type;
4963 
4964 	offset += qed_dump_section_hdr(dump_buf + offset, dump,
4965 				       "num_vf_cids_per_conn_type", 1);
4966 	offset += qed_dump_num_param(dump_buf + offset,
4967 				     dump, "size", NUM_OF_CONNECTION_TYPES);
4968 	for (conn_type = 0, *valid_conn_vf_cids = 0;
4969 	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4970 		num_vf_cids =
4971 		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
4972 		if (dump)
4973 			*(dump_buf + offset) = num_vf_cids;
4974 		*valid_conn_vf_cids += num_vf_cids;
4975 	}
4976 
4977 	return offset;
4978 }
4979 
4980 /* Performs ILT Dump to the specified buffer.
4981  * buf_size_in_dwords - The dumped buffer size.
4982  * Returns the dumped size in dwords.
4983  */
static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
{
#if ((!defined VMWARE) && (!defined UEFI))
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
#endif
	u32 valid_conn_vf_cids = 0,
	    valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
	u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
	u32 num_cids_per_page, conn_ctx_size;
	u32 cduc_page_size, cdut_page_size;
	u32 actual_dump_size_in_dwords = 0;
	struct phys_mem_desc *ilt_pages;
	u32 actul_dump_off = 0;
	u32 last_section_size;
	u32 full_dump_off = 0;
	u32 section_size = 0;
	bool continue_dump;
	u32 page_id;

	/* continue_dump starts equal to 'dump' and is cleared the first time
	 * a section would not fit in the remaining buffer. From then on,
	 * sections are only sized (not written), so 'offset' keeps tracking
	 * the full dump size while actual_dump_size_in_dwords freezes the
	 * amount actually written.
	 */
	last_section_size = qed_dump_last_section(NULL, 0, false);
	cduc_page_size = 1 <<
	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	cdut_page_size = 1 <<
	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
	continue_dump = dump;

	/* if need to dump then save memory for the last section
	 * (last section calculates CRC of dumped data)
	 */
	if (dump) {
		if (buf_size_in_dwords >= last_section_size) {
			buf_size_in_dwords -= last_section_size;
		} else {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	/* Dump global params */

	/* if need to dump then first check that there is enough memory
	 * in dumped buffer for this section calculate the size of this
	 * section without dumping. if there is not enough memory - then
	 * stop the dumping.
	 */
	if (continue_dump) {
		section_size =
			qed_ilt_dump_dump_common_global_params(p_hwfn,
							       p_ptt,
							       NULL,
							       false,
							       cduc_page_size,
							       conn_ctx_size,
							       cdut_page_size,
							       &full_dump_off,
							       &actul_dump_off);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 continue_dump,
							 cduc_page_size,
							 conn_ctx_size,
							 cdut_page_size,
							 &full_dump_off,
							 &actul_dump_off);

	/* Dump section containing number of PF CIDs per connection type
	 * If need to dump then first check that there is enough memory in
	 * dumped buffer for this section.
	 */
	if (continue_dump) {
		section_size =
			qed_ilt_dump_dump_num_pf_cids(p_hwfn,
						      NULL,
						      false,
						      &valid_conn_pf_cids);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
						dump_buf + offset,
						continue_dump,
						&valid_conn_pf_cids);

	/* Dump section containing number of VF CIDs per connection type
	 * If need to dump then first check that there is enough memory in
	 * dumped buffer for this section.
	 */
	if (continue_dump) {
		section_size =
			qed_ilt_dump_dump_num_vf_cids(p_hwfn,
						      NULL,
						      false,
						      &valid_conn_vf_cids);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
						dump_buf + offset,
						continue_dump,
						&valid_conn_vf_cids);

	/* Dump section containing physical memory descriptors for each
	 * ILT page.
	 */
	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;

	/* If need to dump then first check that there is enough memory
	 * in dumped buffer for the section header.
	 */
	if (continue_dump) {
		section_size = qed_dump_section_hdr(NULL,
						    false,
						    "ilt_page_desc",
						    1) +
		    qed_dump_num_param(NULL,
				       false,
				       "size",
				       num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
		if (offset + section_size > buf_size_in_dwords) {
			continue_dump = false;
			actual_dump_size_in_dwords = offset;
		}
	}

	offset += qed_dump_section_hdr(dump_buf + offset,
				       continue_dump, "ilt_page_desc", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     continue_dump,
				     "size",
				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);

	/* Copy memory descriptors to dump buffer
	 * If need to dump then dump till the dump buffer size
	 * Note: 'offset' keeps advancing even after the buffer fills up so
	 * that the full dump size is still computed.
	 */
	if (continue_dump) {
		for (page_id = 0; page_id < num_pages;
		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
			if (continue_dump &&
			    (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
			     buf_size_in_dwords)) {
				memcpy(dump_buf + offset,
				       &ilt_pages[page_id],
				       DWORDS_TO_BYTES
				       (PAGE_MEM_DESC_SIZE_DWORDS));
			} else {
				if (continue_dump) {
					continue_dump = false;
					actual_dump_size_in_dwords = offset;
				}
			}
		}
	} else {
		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
	}

	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
					   num_cids_per_page);
	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
					   num_cids_per_page);

	/* Dump ILT pages IDs */
	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
				   valid_conn_pf_pages, valid_conn_vf_pages,
				   ilt_pages, true, buf_size_in_dwords,
				   &actual_dump_size_in_dwords);

	/* Dump ILT pages memory */
	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
				   valid_conn_pf_pages, valid_conn_vf_pages,
				   ilt_pages, false, buf_size_in_dwords,
				   &actual_dump_size_in_dwords);

	/* If dumping was never stopped (or this was a sizing-only run),
	 * everything up to 'offset' is valid; otherwise report the frozen
	 * actual size.
	 */
	real_dumped_size =
	    (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
	qed_dump_num_param(dump_buf + full_dump_off, dump,
			   "full-dump-size", offset + last_section_size);
	qed_dump_num_param(dump_buf + actul_dump_off,
			   dump,
			   "actual-dump-size",
			   real_dumped_size + last_section_size);

	/* Dump last section */
	real_dumped_size += qed_dump_last_section(dump_buf,
						  real_dumped_size, dump);

	return real_dumped_size;
}
5189 
5190 /***************************** Public Functions *******************************/
5191 
5192 enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
5193 				    const u8 * const bin_ptr)
5194 {
5195 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
5196 	u8 buf_id;
5197 
5198 	/* Convert binary data to debug arrays */
5199 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
5200 		qed_set_dbg_bin_buf(p_hwfn,
5201 				    buf_id,
5202 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
5203 				    buf_hdrs[buf_id].length);
5204 
5205 	return DBG_STATUS_OK;
5206 }
5207 
5208 static enum dbg_status qed_dbg_set_app_ver(u32 ver)
5209 {
5210 	if (ver < TOOLS_VERSION)
5211 		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
5212 
5213 	s_app_ver = ver;
5214 
5215 	return DBG_STATUS_OK;
5216 }
5217 
5218 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5219 		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5220 {
5221 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5222 	u8 storm_id;
5223 
5224 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5225 		struct storm_defs *storm = &s_storm_defs[storm_id];
5226 
5227 		/* Skip Storm if it's in reset */
5228 		if (dev_data->block_in_reset[storm->sem_block_id])
5229 			continue;
5230 
5231 		/* Read FW info for the current Storm */
5232 		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5233 
5234 		return true;
5235 	}
5236 
5237 	return false;
5238 }
5239 
5240 enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
5241 				   enum dbg_grc_params grc_param, u32 val)
5242 {
5243 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5244 	enum dbg_status status;
5245 	int i;
5246 
5247 	DP_VERBOSE(p_hwfn,
5248 		   QED_MSG_DEBUG,
5249 		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5250 
5251 	status = qed_dbg_dev_init(p_hwfn);
5252 	if (status != DBG_STATUS_OK)
5253 		return status;
5254 
5255 	/* Initializes the GRC parameters (if not initialized). Needed in order
5256 	 * to set the default parameter values for the first time.
5257 	 */
5258 	qed_dbg_grc_init_params(p_hwfn);
5259 
5260 	if (grc_param >= MAX_DBG_GRC_PARAMS)
5261 		return DBG_STATUS_INVALID_ARGS;
5262 	if (val < s_grc_param_defs[grc_param].min ||
5263 	    val > s_grc_param_defs[grc_param].max)
5264 		return DBG_STATUS_INVALID_ARGS;
5265 
5266 	if (s_grc_param_defs[grc_param].is_preset) {
5267 		/* Preset param */
5268 
5269 		/* Disabling a preset is not allowed. Call
5270 		 * dbg_grc_set_params_default instead.
5271 		 */
5272 		if (!val)
5273 			return DBG_STATUS_INVALID_ARGS;
5274 
5275 		/* Update all params with the preset values */
5276 		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5277 			struct grc_param_defs *defs = &s_grc_param_defs[i];
5278 			u32 preset_val;
5279 			/* Skip persistent params */
5280 			if (defs->is_persistent)
5281 				continue;
5282 
5283 			/* Find preset value */
5284 			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5285 				preset_val =
5286 				    defs->exclude_all_preset_val;
5287 			else if (grc_param == DBG_GRC_PARAM_CRASH)
5288 				preset_val =
5289 				    defs->crash_preset_val[dev_data->chip_id];
5290 			else
5291 				return DBG_STATUS_INVALID_ARGS;
5292 
5293 			qed_grc_set_param(p_hwfn, i, preset_val);
5294 		}
5295 	} else {
5296 		/* Regular param - set its value */
5297 		qed_grc_set_param(p_hwfn, grc_param, val);
5298 	}
5299 
5300 	return DBG_STATUS_OK;
5301 }
5302 
5303 /* Assign default GRC param values */
5304 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5305 {
5306 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5307 	u32 i;
5308 
5309 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5310 		if (!s_grc_param_defs[i].is_persistent)
5311 			dev_data->grc.param_val[i] =
5312 			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5313 }
5314 
5315 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5316 					      struct qed_ptt *p_ptt,
5317 					      u32 *buf_size)
5318 {
5319 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5320 
5321 	*buf_size = 0;
5322 
5323 	if (status != DBG_STATUS_OK)
5324 		return status;
5325 
5326 	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5327 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5328 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5329 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5330 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5331 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5332 
5333 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5334 }
5335 
5336 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5337 				 struct qed_ptt *p_ptt,
5338 				 u32 *dump_buf,
5339 				 u32 buf_size_in_dwords,
5340 				 u32 *num_dumped_dwords)
5341 {
5342 	u32 needed_buf_size_in_dwords;
5343 	enum dbg_status status;
5344 
5345 	*num_dumped_dwords = 0;
5346 
5347 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5348 					       p_ptt,
5349 					       &needed_buf_size_in_dwords);
5350 	if (status != DBG_STATUS_OK)
5351 		return status;
5352 
5353 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5354 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5355 
5356 	/* Doesn't do anything, needed for compile time asserts */
5357 	qed_static_asserts();
5358 
5359 	/* GRC Dump */
5360 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5361 
5362 	/* Revert GRC params to their default */
5363 	qed_dbg_grc_set_params_default(p_hwfn);
5364 
5365 	return status;
5366 }
5367 
5368 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5369 						   struct qed_ptt *p_ptt,
5370 						   u32 *buf_size)
5371 {
5372 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5373 	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
5374 	enum dbg_status status;
5375 
5376 	*buf_size = 0;
5377 
5378 	status = qed_dbg_dev_init(p_hwfn);
5379 	if (status != DBG_STATUS_OK)
5380 		return status;
5381 
5382 	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5383 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5384 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5385 	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5386 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5387 
5388 	if (!idle_chk->buf_size_set) {
5389 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5390 						       p_ptt, NULL, false);
5391 		idle_chk->buf_size_set = true;
5392 	}
5393 
5394 	*buf_size = idle_chk->buf_size;
5395 
5396 	return DBG_STATUS_OK;
5397 }
5398 
5399 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5400 				      struct qed_ptt *p_ptt,
5401 				      u32 *dump_buf,
5402 				      u32 buf_size_in_dwords,
5403 				      u32 *num_dumped_dwords)
5404 {
5405 	u32 needed_buf_size_in_dwords;
5406 	enum dbg_status status;
5407 
5408 	*num_dumped_dwords = 0;
5409 
5410 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5411 						    p_ptt,
5412 						    &needed_buf_size_in_dwords);
5413 	if (status != DBG_STATUS_OK)
5414 		return status;
5415 
5416 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5417 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5418 
5419 	/* Update reset state */
5420 	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5421 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5422 
5423 	/* Idle Check Dump */
5424 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5425 
5426 	/* Revert GRC params to their default */
5427 	qed_dbg_grc_set_params_default(p_hwfn);
5428 
5429 	return DBG_STATUS_OK;
5430 }
5431 
5432 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5433 						    struct qed_ptt *p_ptt,
5434 						    u32 *buf_size)
5435 {
5436 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5437 
5438 	*buf_size = 0;
5439 
5440 	if (status != DBG_STATUS_OK)
5441 		return status;
5442 
5443 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5444 }
5445 
5446 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5447 				       struct qed_ptt *p_ptt,
5448 				       u32 *dump_buf,
5449 				       u32 buf_size_in_dwords,
5450 				       u32 *num_dumped_dwords)
5451 {
5452 	u32 needed_buf_size_in_dwords;
5453 	enum dbg_status status;
5454 
5455 	status =
5456 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5457 						    p_ptt,
5458 						    &needed_buf_size_in_dwords);
5459 	if (status != DBG_STATUS_OK && status !=
5460 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5461 		return status;
5462 
5463 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5464 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5465 
5466 	/* Update reset state */
5467 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5468 
5469 	/* Perform dump */
5470 	status = qed_mcp_trace_dump(p_hwfn,
5471 				    p_ptt, dump_buf, true, num_dumped_dwords);
5472 
5473 	/* Revert GRC params to their default */
5474 	qed_dbg_grc_set_params_default(p_hwfn);
5475 
5476 	return status;
5477 }
5478 
5479 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5480 						   struct qed_ptt *p_ptt,
5481 						   u32 *buf_size)
5482 {
5483 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5484 
5485 	*buf_size = 0;
5486 
5487 	if (status != DBG_STATUS_OK)
5488 		return status;
5489 
5490 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5491 }
5492 
5493 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5494 				      struct qed_ptt *p_ptt,
5495 				      u32 *dump_buf,
5496 				      u32 buf_size_in_dwords,
5497 				      u32 *num_dumped_dwords)
5498 {
5499 	u32 needed_buf_size_in_dwords;
5500 	enum dbg_status status;
5501 
5502 	*num_dumped_dwords = 0;
5503 
5504 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5505 						    p_ptt,
5506 						    &needed_buf_size_in_dwords);
5507 	if (status != DBG_STATUS_OK)
5508 		return status;
5509 
5510 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5511 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5512 
5513 	/* Update reset state */
5514 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5515 
5516 	status = qed_reg_fifo_dump(p_hwfn,
5517 				   p_ptt, dump_buf, true, num_dumped_dwords);
5518 
5519 	/* Revert GRC params to their default */
5520 	qed_dbg_grc_set_params_default(p_hwfn);
5521 
5522 	return status;
5523 }
5524 
5525 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5526 						   struct qed_ptt *p_ptt,
5527 						   u32 *buf_size)
5528 {
5529 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5530 
5531 	*buf_size = 0;
5532 
5533 	if (status != DBG_STATUS_OK)
5534 		return status;
5535 
5536 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5537 }
5538 
5539 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5540 				      struct qed_ptt *p_ptt,
5541 				      u32 *dump_buf,
5542 				      u32 buf_size_in_dwords,
5543 				      u32 *num_dumped_dwords)
5544 {
5545 	u32 needed_buf_size_in_dwords;
5546 	enum dbg_status status;
5547 
5548 	*num_dumped_dwords = 0;
5549 
5550 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5551 						    p_ptt,
5552 						    &needed_buf_size_in_dwords);
5553 	if (status != DBG_STATUS_OK)
5554 		return status;
5555 
5556 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5557 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5558 
5559 	/* Update reset state */
5560 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5561 
5562 	status = qed_igu_fifo_dump(p_hwfn,
5563 				   p_ptt, dump_buf, true, num_dumped_dwords);
5564 	/* Revert GRC params to their default */
5565 	qed_dbg_grc_set_params_default(p_hwfn);
5566 
5567 	return status;
5568 }
5569 
5570 enum dbg_status
5571 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5572 					      struct qed_ptt *p_ptt,
5573 					      u32 *buf_size)
5574 {
5575 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5576 
5577 	*buf_size = 0;
5578 
5579 	if (status != DBG_STATUS_OK)
5580 		return status;
5581 
5582 	return qed_protection_override_dump(p_hwfn,
5583 					    p_ptt, NULL, false, buf_size);
5584 }
5585 
5586 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5587 						 struct qed_ptt *p_ptt,
5588 						 u32 *dump_buf,
5589 						 u32 buf_size_in_dwords,
5590 						 u32 *num_dumped_dwords)
5591 {
5592 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5593 	enum dbg_status status;
5594 
5595 	*num_dumped_dwords = 0;
5596 
5597 	status =
5598 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5599 							      p_ptt,
5600 							      p_size);
5601 	if (status != DBG_STATUS_OK)
5602 		return status;
5603 
5604 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5605 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5606 
5607 	/* Update reset state */
5608 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5609 
5610 	status = qed_protection_override_dump(p_hwfn,
5611 					      p_ptt,
5612 					      dump_buf,
5613 					      true, num_dumped_dwords);
5614 
5615 	/* Revert GRC params to their default */
5616 	qed_dbg_grc_set_params_default(p_hwfn);
5617 
5618 	return status;
5619 }
5620 
5621 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5622 						     struct qed_ptt *p_ptt,
5623 						     u32 *buf_size)
5624 {
5625 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5626 
5627 	*buf_size = 0;
5628 
5629 	if (status != DBG_STATUS_OK)
5630 		return status;
5631 
5632 	/* Update reset state */
5633 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5634 
5635 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5636 
5637 	return DBG_STATUS_OK;
5638 }
5639 
5640 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5641 					struct qed_ptt *p_ptt,
5642 					u32 *dump_buf,
5643 					u32 buf_size_in_dwords,
5644 					u32 *num_dumped_dwords)
5645 {
5646 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5647 	enum dbg_status status;
5648 
5649 	*num_dumped_dwords = 0;
5650 
5651 	status =
5652 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5653 						     p_ptt,
5654 						     p_size);
5655 	if (status != DBG_STATUS_OK)
5656 		return status;
5657 
5658 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5659 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5660 
5661 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5662 
5663 	/* Revert GRC params to their default */
5664 	qed_dbg_grc_set_params_default(p_hwfn);
5665 
5666 	return DBG_STATUS_OK;
5667 }
5668 
5669 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5670 						     struct qed_ptt *p_ptt,
5671 						     u32 *buf_size)
5672 {
5673 	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5674 
5675 	*buf_size = 0;
5676 
5677 	if (status != DBG_STATUS_OK)
5678 		return status;
5679 
5680 	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
5681 
5682 	return DBG_STATUS_OK;
5683 }
5684 
/* Performs the ILT dump into the caller's buffer; qed_ilt_dump() truncates
 * internally if the buffer is too small, so no size pre-check is done here.
 */
static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
{
	*num_dumped_dwords = qed_ilt_dump(p_hwfn,
					  p_ptt,
					  dump_buf, buf_size_in_dwords, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
}
5700 
/* Reads the attention status (and mask) registers of the given block and
 * attention type, and fills 'results' with the registers whose status is
 * non-zero. When clear_status is set, the clear-on-read status address is
 * used instead of the plain status address.
 */
enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  enum block_id block_id,
				  enum dbg_attn_type attn_type,
				  bool clear_status,
				  struct dbg_attn_block_result *results)
{
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
	u8 reg_idx, num_attn_regs, num_result_regs = 0;
	const struct dbg_attn_reg *attn_reg_arr;

	if (status != DBG_STATUS_OK)
		return status;

	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
					       block_id,
					       attn_type, &num_attn_regs);

	/* NOTE(review): assumes results->reg_results can hold up to
	 * num_attn_regs entries - verify the array bound in dbg_hsi.
	 */
	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
		struct dbg_attn_reg_result *reg_result;
		u32 sts_addr, sts_val;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode - skip registers whose mode doesn't match the
		 * current device configuration
		 */
		eval_mode = GET_FIELD(reg_data->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		modes_buf_offset = GET_FIELD(reg_data->mode.data,
					     DBG_MODE_HDR_MODES_BUF_OFFSET);
		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
			continue;

		/* Mode match - read attention status register */
		sts_addr = DWORDS_TO_BYTES(clear_status ?
					   reg_data->sts_clr_address :
					   GET_FIELD(reg_data->data,
						     DBG_ATTN_REG_STS_ADDRESS));
		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
		if (!sts_val)
			continue;

		/* Non-zero attention status - add to results */
		reg_result = &results->reg_results[num_result_regs];
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
		SET_FIELD(reg_result->data,
			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
		reg_result->block_attn_offset = reg_data->block_attn_offset;
		reg_result->sts_val = sts_val;
		reg_result->mask_val = qed_rd(p_hwfn,
					      p_ptt,
					      DWORDS_TO_BYTES
					      (reg_data->mask_address));
		num_result_regs++;
	}

	/* Fill the result header fields */
	results->block_id = (u8)block_id;
	results->names_offset =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
	SET_FIELD(results->data,
		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);

	return DBG_STATUS_OK;
}
5773 
5774 /******************************* Data Types **********************************/
5775 
/* REG fifo element - one raw 64-bit entry; fields are extracted from 'data'
 * using the SHIFT/MASK pairs below (e.g. via GET_FIELD).
 */
struct reg_fifo_element {
	u64 data;
#define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
#define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
#define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
#define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
#define REG_FIFO_ELEMENT_PF_SHIFT		24
#define REG_FIFO_ELEMENT_PF_MASK		0xf
#define REG_FIFO_ELEMENT_VF_SHIFT		28
#define REG_FIFO_ELEMENT_VF_MASK		0xff
#define REG_FIFO_ELEMENT_PORT_SHIFT		36
#define REG_FIFO_ELEMENT_PORT_MASK		0x3
#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
#define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
#define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
#define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
#define REG_FIFO_ELEMENT_MASTER_SHIFT		43
#define REG_FIFO_ELEMENT_MASTER_MASK		0xf
#define REG_FIFO_ELEMENT_ERROR_SHIFT		47
#define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
};
5798 
/* REG fifo error element - maps an error code to a printable message */
struct reg_fifo_err {
	u32 err_code;
	const char *err_msg;
};
5804 
/* IGU fifo element - three dwords of command data plus a reserved dword.
 * dword0 fields use the DWORD0 macros; the DWORD12 macros presumably span
 * dword1/dword2 (32-bit WR_DATA continues into dword2) - verify against the
 * fifo parser.
 */
struct igu_fifo_element {
	u32 dword0;
#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
	u32 dword1;
	u32 dword2;
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
	u32 reserved;
};
5826 
/* IGU fifo write-command data - bit-field layout of the 32-bit write value */
struct igu_fifo_wr_data {
	u32 data;
#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
#define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
#define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
};
5842 
/* IGU fifo cleanup write-command data - bit-field layout of the 32-bit
 * write value used by cleanup commands.
 */
struct igu_fifo_cleanup_wr_data {
	u32 data;
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
};
5854 
/* Protection override element - one raw 64-bit entry; fields are extracted
 * from 'data' using the SHIFT/MASK pairs below.
 */
struct protection_override_element {
	u64 data;
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
};
5871 
/* IGU FIFO command sources.
 * NOTE(review): presumably used to index s_igu_fifo_source_strs (both have
 * 11 entries) - confirm against the IGU FIFO parsing code.
 */
enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC
};
5885 
/* Classification of IGU command addresses; used as the 'type' field of
 * the s_igu_fifo_addr_data range table below.
 */
enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};
5895 
/* Describes one IGU address range (see s_igu_fifo_addr_data). */
struct igu_fifo_addr_data {
	u16 start_addr;	/* first address of the range (inclusive) */
	u16 end_addr;	/* last address of the range (inclusive) */
	char *desc;	/* human-readable description of the range */
	char *vf_desc;	/* VF-specific description; NULL presumably means
			 * same as desc - confirm in parser
			 */
	enum igu_fifo_addr_types type;
};
5903 
5904 /******************************** Constants **********************************/
5905 
/* Max length of a single parsed output message (sizes s_temp_buf) */
#define MAX_MSG_LEN				1024

/* Module names longer than this are truncated when read from meta data */
#define MCP_TRACE_MAX_MODULE_LEN		8
#define MCP_TRACE_FORMAT_MAX_PARAMS		3
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)

/* Reg FIFO addresses are dword-based; multiply by this to get byte address */
#define REG_FIFO_ELEMENT_ADDR_FACTOR		4
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127

#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5917 
5918 /***************************** Constant Arrays *******************************/
5919 
/* Status string array - indexed by enum dbg_status; the entry order must be
 * kept in sync with that enum (each entry is annotated with its enumerator).
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
	"The filter/trigger constraint dword offsets are not enabled for recording",
	/* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
	"No matching framing mode",

	/* DBG_STATUS_VFC_READ_ERROR */
	"Error reading from VFC",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
	"Filters and triggers are not allowed in E4 256-bit mode",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */
	"Unknown chip",

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED0 */
	"",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_RESERVED1 */
	"",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
	"Insufficient HW IDs. Try to record less Storms/blocks",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use",

	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
	"The storm debug mode is not supported in the current chip",

	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
	"Other engine is supported only in BB",

	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
	"The configured filter mode requires a single Storm/block input",

	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",

	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
	"When triggering on Storm data, the Storm to trigger on must be specified",

	/* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
	"Failed to request MDUMP2 Offsize",

	/* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
	"Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",

	/* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
	"Invalid Signature found at start of MDUMP2",

	/* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
	"Invalid Log Size of MDUMP2",

	/* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
	"Invalid Log Header of MDUMP2",

	/* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
	"Invalid Log Data of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
	"Could not extract number of ports from regval buf of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
	"Could not extract MFW (link) status from regval buf of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
	"Could not display linkdump of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
	"Could not read PHY CFG of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
	"Could not read PLL Mode of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
	"Could not read TSCF/TSCE Lane Regs of MDUMP2",

	/* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
	"Could not allocate MDUMP2 reg-val internal buffer"
};
6137 
/* Idle check severity names array - indexed by the rule header's severity
 * field (see qed_parse_idle_chk_dump_rules); order must match the
 * IDLE_CHK_SEVERITY_* values.
 */
static const char * const s_idle_chk_severity_str[] = {
	"Error",
	"Error if no traffic",
	"Warning"
};
6144 
/* MCP Trace level names array - indexed by the trace entry's level field */
static const char * const s_mcp_trace_level_str[] = {
	"ERROR",
	"TRACE",
	"DEBUG"
};
6151 
/* Access type names array (0 = read, 1 = write) */
static const char * const s_access_strs[] = {
	"read",
	"write"
};
6157 
/* Privilege type names array - indexed by the privilege field of a reg
 * FIFO element.
 */
static const char * const s_privilege_strs[] = {
	"VF",
	"PDA",
	"HV",
	"UA"
};
6165 
/* Protection type names array - indexed by the protection field of a reg
 * FIFO element; values 0-3 use the default protection, 4-7 are overrides.
 */
static const char * const s_protection_strs[] = {
	"(default)",
	"(default)",
	"(default)",
	"(default)",
	"override VF",
	"override PDA",
	"override HV",
	"override UA"
};
6177 
/* Master type names array - indexed by the master field of a reg FIFO
 * element; "???" marks reserved/unknown master IDs.
 */
static const char * const s_master_strs[] = {
	"???",
	"pxp",
	"mcp",
	"msdm",
	"psdm",
	"ysdm",
	"usdm",
	"tsdm",
	"xsdm",
	"dbu",
	"dmae",
	"jdap",
	"???",
	"???",
	"???",
	"???"
};
6197 
/* REG FIFO error messages array: maps error code values to human-readable
 * strings. NOTE(review): presumably matched against the error field of a
 * reg FIFO element by the reg FIFO parser - confirm there; the codes are
 * discrete values, not a pure bitmask (note 16 and 17).
 */
static struct reg_fifo_err s_reg_fifo_errors[] = {
	{1, "grc timeout"},
	{2, "address doesn't belong to any block"},
	{4, "reserved address in block or write to read-only address"},
	{8, "privilege/protection mismatch"},
	{16, "path isolation error"},
	{17, "RSL error"}
};
6207 
/* IGU FIFO sources array - 11 entries, matching the 11 values of
 * enum igu_fifo_sources.
 */
static const char * const s_igu_fifo_source_strs[] = {
	"TSTORM",
	"MSTORM",
	"USTORM",
	"XSTORM",
	"YSTORM",
	"PSTORM",
	"PCIE",
	"NIG_QM_PBF",
	"CAU",
	"ATTN",
	"GRC",
};
6222 
/* IGU FIFO error messages - indexed by the error code of an IGU FIFO
 * element.
 */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
6241 
/* IGU FIFO address data: maps inclusive IGU command address ranges to a
 * description and an address type. Ranges are listed in ascending order
 * and cover 0x0-0x7ff without gaps.
 */
static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
	{0x0, 0x101, "MSI-X Memory", NULL,
	 IGU_ADDR_TYPE_MSIX_MEM},
	{0x102, 0x1ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x200, 0x200, "Write PBA[0:63]", NULL,
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x201, 0x201, "Write PBA[64:127]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x202, 0x202, "Write PBA[128]", "reserved",
	 IGU_ADDR_TYPE_WRITE_PBA},
	{0x203, 0x3ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
	 IGU_ADDR_TYPE_WRITE_INT_ACK},
	{0x5f0, 0x5f0, "Attention bits update", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f1, 0x5f1, "Attention bits set", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f2, 0x5f2, "Attention bits clear", NULL,
	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
	 IGU_ADDR_TYPE_READ_INT},
	{0x5f7, 0x5ff, "reserved", NULL,
	 IGU_ADDR_TYPE_RESERVED},
	{0x600, 0x7ff, "Producer update", NULL,
	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
};
6277 
6278 /******************************** Variables **********************************/
6279 
/* Temporary buffer, used for print size calculations. Returned by
 * qed_get_buf_ptr() when the caller passes a NULL results buffer, so
 * sprintf() output is discarded while its length is still counted.
 */
static char s_temp_buf[MAX_MSG_LEN];
6282 
6283 /**************************** Private Functions ******************************/
6284 
/* Container for compile-time (static) assertions of the user-side debug
 * code; intentionally empty here.
 */
static void qed_user_static_asserts(void)
{
}
6288 
6289 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6290 {
6291 	return (a + b) % size;
6292 }
6293 
6294 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6295 {
6296 	return (size + a - b) % size;
6297 }
6298 
6299 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6300  * bytes) and returns them as a dword value. the specified buffer offset is
6301  * updated.
6302  */
6303 static u32 qed_read_from_cyclic_buf(void *buf,
6304 				    u32 *offset,
6305 				    u32 buf_size, u8 num_bytes_to_read)
6306 {
6307 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6308 	u32 val = 0;
6309 
6310 	val_ptr = (u8 *)&val;
6311 
6312 	/* Assume running on a LITTLE ENDIAN and the buffer is network order
6313 	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
6314 	 */
6315 	for (i = 0; i < num_bytes_to_read; i++) {
6316 		val_ptr[i] = bytes_buf[*offset];
6317 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6318 	}
6319 
6320 	return val;
6321 }
6322 
6323 /* Reads and returns the next byte from the specified buffer.
6324  * The specified buffer offset is updated.
6325  */
6326 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6327 {
6328 	return ((u8 *)buf)[(*offset)++];
6329 }
6330 
6331 /* Reads and returns the next dword from the specified buffer.
6332  * The specified buffer offset is updated.
6333  */
6334 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6335 {
6336 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6337 
6338 	*offset += 4;
6339 
6340 	return dword_val;
6341 }
6342 
/* Reads the next string from the specified buffer, and copies it to the
 * specified pointer. The specified buffer offset is updated.
 * Copies at most 'size' bytes and always forces a NUL terminator at
 * dest[size - 1]; the offset advances by the full 'size' regardless of the
 * actual string length (the on-buffer field is fixed-size).
 */
static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
	const char *source_str = &((const char *)buf)[*offset];

	strncpy(dest, source_str, size);
	dest[size - 1] = '\0';
	*offset += size;
}
6354 
6355 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6356  * If the specified buffer in NULL, a temporary buffer pointer is returned.
6357  */
6358 static char *qed_get_buf_ptr(void *buf, u32 offset)
6359 {
6360 	return buf ? (char *)buf + offset : s_temp_buf;
6361 }
6362 
6363 /* Reads a param from the specified buffer. Returns the number of dwords read.
6364  * If the returned str_param is NULL, the param is numeric and its value is
6365  * returned in num_param.
6366  * Otheriwise, the param is a string and its pointer is returned in str_param.
6367  */
6368 static u32 qed_read_param(u32 *dump_buf,
6369 			  const char **param_name,
6370 			  const char **param_str_val, u32 *param_num_val)
6371 {
6372 	char *char_buf = (char *)dump_buf;
6373 	size_t offset = 0;
6374 
6375 	/* Extract param name */
6376 	*param_name = char_buf;
6377 	offset += strlen(*param_name) + 1;
6378 
6379 	/* Check param type */
6380 	if (*(char_buf + offset++)) {
6381 		/* String param */
6382 		*param_str_val = char_buf + offset;
6383 		*param_num_val = 0;
6384 		offset += strlen(*param_str_val) + 1;
6385 		if (offset & 0x3)
6386 			offset += (4 - (offset & 0x3));
6387 	} else {
6388 		/* Numeric param */
6389 		*param_str_val = NULL;
6390 		if (offset & 0x3)
6391 			offset += (4 - (offset & 0x3));
6392 		*param_num_val = *(u32 *)(char_buf + offset);
6393 		offset += 4;
6394 	}
6395 
6396 	return (u32)offset / 4;
6397 }
6398 
6399 /* Reads a section header from the specified buffer.
6400  * Returns the number of dwords read.
6401  */
6402 static u32 qed_read_section_hdr(u32 *dump_buf,
6403 				const char **section_name,
6404 				u32 *num_section_params)
6405 {
6406 	const char *param_str_val;
6407 
6408 	return qed_read_param(dump_buf,
6409 			      section_name, &param_str_val, num_section_params);
6410 }
6411 
6412 /* Reads section params from the specified buffer and prints them to the results
6413  * buffer. Returns the number of dwords read.
6414  */
6415 static u32 qed_print_section_params(u32 *dump_buf,
6416 				    u32 num_section_params,
6417 				    char *results_buf, u32 *num_chars_printed)
6418 {
6419 	u32 i, dump_offset = 0, results_offset = 0;
6420 
6421 	for (i = 0; i < num_section_params; i++) {
6422 		const char *param_name, *param_str_val;
6423 		u32 param_num_val = 0;
6424 
6425 		dump_offset += qed_read_param(dump_buf + dump_offset,
6426 					      &param_name,
6427 					      &param_str_val, &param_num_val);
6428 
6429 		if (param_str_val)
6430 			results_offset +=
6431 				sprintf(qed_get_buf_ptr(results_buf,
6432 							results_offset),
6433 					"%s: %s\n", param_name, param_str_val);
6434 		else if (strcmp(param_name, "fw-timestamp"))
6435 			results_offset +=
6436 				sprintf(qed_get_buf_ptr(results_buf,
6437 							results_offset),
6438 					"%s: %d\n", param_name, param_num_val);
6439 	}
6440 
6441 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6442 				  "\n");
6443 
6444 	*num_chars_printed = results_offset;
6445 
6446 	return dump_offset;
6447 }
6448 
6449 /* Returns the block name that matches the specified block ID,
6450  * or NULL if not found.
6451  */
6452 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6453 					  enum block_id block_id)
6454 {
6455 	const struct dbg_block_user *block =
6456 	    (const struct dbg_block_user *)
6457 	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6458 
6459 	return (const char *)block->name;
6460 }
6461 
6462 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6463 							 *p_hwfn)
6464 {
6465 	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6466 }
6467 
/* Parses the idle check rules and returns the number of characters printed.
 * In case of parsing error, returns 0.
 *
 * dump_buf/dump_buf_end - dword stream of dumped rule results to parse.
 * num_rules - number of dumped rules to parse.
 * print_fw_idle_chk - if true, rules that carry a FW message print it;
 *		       otherwise the LSI message is printed.
 * results_buf - output text buffer (NULL to only compute the size).
 * num_errors/num_warnings - out counters, incremented per rule severity.
 */
static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
					 u32 *dump_buf,
					 u32 *dump_buf_end,
					 u32 num_rules,
					 bool print_fw_idle_chk,
					 char *results_buf,
					 u32 *num_errors, u32 *num_warnings)
{
	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	u32 rule_idx;
	u16 i, j;

	*num_errors = 0;
	*num_warnings = 0;

	/* Go over dumped results */
	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
	     rule_idx++) {
		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
		struct dbg_idle_chk_result_hdr *hdr;
		const char *parsing_str, *lsi_msg;
		u32 parsing_str_offset;
		bool has_fw_msg;
		u8 curr_reg_id;

		/* Locate this rule's parsing data by its rule ID */
		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
		rule_parsing_data =
		    (const struct dbg_idle_chk_rule_parsing_data *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
		    hdr->rule_id;
		parsing_str_offset =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
		has_fw_msg =
		    GET_FIELD(rule_parsing_data->data,
			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
		/* parsing_str points at a sequence of NUL-terminated strings:
		 * LSI message, optional FW message, then register names.
		 */
		parsing_str = (const char *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
		    parsing_str_offset;
		lsi_msg = parsing_str;
		curr_reg_id = 0;

		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
			return 0;

		/* Skip rule header */
		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));

		/* Update errors/warnings count */
		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
			(*num_errors)++;
		else
			(*num_warnings)++;

		/* Print rule severity */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s: ",
			    s_idle_chk_severity_str[hdr->severity]);

		/* Print rule message. If a FW message exists it follows the
		 * LSI message, so skip past the LSI string first.
		 */
		if (has_fw_msg)
			parsing_str += strlen(parsing_str) + 1;
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), "%s.",
			    has_fw_msg &&
			    print_fw_idle_chk ? parsing_str : lsi_msg);
		/* Advance past the printed message to the register names */
		parsing_str += strlen(parsing_str) + 1;

		/* Print register values */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset), " Registers:");
		for (i = 0;
		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
		     i++) {
			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
			bool is_mem;
			u8 reg_id;

			reg_hdr =
				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
			is_mem = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
			reg_id = GET_FIELD(reg_hdr->data,
					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);

			/* Skip reg header */
			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));

			/* Skip register names until the required reg_id is
			 * reached.
			 */
			for (; reg_id > curr_reg_id; curr_reg_id++)
				parsing_str += strlen(parsing_str) + 1;

			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), " %s",
				    parsing_str);
			/* For memory entries, print the dumped entry index */
			if (i < hdr->num_dumped_cond_regs && is_mem)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "[%d]", hdr->mem_entry_id +
					    reg_hdr->start_entry);
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset), "=");
			/* Print the register's dumped dwords, comma-separated */
			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "0x%x", *dump_buf);
				if (j < reg_hdr->size - 1)
					results_offset +=
					    sprintf(qed_get_buf_ptr
						    (results_buf,
						     results_offset), ",");
			}
		}

		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
	}

	/* Check if end of dump buffer was exceeded */
	if (dump_buf > dump_buf_end)
		return 0;

	return results_offset;
}
6607 
/* Parses an idle check dump buffer.
 * If results_buf is not NULL, the idle check results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The dump is expected to contain a "global_params" section followed by an
 * "idle_chk" section; the rule data is parsed twice - once printing FW
 * messages and once printing LSI messages.
 * The parsing status is returned.
 */
static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
					       u32 *dump_buf,
					       u32 num_dumped_dwords,
					       char *results_buf,
					       u32 *parsed_results_bytes,
					       u32 *num_errors,
					       u32 *num_warnings)
{
	u32 num_section_params = 0, num_rules, num_rules_not_dumped;
	const char *section_name, *param_name, *param_str_val;
	u32 *dump_buf_end = dump_buf + num_dumped_dwords;

	/* Offset in results_buf in bytes */
	u32 results_offset = 0;

	*parsed_results_bytes = 0;
	*num_errors = 0;
	*num_warnings = 0;

	/* Parsing requires the binary debug arrays to have been loaded */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read idle_chk section
	 * There may be 1 or 2 idle_chk section parameters:
	 * - 1st is "num_rules"
	 * - 2nd is "num_rules_not_dumped" (optional)
	 */

	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "idle_chk") ||
	    (num_section_params != 2 && num_section_params != 1))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &num_rules);
	if (strcmp(param_name, "num_rules"))
		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	if (num_section_params > 1) {
		dump_buf += qed_read_param(dump_buf,
					   &param_name,
					   &param_str_val,
					   &num_rules_not_dumped);
		if (strcmp(param_name, "num_rules_not_dumped"))
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	} else {
		num_rules_not_dumped = 0;
	}

	if (num_rules) {
		u32 rules_print_size;

		/* Print FW output (first pass over the same rule data) */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "FW_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      true,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;

		/* Print LSI output (second pass over the same rule data;
		 * num_errors/num_warnings are simply overwritten).
		 */
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nLSI_IDLE_CHECK:\n");
		rules_print_size =
			qed_parse_idle_chk_dump_rules(p_hwfn,
						      dump_buf,
						      dump_buf_end,
						      num_rules,
						      false,
						      results_buf ?
						      results_buf +
						      results_offset :
						      NULL,
						      num_errors,
						      num_warnings);
		results_offset += rules_print_size;
		if (!rules_print_size)
			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
	}

	/* Print errors/warnings count */
	if (*num_errors)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
			    *num_errors, *num_warnings);
	else if (*num_warnings)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully (with %d warnings)\n",
			    *num_warnings);
	else
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check completed successfully\n");

	if (num_rules_not_dumped)
		results_offset +=
		    sprintf(qed_get_buf_ptr(results_buf,
					    results_offset),
			    "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
			    num_rules_not_dumped);

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
6751 
6752 /* Allocates and fills MCP Trace meta data based on the specified meta data
6753  * dump buffer.
6754  * Returns debug status code.
6755  */
6756 static enum dbg_status
6757 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6758 			      const u32 *meta_buf)
6759 {
6760 	struct dbg_tools_user_data *dev_user_data;
6761 	u32 offset = 0, signature, i;
6762 	struct mcp_trace_meta *meta;
6763 	u8 *meta_buf_bytes;
6764 
6765 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6766 	meta = &dev_user_data->mcp_trace_meta;
6767 	meta_buf_bytes = (u8 *)meta_buf;
6768 
6769 	/* Free the previous meta before loading a new one. */
6770 	if (meta->is_allocated)
6771 		qed_mcp_trace_free_meta_data(p_hwfn);
6772 
6773 	memset(meta, 0, sizeof(*meta));
6774 
6775 	/* Read first signature */
6776 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6777 	if (signature != NVM_MAGIC_VALUE)
6778 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6779 
6780 	/* Read no. of modules and allocate memory for their pointers */
6781 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6782 	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6783 				GFP_KERNEL);
6784 	if (!meta->modules)
6785 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6786 
6787 	/* Allocate and read all module strings */
6788 	for (i = 0; i < meta->modules_num; i++) {
6789 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6790 
6791 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6792 		if (!(*(meta->modules + i))) {
6793 			/* Update number of modules to be released */
6794 			meta->modules_num = i ? i - 1 : 0;
6795 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6796 		}
6797 
6798 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6799 				      *(meta->modules + i));
6800 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6801 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6802 	}
6803 
6804 	/* Read second signature */
6805 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6806 	if (signature != NVM_MAGIC_VALUE)
6807 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6808 
6809 	/* Read number of formats and allocate memory for all formats */
6810 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6811 	meta->formats = kcalloc(meta->formats_num,
6812 				sizeof(struct mcp_trace_format),
6813 				GFP_KERNEL);
6814 	if (!meta->formats)
6815 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6816 
6817 	/* Allocate and read all strings */
6818 	for (i = 0; i < meta->formats_num; i++) {
6819 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6820 		u8 format_len;
6821 
6822 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6823 							   &offset);
6824 		format_len = GET_MFW_FIELD(format_ptr->data,
6825 					   MCP_TRACE_FORMAT_LEN);
6826 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6827 		if (!format_ptr->format_str) {
6828 			/* Update number of modules to be released */
6829 			meta->formats_num = i ? i - 1 : 0;
6830 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6831 		}
6832 
6833 		qed_read_str_from_buf(meta_buf_bytes,
6834 				      &offset,
6835 				      format_len, format_ptr->format_str);
6836 	}
6837 
6838 	meta->is_allocated = true;
6839 	return DBG_STATUS_OK;
6840 }
6841 
6842 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6843  * are printed to it. The parsing status is returned.
6844  * Arguments:
6845  * trace_buf - MCP trace cyclic buffer
6846  * trace_buf_size - MCP trace cyclic buffer size in bytes
6847  * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6848  *		 buffer.
6849  * data_size - size in bytes of data to parse.
6850  * parsed_buf - destination buffer for parsed data.
6851  * parsed_results_bytes - size of parsed data in bytes.
6852  */
6853 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6854 					       u8 *trace_buf,
6855 					       u32 trace_buf_size,
6856 					       u32 data_offset,
6857 					       u32 data_size,
6858 					       char *parsed_buf,
6859 					       u32 *parsed_results_bytes)
6860 {
6861 	struct dbg_tools_user_data *dev_user_data;
6862 	struct mcp_trace_meta *meta;
6863 	u32 param_mask, param_shift;
6864 	enum dbg_status status;
6865 
6866 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6867 	meta = &dev_user_data->mcp_trace_meta;
6868 	*parsed_results_bytes = 0;
6869 
6870 	if (!meta->is_allocated)
6871 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6872 
6873 	status = DBG_STATUS_OK;
6874 
6875 	while (data_size) {
6876 		struct mcp_trace_format *format_ptr;
6877 		u8 format_level, format_module;
6878 		u32 params[3] = { 0, 0, 0 };
6879 		u32 header, format_idx, i;
6880 
6881 		if (data_size < MFW_TRACE_ENTRY_SIZE)
6882 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6883 
6884 		header = qed_read_from_cyclic_buf(trace_buf,
6885 						  &data_offset,
6886 						  trace_buf_size,
6887 						  MFW_TRACE_ENTRY_SIZE);
6888 		data_size -= MFW_TRACE_ENTRY_SIZE;
6889 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6890 
6891 		/* Skip message if its index doesn't exist in the meta data */
6892 		if (format_idx >= meta->formats_num) {
6893 			u8 format_size = (u8)GET_MFW_FIELD(header,
6894 							   MFW_TRACE_PRM_SIZE);
6895 
6896 			if (data_size < format_size)
6897 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6898 
6899 			data_offset = qed_cyclic_add(data_offset,
6900 						     format_size,
6901 						     trace_buf_size);
6902 			data_size -= format_size;
6903 			continue;
6904 		}
6905 
6906 		format_ptr = &meta->formats[format_idx];
6907 
6908 		for (i = 0,
6909 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6910 		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6911 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6912 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6913 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6914 			/* Extract param size (0..3) */
6915 			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6916 					     param_shift);
6917 
6918 			/* If the param size is zero, there are no other
6919 			 * parameters.
6920 			 */
6921 			if (!param_size)
6922 				break;
6923 
6924 			/* Size is encoded using 2 bits, where 3 is used to
6925 			 * encode 4.
6926 			 */
6927 			if (param_size == 3)
6928 				param_size = 4;
6929 
6930 			if (data_size < param_size)
6931 				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6932 
6933 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6934 							     &data_offset,
6935 							     trace_buf_size,
6936 							     param_size);
6937 			data_size -= param_size;
6938 		}
6939 
6940 		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6941 						 MCP_TRACE_FORMAT_LEVEL);
6942 		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6943 						  MCP_TRACE_FORMAT_MODULE);
6944 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6945 			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6946 
6947 		/* Print current message to results buffer */
6948 		*parsed_results_bytes +=
6949 			sprintf(qed_get_buf_ptr(parsed_buf,
6950 						*parsed_results_bytes),
6951 				"%s %-8s: ",
6952 				s_mcp_trace_level_str[format_level],
6953 				meta->modules[format_module]);
6954 		*parsed_results_bytes +=
6955 		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6956 			    format_ptr->format_str,
6957 			    params[0], params[1], params[2]);
6958 	}
6959 
6960 	/* Add string NULL terminator */
6961 	(*parsed_results_bytes)++;
6962 
6963 	return status;
6964 }
6965 
/* Parses an MCP Trace dump buffer.
 * If result_buf is not NULL, the MCP Trace results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 *
 * free_meta_data - when true, the meta data allocated for parsing is released
 *		    before returning; when false it is kept for subsequent
 *		    (continuous) parsing calls.
 */
static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
						u32 *dump_buf,
						char *results_buf,
						u32 *parsed_results_bytes,
						bool free_meta_data)
{
	const char *section_name, *param_name, *param_str_val;
	u32 data_size, trace_data_dwords, trace_meta_dwords;
	u32 offset, results_offset, results_buf_bytes;
	u32 param_num_val, num_section_params;
	struct mcp_trace *trace;
	enum dbg_status status;
	const u32 *meta_buf;
	u8 *trace_buf;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	/* Print global params. NOTE(review): results_offset is assumed to be
	 * set by qed_print_section_params on all paths - confirm against its
	 * implementation, since it is read later in this function.
	 */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Read trace_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_data_dwords = param_num_val;

	/* Prepare trace info: the section body starts with a struct mcp_trace
	 * header, immediately followed by the cyclic trace buffer itself.
	 */
	trace = (struct mcp_trace *)dump_buf;
	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
		return DBG_STATUS_MCP_TRACE_BAD_DATA;

	trace_buf = (u8 *)dump_buf + sizeof(*trace);
	offset = trace->trace_oldest;
	/* Valid bytes between the oldest entry and the producer (cyclic) */
	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
	dump_buf += trace_data_dwords;

	/* Read meta_data section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "mcp_trace_meta"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	dump_buf += qed_read_param(dump_buf,
				   &param_name, &param_str_val, &param_num_val);
	if (strcmp(param_name, "size"))
		return DBG_STATUS_MCP_TRACE_BAD_DATA;
	trace_meta_dwords = param_num_val;

	/* Choose meta data buffer */
	if (!trace_meta_dwords) {
		/* Dump doesn't include meta data - fall back to the buffer
		 * registered via qed_dbg_mcp_trace_set_meta_data().
		 */
		struct dbg_tools_user_data *dev_user_data =
			qed_dbg_get_user_data(p_hwfn);

		if (!dev_user_data->mcp_trace_user_meta_buf)
			return DBG_STATUS_MCP_TRACE_NO_META;

		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
	} else {
		/* Dump includes meta data */
		meta_buf = dump_buf;
	}

	/* Allocate meta data memory */
	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
	if (status != DBG_STATUS_OK)
		return status;

	/* Parse the cyclic buffer, appending text after the already-printed
	 * global params (or computing size only when results_buf is NULL).
	 */
	status = qed_parse_mcp_trace_buf(p_hwfn,
					 trace_buf,
					 trace->size,
					 offset,
					 data_size,
					 results_buf ?
					 results_buf + results_offset :
					 NULL,
					 &results_buf_bytes);
	if (status != DBG_STATUS_OK)
		return status;

	if (free_meta_data)
		qed_mcp_trace_free_meta_data(p_hwfn);

	/* Total = printed global params + parsed trace (incl. terminator) */
	*parsed_results_bytes = results_offset + results_buf_bytes;

	return DBG_STATUS_OK;
}
7071 
7072 /* Parses a Reg FIFO dump buffer.
7073  * If result_buf is not NULL, the Reg FIFO results are printed to it.
7074  * In any case, the required results buffer size is assigned to
7075  * parsed_results_bytes.
7076  * The parsing status is returned.
7077  */
7078 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
7079 					       char *results_buf,
7080 					       u32 *parsed_results_bytes)
7081 {
7082 	const char *section_name, *param_name, *param_str_val;
7083 	u32 param_num_val, num_section_params, num_elements;
7084 	struct reg_fifo_element *elements;
7085 	u8 i, j, err_code, vf_val;
7086 	u32 results_offset = 0;
7087 	char vf_str[4];
7088 
7089 	/* Read global_params section */
7090 	dump_buf += qed_read_section_hdr(dump_buf,
7091 					 &section_name, &num_section_params);
7092 	if (strcmp(section_name, "global_params"))
7093 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7094 
7095 	/* Print global params */
7096 	dump_buf += qed_print_section_params(dump_buf,
7097 					     num_section_params,
7098 					     results_buf, &results_offset);
7099 
7100 	/* Read reg_fifo_data section */
7101 	dump_buf += qed_read_section_hdr(dump_buf,
7102 					 &section_name, &num_section_params);
7103 	if (strcmp(section_name, "reg_fifo_data"))
7104 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7105 	dump_buf += qed_read_param(dump_buf,
7106 				   &param_name, &param_str_val, &param_num_val);
7107 	if (strcmp(param_name, "size"))
7108 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7109 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
7110 		return DBG_STATUS_REG_FIFO_BAD_DATA;
7111 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
7112 	elements = (struct reg_fifo_element *)dump_buf;
7113 
7114 	/* Decode elements */
7115 	for (i = 0; i < num_elements; i++) {
7116 		const char *err_msg = NULL;
7117 
7118 		/* Discover if element belongs to a VF or a PF */
7119 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
7120 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
7121 			sprintf(vf_str, "%s", "N/A");
7122 		else
7123 			sprintf(vf_str, "%d", vf_val);
7124 
7125 		/* Find error message */
7126 		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
7127 		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
7128 			if (err_code == s_reg_fifo_errors[j].err_code)
7129 				err_msg = s_reg_fifo_errors[j].err_msg;
7130 
7131 		/* Add parsed element to parsed buffer */
7132 		results_offset +=
7133 		    sprintf(qed_get_buf_ptr(results_buf,
7134 					    results_offset),
7135 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
7136 			    elements[i].data,
7137 			    (u32)GET_FIELD(elements[i].data,
7138 					   REG_FIFO_ELEMENT_ADDRESS) *
7139 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
7140 			    s_access_strs[GET_FIELD(elements[i].data,
7141 						    REG_FIFO_ELEMENT_ACCESS)],
7142 			    (u32)GET_FIELD(elements[i].data,
7143 					   REG_FIFO_ELEMENT_PF),
7144 			    vf_str,
7145 			    (u32)GET_FIELD(elements[i].data,
7146 					   REG_FIFO_ELEMENT_PORT),
7147 			    s_privilege_strs[GET_FIELD(elements[i].data,
7148 						REG_FIFO_ELEMENT_PRIVILEGE)],
7149 			    s_protection_strs[GET_FIELD(elements[i].data,
7150 						REG_FIFO_ELEMENT_PROTECTION)],
7151 			    s_master_strs[GET_FIELD(elements[i].data,
7152 						    REG_FIFO_ELEMENT_MASTER)],
7153 			    err_msg ? err_msg : "unknown error code");
7154 	}
7155 
7156 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7157 						  results_offset),
7158 				  "fifo contained %d elements", num_elements);
7159 
7160 	/* Add 1 for string NULL termination */
7161 	*parsed_results_bytes = results_offset + 1;
7162 
7163 	return DBG_STATUS_OK;
7164 }
7165 
/* Parses a single IGU FIFO element and appends its human-readable
 * description to results_buf at *results_offset (which is advanced).
 * Returns DBG_STATUS_IGU_FIFO_BAD_DATA when the element references an
 * unknown source, error type or command address.
 */
static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
						  *element, char
						  *results_buf,
						  u32 *results_offset)
{
	const struct igu_fifo_addr_data *found_addr = NULL;
	u8 source, err_type, i, is_cleanup;
	char parsed_addr_data[32];
	char parsed_wr_data[256];
	u32 wr_data, prod_cons;
	bool is_wr_cmd, is_pf;
	u16 cmd_addr;
	u64 dword12;

	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
	 * FIFO element.
	 */
	dword12 = ((u64)element->dword2 << 32) | element->dword1;
	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);

	/* Validate decoded indices before using them to index the string
	 * tables below.
	 */
	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;
	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Find address data: cmd_addr must fall inside one of the known
	 * [start_addr, end_addr] ranges.
	 */
	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
		const struct igu_fifo_addr_data *curr_addr =
			&s_igu_fifo_addr_data[i];

		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
		    curr_addr->end_addr)
			found_addr = curr_addr;
	}

	if (!found_addr)
		return DBG_STATUS_IGU_FIFO_BAD_DATA;

	/* Prepare parsed address data */
	switch (found_addr->type) {
	case IGU_ADDR_TYPE_MSIX_MEM:
		/* MSI-X memory: two address units per vector */
		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
		break;
	case IGU_ADDR_TYPE_WRITE_INT_ACK:
	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
		/* SB index is the offset from the range start */
		sprintf(parsed_addr_data,
			" SB = 0x%x", cmd_addr - found_addr->start_addr);
		break;
	default:
		parsed_addr_data[0] = '\0';
	}

	/* Read commands carry no write data */
	if (!is_wr_cmd) {
		parsed_wr_data[0] = '\0';
		goto out;
	}

	/* Prepare parsed write data */
	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);

	if (source == IGU_SRC_ATTN) {
		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
	} else {
		if (is_cleanup) {
			u8 cleanup_val, cleanup_type;

			cleanup_val =
				GET_FIELD(wr_data,
					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
			cleanup_type =
			    GET_FIELD(wr_data,
				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);

			sprintf(parsed_wr_data,
				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
				cleanup_val ? "set" : "clear",
				cleanup_type);
		} else {
			u8 update_flag, en_dis_int_for_sb, segment;
			u8 timer_mask;

			update_flag = GET_FIELD(wr_data,
						IGU_FIFO_WR_DATA_UPDATE_FLAG);
			en_dis_int_for_sb =
				GET_FIELD(wr_data,
					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
			segment = GET_FIELD(wr_data,
					    IGU_FIFO_WR_DATA_SEGMENT);
			timer_mask = GET_FIELD(wr_data,
					       IGU_FIFO_WR_DATA_TIMER_MASK);

			sprintf(parsed_wr_data,
				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
				prod_cons,
				update_flag ? "update" : "nop",
				en_dis_int_for_sb ?
				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
				"enable",
				segment ? "attn" : "regular",
				timer_mask);
		}
	}
out:
	/* Add parsed element to parsed buffer */
	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
						   *results_offset),
				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
				   element->dword2, element->dword1,
				   element->dword0,
				   is_pf ? "pf" : "vf",
				   GET_FIELD(element->dword0,
					     IGU_FIFO_ELEMENT_DWORD0_FID),
				   s_igu_fifo_source_strs[source],
				   is_wr_cmd ? "wr" : "rd",
				   cmd_addr,
				   (!is_pf && found_addr->vf_desc)
				   ? found_addr->vf_desc
				   : found_addr->desc,
				   parsed_addr_data,
				   parsed_wr_data,
				   s_igu_fifo_error_strs[err_type]);

	return DBG_STATUS_OK;
}
7296 
7297 /* Parses an IGU FIFO dump buffer.
7298  * If result_buf is not NULL, the IGU FIFO results are printed to it.
7299  * In any case, the required results buffer size is assigned to
7300  * parsed_results_bytes.
7301  * The parsing status is returned.
7302  */
7303 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7304 					       char *results_buf,
7305 					       u32 *parsed_results_bytes)
7306 {
7307 	const char *section_name, *param_name, *param_str_val;
7308 	u32 param_num_val, num_section_params, num_elements;
7309 	struct igu_fifo_element *elements;
7310 	enum dbg_status status;
7311 	u32 results_offset = 0;
7312 	u8 i;
7313 
7314 	/* Read global_params section */
7315 	dump_buf += qed_read_section_hdr(dump_buf,
7316 					 &section_name, &num_section_params);
7317 	if (strcmp(section_name, "global_params"))
7318 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7319 
7320 	/* Print global params */
7321 	dump_buf += qed_print_section_params(dump_buf,
7322 					     num_section_params,
7323 					     results_buf, &results_offset);
7324 
7325 	/* Read igu_fifo_data section */
7326 	dump_buf += qed_read_section_hdr(dump_buf,
7327 					 &section_name, &num_section_params);
7328 	if (strcmp(section_name, "igu_fifo_data"))
7329 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7330 	dump_buf += qed_read_param(dump_buf,
7331 				   &param_name, &param_str_val, &param_num_val);
7332 	if (strcmp(param_name, "size"))
7333 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7334 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7335 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7336 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7337 	elements = (struct igu_fifo_element *)dump_buf;
7338 
7339 	/* Decode elements */
7340 	for (i = 0; i < num_elements; i++) {
7341 		status = qed_parse_igu_fifo_element(&elements[i],
7342 						    results_buf,
7343 						    &results_offset);
7344 		if (status != DBG_STATUS_OK)
7345 			return status;
7346 	}
7347 
7348 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7349 						  results_offset),
7350 				  "fifo contained %d elements", num_elements);
7351 
7352 	/* Add 1 for string NULL termination */
7353 	*parsed_results_bytes = results_offset + 1;
7354 
7355 	return DBG_STATUS_OK;
7356 }
7357 
7358 static enum dbg_status
7359 qed_parse_protection_override_dump(u32 *dump_buf,
7360 				   char *results_buf,
7361 				   u32 *parsed_results_bytes)
7362 {
7363 	const char *section_name, *param_name, *param_str_val;
7364 	u32 param_num_val, num_section_params, num_elements;
7365 	struct protection_override_element *elements;
7366 	u32 results_offset = 0;
7367 	u8 i;
7368 
7369 	/* Read global_params section */
7370 	dump_buf += qed_read_section_hdr(dump_buf,
7371 					 &section_name, &num_section_params);
7372 	if (strcmp(section_name, "global_params"))
7373 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7374 
7375 	/* Print global params */
7376 	dump_buf += qed_print_section_params(dump_buf,
7377 					     num_section_params,
7378 					     results_buf, &results_offset);
7379 
7380 	/* Read protection_override_data section */
7381 	dump_buf += qed_read_section_hdr(dump_buf,
7382 					 &section_name, &num_section_params);
7383 	if (strcmp(section_name, "protection_override_data"))
7384 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7385 	dump_buf += qed_read_param(dump_buf,
7386 				   &param_name, &param_str_val, &param_num_val);
7387 	if (strcmp(param_name, "size"))
7388 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7389 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7390 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7391 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7392 	elements = (struct protection_override_element *)dump_buf;
7393 
7394 	/* Decode elements */
7395 	for (i = 0; i < num_elements; i++) {
7396 		u32 address = GET_FIELD(elements[i].data,
7397 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7398 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7399 
7400 		results_offset +=
7401 		    sprintf(qed_get_buf_ptr(results_buf,
7402 					    results_offset),
7403 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7404 			    i, address,
7405 			    (u32)GET_FIELD(elements[i].data,
7406 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7407 			    (u32)GET_FIELD(elements[i].data,
7408 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7409 			    (u32)GET_FIELD(elements[i].data,
7410 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7411 			    s_protection_strs[GET_FIELD(elements[i].data,
7412 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7413 			    s_protection_strs[GET_FIELD(elements[i].data,
7414 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7415 	}
7416 
7417 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7418 						  results_offset),
7419 				  "protection override contained %d elements",
7420 				  num_elements);
7421 
7422 	/* Add 1 for string NULL termination */
7423 	*parsed_results_bytes = results_offset + 1;
7424 
7425 	return DBG_STATUS_OK;
7426 }
7427 
/* Parses a FW Asserts dump buffer.
 * If result_buf is not NULL, the FW Asserts results are printed to it.
 * In any case, the required results buffer size is assigned to
 * parsed_results_bytes.
 * The parsing status is returned.
 *
 * The dump is a sequence of sections: one "global_params" section, then any
 * number of "fw_asserts" sections (one per storm), terminated by a "last"
 * section.
 */
static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
						 char *results_buf,
						 u32 *parsed_results_bytes)
{
	u32 num_section_params, param_num_val, i, results_offset = 0;
	const char *param_name, *param_str_val, *section_name;
	bool last_section_found = false;

	*parsed_results_bytes = 0;

	/* Read global_params section */
	dump_buf += qed_read_section_hdr(dump_buf,
					 &section_name, &num_section_params);
	if (strcmp(section_name, "global_params"))
		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

	/* Print global params */
	dump_buf += qed_print_section_params(dump_buf,
					     num_section_params,
					     results_buf, &results_offset);

	/* Walk remaining sections until the "last" sentinel is seen */
	while (!last_section_found) {
		dump_buf += qed_read_section_hdr(dump_buf,
						 &section_name,
						 &num_section_params);
		if (!strcmp(section_name, "fw_asserts")) {
			/* Extract params: expect exactly "storm" (letter
			 * identifying the storm) and "size" (dump dwords).
			 */
			const char *storm_letter = NULL;
			u32 storm_dump_size = 0;

			for (i = 0; i < num_section_params; i++) {
				dump_buf += qed_read_param(dump_buf,
							   &param_name,
							   &param_str_val,
							   &param_num_val);
				if (!strcmp(param_name, "storm"))
					storm_letter = param_str_val;
				else if (!strcmp(param_name, "size"))
					storm_dump_size = param_num_val;
				else
					return
					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
			}

			/* Both params are mandatory */
			if (!storm_letter || !storm_dump_size)
				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;

			/* Print data: a header line, then one hex dword per
			 * line (dump_buf advances past the storm's data).
			 */
			results_offset +=
			    sprintf(qed_get_buf_ptr(results_buf,
						    results_offset),
				    "\n%sSTORM_ASSERT: size=%d\n",
				    storm_letter, storm_dump_size);
			for (i = 0; i < storm_dump_size; i++, dump_buf++)
				results_offset +=
				    sprintf(qed_get_buf_ptr(results_buf,
							    results_offset),
					    "%08x\n", *dump_buf);
		} else if (!strcmp(section_name, "last")) {
			last_section_found = true;
		} else {
			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
		}
	}

	/* Add 1 for string NULL termination */
	*parsed_results_bytes = results_offset + 1;

	return DBG_STATUS_OK;
}
7504 
7505 /***************************** Public Functions *******************************/
7506 
7507 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7508 					 const u8 * const bin_ptr)
7509 {
7510 	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7511 	u8 buf_id;
7512 
7513 	/* Convert binary data to debug arrays */
7514 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7515 		qed_set_dbg_bin_buf(p_hwfn,
7516 				    (enum bin_dbg_buffer_type)buf_id,
7517 				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7518 				    buf_hdrs[buf_id].length);
7519 
7520 	return DBG_STATUS_OK;
7521 }
7522 
7523 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7524 					void **user_data_ptr)
7525 {
7526 	*user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7527 				 GFP_KERNEL);
7528 	if (!(*user_data_ptr))
7529 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7530 
7531 	return DBG_STATUS_OK;
7532 }
7533 
7534 const char *qed_dbg_get_status_str(enum dbg_status status)
7535 {
7536 	return (status <
7537 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7538 }
7539 
7540 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7541 						  u32 *dump_buf,
7542 						  u32 num_dumped_dwords,
7543 						  u32 *results_buf_size)
7544 {
7545 	u32 num_errors, num_warnings;
7546 
7547 	return qed_parse_idle_chk_dump(p_hwfn,
7548 				       dump_buf,
7549 				       num_dumped_dwords,
7550 				       NULL,
7551 				       results_buf_size,
7552 				       &num_errors, &num_warnings);
7553 }
7554 
7555 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7556 					   u32 *dump_buf,
7557 					   u32 num_dumped_dwords,
7558 					   char *results_buf,
7559 					   u32 *num_errors,
7560 					   u32 *num_warnings)
7561 {
7562 	u32 parsed_buf_size;
7563 
7564 	return qed_parse_idle_chk_dump(p_hwfn,
7565 				       dump_buf,
7566 				       num_dumped_dwords,
7567 				       results_buf,
7568 				       &parsed_buf_size,
7569 				       num_errors, num_warnings);
7570 }
7571 
7572 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7573 				     const u32 *meta_buf)
7574 {
7575 	struct dbg_tools_user_data *dev_user_data =
7576 		qed_dbg_get_user_data(p_hwfn);
7577 
7578 	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7579 }
7580 
7581 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7582 						   u32 *dump_buf,
7583 						   u32 num_dumped_dwords,
7584 						   u32 *results_buf_size)
7585 {
7586 	return qed_parse_mcp_trace_dump(p_hwfn,
7587 					dump_buf, NULL, results_buf_size, true);
7588 }
7589 
7590 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7591 					    u32 *dump_buf,
7592 					    u32 num_dumped_dwords,
7593 					    char *results_buf)
7594 {
7595 	u32 parsed_buf_size;
7596 
7597 	/* Doesn't do anything, needed for compile time asserts */
7598 	qed_user_static_asserts();
7599 
7600 	return qed_parse_mcp_trace_dump(p_hwfn,
7601 					dump_buf,
7602 					results_buf, &parsed_buf_size, true);
7603 }
7604 
7605 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7606 						 u32 *dump_buf,
7607 						 char *results_buf)
7608 {
7609 	u32 parsed_buf_size;
7610 
7611 	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7612 					&parsed_buf_size, false);
7613 }
7614 
7615 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7616 					 u8 *dump_buf,
7617 					 u32 num_dumped_bytes,
7618 					 char *results_buf)
7619 {
7620 	u32 parsed_results_bytes;
7621 
7622 	return qed_parse_mcp_trace_buf(p_hwfn,
7623 				       dump_buf,
7624 				       num_dumped_bytes,
7625 				       0,
7626 				       num_dumped_bytes,
7627 				       results_buf, &parsed_results_bytes);
7628 }
7629 
7630 /* Frees the specified MCP Trace meta data */
7631 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7632 {
7633 	struct dbg_tools_user_data *dev_user_data;
7634 	struct mcp_trace_meta *meta;
7635 	u32 i;
7636 
7637 	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7638 	meta = &dev_user_data->mcp_trace_meta;
7639 	if (!meta->is_allocated)
7640 		return;
7641 
7642 	/* Release modules */
7643 	if (meta->modules) {
7644 		for (i = 0; i < meta->modules_num; i++)
7645 			kfree(meta->modules[i]);
7646 		kfree(meta->modules);
7647 	}
7648 
7649 	/* Release formats */
7650 	if (meta->formats) {
7651 		for (i = 0; i < meta->formats_num; i++)
7652 			kfree(meta->formats[i].format_str);
7653 		kfree(meta->formats);
7654 	}
7655 
7656 	meta->is_allocated = false;
7657 }
7658 
7659 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7660 						  u32 *dump_buf,
7661 						  u32 num_dumped_dwords,
7662 						  u32 *results_buf_size)
7663 {
7664 	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7665 }
7666 
7667 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7668 					   u32 *dump_buf,
7669 					   u32 num_dumped_dwords,
7670 					   char *results_buf)
7671 {
7672 	u32 parsed_buf_size;
7673 
7674 	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7675 }
7676 
7677 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7678 						  u32 *dump_buf,
7679 						  u32 num_dumped_dwords,
7680 						  u32 *results_buf_size)
7681 {
7682 	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7683 }
7684 
7685 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7686 					   u32 *dump_buf,
7687 					   u32 num_dumped_dwords,
7688 					   char *results_buf)
7689 {
7690 	u32 parsed_buf_size;
7691 
7692 	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7693 }
7694 
7695 enum dbg_status
7696 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7697 					     u32 *dump_buf,
7698 					     u32 num_dumped_dwords,
7699 					     u32 *results_buf_size)
7700 {
7701 	return qed_parse_protection_override_dump(dump_buf,
7702 						  NULL, results_buf_size);
7703 }
7704 
7705 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7706 						      u32 *dump_buf,
7707 						      u32 num_dumped_dwords,
7708 						      char *results_buf)
7709 {
7710 	u32 parsed_buf_size;
7711 
7712 	return qed_parse_protection_override_dump(dump_buf,
7713 						  results_buf,
7714 						  &parsed_buf_size);
7715 }
7716 
7717 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7718 						    u32 *dump_buf,
7719 						    u32 num_dumped_dwords,
7720 						    u32 *results_buf_size)
7721 {
7722 	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7723 }
7724 
7725 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7726 					     u32 *dump_buf,
7727 					     u32 num_dumped_dwords,
7728 					     char *results_buf)
7729 {
7730 	u32 parsed_buf_size;
7731 
7732 	return qed_parse_fw_asserts_dump(dump_buf,
7733 					 results_buf, &parsed_buf_size);
7734 }
7735 
/* Parses attention block results and logs every asserted attention bit via
 * DP_NOTICE, resolving block/attention names from the registered debug
 * arrays. Returns DBG_STATUS_INVALID_ARGS for an unknown block id and
 * DBG_STATUS_DBG_ARRAY_NOT_SET when the name/index arrays were not loaded.
 */
enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
				   struct dbg_attn_block_result *results)
{
	const u32 *block_attn_name_offsets;
	const char *attn_name_base;
	const char *block_name;
	enum dbg_attn_type attn_type;
	u8 num_regs, i, j;

	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
	if (!block_name)
		return DBG_STATUS_INVALID_ARGS;

	/* All three debug arrays must have been registered via
	 * qed_dbg_user_set_bin_ptr before parsing.
	 */
	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Per-block table of offsets into the parsing-strings blob */
	block_attn_name_offsets =
	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
	    results->names_offset;

	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;

	/* Go over registers with a non-zero attention status */
	for (i = 0; i < num_regs; i++) {
		struct dbg_attn_bit_mapping *bit_mapping;
		struct dbg_attn_reg_result *reg_result;
		u8 num_reg_attn, bit_idx = 0;

		reg_result = &results->reg_results[i];
		num_reg_attn = GET_FIELD(reg_result->data,
					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
		/* Mapping of status bits to attention-name indices for this
		 * register.
		 */
		bit_mapping = (struct dbg_attn_bit_mapping *)
		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
		    reg_result->block_attn_offset;

		/* Go over attention status bits */
		for (j = 0; j < num_reg_attn; j++) {
			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
						     DBG_ATTN_BIT_MAPPING_VAL);
			const char *attn_name, *attn_type_str, *masked_str;
			u32 attn_name_offset;
			u32 sts_addr;

			/* Check if bit mask should be advanced (due to unused
			 * bits): in that case the mapping entry encodes a
			 * count of bits to skip instead of a name index.
			 */
			if (GET_FIELD(bit_mapping[j].data,
				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
				bit_idx += (u8)attn_idx_val;
				continue;
			}

			/* Check current bit index */
			if (reg_result->sts_val & BIT(bit_idx)) {
				/* An attention bit with value=1 was found
				 * Find attention name
				 */
				attn_name_offset =
					block_attn_name_offsets[attn_idx_val];
				attn_name = attn_name_base + attn_name_offset;
				attn_type_str =
					(attn_type ==
					 ATTN_TYPE_INTERRUPT ? "Interrupt" :
					 "Parity");
				masked_str = reg_result->mask_val &
					     BIT(bit_idx) ?
					     " [masked]" : "";
				/* Status register address is stored in dword
				 * units; printed below as a byte address.
				 */
				sts_addr =
				GET_FIELD(reg_result->data,
					  DBG_ATTN_REG_RESULT_STS_ADDRESS);
				DP_NOTICE(p_hwfn,
					  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
					  block_name, attn_type_str, attn_name,
					  sts_addr * 4, bit_idx, masked_str);
			}

			bit_idx++;
		}
	}

	return DBG_STATUS_OK;
}
7822 
7823 /* Wrapper for unifying the idle_chk and mcp_trace api */
7824 static enum dbg_status
7825 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7826 				   u32 *dump_buf,
7827 				   u32 num_dumped_dwords,
7828 				   char *results_buf)
7829 {
7830 	u32 num_errors, num_warnnings;
7831 
7832 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7833 					  results_buf, &num_errors,
7834 					  &num_warnnings);
7835 }
7836 
7837 static DEFINE_MUTEX(qed_dbg_lock);
7838 
7839 #define MAX_PHY_RESULT_BUFFER 9000
7840 
7841 /******************************** Feature Meta data section ******************/
7842 
7843 #define GRC_NUM_STR_FUNCS 2
7844 #define IDLE_CHK_NUM_STR_FUNCS 1
7845 #define MCP_TRACE_NUM_STR_FUNCS 1
7846 #define REG_FIFO_NUM_STR_FUNCS 1
7847 #define IGU_FIFO_NUM_STR_FUNCS 1
7848 #define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
7849 #define FW_ASSERTS_NUM_STR_FUNCS 1
7850 #define ILT_NUM_STR_FUNCS 1
7851 #define PHY_NUM_STR_FUNCS 20
7852 
7853 /* Feature meta data lookup table */
7854 static struct {
7855 	char *name;
7856 	u32 num_funcs;
7857 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7858 				    struct qed_ptt *p_ptt, u32 *size);
7859 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7860 					struct qed_ptt *p_ptt, u32 *dump_buf,
7861 					u32 buf_size, u32 *dumped_dwords);
7862 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7863 					 u32 *dump_buf, u32 num_dumped_dwords,
7864 					 char *results_buf);
7865 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7866 					    u32 *dump_buf,
7867 					    u32 num_dumped_dwords,
7868 					    u32 *results_buf_size);
7869 	const struct qed_func_lookup *hsi_func_lookup;
7870 } qed_features_lookup[] = {
7871 	{
7872 	"grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
7873 		    qed_dbg_grc_dump, NULL, NULL, NULL}, {
7874 	"idle_chk", IDLE_CHK_NUM_STR_FUNCS,
7875 		    qed_dbg_idle_chk_get_dump_buf_size,
7876 		    qed_dbg_idle_chk_dump,
7877 		    qed_print_idle_chk_results_wrapper,
7878 		    qed_get_idle_chk_results_buf_size,
7879 		    NULL}, {
7880 	"mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
7881 		    qed_dbg_mcp_trace_get_dump_buf_size,
7882 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7883 		    qed_get_mcp_trace_results_buf_size,
7884 		    NULL}, {
7885 	"reg_fifo", REG_FIFO_NUM_STR_FUNCS,
7886 		    qed_dbg_reg_fifo_get_dump_buf_size,
7887 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7888 		    qed_get_reg_fifo_results_buf_size,
7889 		    NULL}, {
7890 	"igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
7891 		    qed_dbg_igu_fifo_get_dump_buf_size,
7892 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7893 		    qed_get_igu_fifo_results_buf_size,
7894 		    NULL}, {
7895 	"protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
7896 		    qed_dbg_protection_override_get_dump_buf_size,
7897 		    qed_dbg_protection_override_dump,
7898 		    qed_print_protection_override_results,
7899 		    qed_get_protection_override_results_buf_size,
7900 		    NULL}, {
7901 	"fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
7902 		    qed_dbg_fw_asserts_get_dump_buf_size,
7903 		    qed_dbg_fw_asserts_dump,
7904 		    qed_print_fw_asserts_results,
7905 		    qed_get_fw_asserts_results_buf_size,
7906 		    NULL}, {
7907 	"ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
7908 		    qed_dbg_ilt_dump, NULL, NULL, NULL},};
7909 
7910 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7911 {
7912 	u32 i, precision = 80;
7913 
7914 	if (!p_text_buf)
7915 		return;
7916 
7917 	pr_notice("\n%.*s", precision, p_text_buf);
7918 	for (i = precision; i < text_size; i += precision)
7919 		pr_cont("%.*s", precision, p_text_buf + i);
7920 	pr_cont("\n");
7921 }
7922 
7923 #define QED_RESULTS_BUF_MIN_SIZE 16
7924 /* Generic function for decoding debug feature info */
static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
				      enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 txt_size_bytes, null_char_pos, i;
	u32 *dbuf, dwords;
	enum dbg_status rc;
	char *text_buf;

	/* Check if feature supports formatting capability */
	if (!qed_features_lookup[feature_idx].results_buf_size)
		return DBG_STATUS_OK;

	dbuf = (u32 *)feature->dump_buf;
	dwords = feature->dumped_dwords;

	/* Obtain size of formatted output */
	rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
							       dbuf,
							       dwords,
							       &txt_size_bytes);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Make sure that the allocated size is a multiple of dword
	 * (4 bytes). Remember where the terminating NUL will land before
	 * rounding, so the padding can be applied from there.
	 */
	null_char_pos = txt_size_bytes - 1;
	txt_size_bytes = (txt_size_bytes + 3) & ~0x3;

	if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
		DP_NOTICE(p_hwfn->cdev,
			  "formatted size of feature was too small %d. Aborting\n",
			  txt_size_bytes);
		return DBG_STATUS_INVALID_ARGS;
	}

	/* allocate temp text buf */
	text_buf = vzalloc(txt_size_bytes);
	if (!text_buf) {
		DP_NOTICE(p_hwfn->cdev,
			  "failed to allocate text buffer. Aborting\n");
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
	}

	/* Decode feature opcodes to string on temp buf */
	rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
							    dbuf,
							    dwords,
							    text_buf);
	if (rc != DBG_STATUS_OK) {
		vfree(text_buf);
		return rc;
	}

	/* Replace the original null character with a '\n' character.
	 * The bytes that were added as a result of the dword alignment are also
	 * padded with '\n' characters.
	 */
	for (i = null_char_pos; i < txt_size_bytes; i++)
		text_buf[i] = '\n';

	/* Dump printable feature to log */
	if (p_hwfn->cdev->print_dbg_data)
		qed_dbg_print_feature(text_buf, txt_size_bytes);

	/* Dump binary data as is to the output file - the formatted text is
	 * discarded and the raw dump_buf stays in place.
	 */
	if (p_hwfn->cdev->dbg_bin_dump) {
		vfree(text_buf);
		return rc;
	}

	/* Free the old dump_buf and point the dump_buf to the newly allocated
	 * and formatted text buffer. Ownership of text_buf transfers to the
	 * feature; it is freed later (e.g. in qed_dbg_pf_exit).
	 */
	vfree(feature->dump_buf);
	feature->dump_buf = text_buf;
	feature->buf_size = txt_size_bytes;
	feature->dumped_dwords = txt_size_bytes / 4;

	return rc;
}
8008 
8009 #define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF
8010 
8011 /* Generic function for performing the dump of a debug feature. */
static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum qed_dbg_features feature_idx)
{
	struct qed_dbg_feature *feature =
	    &p_hwfn->cdev->dbg_features[feature_idx];
	u32 buf_size_dwords, *dbuf, *dwords;
	enum dbg_status rc;

	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
		  qed_features_lookup[feature_idx].name);

	/* Dump_buf was already allocated need to free (this can happen if dump
	 * was called but file was never read).
	 * We can't use the buffer as is since size may have changed.
	 */
	if (feature->dump_buf) {
		vfree(feature->dump_buf);
		feature->dump_buf = NULL;
	}

	/* Get buffer size from hsi, allocate accordingly, and perform the
	 * dump. NVRAM_GET_IMAGE_FAILED is tolerated here - see below.
	 */
	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return rc;

	/* Oversized features are skipped (reported as success with size 0) */
	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
		feature->buf_size = 0;
		DP_NOTICE(p_hwfn->cdev,
			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
			  qed_features_lookup[feature_idx].name,
			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);

		return DBG_STATUS_OK;
	}

	feature->buf_size = buf_size_dwords * sizeof(u32);
	feature->dump_buf = vmalloc(feature->buf_size);
	if (!feature->dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	dbuf = (u32 *)feature->dump_buf;
	dwords = &feature->dumped_dwords;
	rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
							   dbuf,
							   feature->buf_size /
							   sizeof(u32),
							   dwords);

	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
	 * In this case the buffer holds valid binary data, but we won't able
	 * to parse it (since parsing relies on data in NVRAM which is only
	 * accessible when MFW is responsive). skip the formatting but return
	 * success so that binary data is provided.
	 */
	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return DBG_STATUS_OK;

	if (rc != DBG_STATUS_OK)
		return rc;

	/* Format output */
	rc = format_feature(p_hwfn, feature_idx);
	return rc;
}
8080 
/* Dump the GRC feature into @buffer; byte count returned via @num_dumped_bytes */
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
}
8085 
/* Return the required buffer size (in bytes) for a GRC dump */
int qed_dbg_grc_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
}
8090 
/* Dump the idle-check feature into @buffer; byte count via @num_dumped_bytes */
int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
			       num_dumped_bytes);
}
8096 
/* Return the required buffer size (in bytes) for an idle-check dump */
int qed_dbg_idle_chk_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
}
8101 
/* Dump the reg-fifo feature into @buffer; byte count via @num_dumped_bytes */
int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
			       num_dumped_bytes);
}
8107 
/* Return the required buffer size (in bytes) for a reg-fifo dump */
int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
}
8112 
/* Dump the IGU-fifo feature into @buffer; byte count via @num_dumped_bytes */
int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
			       num_dumped_bytes);
}
8118 
/* Return the required buffer size (in bytes) for an IGU-fifo dump */
int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
}
8123 
8124 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
8125 				    enum qed_nvm_images image_id, u32 *length)
8126 {
8127 	struct qed_nvm_image_att image_att;
8128 	int rc;
8129 
8130 	*length = 0;
8131 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
8132 	if (rc)
8133 		return rc;
8134 
8135 	*length = image_att.length;
8136 
8137 	return rc;
8138 }
8139 
/* Read the NVM image @image_id into @buffer, byte-swapping it in place to
 * big-endian (except NVM_META). On success, *num_dumped_bytes holds the
 * dword-rounded image length; on failure it is zero.
 * NOTE(review): the caller is expected to provide a buffer at least as large
 * as the rounded image length - confirm against qed_dbg_nvm_image_length()
 * usage in the callers.
 */
static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
			     u32 *num_dumped_bytes,
			     enum qed_nvm_images image_id)
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 len_rounded;
	int rc;

	*num_dumped_bytes = 0;
	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
	if (rc)
		return rc;

	DP_NOTICE(p_hwfn->cdev,
		  "Collecting a debug feature [\"nvram image %d\"]\n",
		  image_id);

	/* Round up so the in-place dword swap below covers the whole image */
	len_rounded = roundup(len_rounded, sizeof(u32));
	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
	if (rc)
		return rc;

	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
	if (image_id != QED_NVM_IMAGE_NVM_META)
		cpu_to_be32_array((__force __be32 *)buffer,
				  (const u32 *)buffer,
				  len_rounded / sizeof(u32));

	*num_dumped_bytes = len_rounded;

	return rc;
}
8173 
/* Dump the protection-override feature; byte count via @num_dumped_bytes */
int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
				u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
			       num_dumped_bytes);
}
8180 
/* Return the required buffer size (in bytes) for a protection-override dump */
int qed_dbg_protection_override_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
}
8185 
/* Dump the FW-asserts feature into @buffer; byte count via @num_dumped_bytes */
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
		       u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
			       num_dumped_bytes);
}
8192 
/* Return the required buffer size (in bytes) for a FW-asserts dump */
int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
}
8197 
/* Dump the ILT feature into @buffer; byte count via @num_dumped_bytes */
int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
}
8202 
/* Return the required buffer size (in bytes) for an ILT dump */
int qed_dbg_ilt_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
}
8207 
/* Dump the MCP-trace feature into @buffer; byte count via @num_dumped_bytes */
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
		      u32 *num_dumped_bytes)
{
	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
			       num_dumped_bytes);
}
8214 
/* Return the required buffer size (in bytes) for an MCP-trace dump */
int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
{
	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
}
8219 
8220 /* Defines the amount of bytes allocated for recording the length of debugfs
8221  * feature buffer.
8222  */
8223 #define REGDUMP_HEADER_SIZE			sizeof(u32)
8224 #define REGDUMP_HEADER_SIZE_SHIFT		0
8225 #define REGDUMP_HEADER_SIZE_MASK		0xffffff
8226 #define REGDUMP_HEADER_FEATURE_SHIFT		24
8227 #define REGDUMP_HEADER_FEATURE_MASK		0x1f
8228 #define REGDUMP_HEADER_BIN_DUMP_SHIFT		29
8229 #define REGDUMP_HEADER_BIN_DUMP_MASK		0x1
8230 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
8231 #define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
8232 #define REGDUMP_HEADER_ENGINE_SHIFT		31
8233 #define REGDUMP_HEADER_ENGINE_MASK		0x1
8234 #define REGDUMP_MAX_SIZE			0x1000000
8235 #define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)
8236 
/* Feature identifiers stored in the REGDUMP_HEADER_FEATURE field of each
 * regdump header. The numeric values are written into dump files and are
 * presumably parsed by external tools - do not renumber existing entries.
 */
enum debug_print_features {
	OLD_MODE = 0,
	IDLE_CHK = 1,
	GRC_DUMP = 2,
	MCP_TRACE = 3,
	REG_FIFO = 4,
	PROTECTION_OVERRIDE = 5,
	IGU_FIFO = 6,
	PHY = 7,
	FW_ASSERTS = 8,
	NVM_CFG1 = 9,
	DEFAULT_CFG = 10,
	NVM_META = 11,
	MDUMP = 12,
	ILT_DUMP = 13,
};
8253 
8254 static u32 qed_calc_regdump_header(struct qed_dev *cdev,
8255 				   enum debug_print_features feature,
8256 				   int engine, u32 feature_size,
8257 				   u8 omit_engine, u8 dbg_bin_dump)
8258 {
8259 	u32 res = 0;
8260 
8261 	SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
8262 	if (res != feature_size)
8263 		DP_NOTICE(cdev,
8264 			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
8265 			  feature, feature_size);
8266 
8267 	SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
8268 	SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
8269 	SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
8270 	SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
8271 
8272 	return res;
8273 }
8274 
/* Collect all debug features into @buffer as a sequence of
 * [regdump header dword][feature data] records: per-engine features first
 * (two idle_chks, reg_fifo, igu_fifo, protection_override, fw_asserts,
 * optionally ILT, and GRC last), then engine-common features (mcp_trace and
 * the NVM images). A failing feature is logged and skipped; the function
 * itself always returns 0.
 *
 * NOTE(review): cdev->dbg_bin_dump is set before taking qed_dbg_lock and
 * cleared after releasing it - confirm that concurrent callers are
 * serialized at a higher level.
 */
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
	u8 cur_engine, omit_engine = 0, org_engine;
	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
	u32 offset = 0, feature_size;

	/* Save GRC params; the dumps below may modify them (restored before
	 * the GRC dump itself).
	 */
	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		grc_params[i] = dev_data->grc.param_val[i];

	if (!QED_IS_CMT(cdev))
		omit_engine = 1;

	cdev->dbg_bin_dump = 1;
	mutex_lock(&qed_dbg_lock);

	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Collect idle_chks and grcDump for each hw function */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "obtaining idle_chk and grcdump for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);

		/* First idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* Second idle_chk */
		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IDLE_CHK,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
		}

		/* reg_fifo dump */
		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, REG_FIFO,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
		}

		/* igu_fifo dump */
		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
				      REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, IGU_FIFO,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
		}

		/* protection_override dump */
		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
						 REGDUMP_HEADER_SIZE,
						 &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev,
						    PROTECTION_OVERRIDE,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev,
			       "qed_dbg_protection_override failed. rc = %d\n",
			       rc);
		}

		/* fw_asserts dump */
		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
					REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, FW_ASSERTS,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
			       rc);
		}

		/* ILT dump - skipped when disabled or oversized, matching
		 * the size accounting in qed_dbg_all_data_size().
		 */
		feature_size = qed_dbg_ilt_size(cdev);
		if (!cdev->disable_ilt_dump && feature_size <
		    ILT_DUMP_MAX_SIZE) {
			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
					 REGDUMP_HEADER_SIZE, &feature_size);
			if (!rc) {
				*(u32 *)((u8 *)buffer + offset) =
				    qed_calc_regdump_header(cdev, ILT_DUMP,
							    cur_engine,
							    feature_size,
							    omit_engine,
							    cdev->dbg_bin_dump);
				offset += (feature_size + REGDUMP_HEADER_SIZE);
			} else {
				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
				       rc);
			}
		}

		/* Grc dump - must be last because when mcp stuck it will
		 * clutter idle_chk, reg_fifo, ...
		 */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
			dev_data->grc.param_val[i] = grc_params[i];

		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
				 REGDUMP_HEADER_SIZE, &feature_size);
		if (!rc) {
			*(u32 *)((u8 *)buffer + offset) =
			    qed_calc_regdump_header(cdev, GRC_DUMP,
						    cur_engine,
						    feature_size,
						    omit_engine,
						    cdev->dbg_bin_dump);
			offset += (feature_size + REGDUMP_HEADER_SIZE);
		} else {
			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
		}
	}

	qed_set_debug_engine(cdev, org_engine);

	/* NOTE(review): from here on cur_engine equals cdev->num_hwfns (the
	 * loop exit value); presumably the engine field is ignored for
	 * engine-common features when omit_engine is set - confirm.
	 */

	/* mcp_trace */
	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else {
		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
	}

	/* nvm cfg1 - a missing NVM image (-ENOENT) is not an error */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_CFG1);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
		       rc);
	}

		/* nvm default */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_DEFAULT_CFG);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, DEFAULT_CFG,
					    cur_engine, feature_size,
					    omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_DEFAULT_CFG,
		       "QED_NVM_IMAGE_DEFAULT_CFG", rc);
	}

	/* nvm meta */
	rc = qed_dbg_nvm_image(cdev,
			       (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_NVM_META);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, NVM_META, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
		       rc);
	}

	/* nvm mdump */
	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
			       REGDUMP_HEADER_SIZE, &feature_size,
			       QED_NVM_IMAGE_MDUMP);
	if (!rc) {
		*(u32 *)((u8 *)buffer + offset) =
		    qed_calc_regdump_header(cdev, MDUMP, cur_engine,
					    feature_size, omit_engine,
					    cdev->dbg_bin_dump);
		offset += (feature_size + REGDUMP_HEADER_SIZE);
	} else if (rc != -ENOENT) {
		DP_ERR(cdev,
		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
		       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
	}

	mutex_unlock(&qed_dbg_lock);
	cdev->dbg_bin_dump = 0;

	return 0;
}
8525 
/* Compute the worst-case buffer size (in bytes) needed by qed_dbg_all_data():
 * per-engine feature sizes plus headers for every engine, plus the
 * engine-common features and NVM images. If the total would exceed
 * REGDUMP_MAX_SIZE, the ILT dump is disabled and its size subtracted.
 *
 * NOTE(review): qed_dbg_phy_size() is included here but qed_dbg_all_data()
 * does not dump a PHY feature, so the returned size is an upper bound -
 * confirm intentional.
 */
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
	u8 cur_engine, org_engine;

	cdev->disable_ilt_dump = false;
	org_engine = qed_get_debug_engine(cdev);
	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
		/* Engine specific */
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "calculating idle_chk and grcdump register length for current engine\n");
		qed_set_debug_engine(cdev, cur_engine);
		/* Two idle_chk dumps are collected per engine - count both */
		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
		    REGDUMP_HEADER_SIZE +
		    qed_dbg_protection_override_size(cdev) +
		    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
		if (ilt_len < ILT_DUMP_MAX_SIZE) {
			total_ilt_len += ilt_len;
			regs_len += ilt_len;
		}
	}

	qed_set_debug_engine(cdev, org_engine);

	/* Engine common */
	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
	    REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;
	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
	if (image_len)
		regs_len += REGDUMP_HEADER_SIZE + image_len;

	if (regs_len > REGDUMP_MAX_SIZE) {
		DP_VERBOSE(cdev, QED_MSG_DEBUG,
			   "Dump exceeds max size 0x%x, disable ILT dump\n",
			   REGDUMP_MAX_SIZE);
		cdev->disable_ilt_dump = true;
		regs_len -= total_ilt_len;
	}

	return regs_len;
}
8582 
8583 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8584 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8585 {
8586 	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8587 	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8588 	enum dbg_status dbg_rc;
8589 	struct qed_ptt *p_ptt;
8590 	int rc = 0;
8591 
8592 	/* Acquire ptt */
8593 	p_ptt = qed_ptt_acquire(p_hwfn);
8594 	if (!p_ptt)
8595 		return -EINVAL;
8596 
8597 	/* Get dump */
8598 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8599 	if (dbg_rc != DBG_STATUS_OK) {
8600 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8601 			   qed_dbg_get_status_str(dbg_rc));
8602 		*num_dumped_bytes = 0;
8603 		rc = -EINVAL;
8604 		goto out;
8605 	}
8606 
8607 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8608 		   "copying debugfs feature to external buffer\n");
8609 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8610 	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
8611 			    4;
8612 
8613 out:
8614 	qed_ptt_release(p_hwfn, p_ptt);
8615 	return rc;
8616 }
8617 
8618 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8619 {
8620 	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8621 	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8622 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8623 	u32 buf_size_dwords;
8624 	enum dbg_status rc;
8625 
8626 	if (!p_ptt)
8627 		return -EINVAL;
8628 
8629 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8630 						   &buf_size_dwords);
8631 	if (rc != DBG_STATUS_OK)
8632 		buf_size_dwords = 0;
8633 
8634 	/* Feature will not be dumped if it exceeds maximum size */
8635 	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8636 		buf_size_dwords = 0;
8637 
8638 	qed_ptt_release(p_hwfn, p_ptt);
8639 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8640 	return qed_feature->buf_size;
8641 }
8642 
/* Return the worst-case PHY dump size: max size of phy info plus
 * phy mac_stat multiplied by the number of ports.
 */
int qed_dbg_phy_size(struct qed_dev *cdev)
{
	/* return max size of phy info and
	 * phy mac_stat multiplied by the number of ports
	 */
	return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
}
8650 
/* Return the engine (hwfn index) currently selected for debug operations */
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
	return cdev->engine_for_debug;
}
8655 
/* Select the engine (hwfn index) to be used for subsequent debug operations */
void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
{
	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
		   engine_number);
	cdev->engine_for_debug = engine_number;
}
8662 
/* Initialize debug facilities for all hw functions: point the debug code at
 * the debug-values section of the firmware blob and select engine 0 as the
 * default debug engine.
 */
void qed_dbg_pf_init(struct qed_dev *cdev)
{
	const u8 *dbg_values = NULL;
	int i;

	/* Sync ver with debugbus qed code */
	qed_dbg_set_app_ver(TOOLS_VERSION);

	/* Debug values are after init values.
	 * The offset is the first dword of the file.
	 */
	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;

	for_each_hwfn(cdev, i) {
		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
	}

	/* Set the hwfn to be 0 as default */
	cdev->engine_for_debug = 0;
}
8684 
8685 void qed_dbg_pf_exit(struct qed_dev *cdev)
8686 {
8687 	struct qed_dbg_feature *feature = NULL;
8688 	enum qed_dbg_features feature_idx;
8689 
8690 	/* debug features' buffers may be allocated if debug feature was used
8691 	 * but dump wasn't called
8692 	 */
8693 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8694 		feature = &cdev->dbg_features[feature_idx];
8695 		if (feature->dump_buf) {
8696 			vfree(feature->dump_buf);
8697 			feature->dump_buf = NULL;
8698 		}
8699 	}
8700 }
8701