xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_debug.c (revision ea68a3e9d14e9e0bf017d178fb4bd53b6deb1482)
1  // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2  /* QLogic qed NIC Driver
3   * Copyright (c) 2015 QLogic Corporation
4   * Copyright (c) 2019-2021 Marvell International Ltd.
5   */
6  
7  #include <linux/module.h>
8  #include <linux/vmalloc.h>
9  #include <linux/crc32.h>
10  #include "qed.h"
11  #include "qed_cxt.h"
12  #include "qed_hsi.h"
13  #include "qed_dbg_hsi.h"
14  #include "qed_hw.h"
15  #include "qed_mcp.h"
16  #include "qed_reg_addr.h"
17  
18  /* Memory groups enum */
19  enum mem_groups {
20  	MEM_GROUP_PXP_MEM,
21  	MEM_GROUP_DMAE_MEM,
22  	MEM_GROUP_CM_MEM,
23  	MEM_GROUP_QM_MEM,
24  	MEM_GROUP_DORQ_MEM,
25  	MEM_GROUP_BRB_RAM,
26  	MEM_GROUP_BRB_MEM,
27  	MEM_GROUP_PRS_MEM,
28  	MEM_GROUP_SDM_MEM,
29  	MEM_GROUP_PBUF,
30  	MEM_GROUP_IOR,
31  	MEM_GROUP_RAM,
32  	MEM_GROUP_BTB_RAM,
33  	MEM_GROUP_RDIF_CTX,
34  	MEM_GROUP_TDIF_CTX,
35  	MEM_GROUP_CFC_MEM,
36  	MEM_GROUP_CONN_CFC_MEM,
37  	MEM_GROUP_CAU_PI,
38  	MEM_GROUP_CAU_MEM,
39  	MEM_GROUP_CAU_MEM_EXT,
40  	MEM_GROUP_PXP_ILT,
41  	MEM_GROUP_MULD_MEM,
42  	MEM_GROUP_BTB_MEM,
43  	MEM_GROUP_IGU_MEM,
44  	MEM_GROUP_IGU_MSIX,
45  	MEM_GROUP_CAU_SB,
46  	MEM_GROUP_BMB_RAM,
47  	MEM_GROUP_BMB_MEM,
48  	MEM_GROUP_TM_MEM,
49  	MEM_GROUP_TASK_CFC_MEM,
50  	MEM_GROUPS_NUM
51  };
52  
53  /* Memory groups names */
54  static const char * const s_mem_group_names[] = {
55  	"PXP_MEM",
56  	"DMAE_MEM",
57  	"CM_MEM",
58  	"QM_MEM",
59  	"DORQ_MEM",
60  	"BRB_RAM",
61  	"BRB_MEM",
62  	"PRS_MEM",
63  	"SDM_MEM",
64  	"PBUF",
65  	"IOR",
66  	"RAM",
67  	"BTB_RAM",
68  	"RDIF_CTX",
69  	"TDIF_CTX",
70  	"CFC_MEM",
71  	"CONN_CFC_MEM",
72  	"CAU_PI",
73  	"CAU_MEM",
74  	"CAU_MEM_EXT",
75  	"PXP_ILT",
76  	"MULD_MEM",
77  	"BTB_MEM",
78  	"IGU_MEM",
79  	"IGU_MSIX",
80  	"CAU_SB",
81  	"BMB_RAM",
82  	"BMB_MEM",
83  	"TM_MEM",
84  	"TASK_CFC_MEM",
85  };
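
/* Note: s_mem_group_names above is indexed by enum mem_groups and must be
 * kept in exactly the same order as that enum.
 */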
86  
87  /* Idle check conditions */
88  
89  static u32 cond5(const u32 *r, const u32 *imm)
90  {
91  	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
92  }
93  
94  static u32 cond7(const u32 *r, const u32 *imm)
95  {
96  	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
97  }
98  
99  static u32 cond6(const u32 *r, const u32 *imm)
100  {
101  	return (r[0] & imm[0]) != imm[1];
102  }
103  
104  static u32 cond9(const u32 *r, const u32 *imm)
105  {
106  	return ((r[0] & imm[0]) >> imm[1]) !=
107  	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
108  }
109  
110  static u32 cond10(const u32 *r, const u32 *imm)
111  {
112  	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
113  }
114  
115  static u32 cond4(const u32 *r, const u32 *imm)
116  {
117  	return (r[0] & ~imm[0]) != imm[1];
118  }
119  
120  static u32 cond0(const u32 *r, const u32 *imm)
121  {
122  	return (r[0] & ~r[1]) != imm[0];
123  }
124  
125  static u32 cond14(const u32 *r, const u32 *imm)
126  {
127  	return (r[0] | imm[0]) != imm[1];
128  }
129  
130  static u32 cond1(const u32 *r, const u32 *imm)
131  {
132  	return r[0] != imm[0];
133  }
134  
135  static u32 cond11(const u32 *r, const u32 *imm)
136  {
137  	return r[0] != r[1] && r[2] == imm[0];
138  }
139  
140  static u32 cond12(const u32 *r, const u32 *imm)
141  {
142  	return r[0] != r[1] && r[2] > imm[0];
143  }
144  
145  static u32 cond3(const u32 *r, const u32 *imm)
146  {
147  	return r[0] != r[1];
148  }
149  
150  static u32 cond13(const u32 *r, const u32 *imm)
151  {
152  	return r[0] & imm[0];
153  }
154  
155  static u32 cond8(const u32 *r, const u32 *imm)
156  {
157  	return r[0] < (r[1] - imm[0]);
158  }
159  
160  static u32 cond2(const u32 *r, const u32 *imm)
161  {
162  	return r[0] > imm[0];
163  }
164  
165  /* Array of Idle Check conditions */
166  static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
167  	cond0,
168  	cond1,
169  	cond2,
170  	cond3,
171  	cond4,
172  	cond5,
173  	cond6,
174  	cond7,
175  	cond8,
176  	cond9,
177  	cond10,
178  	cond11,
179  	cond12,
180  	cond13,
181  	cond14,
182  };
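
/* Descriptive note: each idle-check rule in the debug binary references one
 * of the conditions above by its index in cond_arr. When a rule is evaluated,
 * r[] holds the register values read from the chip and imm[] holds the rule's
 * immediate operands, so e.g. cond1 simply checks r[0] != imm[0]. The exact
 * rule layout is defined by the dbg_idle_chk_* structures.
 */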
183  
184  #define NUM_PHYS_BLOCKS 84
185  
186  #define NUM_DBG_RESET_REGS 8
187  
188  /******************************* Data Types **********************************/
189  
190  enum hw_types {
191  	HW_TYPE_ASIC,
192  	PLATFORM_RESERVED,
193  	PLATFORM_RESERVED2,
194  	PLATFORM_RESERVED3,
195  	PLATFORM_RESERVED4,
196  	MAX_HW_TYPES
197  };
198  
199  /* CM context types */
200  enum cm_ctx_types {
201  	CM_CTX_CONN_AG,
202  	CM_CTX_CONN_ST,
203  	CM_CTX_TASK_AG,
204  	CM_CTX_TASK_ST,
205  	NUM_CM_CTX_TYPES
206  };
207  
208  /* Debug bus frame modes */
209  enum dbg_bus_frame_modes {
210  	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords (no HW) */
211  	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
212  	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dword, 3 HW dwords */
213  	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords (no Storms) */
214  	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords (no Storms) */
215  	DBG_BUS_NUM_FRAME_MODES
216  };
217  
218  /* Debug bus SEMI frame modes */
219  enum dbg_bus_semi_frame_modes {
220  	DBG_BUS_SEMI_FRAME_MODE_4FAST = 0,	/* 4 fast dw */
221  	DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
222  	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
223  	DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3,	/* 4 slow dw */
224  	DBG_BUS_SEMI_NUM_FRAME_MODES
225  };
226  
227  /* Debug bus filter types */
228  enum dbg_bus_filter_types {
229  	DBG_BUS_FILTER_TYPE_OFF,	/* Filter always off */
230  	DBG_BUS_FILTER_TYPE_PRE,	/* Filter before trigger only */
231  	DBG_BUS_FILTER_TYPE_POST,	/* Filter after trigger only */
232  	DBG_BUS_FILTER_TYPE_ON	/* Filter always on */
233  };
234  
235  /* Debug bus pre-trigger recording types */
236  enum dbg_bus_pre_trigger_types {
237  	DBG_BUS_PRE_TRIGGER_FROM_ZERO,	/* Record from time 0 */
238  	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,	/* Record some chunks before trigger */
239  	DBG_BUS_PRE_TRIGGER_DROP	/* Drop data before trigger */
240  };
241  
242  /* Debug bus post-trigger recording types */
243  enum dbg_bus_post_trigger_types {
244  	DBG_BUS_POST_TRIGGER_RECORD,	/* Start recording after trigger */
245  	DBG_BUS_POST_TRIGGER_DROP	/* Drop data after trigger */
246  };
247  
248  /* Debug bus other engine mode */
249  enum dbg_bus_other_engine_modes {
250  	DBG_BUS_OTHER_ENGINE_MODE_NONE,
251  	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
252  	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
253  	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
254  	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
255  };
256  
257  /* DBG block Framing mode definitions */
258  struct framing_mode_defs {
259  	u8 id;
260  	u8 blocks_dword_mask;
261  	u8 storms_dword_mask;
262  	u8 semi_framing_mode_id;
263  	u8 full_buf_thr;
264  };
265  
266  /* Chip constant definitions */
267  struct chip_defs {
268  	const char *name;
269  	u8 dwords_per_cycle;
270  	u8 num_framing_modes;
271  	u32 num_ilt_pages;
272  	struct framing_mode_defs *framing_modes;
273  };
274  
275  /* HW type constant definitions */
276  struct hw_type_defs {
277  	const char *name;
278  	u32 delay_factor;
279  	u32 dmae_thresh;
280  	u32 log_thresh;
281  };
282  
283  /* RBC reset definitions */
284  struct rbc_reset_defs {
285  	u32 reset_reg_addr;
286  	u32 reset_val[MAX_CHIP_IDS];
287  };
288  
289  /* Storm constant definitions.
290   * Addresses are in bytes, sizes are in quad-regs.
291   */
292  struct storm_defs {
293  	char letter;
294  	enum block_id sem_block_id;
295  	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
296  	bool has_vfc;
297  	u32 sem_fast_mem_addr;
298  	u32 sem_frame_mode_addr;
299  	u32 sem_slow_enable_addr;
300  	u32 sem_slow_mode_addr;
301  	u32 sem_slow_mode1_conf_addr;
302  	u32 sem_sync_dbg_empty_addr;
303  	u32 sem_gpre_vect_addr;
304  	u32 cm_ctx_wr_addr;
305  	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
306  	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
307  };
308  
309  /* Debug Bus Constraint operation constant definitions */
310  struct dbg_bus_constraint_op_defs {
311  	u8 hw_op_val;
312  	bool is_cyclic;
313  };
314  
315  /* Storm Mode definitions */
316  struct storm_mode_defs {
317  	const char *name;
318  	bool is_fast_dbg;
319  	u8 id_in_hw;
320  	u32 src_disable_reg_addr;
321  	u32 src_enable_val;
322  	bool exists[MAX_CHIP_IDS];
323  };
324  
325  struct grc_param_defs {
326  	u32 default_val[MAX_CHIP_IDS];
327  	u32 min;
328  	u32 max;
329  	bool is_preset;
330  	bool is_persistent;
331  	u32 exclude_all_preset_val;
332  	u32 crash_preset_val[MAX_CHIP_IDS];
333  };
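
/* Each s_grc_param_defs entry below is initialized in this field order:
 * {default_val[bb, k2], min, max, is_preset, is_persistent,
 *  exclude_all_preset_val, crash_preset_val[bb, k2]}.
 */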
334  
335  /* Address is in 128b units. Width is in bits. */
336  struct rss_mem_defs {
337  	const char *mem_name;
338  	const char *type_name;
339  	u32 addr;
340  	u32 entry_width;
341  	u32 num_entries[MAX_CHIP_IDS];
342  };
343  
344  struct vfc_ram_defs {
345  	const char *mem_name;
346  	const char *type_name;
347  	u32 base_row;
348  	u32 num_rows;
349  };
350  
351  struct big_ram_defs {
352  	const char *instance_name;
353  	enum mem_groups mem_group_id;
354  	enum mem_groups ram_mem_group_id;
355  	enum dbg_grc_params grc_param;
356  	u32 addr_reg_addr;
357  	u32 data_reg_addr;
358  	u32 is_256b_reg_addr;
359  	u32 is_256b_bit_offset[MAX_CHIP_IDS];
360  	u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
361  };
362  
363  struct phy_defs {
364  	const char *phy_name;
365  
366  	/* PHY base GRC address */
367  	u32 base_addr;
368  
369  	/* Relative address of indirect TBUS address register (bits 0..7) */
370  	u32 tbus_addr_lo_addr;
371  
372  	/* Relative address of indirect TBUS address register (bits 8..10) */
373  	u32 tbus_addr_hi_addr;
374  
375  	/* Relative address of indirect TBUS data register (bits 0..7) */
376  	u32 tbus_data_lo_addr;
377  
378  	/* Relative address of indirect TBUS data register (bits 8..11) */
379  	u32 tbus_data_hi_addr;
380  };
381  
382  /* Split type definitions */
383  struct split_type_defs {
384  	const char *name;
385  };
386  
387  /******************************** Constants **********************************/
388  
389  #define BYTES_IN_DWORD			sizeof(u32)
390  /* In the macros below, size and offset are specified in bits */
391  #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
392  #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
393  #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
394  #define FIELD_DWORD_OFFSET(type, field) \
395  	 ((int)(FIELD_BIT_OFFSET(type, field) / 32))
396  #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
397  #define FIELD_BIT_MASK(type, field) \
398  	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
399  	 FIELD_DWORD_SHIFT(type, field))
400  
401  #define SET_VAR_FIELD(var, type, field, val) \
402  	do { \
403  		var[FIELD_DWORD_OFFSET(type, field)] &=	\
404  		(~FIELD_BIT_MASK(type, field));	\
405  		var[FIELD_DWORD_OFFSET(type, field)] |= \
406  		(val) << FIELD_DWORD_SHIFT(type, field); \
407  	} while (0)
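
/* Worked example (using the VFC CAM command field defined below):
 * FIELD_BIT_OFFSET(VFC_CAM_CMD, ROW) = 48 and FIELD_BIT_SIZE = 9, so
 * FIELD_DWORD_OFFSET = 48 / 32 = 1, FIELD_DWORD_SHIFT = 48 % 32 = 16 and
 * FIELD_BIT_MASK = ((1 << 9) - 1) << 16 = 0x01ff0000. Therefore
 * SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row) clears bits 16..24 of
 * cam_cmd[1] and ORs in (row << 16).
 */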
408  
409  #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
410  	do { \
411  		for (i = 0; i < (arr_size); i++) \
412  			qed_wr(dev, ptt, addr, (arr)[i]); \
413  	} while (0)
414  
415  #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
416  #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
417  
418  /* extra lines include a signature line + optional latency events line */
419  #define NUM_EXTRA_DBG_LINES(block) \
420  	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
421  #define NUM_DBG_LINES(block) \
422  	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
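
/* Example: a block with 10 debug bus lines and latency events exposes
 * 10 + 2 = 12 selectable lines; without latency events it exposes 11.
 */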
423  
424  #define USE_DMAE			true
425  #define PROTECT_WIDE_BUS		true
426  
427  #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
428  #define RAM_LINES_TO_BYTES(lines) \
429  	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
430  
431  #define REG_DUMP_LEN_SHIFT		24
432  #define MEM_DUMP_ENTRY_SIZE_DWORDS \
433  	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
434  
435  #define IDLE_CHK_RULE_SIZE_DWORDS \
436  	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
437  
438  #define IDLE_CHK_RESULT_HDR_DWORDS \
439  	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
440  
441  #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
442  	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
443  
444  #define PAGE_MEM_DESC_SIZE_DWORDS \
445  	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
446  
447  #define IDLE_CHK_MAX_ENTRIES_SIZE	32
448  
449  /* The sizes and offsets below are specified in bits */
450  #define VFC_CAM_CMD_STRUCT_SIZE		64
451  #define VFC_CAM_CMD_ROW_OFFSET		48
452  #define VFC_CAM_CMD_ROW_SIZE		9
453  #define VFC_CAM_ADDR_STRUCT_SIZE	16
454  #define VFC_CAM_ADDR_OP_OFFSET		0
455  #define VFC_CAM_ADDR_OP_SIZE		4
456  #define VFC_CAM_RESP_STRUCT_SIZE	256
457  #define VFC_RAM_ADDR_STRUCT_SIZE	16
458  #define VFC_RAM_ADDR_OP_OFFSET		0
459  #define VFC_RAM_ADDR_OP_SIZE		2
460  #define VFC_RAM_ADDR_ROW_OFFSET		2
461  #define VFC_RAM_ADDR_ROW_SIZE		10
462  #define VFC_RAM_RESP_STRUCT_SIZE	256
463  
464  #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
465  #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
466  #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
467  #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
468  #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
469  #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
470  
471  #define NUM_VFC_RAM_TYPES		4
472  
473  #define VFC_CAM_NUM_ROWS		512
474  
475  #define VFC_OPCODE_CAM_RD		14
476  #define VFC_OPCODE_RAM_RD		0
477  
478  #define NUM_RSS_MEM_TYPES		5
479  
480  #define NUM_BIG_RAM_TYPES		3
481  #define BIG_RAM_NAME_LEN		3
482  
483  #define NUM_PHY_TBUS_ADDRESSES		2048
484  #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
485  
486  #define RESET_REG_UNRESET_OFFSET	4
487  
488  #define STALL_DELAY_MS			500
489  
490  #define STATIC_DEBUG_LINE_DWORDS	9
491  
492  #define NUM_COMMON_GLOBAL_PARAMS	10
493  
494  #define MAX_RECURSION_DEPTH		10
495  
496  #define FW_IMG_KUKU                     0
497  #define FW_IMG_MAIN			1
498  #define FW_IMG_L2B                      2
499  
500  #define REG_FIFO_ELEMENT_DWORDS		2
501  #define REG_FIFO_DEPTH_ELEMENTS		32
502  #define REG_FIFO_DEPTH_DWORDS \
503  	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
504  
505  #define IGU_FIFO_ELEMENT_DWORDS		4
506  #define IGU_FIFO_DEPTH_ELEMENTS		64
507  #define IGU_FIFO_DEPTH_DWORDS \
508  	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
509  
510  #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
511  #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
512  #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
513  	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
514  	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
515  
516  #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
517  	(MCP_REG_SCRATCH + \
518  	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
519  
520  #define MAX_SW_PLTAFORM_STR_SIZE	64
521  
522  #define EMPTY_FW_VERSION_STR		"???_???_???_???"
523  #define EMPTY_FW_IMAGE_STR		"???????????????"
524  
525  /***************************** Constant Arrays *******************************/
526  
527  /* DBG block framing mode definitions, in descending preference order */
528  static struct framing_mode_defs s_framing_mode_defs[4] = {
529  	{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
530  	 DBG_BUS_SEMI_FRAME_MODE_4FAST,
531  	 10},
532  	{DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
533  	 10},
534  	{DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
535  	 DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
536  	{DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
537  	 DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
538  };
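
/* Descriptive note (inferred from the mode names above): in each framing mode,
 * blocks_dword_mask/storms_dword_mask indicate which of the four recorded
 * dwords per cycle carry HW-block data and which carry Storm data, e.g.
 * 4ST uses 0x0/0xf and 2ST_2HW uses 0x3/0xc.
 */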
539  
540  /* Chip constant definitions array */
541  static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
542  	{"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
543  	 s_framing_mode_defs},
544  	{"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
545  	 s_framing_mode_defs}
546  };
547  
548  /* Storm constant definitions array */
549  static struct storm_defs s_storm_defs[] = {
550  	/* Tstorm */
551  	{'T', BLOCK_TSEM,
552  		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
553  		true,
554  		TSEM_REG_FAST_MEMORY,
555  		TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
556  		TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
557  		TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
558  		TCM_REG_CTX_RBC_ACCS,
559  		{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
560  		 TCM_REG_SM_TASK_CTX},
561  		{{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
562  	},
563  
564  	/* Mstorm */
565  	{'M', BLOCK_MSEM,
566  		{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
567  		false,
568  		MSEM_REG_FAST_MEMORY,
569  		MSEM_REG_DBG_FRAME_MODE,
570  		MSEM_REG_SLOW_DBG_ACTIVE,
571  		MSEM_REG_SLOW_DBG_MODE,
572  		MSEM_REG_DBG_MODE1_CFG,
573  		MSEM_REG_SYNC_DBG_EMPTY,
574  		MSEM_REG_DBG_GPRE_VECT,
575  		MCM_REG_CTX_RBC_ACCS,
576  		{MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
577  		 MCM_REG_SM_TASK_CTX },
578  		{{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
579  	},
580  
581  	/* Ustorm */
582  	{'U', BLOCK_USEM,
583  		{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
584  		false,
585  		USEM_REG_FAST_MEMORY,
586  		USEM_REG_DBG_FRAME_MODE,
587  		USEM_REG_SLOW_DBG_ACTIVE,
588  		USEM_REG_SLOW_DBG_MODE,
589  		USEM_REG_DBG_MODE1_CFG,
590  		USEM_REG_SYNC_DBG_EMPTY,
591  		USEM_REG_DBG_GPRE_VECT,
592  		UCM_REG_CTX_RBC_ACCS,
593  		{UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
594  		 UCM_REG_SM_TASK_CTX},
595  		{{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
596  	},
597  
598  	/* Xstorm */
599  	{'X', BLOCK_XSEM,
600  		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
601  		false,
602  		XSEM_REG_FAST_MEMORY,
603  		XSEM_REG_DBG_FRAME_MODE,
604  		XSEM_REG_SLOW_DBG_ACTIVE,
605  		XSEM_REG_SLOW_DBG_MODE,
606  		XSEM_REG_DBG_MODE1_CFG,
607  		XSEM_REG_SYNC_DBG_EMPTY,
608  		XSEM_REG_DBG_GPRE_VECT,
609  		XCM_REG_CTX_RBC_ACCS,
610  		{XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
611  		{{9, 15, 0, 0}, {9, 15,	0, 0}} /* {bb} {k2} */
612  	},
613  
614  	/* Ystorm */
615  	{'Y', BLOCK_YSEM,
616  		{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
617  		false,
618  		YSEM_REG_FAST_MEMORY,
619  		YSEM_REG_DBG_FRAME_MODE,
620  		YSEM_REG_SLOW_DBG_ACTIVE,
621  		YSEM_REG_SLOW_DBG_MODE,
622  		YSEM_REG_DBG_MODE1_CFG,
623  		YSEM_REG_SYNC_DBG_EMPTY,
624  		YSEM_REG_DBG_GPRE_VECT,
625  		YCM_REG_CTX_RBC_ACCS,
626  		{YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
627  		 YCM_REG_SM_TASK_CTX},
628  		{{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
629  	},
630  
631  	/* Pstorm */
632  	{'P', BLOCK_PSEM,
633  		{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
634  		true,
635  		PSEM_REG_FAST_MEMORY,
636  		PSEM_REG_DBG_FRAME_MODE,
637  		PSEM_REG_SLOW_DBG_ACTIVE,
638  		PSEM_REG_SLOW_DBG_MODE,
639  		PSEM_REG_DBG_MODE1_CFG,
640  		PSEM_REG_SYNC_DBG_EMPTY,
641  		PSEM_REG_DBG_GPRE_VECT,
642  		PCM_REG_CTX_RBC_ACCS,
643  		{0, PCM_REG_SM_CON_CTX, 0, 0},
644  		{{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
645  	},
646  };
647  
648  static struct hw_type_defs s_hw_type_defs[] = {
649  	/* HW_TYPE_ASIC */
650  	{"asic", 1, 256, 32768},
651  	{"reserved", 0, 0, 0},
652  	{"reserved2", 0, 0, 0},
653  	{"reserved3", 0, 0, 0},
654  	{"reserved4", 0, 0, 0}
655  };
656  
657  static struct grc_param_defs s_grc_param_defs[] = {
658  	/* DBG_GRC_PARAM_DUMP_TSTORM */
659  	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
660  
661  	/* DBG_GRC_PARAM_DUMP_MSTORM */
662  	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
663  
664  	/* DBG_GRC_PARAM_DUMP_USTORM */
665  	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
666  
667  	/* DBG_GRC_PARAM_DUMP_XSTORM */
668  	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
669  
670  	/* DBG_GRC_PARAM_DUMP_YSTORM */
671  	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
672  
673  	/* DBG_GRC_PARAM_DUMP_PSTORM */
674  	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
675  
676  	/* DBG_GRC_PARAM_DUMP_REGS */
677  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
678  
679  	/* DBG_GRC_PARAM_DUMP_RAM */
680  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
681  
682  	/* DBG_GRC_PARAM_DUMP_PBUF */
683  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
684  
685  	/* DBG_GRC_PARAM_DUMP_IOR */
686  	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
687  
688  	/* DBG_GRC_PARAM_DUMP_VFC */
689  	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
690  
691  	/* DBG_GRC_PARAM_DUMP_CM_CTX */
692  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
693  
694  	/* DBG_GRC_PARAM_DUMP_ILT */
695  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
696  
697  	/* DBG_GRC_PARAM_DUMP_RSS */
698  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
699  
700  	/* DBG_GRC_PARAM_DUMP_CAU */
701  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
702  
703  	/* DBG_GRC_PARAM_DUMP_QM */
704  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
705  
706  	/* DBG_GRC_PARAM_DUMP_MCP */
707  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
708  
709  	/* DBG_GRC_PARAM_DUMP_DORQ */
710  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
711  
712  	/* DBG_GRC_PARAM_DUMP_CFC */
713  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
714  
715  	/* DBG_GRC_PARAM_DUMP_IGU */
716  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
717  
718  	/* DBG_GRC_PARAM_DUMP_BRB */
719  	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
720  
721  	/* DBG_GRC_PARAM_DUMP_BTB */
722  	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
723  
724  	/* DBG_GRC_PARAM_DUMP_BMB */
725  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
726  
727  	/* DBG_GRC_PARAM_RESERVED1 */
728  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
729  
730  	/* DBG_GRC_PARAM_DUMP_MULD */
731  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
732  
733  	/* DBG_GRC_PARAM_DUMP_PRS */
734  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
735  
736  	/* DBG_GRC_PARAM_DUMP_DMAE */
737  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
738  
739  	/* DBG_GRC_PARAM_DUMP_TM */
740  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
741  
742  	/* DBG_GRC_PARAM_DUMP_SDM */
743  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
744  
745  	/* DBG_GRC_PARAM_DUMP_DIF */
746  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
747  
748  	/* DBG_GRC_PARAM_DUMP_STATIC */
749  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
750  
751  	/* DBG_GRC_PARAM_UNSTALL */
752  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
753  
754  	/* DBG_GRC_PARAM_RESERVED2 */
755  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
756  
757  	/* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
758  	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
759  
760  	/* DBG_GRC_PARAM_EXCLUDE_ALL */
761  	{{0, 0}, 0, 1, true, false, 0, {0, 0}},
762  
763  	/* DBG_GRC_PARAM_CRASH */
764  	{{0, 0}, 0, 1, true, false, 0, {0, 0}},
765  
766  	/* DBG_GRC_PARAM_PARITY_SAFE */
767  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
768  
769  	/* DBG_GRC_PARAM_DUMP_CM */
770  	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
771  
772  	/* DBG_GRC_PARAM_DUMP_PHY */
773  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
774  
775  	/* DBG_GRC_PARAM_NO_MCP */
776  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
777  
778  	/* DBG_GRC_PARAM_NO_FW_VER */
779  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
780  
781  	/* DBG_GRC_PARAM_RESERVED3 */
782  	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
783  
784  	/* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
785  	{{0, 1}, 0, 1, false, false, 0, {0, 1}},
786  
787  	/* DBG_GRC_PARAM_DUMP_ILT_CDUC */
788  	{{1, 1}, 0, 1, false, false, 0, {0, 0}},
789  
790  	/* DBG_GRC_PARAM_DUMP_ILT_CDUT */
791  	{{1, 1}, 0, 1, false, false, 0, {0, 0}},
792  
793  	/* DBG_GRC_PARAM_DUMP_CAU_EXT */
794  	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
795  };
796  
797  static struct rss_mem_defs s_rss_mem_defs[] = {
798  	{"rss_mem_cid", "rss_cid", 0, 32,
799  	 {256, 320}},
800  
801  	{"rss_mem_key_msb", "rss_key", 1024, 256,
802  	 {128, 208}},
803  
804  	{"rss_mem_key_lsb", "rss_key", 2048, 64,
805  	 {128, 208}},
806  
807  	{"rss_mem_info", "rss_info", 3072, 16,
808  	 {128, 208}},
809  
810  	{"rss_mem_ind", "rss_ind", 4096, 16,
811  	 {16384, 26624}}
812  };
813  
814  static struct vfc_ram_defs s_vfc_ram_defs[] = {
815  	{"vfc_ram_tt1", "vfc_ram", 0, 512},
816  	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
817  	{"vfc_ram_stt2", "vfc_ram", 640, 32},
818  	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
819  };
820  
821  static struct big_ram_defs s_big_ram_defs[] = {
822  	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
823  	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
824  	 MISC_REG_BLOCK_256B_EN, {0, 0},
825  	 {153600, 180224}},
826  
827  	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
828  	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
829  	 MISC_REG_BLOCK_256B_EN, {0, 1},
830  	 {92160, 117760}},
831  
832  	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
833  	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
834  	 MISCS_REG_BLOCK_256B_EN, {0, 0},
835  	 {36864, 36864}}
836  };
837  
838  static struct rbc_reset_defs s_rbc_reset_defs[] = {
839  	{MISCS_REG_RESET_PL_HV,
840  	 {0x0, 0x400}},
841  	{MISC_REG_RESET_PL_PDA_VMAIN_1,
842  	 {0x4404040, 0x4404040}},
843  	{MISC_REG_RESET_PL_PDA_VMAIN_2,
844  	 {0x7, 0x7c00007}},
845  	{MISC_REG_RESET_PL_PDA_VAUX,
846  	 {0x2, 0x2}},
847  };
848  
849  static struct phy_defs s_phy_defs[] = {
850  	{"nw_phy", NWS_REG_NWS_CMU_K2,
851  	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
852  	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
853  	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
854  	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
855  	{"sgmii_phy", MS_REG_MS_CMU_K2,
856  	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
857  	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
858  	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
859  	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
860  	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
861  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
862  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
863  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
864  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
865  	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
866  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
867  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
868  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
869  	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
870  };
871  
872  static struct split_type_defs s_split_type_defs[] = {
873  	/* SPLIT_TYPE_NONE */
874  	{"eng"},
875  
876  	/* SPLIT_TYPE_PORT */
877  	{"port"},
878  
879  	/* SPLIT_TYPE_PF */
880  	{"pf"},
881  
882  	/* SPLIT_TYPE_PORT_PF */
883  	{"port"},
884  
885  	/* SPLIT_TYPE_VF */
886  	{"vf"}
887  };
888  
889  /******************************** Variables **********************************/
890  
891  /* The version of the calling app */
892  static u32 s_app_ver;
893  
894  /**************************** Private Functions ******************************/
895  
896  static void qed_static_asserts(void)
897  {
898  }
899  
900  /* Reads and returns a single dword from the specified unaligned buffer */
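/* Using memcpy (rather than dereferencing a u32 pointer) keeps the read safe
 * on architectures that fault on unaligned accesses.
 */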
901  static u32 qed_read_unaligned_dword(u8 *buf)
902  {
903  	u32 dword;
904  
905  	memcpy((u8 *)&dword, buf, sizeof(dword));
906  	return dword;
907  }
908  
909  /* Sets the value of the specified GRC param */
910  static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
911  			      enum dbg_grc_params grc_param, u32 val)
912  {
913  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
914  
915  	dev_data->grc.param_val[grc_param] = val;
916  }
917  
918  /* Returns the value of the specified GRC param */
919  static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
920  			     enum dbg_grc_params grc_param)
921  {
922  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
923  
924  	return dev_data->grc.param_val[grc_param];
925  }
926  
927  /* Initializes the GRC parameters */
928  static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
929  {
930  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
931  
932  	if (!dev_data->grc.params_initialized) {
933  		qed_dbg_grc_set_params_default(p_hwfn);
934  		dev_data->grc.params_initialized = 1;
935  	}
936  }
937  
938  /* Sets pointer and size for the specified binary buffer type */
939  static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
940  				enum bin_dbg_buffer_type buf_type,
941  				const u32 *ptr, u32 size)
942  {
943  	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
944  
945  	buf->ptr = (void *)ptr;
946  	buf->size = size;
947  }
948  
949  /* Initializes debug data for the specified device */
950  static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
951  {
952  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
953  	u8 num_pfs = 0, max_pfs_per_port = 0;
954  
955  	if (dev_data->initialized)
956  		return DBG_STATUS_OK;
957  
958  	if (!s_app_ver)
959  		return DBG_STATUS_APP_VERSION_NOT_SET;
960  
961  	/* Set chip */
962  	if (QED_IS_K2(p_hwfn->cdev)) {
963  		dev_data->chip_id = CHIP_K2;
964  		dev_data->mode_enable[MODE_K2] = 1;
965  		dev_data->num_vfs = MAX_NUM_VFS_K2;
966  		num_pfs = MAX_NUM_PFS_K2;
967  		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
968  	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
969  		dev_data->chip_id = CHIP_BB;
970  		dev_data->mode_enable[MODE_BB] = 1;
971  		dev_data->num_vfs = MAX_NUM_VFS_BB;
972  		num_pfs = MAX_NUM_PFS_BB;
973  		max_pfs_per_port = MAX_NUM_PFS_BB;
974  	} else {
975  		return DBG_STATUS_UNKNOWN_CHIP;
976  	}
977  
978  	/* Set HW type */
979  	dev_data->hw_type = HW_TYPE_ASIC;
980  	dev_data->mode_enable[MODE_ASIC] = 1;
981  
982  	/* Set port mode */
983  	switch (p_hwfn->cdev->num_ports_in_engine) {
984  	case 1:
985  		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
986  		break;
987  	case 2:
988  		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
989  		break;
990  	case 4:
991  		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
992  		break;
993  	}
994  
995  	/* Set 100G mode */
996  	if (QED_IS_CMT(p_hwfn->cdev))
997  		dev_data->mode_enable[MODE_100G] = 1;
998  
999  	/* Set number of ports */
1000  	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
1001  	    dev_data->mode_enable[MODE_100G])
1002  		dev_data->num_ports = 1;
1003  	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
1004  		dev_data->num_ports = 2;
1005  	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
1006  		dev_data->num_ports = 4;
1007  
1008  	/* Set number of PFs per port */
1009  	dev_data->num_pfs_per_port = min_t(u32,
1010  					   num_pfs / dev_data->num_ports,
1011  					   max_pfs_per_port);
1012  
1013  	/* Initializes the GRC parameters */
1014  	qed_dbg_grc_init_params(p_hwfn);
1015  
1016  	dev_data->use_dmae = true;
1017  	dev_data->initialized = 1;
1018  
1019  	return DBG_STATUS_OK;
1020  }
1021  
1022  static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
1023  					     enum block_id block_id)
1024  {
1025  	const struct dbg_block *dbg_block;
1026  
1027  	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
1028  	return dbg_block + block_id;
1029  }
1030  
1031  static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
1032  							       *p_hwfn,
1033  							       enum block_id
1034  							       block_id)
1035  {
1036  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1037  
1038  	return (const struct dbg_block_chip *)
1039  	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
1040  	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
1041  }
1042  
1043  static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
1044  							 *p_hwfn,
1045  							 u8 reset_reg_id)
1046  {
1047  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1048  
1049  	return (const struct dbg_reset_reg *)
1050  	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
1051  	    reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
1052  }
1053  
1054  /* Reads the FW info structure for the specified Storm from the chip,
1055   * and writes it to the specified fw_info pointer.
1056   */
1057  static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
1058  				   struct qed_ptt *p_ptt,
1059  				   u8 storm_id, struct fw_info *fw_info)
1060  {
1061  	struct storm_defs *storm = &s_storm_defs[storm_id];
1062  	struct fw_info_location fw_info_location;
1063  	u32 addr, i, size, *dest;
1064  
1065  	memset(&fw_info_location, 0, sizeof(fw_info_location));
1066  	memset(fw_info, 0, sizeof(*fw_info));
1067  
1068  	/* First, read the address that points to the fw_info location.
1069  	 * The address is located in the last line of the Storm RAM.
1070  	 */
1071  	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1072  	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1073  	    sizeof(fw_info_location);
1074  
1075  	dest = (u32 *)&fw_info_location;
1076  	size = BYTES_TO_DWORDS(sizeof(fw_info_location));
1077  
1078  	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
1079  		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1080  
1081  	/* Read FW version info from Storm RAM */
1082  	size = le32_to_cpu(fw_info_location.size);
1083  	if (!size || size > sizeof(*fw_info))
1084  		return;
1085  
1086  	addr = le32_to_cpu(fw_info_location.grc_addr);
1087  	dest = (u32 *)fw_info;
1088  	size = BYTES_TO_DWORDS(size);
1089  
1090  	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
1091  		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1092  }
1093  
1094  /* Dumps the specified string to the specified buffer.
1095   * Returns the dumped size in bytes.
1096   */
1097  static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1098  {
1099  	if (dump)
1100  		strcpy(dump_buf, str);
1101  
1102  	return (u32)strlen(str) + 1;
1103  }
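
/* Note: the qed_dump_*() helpers are typically called twice - once with
 * dump == false just to compute the required buffer size, and once with
 * dump == true to actually write the data - which is why they always return
 * the dumped size even when nothing is written.
 */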
1104  
1105  /* Dumps zeros to align the specified buffer to dwords.
1106   * Returns the dumped size in bytes.
1107   */
1108  static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1109  {
1110  	u8 offset_in_dword, align_size;
1111  
1112  	offset_in_dword = (u8)(byte_offset & 0x3);
1113  	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1114  
1115  	if (dump && align_size)
1116  		memset(dump_buf, 0, align_size);
1117  
1118  	return align_size;
1119  }
1120  
1121  /* Writes the specified string param to the specified buffer.
1122   * Returns the dumped size in dwords.
1123   */
1124  static u32 qed_dump_str_param(u32 *dump_buf,
1125  			      bool dump,
1126  			      const char *param_name, const char *param_val)
1127  {
1128  	char *char_buf = (char *)dump_buf;
1129  	u32 offset = 0;
1130  
1131  	/* Dump param name */
1132  	offset += qed_dump_str(char_buf + offset, dump, param_name);
1133  
1134  	/* Indicate a string param value */
1135  	if (dump)
1136  		*(char_buf + offset) = 1;
1137  	offset++;
1138  
1139  	/* Dump param value */
1140  	offset += qed_dump_str(char_buf + offset, dump, param_val);
1141  
1142  	/* Align buffer to next dword */
1143  	offset += qed_dump_align(char_buf + offset, dump, offset);
1144  
1145  	return BYTES_TO_DWORDS(offset);
1146  }
1147  
1148  /* Writes the specified numeric param to the specified buffer.
1149   * Returns the dumped size in dwords.
1150   */
1151  static u32 qed_dump_num_param(u32 *dump_buf,
1152  			      bool dump, const char *param_name, u32 param_val)
1153  {
1154  	char *char_buf = (char *)dump_buf;
1155  	u32 offset = 0;
1156  
1157  	/* Dump param name */
1158  	offset += qed_dump_str(char_buf + offset, dump, param_name);
1159  
1160  	/* Indicate a numeric param value */
1161  	if (dump)
1162  		*(char_buf + offset) = 0;
1163  	offset++;
1164  
1165  	/* Align buffer to next dword */
1166  	offset += qed_dump_align(char_buf + offset, dump, offset);
1167  
1168  	/* Dump param value (and change offset from bytes to dwords) */
1169  	offset = BYTES_TO_DWORDS(offset);
1170  	if (dump)
1171  		*(dump_buf + offset) = param_val;
1172  	offset++;
1173  
1174  	return offset;
1175  }
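
/* Resulting buffer layout for a dumped param: the NUL-terminated param name,
 * a single type byte (1 = string, 0 = numeric), then either the NUL-terminated
 * string value or, after padding to the next dword, the 32-bit numeric value.
 */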
1176  
1177  /* Reads the FW version and writes it as a param to the specified buffer.
1178   * Returns the dumped size in dwords.
1179   */
1180  static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1181  				 struct qed_ptt *p_ptt,
1182  				 u32 *dump_buf, bool dump)
1183  {
1184  	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1185  	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1186  	struct fw_info fw_info = { {0}, {0} };
1187  	u32 offset = 0;
1188  
1189  	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1190  		/* Read FW info from chip */
1191  		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1192  
1193  		/* Create FW version/image strings */
1194  		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1195  			     "%d_%d_%d_%d", fw_info.ver.num.major,
1196  			     fw_info.ver.num.minor, fw_info.ver.num.rev,
1197  			     fw_info.ver.num.eng) < 0)
1198  			DP_NOTICE(p_hwfn,
1199  				  "Unexpected debug error: invalid FW version string\n");
1200  		switch (fw_info.ver.image_id) {
1201  		case FW_IMG_KUKU:
1202  			strcpy(fw_img_str, "kuku");
1203  			break;
1204  		case FW_IMG_MAIN:
1205  			strcpy(fw_img_str, "main");
1206  			break;
1207  		case FW_IMG_L2B:
1208  			strcpy(fw_img_str, "l2b");
1209  			break;
1210  		default:
1211  			strcpy(fw_img_str, "unknown");
1212  			break;
1213  		}
1214  	}
1215  
1216  	/* Dump FW version, image and timestamp */
1217  	offset += qed_dump_str_param(dump_buf + offset,
1218  				     dump, "fw-version", fw_ver_str);
1219  	offset += qed_dump_str_param(dump_buf + offset,
1220  				     dump, "fw-image", fw_img_str);
1221  	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
1222  				     le32_to_cpu(fw_info.ver.timestamp));
1223  
1224  	return offset;
1225  }
1226  
1227  /* Reads the MFW version and writes it as a param to the specified buffer.
1228   * Returns the dumped size in dwords.
1229   */
1230  static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1231  				  struct qed_ptt *p_ptt,
1232  				  u32 *dump_buf, bool dump)
1233  {
1234  	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1235  
1236  	if (dump &&
1237  	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1238  		u32 global_section_offsize, global_section_addr, mfw_ver;
1239  		u32 public_data_addr, global_section_offsize_addr;
1240  
1241  		/* Find MCP public data GRC address. Needs to be ORed with
1242  		 * MCP_REG_SCRATCH due to a HW bug.
1243  		 */
1244  		public_data_addr = qed_rd(p_hwfn,
1245  					  p_ptt,
1246  					  MISC_REG_SHARED_MEM_ADDR) |
1247  				   MCP_REG_SCRATCH;
1248  
1249  		/* Find MCP public global section offset */
1250  		global_section_offsize_addr = public_data_addr +
1251  					      offsetof(struct mcp_public_data,
1252  						       sections) +
1253  					      sizeof(offsize_t) * PUBLIC_GLOBAL;
1254  		global_section_offsize = qed_rd(p_hwfn, p_ptt,
1255  						global_section_offsize_addr);
1256  		global_section_addr =
1257  			MCP_REG_SCRATCH +
1258  			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
1259  
1260  		/* Read MFW version from MCP public global section */
1261  		mfw_ver = qed_rd(p_hwfn, p_ptt,
1262  				 global_section_addr +
1263  				 offsetof(struct public_global, mfw_ver));
1264  
1265  		/* Dump MFW version param */
1266  		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
1267  			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
1268  			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
1269  			DP_NOTICE(p_hwfn,
1270  				  "Unexpected debug error: invalid MFW version string\n");
1271  	}
1272  
1273  	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1274  }
1275  
1276  /* Reads the chip revision from the chip and writes it as a param to the
1277   * specified buffer. Returns the dumped size in dwords.
1278   */
1279  static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1280  					struct qed_ptt *p_ptt,
1281  					u32 *dump_buf, bool dump)
1282  {
1283  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1284  	char param_str[3] = "??";
1285  
1286  	if (dev_data->hw_type == HW_TYPE_ASIC) {
1287  		u32 chip_rev, chip_metal;
1288  
1289  		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1290  		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1291  
1292  		param_str[0] = 'a' + (u8)chip_rev;
1293  		param_str[1] = '0' + (u8)chip_metal;
1294  	}
1295  
1296  	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1297  }
1298  
1299  /* Writes a section header to the specified buffer.
1300   * Returns the dumped size in dwords.
1301   */
1302  static u32 qed_dump_section_hdr(u32 *dump_buf,
1303  				bool dump, const char *name, u32 num_params)
1304  {
1305  	return qed_dump_num_param(dump_buf, dump, name, num_params);
1306  }
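
/* A section header is thus encoded as a numeric param whose name is the
 * section name and whose value is the number of params that follow it.
 */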
1307  
1308  /* Writes the common global params to the specified buffer.
1309   * Returns the dumped size in dwords.
1310   */
1311  static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1312  					 struct qed_ptt *p_ptt,
1313  					 u32 *dump_buf,
1314  					 bool dump,
1315  					 u8 num_specific_global_params)
1316  {
1317  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1318  	u32 offset = 0;
1319  	u8 num_params;
1320  
1321  	/* Dump global params section header */
1322  	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1323  		(dev_data->chip_id == CHIP_BB ? 1 : 0);
1324  	offset += qed_dump_section_hdr(dump_buf + offset,
1325  				       dump, "global_params", num_params);
1326  
1327  	/* Store params */
1328  	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1329  	offset += qed_dump_mfw_ver_param(p_hwfn,
1330  					 p_ptt, dump_buf + offset, dump);
1331  	offset += qed_dump_chip_revision_param(p_hwfn,
1332  					       p_ptt, dump_buf + offset, dump);
1333  	offset += qed_dump_num_param(dump_buf + offset,
1334  				     dump, "tools-version", TOOLS_VERSION);
1335  	offset += qed_dump_str_param(dump_buf + offset,
1336  				     dump,
1337  				     "chip",
1338  				     s_chip_defs[dev_data->chip_id].name);
1339  	offset += qed_dump_str_param(dump_buf + offset,
1340  				     dump,
1341  				     "platform",
1342  				     s_hw_type_defs[dev_data->hw_type].name);
1343  	offset += qed_dump_num_param(dump_buf + offset,
1344  				     dump, "pci-func", p_hwfn->abs_pf_id);
1345  	offset += qed_dump_num_param(dump_buf + offset,
1346  				     dump, "epoch", qed_get_epoch_time());
1347  	if (dev_data->chip_id == CHIP_BB)
1348  		offset += qed_dump_num_param(dump_buf + offset,
1349  					     dump, "path", QED_PATH_ID(p_hwfn));
1350  
1351  	return offset;
1352  }
1353  
1354  /* Writes the "last" section (including CRC) to the specified buffer at the
1355   * given offset. Returns the dumped size in dwords.
1356   */
1357  static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1358  {
1359  	u32 start_offset = offset;
1360  
1361  	/* Dump CRC section header */
1362  	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1363  
1364  	/* Calculate CRC32 and add it to the dword after the "last" section */
1365  	if (dump)
1366  		*(dump_buf + offset) = ~crc32(0xffffffff,
1367  					      (u8 *)dump_buf,
1368  					      DWORDS_TO_BYTES(offset));
1369  
1370  	offset++;
1371  
1372  	return offset - start_offset;
1373  }
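
/* The stored value is ~crc32(0xffffffff, buf, len) over the entire dump up to
 * and including the "last" section header, so a parser can validate a dump by
 * recomputing the CRC over the same range.
 */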
1374  
1375  /* Update the blocks' reset state */
1376  static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1377  					  struct qed_ptt *p_ptt)
1378  {
1379  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1380  	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1381  	u8 rst_reg_id;
1382  	u32 blk_id;
1383  
1384  	/* Read reset registers */
1385  	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
1386  		const struct dbg_reset_reg *rst_reg;
1387  		bool rst_reg_removed;
1388  		u32 rst_reg_addr;
1389  
1390  		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
1391  		rst_reg_removed = GET_FIELD(rst_reg->data,
1392  					    DBG_RESET_REG_IS_REMOVED);
1393  		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
1394  							 DBG_RESET_REG_ADDR));
1395  
1396  		if (!rst_reg_removed)
1397  			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
1398  						     rst_reg_addr);
1399  	}
1400  
1401  	/* Check if blocks are in reset */
1402  	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
1403  		const struct dbg_block_chip *blk;
1404  		bool has_rst_reg;
1405  		bool is_removed;
1406  
1407  		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
1408  		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1409  		has_rst_reg = GET_FIELD(blk->flags,
1410  					DBG_BLOCK_CHIP_HAS_RESET_REG);
1411  
1412  		if (!is_removed && has_rst_reg)
1413  			dev_data->block_in_reset[blk_id] =
1414  			    !(reg_val[blk->reset_reg_id] &
1415  			      BIT(blk->reset_reg_bit_offset));
1416  	}
1417  }
1418  
1419  /* is_mode_match recursive function */
1420  static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
1421  				  u16 *modes_buf_offset, u8 rec_depth)
1422  {
1423  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1424  	u8 *dbg_array;
1425  	bool arg1, arg2;
1426  	u8 tree_val;
1427  
1428  	if (rec_depth > MAX_RECURSION_DEPTH) {
1429  		DP_NOTICE(p_hwfn,
1430  			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
1431  		return false;
1432  	}
1433  
1434  	/* Get next element from modes tree buffer */
1435  	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1436  	tree_val = dbg_array[(*modes_buf_offset)++];
1437  
1438  	switch (tree_val) {
1439  	case INIT_MODE_OP_NOT:
1440  		return !qed_is_mode_match_rec(p_hwfn,
1441  					      modes_buf_offset, rec_depth + 1);
1442  	case INIT_MODE_OP_OR:
1443  	case INIT_MODE_OP_AND:
1444  		arg1 = qed_is_mode_match_rec(p_hwfn,
1445  					     modes_buf_offset, rec_depth + 1);
1446  		arg2 = qed_is_mode_match_rec(p_hwfn,
1447  					     modes_buf_offset, rec_depth + 1);
1448  		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1449  							arg2) : (arg1 && arg2);
1450  	default:
1451  		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1452  	}
1453  }
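
/* The modes tree buffer is a prefix (pre-order) encoding: operator entries
 * (NOT, OR, AND) are followed by their operand subtrees, while any other
 * value is a leaf whose (value - MAX_INIT_MODE_OPS) indexes mode_enable[].
 */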
1454  
1455  /* Returns true if the mode (specified using modes_buf_offset) is enabled */
1456  static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1457  {
1458  	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
1459  }
1460  
1461  /* Enable / disable the Debug block */
1462  static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1463  				     struct qed_ptt *p_ptt, bool enable)
1464  {
1465  	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1466  }
1467  
1468  /* Resets the Debug block */
1469  static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1470  				    struct qed_ptt *p_ptt)
1471  {
1472  	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1473  	const struct dbg_reset_reg *reset_reg;
1474  	const struct dbg_block_chip *block;
1475  
1476  	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1477  	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1478  	reset_reg_addr =
1479  	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1480  
1481  	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
1482  	new_reset_reg_val =
1483  	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1484  
1485  	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1486  	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1487  }
1488  
1489  /* Enable / disable Debug Bus clients according to the specified mask
1490   * (1 = enable, 0 = disable).
1491   */
1492  static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1493  				   struct qed_ptt *p_ptt, u32 client_mask)
1494  {
1495  	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
1496  }
1497  
1498  static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
1499  				    struct qed_ptt *p_ptt,
1500  				    enum block_id block_id,
1501  				    u8 line_id,
1502  				    u8 enable_mask,
1503  				    u8 right_shift,
1504  				    u8 force_valid_mask, u8 force_frame_mask)
1505  {
1506  	const struct dbg_block_chip *block =
1507  		qed_get_dbg_block_per_chip(p_hwfn, block_id);
1508  
1509  	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
1510  	       line_id);
1511  	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
1512  	       enable_mask);
1513  	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
1514  	       right_shift);
1515  	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
1516  	       force_valid_mask);
1517  	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1518  	       force_frame_mask);
1519  }
1520  
1521  /* Disable debug bus in all blocks */
1522  static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
1523  				   struct qed_ptt *p_ptt)
1524  {
1525  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1526  	u32 block_id;
1527  
1528  	/* Disable all blocks */
1529  	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
1530  		const struct dbg_block_chip *block_per_chip =
1531  		    qed_get_dbg_block_per_chip(p_hwfn,
1532  					       (enum block_id)block_id);
1533  
1534  		if (GET_FIELD(block_per_chip->flags,
1535  			      DBG_BLOCK_CHIP_IS_REMOVED) ||
1536  		    dev_data->block_in_reset[block_id])
1537  			continue;
1538  
1539  		/* Disable debug bus */
1540  		if (GET_FIELD(block_per_chip->flags,
1541  			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
1542  			u32 dbg_en_addr =
1543  				block_per_chip->dbg_dword_enable_reg_addr;
1544  			u16 modes_buf_offset =
1545  			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1546  				      DBG_MODE_HDR_MODES_BUF_OFFSET);
1547  			bool eval_mode =
1548  			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
1549  				      DBG_MODE_HDR_EVAL_MODE) > 0;
1550  
1551  			if (!eval_mode ||
1552  			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1553  				qed_wr(p_hwfn, p_ptt,
1554  				       DWORDS_TO_BYTES(dbg_en_addr),
1555  				       0);
1556  		}
1557  	}
1558  }
1559  
1560  /* Returns true if the specified entity (indicated by GRC param) should be
1561   * included in the dump, false otherwise.
1562   */
1563  static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1564  				enum dbg_grc_params grc_param)
1565  {
1566  	return qed_grc_get_param(p_hwfn, grc_param) > 0;
1567  }
1568  
1569  /* Returns the storm_id that matches the specified Storm letter,
1570   * or MAX_DBG_STORMS if invalid storm letter.
1571   */
1572  static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1573  {
1574  	u8 storm_id;
1575  
1576  	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1577  		if (s_storm_defs[storm_id].letter == storm_letter)
1578  			return (enum dbg_storms)storm_id;
1579  
1580  	return MAX_DBG_STORMS;
1581  }
1582  
1583  /* Returns true if the specified Storm should be included in the dump, false
1584   * otherwise.
1585   */
1586  static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1587  				      enum dbg_storms storm)
1588  {
1589  	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1590  }
1591  
1592  /* Returns true if the specified memory should be included in the dump, false
1593   * otherwise.
1594   */
1595  static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1596  				    enum block_id block_id, u8 mem_group_id)
1597  {
1598  	const struct dbg_block *block;
1599  	u8 i;
1600  
1601  	block = get_dbg_block(p_hwfn, block_id);
1602  
1603  	/* If the block is associated with a Storm, check Storm match */
1604  	if (block->associated_storm_letter) {
1605  		enum dbg_storms associated_storm_id =
1606  		    qed_get_id_from_letter(block->associated_storm_letter);
1607  
1608  		if (associated_storm_id == MAX_DBG_STORMS ||
1609  		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
1610  			return false;
1611  	}
1612  
1613  	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
1614  		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
1615  
1616  		if (mem_group_id == big_ram->mem_group_id ||
1617  		    mem_group_id == big_ram->ram_mem_group_id)
1618  			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
1619  	}
1620  
1621  	switch (mem_group_id) {
1622  	case MEM_GROUP_PXP_ILT:
1623  	case MEM_GROUP_PXP_MEM:
1624  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1625  	case MEM_GROUP_RAM:
1626  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1627  	case MEM_GROUP_PBUF:
1628  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1629  	case MEM_GROUP_CAU_MEM:
1630  	case MEM_GROUP_CAU_SB:
1631  	case MEM_GROUP_CAU_PI:
1632  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
1633  	case MEM_GROUP_CAU_MEM_EXT:
1634  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
1635  	case MEM_GROUP_QM_MEM:
1636  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
1637  	case MEM_GROUP_CFC_MEM:
1638  	case MEM_GROUP_CONN_CFC_MEM:
1639  	case MEM_GROUP_TASK_CFC_MEM:
1640  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
1641  		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
1642  	case MEM_GROUP_DORQ_MEM:
1643  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
1644  	case MEM_GROUP_IGU_MEM:
1645  	case MEM_GROUP_IGU_MSIX:
1646  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
1647  	case MEM_GROUP_MULD_MEM:
1648  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
1649  	case MEM_GROUP_PRS_MEM:
1650  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
1651  	case MEM_GROUP_DMAE_MEM:
1652  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
1653  	case MEM_GROUP_TM_MEM:
1654  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
1655  	case MEM_GROUP_SDM_MEM:
1656  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
1657  	case MEM_GROUP_TDIF_CTX:
1658  	case MEM_GROUP_RDIF_CTX:
1659  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
1660  	case MEM_GROUP_CM_MEM:
1661  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
1662  	case MEM_GROUP_IOR:
1663  		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
1664  	default:
1665  		return true;
1666  	}
1667  }
1668  
1669  /* Stalls or un-stalls all Storms, depending on the stall argument */
1670  static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1671  				 struct qed_ptt *p_ptt, bool stall)
1672  {
1673  	u32 reg_addr;
1674  	u8 storm_id;
1675  
1676  	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1677  		if (!qed_grc_is_storm_included(p_hwfn,
1678  					       (enum dbg_storms)storm_id))
1679  			continue;
1680  
1681  		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1682  		    SEM_FAST_REG_STALL_0;
1683  		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
1684  	}
1685  
1686  	msleep(STALL_DELAY_MS);
1687  }
1688  
1689  /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1690   * taken out of reset.
1691   */
1692  static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1693  				   struct qed_ptt *p_ptt, bool rbc_only)
1694  {
1695  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1696  	u8 chip_id = dev_data->chip_id;
1697  	u32 i;
1698  
1699  	/* Take RBCs out of reset */
1700  	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
1701  		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
1702  			qed_wr(p_hwfn,
1703  			       p_ptt,
1704  			       s_rbc_reset_defs[i].reset_reg_addr +
1705  			       RESET_REG_UNRESET_OFFSET,
1706  			       s_rbc_reset_defs[i].reset_val[chip_id]);
1707  
1708  	if (!rbc_only) {
1709  		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1710  		u8 reset_reg_id;
1711  		u32 block_id;
1712  
1713  		/* Fill reset regs values */
1714  		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1715  			bool is_removed, has_reset_reg, unreset_before_dump;
1716  			const struct dbg_block_chip *block;
1717  
1718  			block = qed_get_dbg_block_per_chip(p_hwfn,
1719  							   (enum block_id)
1720  							   block_id);
1721  			is_removed =
1722  			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1723  			has_reset_reg =
1724  			    GET_FIELD(block->flags,
1725  				      DBG_BLOCK_CHIP_HAS_RESET_REG);
1726  			unreset_before_dump =
1727  			    GET_FIELD(block->flags,
1728  				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
1729  
1730  			if (!is_removed && has_reset_reg && unreset_before_dump)
1731  				reg_val[block->reset_reg_id] |=
1732  				    BIT(block->reset_reg_bit_offset);
1733  		}
1734  
1735  		/* Write reset registers */
1736  		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
1737  		     reset_reg_id++) {
1738  			const struct dbg_reset_reg *reset_reg;
1739  			u32 reset_reg_addr;
1740  
1741  			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
1742  
1743  			if (GET_FIELD
1744  			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
1745  				continue;
1746  
1747  			if (reg_val[reset_reg_id]) {
1748  				reset_reg_addr =
1749  				    GET_FIELD(reset_reg->data,
1750  					      DBG_RESET_REG_ADDR);
1751  				qed_wr(p_hwfn,
1752  				       p_ptt,
1753  				       DWORDS_TO_BYTES(reset_reg_addr) +
1754  				       RESET_REG_UNRESET_OFFSET,
1755  				       reg_val[reset_reg_id]);
1756  			}
1757  		}
1758  	}
1759  }
1760  
1761  /* Returns the attention block data of the specified block */
1762  static const struct dbg_attn_block_type_data *
1763  qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1764  			enum block_id block_id, enum dbg_attn_type attn_type)
1765  {
1766  	const struct dbg_attn_block *base_attn_block_arr =
1767  	    (const struct dbg_attn_block *)
1768  	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1769  
1770  	return &base_attn_block_arr[block_id].per_type_data[attn_type];
1771  }
1772  
1773  /* Returns the attention registers of the specified block */
1774  static const struct dbg_attn_reg *
1775  qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1776  			enum block_id block_id, enum dbg_attn_type attn_type,
1777  			u8 *num_attn_regs)
1778  {
1779  	const struct dbg_attn_block_type_data *block_type_data =
1780  	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1781  
1782  	*num_attn_regs = block_type_data->num_regs;
1783  
1784  	return (const struct dbg_attn_reg *)
1785  		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1786  		block_type_data->regs_offset;
1787  }
1788  
1789  /* For each block, clear the status of all parities */
1790  static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
1791  				   struct qed_ptt *p_ptt)
1792  {
1793  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1794  	const struct dbg_attn_reg *attn_reg_arr;
1795  	u32 block_id, sts_clr_address;
1796  	u8 reg_idx, num_attn_regs;
1797  
1798  	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1799  		if (dev_data->block_in_reset[block_id])
1800  			continue;
1801  
1802  		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
1803  						       (enum block_id)block_id,
1804  						       ATTN_TYPE_PARITY,
1805  						       &num_attn_regs);
1806  
1807  		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
1808  			const struct dbg_attn_reg *reg_data =
1809  				&attn_reg_arr[reg_idx];
1810  			u16 modes_buf_offset;
1811  			bool eval_mode;
1812  
1813  			/* Check mode */
1814  			eval_mode = GET_FIELD(reg_data->mode.data,
1815  					      DBG_MODE_HDR_EVAL_MODE) > 0;
1816  			modes_buf_offset =
1817  				GET_FIELD(reg_data->mode.data,
1818  					  DBG_MODE_HDR_MODES_BUF_OFFSET);
1819  
1820  			sts_clr_address = reg_data->sts_clr_address;
1821  			/* If Mode match: clear parity status */
1822  			if (!eval_mode ||
1823  			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
1824  				qed_rd(p_hwfn, p_ptt,
1825  				       DWORDS_TO_BYTES(sts_clr_address));
1826  		}
1827  	}
1828  }
1829  
1830  /* Finds an image of the specified type in NVRAM. Returns its offset and size. */
1831  static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
1832  					    struct qed_ptt *p_ptt,
1833  					    u32 image_type,
1834  					    u32 *nvram_offset_bytes,
1835  					    u32 *nvram_size_bytes,
1836  					    bool b_can_sleep)
1837  {
1838  	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
1839  	struct mcp_file_att file_att;
1840  	int nvm_result;
1841  
1842  	/* Call NVRAM get file command */
1843  	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
1844  					p_ptt,
1845  					DRV_MSG_CODE_NVM_GET_FILE_ATT,
1846  					image_type,
1847  					&ret_mcp_resp,
1848  					&ret_mcp_param,
1849  					&ret_txn_size,
1850  					(u32 *)&file_att,
1851  					b_can_sleep);
1852  
1853  	/* Check response */
1854  	if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
1855  	    FW_MSG_CODE_NVM_OK)
1856  		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
1857  
1858  	/* Update return values */
1859  	*nvram_offset_bytes = file_att.nvm_start_addr;
1860  	*nvram_size_bytes = file_att.len;
1861  
1862  	DP_VERBOSE(p_hwfn,
1863  		   QED_MSG_DEBUG,
1864  		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
1865  		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
1866  
1867  	/* Check alignment */
1868  	if (*nvram_size_bytes & 0x3)
1869  		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
1870  
1871  	return DBG_STATUS_OK;
1872  }
1873  
1874  /* Reads data from NVRAM */
1875  static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
1876  				      struct qed_ptt *p_ptt,
1877  				      u32 nvram_offset_bytes,
1878  				      u32 nvram_size_bytes,
1879  				      u32 *ret_buf,
1880  				      bool b_can_sleep)
1881  {
1882  	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
1883  	s32 bytes_left = nvram_size_bytes;
1884  	u32 read_offset = 0, param = 0;
1885  
1886  	DP_VERBOSE(p_hwfn,
1887  		   QED_MSG_DEBUG,
1888  		   "nvram_read: reading image of size %d bytes from NVRAM\n",
1889  		   nvram_size_bytes);
1890  
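	/* Read the image in chunks: each iteration issues one NVM_READ_NVRAM
	 * mailbox command for at most MCP_DRV_NVM_BUF_LEN bytes and advances
	 * by the number of bytes actually returned.
	 */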
1891  	do {
1892  		bytes_to_copy =
1893  		    (bytes_left >
1894  		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
1895  
1896  		/* Call NVRAM read command */
1897  		SET_MFW_FIELD(param,
1898  			      DRV_MB_PARAM_NVM_OFFSET,
1899  			      nvram_offset_bytes + read_offset);
1900  		SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
1901  		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
1902  				       DRV_MSG_CODE_NVM_READ_NVRAM, param,
1903  				       &ret_mcp_resp,
1904  				       &ret_mcp_param, &ret_read_size,
1905  				       (u32 *)((u8 *)ret_buf + read_offset),
1906  				       b_can_sleep))
1907  			return DBG_STATUS_NVRAM_READ_FAILED;
1908  
1909  		/* Check response */
1910  		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
1911  			return DBG_STATUS_NVRAM_READ_FAILED;
1912  
1913  		/* Update read offset */
1914  		read_offset += ret_read_size;
1915  		bytes_left -= ret_read_size;
1916  	} while (bytes_left > 0);
1917  
1918  	return DBG_STATUS_OK;
1919  }
1920  
1921  /* Dumps GRC registers section header. Returns the dumped size in dwords.
1922   * The following parameters are dumped:
1923   * - count: no. of dumped entries
1924   * - split_type: split type
1925   * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1926   * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
1927   */
1928  static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1929  				 bool dump,
1930  				 u32 num_reg_entries,
1931  				 enum init_split_types split_type,
1932  				 u8 split_id, const char *reg_type_name)
1933  {
1934  	u8 num_params = 2 +
1935  	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1936  	u32 offset = 0;
1937  
1938  	offset += qed_dump_section_hdr(dump_buf + offset,
1939  				       dump, "grc_regs", num_params);
1940  	offset += qed_dump_num_param(dump_buf + offset,
1941  				     dump, "count", num_reg_entries);
1942  	offset += qed_dump_str_param(dump_buf + offset,
1943  				     dump, "split",
1944  				     s_split_type_defs[split_type].name);
1945  	if (split_type != SPLIT_TYPE_NONE)
1946  		offset += qed_dump_num_param(dump_buf + offset,
1947  					     dump, "id", split_id);
1948  	if (reg_type_name)
1949  		offset += qed_dump_str_param(dump_buf + offset,
1950  					     dump, "type", reg_type_name);
1951  
1952  	return offset;
1953  }
1954  
1955  /* Reads the specified registers into the specified buffer.
1956   * The addr and len arguments are specified in dwords.
1957   */
1958  void qed_read_regs(struct qed_hwfn *p_hwfn,
1959  		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1960  {
1961  	u32 i;
1962  
1963  	for (i = 0; i < len; i++)
1964  		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1965  }
1966  
1967  /* Dumps the GRC registers in the specified address range.
1968   * Returns the dumped size in dwords.
1969   * The addr and len arguments are specified in dwords.
1970   */
1971  static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
1972  				   struct qed_ptt *p_ptt,
1973  				   u32 *dump_buf,
1974  				   bool dump, u32 addr, u32 len, bool wide_bus,
1975  				   enum init_split_types split_type,
1976  				   u8 split_id)
1977  {
1978  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1979  	u8 port_id = 0, pf_id = 0, vf_id = 0;
1980  	bool read_using_dmae = false;
1981  	u32 thresh;
1982  	u16 fid;
1983  
1984  	if (!dump)
1985  		return len;
1986  
1987  	switch (split_type) {
1988  	case SPLIT_TYPE_PORT:
1989  		port_id = split_id;
1990  		break;
1991  	case SPLIT_TYPE_PF:
1992  		pf_id = split_id;
1993  		break;
1994  	case SPLIT_TYPE_PORT_PF:
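		/* PORT_PF split IDs enumerate the PFs of port 0 first, then
		 * port 1, and so on; PF IDs themselves are interleaved across
		 * ports (pf_id = port_id + num_ports * pf_index).
		 */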
1995  		port_id = split_id / dev_data->num_pfs_per_port;
1996  		pf_id = port_id + dev_data->num_ports *
1997  		    (split_id % dev_data->num_pfs_per_port);
1998  		break;
1999  	case SPLIT_TYPE_VF:
2000  		vf_id = split_id;
2001  		break;
2002  	default:
2003  		break;
2004  	}
2005  
2006  	/* Try reading using DMAE */
2007  	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
2008  	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
2009  	     (PROTECT_WIDE_BUS && wide_bus))) {
2010  		struct qed_dmae_params dmae_params;
2011  
2012  		/* Set DMAE params */
2013  		memset(&dmae_params, 0, sizeof(dmae_params));
2014  		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
2015  		switch (split_type) {
2016  		case SPLIT_TYPE_PORT:
2017  			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
2018  				  1);
2019  			dmae_params.port_id = port_id;
2020  			break;
2021  		case SPLIT_TYPE_PF:
2022  			SET_FIELD(dmae_params.flags,
2023  				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
2024  			dmae_params.src_pfid = pf_id;
2025  			break;
2026  		case SPLIT_TYPE_PORT_PF:
2027  			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
2028  				  1);
2029  			SET_FIELD(dmae_params.flags,
2030  				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
2031  			dmae_params.port_id = port_id;
2032  			dmae_params.src_pfid = pf_id;
2033  			break;
2034  		default:
2035  			break;
2036  		}
2037  
2038  		/* Execute DMAE command */
2039  		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
2040  						     p_ptt,
2041  						     DWORDS_TO_BYTES(addr),
2042  						     (u64)(uintptr_t)(dump_buf),
2043  						     len, &dmae_params);
2044  		if (!read_using_dmae) {
2045  			dev_data->use_dmae = 0;
2046  			DP_VERBOSE(p_hwfn,
2047  				   QED_MSG_DEBUG,
2048  				   "Failed reading from chip using DMAE, using GRC instead\n");
2049  		}
2050  	}
2051  
2052  	if (read_using_dmae)
2053  		goto print_log;
2054  
2055  	/* If not read using DMAE, read using GRC */
2056  
2057  	/* Set pretend */
2058  	if (split_type != dev_data->pretend.split_type ||
2059  	    split_id != dev_data->pretend.split_id) {
2060  		switch (split_type) {
2061  		case SPLIT_TYPE_PORT:
2062  			qed_port_pretend(p_hwfn, p_ptt, port_id);
2063  			break;
2064  		case SPLIT_TYPE_PF:
2065  			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2066  					  pf_id);
2067  			qed_fid_pretend(p_hwfn, p_ptt, fid);
2068  			break;
2069  		case SPLIT_TYPE_PORT_PF:
2070  			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2071  					  pf_id);
2072  			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
2073  			break;
2074  		case SPLIT_TYPE_VF:
2075  			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
2076  			      | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
2077  					  vf_id);
2078  			qed_fid_pretend(p_hwfn, p_ptt, fid);
2079  			break;
2080  		default:
2081  			break;
2082  		}
2083  
2084  		dev_data->pretend.split_type = (u8)split_type;
2085  		dev_data->pretend.split_id = split_id;
2086  	}
2087  
2088  	/* Read registers using GRC */
2089  	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
2090  
2091  print_log:
2092  	/* Print log */
2093  	dev_data->num_regs_read += len;
2094  	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
2095  	if ((dev_data->num_regs_read / thresh) >
2096  	    ((dev_data->num_regs_read - len) / thresh))
2097  		DP_VERBOSE(p_hwfn,
2098  			   QED_MSG_DEBUG,
2099  			   "Dumped %d registers...\n", dev_data->num_regs_read);
2100  
2101  	return len;
2102  }
2103  
2104  /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2105   * The addr and len arguments are specified in dwords.
2106   */
2107  static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2108  				      bool dump, u32 addr, u32 len)
2109  {
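	/* The sequence header is a single dword: the GRC address (in dwords)
	 * sits in the low bits, with the length (in dwords) packed above it
	 * at REG_DUMP_LEN_SHIFT.
	 */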
2110  	if (dump)
2111  		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2112  
2113  	return 1;
2114  }
2115  
2116  /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2117   * The addr and len arguments are specified in dwords.
2118   */
2119  static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2120  				  struct qed_ptt *p_ptt,
2121  				  u32 *dump_buf,
2122  				  bool dump, u32 addr, u32 len, bool wide_bus,
2123  				  enum init_split_types split_type, u8 split_id)
2124  {
2125  	u32 offset = 0;
2126  
2127  	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2128  	offset += qed_grc_dump_addr_range(p_hwfn,
2129  					  p_ptt,
2130  					  dump_buf + offset,
2131  					  dump, addr, len, wide_bus,
2132  					  split_type, split_id);
2133  
2134  	return offset;
2135  }
2136  
2137  /* Dumps GRC registers sequence with skip cycle.
2138   * Returns the dumped size in dwords.
2139   * - addr:	start GRC address in dwords
2140   * - total_len:	total no. of dwords to dump
2141   * - read_len:	no. of consecutive dwords to read
2142   * - skip_len:	no. of dwords to skip (and fill with zeros)
2143   */
2144  static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2145  				       struct qed_ptt *p_ptt,
2146  				       u32 *dump_buf,
2147  				       bool dump,
2148  				       u32 addr,
2149  				       u32 total_len,
2150  				       u32 read_len, u32 skip_len)
2151  {
2152  	u32 offset = 0, reg_offset = 0;
2153  
2154  	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2155  
2156  	if (!dump)
2157  		return offset + total_len;
2158  
2159  	while (reg_offset < total_len) {
2160  		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2161  
2162  		offset += qed_grc_dump_addr_range(p_hwfn,
2163  						  p_ptt,
2164  						  dump_buf + offset,
2165  						  dump,  addr, curr_len, false,
2166  						  SPLIT_TYPE_NONE, 0);
2167  		reg_offset += curr_len;
2168  		addr += curr_len;
2169  
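		/* Zero-fill the skipped registers so the dumped entry keeps
		 * the fixed read_len/skip_len layout.
		 */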
2170  		if (reg_offset < total_len) {
2171  			curr_len = min_t(u32, skip_len, total_len - skip_len);
2172  			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2173  			offset += curr_len;
2174  			reg_offset += curr_len;
2175  			addr += curr_len;
2176  		}
2177  	}
2178  
2179  	return offset;
2180  }
2181  
2182  /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2183  static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2184  				     struct qed_ptt *p_ptt,
2185  				     struct virt_mem_desc input_regs_arr,
2186  				     u32 *dump_buf,
2187  				     bool dump,
2188  				     enum init_split_types split_type,
2189  				     u8 split_id,
2190  				     bool block_enable[MAX_BLOCK_ID],
2191  				     u32 *num_dumped_reg_entries)
2192  {
2193  	u32 i, offset = 0, input_offset = 0;
2194  	bool mode_match = true;
2195  
2196  	*num_dumped_reg_entries = 0;
2197  
2198  	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
2199  		const struct dbg_dump_cond_hdr *cond_hdr =
2200  		    (const struct dbg_dump_cond_hdr *)
2201  		    input_regs_arr.ptr + input_offset++;
2202  		u16 modes_buf_offset;
2203  		bool eval_mode;
2204  
2205  		/* Check mode/block */
2206  		eval_mode = GET_FIELD(cond_hdr->mode.data,
2207  				      DBG_MODE_HDR_EVAL_MODE) > 0;
2208  		if (eval_mode) {
2209  			modes_buf_offset =
2210  				GET_FIELD(cond_hdr->mode.data,
2211  					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2212  			mode_match = qed_is_mode_match(p_hwfn,
2213  						       &modes_buf_offset);
2214  		}
2215  
2216  		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2217  			input_offset += cond_hdr->data_size;
2218  			continue;
2219  		}
2220  
2221  		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2222  			const struct dbg_dump_reg *reg =
2223  			    (const struct dbg_dump_reg *)
2224  			    input_regs_arr.ptr + input_offset;
2225  			u32 addr, len;
2226  			bool wide_bus;
2227  
2228  			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2229  			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2230  			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2231  			offset += qed_grc_dump_reg_entry(p_hwfn,
2232  							 p_ptt,
2233  							 dump_buf + offset,
2234  							 dump,
2235  							 addr,
2236  							 len,
2237  							 wide_bus,
2238  							 split_type, split_id);
2239  			(*num_dumped_reg_entries)++;
2240  		}
2241  	}
2242  
2243  	return offset;
2244  }
2245  
2246  /* Dumps the register entries of a single split. Returns the dumped size in dwords. */
2247  static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2248  				   struct qed_ptt *p_ptt,
2249  				   struct virt_mem_desc input_regs_arr,
2250  				   u32 *dump_buf,
2251  				   bool dump,
2252  				   bool block_enable[MAX_BLOCK_ID],
2253  				   enum init_split_types split_type,
2254  				   u8 split_id, const char *reg_type_name)
2255  {
2256  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2257  	enum init_split_types hdr_split_type = split_type;
2258  	u32 num_dumped_reg_entries, offset;
2259  	u8 hdr_split_id = split_id;
2260  
2261  	/* In PORT_PF split type, print a port split header */
2262  	if (split_type == SPLIT_TYPE_PORT_PF) {
2263  		hdr_split_type = SPLIT_TYPE_PORT;
2264  		hdr_split_id = split_id / dev_data->num_pfs_per_port;
2265  	}
2266  
2267  	/* Calculate register dump header size (and skip it for now) */
2268  	offset = qed_grc_dump_regs_hdr(dump_buf,
2269  				       false,
2270  				       0,
2271  				       hdr_split_type,
2272  				       hdr_split_id, reg_type_name);
2273  
2274  	/* Dump registers */
2275  	offset += qed_grc_dump_regs_entries(p_hwfn,
2276  					    p_ptt,
2277  					    input_regs_arr,
2278  					    dump_buf + offset,
2279  					    dump,
2280  					    split_type,
2281  					    split_id,
2282  					    block_enable,
2283  					    &num_dumped_reg_entries);
2284  
2285  	/* Write register dump header */
2286  	if (dump && num_dumped_reg_entries > 0)
2287  		qed_grc_dump_regs_hdr(dump_buf,
2288  				      dump,
2289  				      num_dumped_reg_entries,
2290  				      hdr_split_type,
2291  				      hdr_split_id, reg_type_name);
2292  
2293  	return num_dumped_reg_entries > 0 ? offset : 0;
2294  }
2295  
2296  /* Dumps registers according to the input registers array. Returns the dumped
2297   * size in dwords.
2298   */
2299  static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2300  				  struct qed_ptt *p_ptt,
2301  				  u32 *dump_buf,
2302  				  bool dump,
2303  				  bool block_enable[MAX_BLOCK_ID],
2304  				  const char *reg_type_name)
2305  {
2306  	struct virt_mem_desc *dbg_buf =
2307  	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
2308  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2309  	u32 offset = 0, input_offset = 0;
2310  
2311  	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2312  		const struct dbg_dump_split_hdr *split_hdr;
2313  		struct virt_mem_desc curr_input_regs_arr;
2314  		enum init_split_types split_type;
2315  		u16 split_count = 0;
2316  		u32 split_data_size;
2317  		u8 split_id;
2318  
2319  		split_hdr =
2320  		    (const struct dbg_dump_split_hdr *)
2321  		    dbg_buf->ptr + input_offset++;
2322  		split_type =
2323  		    GET_FIELD(split_hdr->hdr,
2324  			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2325  		split_data_size = GET_FIELD(split_hdr->hdr,
2326  					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2327  		curr_input_regs_arr.ptr =
2328  		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
2329  		    input_offset;
2330  		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
2331  
2332  		switch (split_type) {
2333  		case SPLIT_TYPE_NONE:
2334  			split_count = 1;
2335  			break;
2336  		case SPLIT_TYPE_PORT:
2337  			split_count = dev_data->num_ports;
2338  			break;
2339  		case SPLIT_TYPE_PF:
2340  		case SPLIT_TYPE_PORT_PF:
2341  			split_count = dev_data->num_ports *
2342  			    dev_data->num_pfs_per_port;
2343  			break;
2344  		case SPLIT_TYPE_VF:
2345  			split_count = dev_data->num_vfs;
2346  			break;
2347  		default:
2348  			return 0;
2349  		}
2350  
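		/* Dump this section once per split instance (e.g. once per
		 * port or per PF); the pretend context is switched to each
		 * instance when its registers are read.
		 */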
2351  		for (split_id = 0; split_id < split_count; split_id++)
2352  			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2353  							  curr_input_regs_arr,
2354  							  dump_buf + offset,
2355  							  dump, block_enable,
2356  							  split_type,
2357  							  split_id,
2358  							  reg_type_name);
2359  
2360  		input_offset += split_data_size;
2361  	}
2362  
2363  	/* Cancel pretends (pretend to original PF) */
2364  	if (dump) {
2365  		qed_fid_pretend(p_hwfn, p_ptt,
2366  				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2367  					    p_hwfn->rel_pf_id));
2368  		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2369  		dev_data->pretend.split_id = 0;
2370  	}
2371  
2372  	return offset;
2373  }
2374  
2375  /* Dump reset registers. Returns the dumped size in dwords. */
2376  static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2377  				   struct qed_ptt *p_ptt,
2378  				   u32 *dump_buf, bool dump)
2379  {
2380  	u32 offset = 0, num_regs = 0;
2381  	u8 reset_reg_id;
2382  
2383  	/* Calculate header size */
2384  	offset += qed_grc_dump_regs_hdr(dump_buf,
2385  					false,
2386  					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2387  
2388  	/* Write reset registers */
2389  	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2390  	     reset_reg_id++) {
2391  		const struct dbg_reset_reg *reset_reg;
2392  		u32 reset_reg_addr;
2393  
2394  		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
2395  
2396  		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2397  			continue;
2398  
2399  		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2400  		offset += qed_grc_dump_reg_entry(p_hwfn,
2401  						 p_ptt,
2402  						 dump_buf + offset,
2403  						 dump,
2404  						 reset_reg_addr,
2405  						 1, false, SPLIT_TYPE_NONE, 0);
2406  		num_regs++;
2407  	}
2408  
2409  	/* Write header */
2410  	if (dump)
2411  		qed_grc_dump_regs_hdr(dump_buf,
2412  				      true, num_regs, SPLIT_TYPE_NONE,
2413  				      0, "RESET_REGS");
2414  
2415  	return offset;
2416  }
2417  
2418  /* Dump registers that are modified during GRC Dump and therefore must be
2419   * dumped first. Returns the dumped size in dwords.
2420   */
2421  static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2422  				      struct qed_ptt *p_ptt,
2423  				      u32 *dump_buf, bool dump)
2424  {
2425  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2426  	u32 block_id, offset = 0, stall_regs_offset;
2427  	const struct dbg_attn_reg *attn_reg_arr;
2428  	u8 storm_id, reg_idx, num_attn_regs;
2429  	u32 num_reg_entries = 0;
2430  
2431  	/* Write empty header for attention registers */
2432  	offset += qed_grc_dump_regs_hdr(dump_buf,
2433  					false,
2434  					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2435  
2436  	/* Write parity registers */
2437  	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
2438  		if (dev_data->block_in_reset[block_id] && dump)
2439  			continue;
2440  
2441  		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
2442  						       (enum block_id)block_id,
2443  						       ATTN_TYPE_PARITY,
2444  						       &num_attn_regs);
2445  
2446  		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2447  			const struct dbg_attn_reg *reg_data =
2448  				&attn_reg_arr[reg_idx];
2449  			u16 modes_buf_offset;
2450  			bool eval_mode;
2451  			u32 addr;
2452  
2453  			/* Check mode */
2454  			eval_mode = GET_FIELD(reg_data->mode.data,
2455  					      DBG_MODE_HDR_EVAL_MODE) > 0;
2456  			modes_buf_offset =
2457  				GET_FIELD(reg_data->mode.data,
2458  					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2459  			if (eval_mode &&
2460  			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2461  				continue;
2462  
2463  			/* Mode match: read & dump registers */
2464  			addr = reg_data->mask_address;
2465  			offset += qed_grc_dump_reg_entry(p_hwfn,
2466  							 p_ptt,
2467  							 dump_buf + offset,
2468  							 dump,
2469  							 addr,
2470  							 1, false,
2471  							 SPLIT_TYPE_NONE, 0);
2472  			addr = GET_FIELD(reg_data->data,
2473  					 DBG_ATTN_REG_STS_ADDRESS);
2474  			offset += qed_grc_dump_reg_entry(p_hwfn,
2475  							 p_ptt,
2476  							 dump_buf + offset,
2477  							 dump,
2478  							 addr,
2479  							 1, false,
2480  							 SPLIT_TYPE_NONE, 0);
2481  			num_reg_entries += 2;
2482  		}
2483  	}
2484  
2485  	/* Overwrite header for attention registers */
2486  	if (dump)
2487  		qed_grc_dump_regs_hdr(dump_buf,
2488  				      true,
2489  				      num_reg_entries,
2490  				      SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2491  
2492  	/* Write empty header for stall registers */
2493  	stall_regs_offset = offset;
2494  	offset += qed_grc_dump_regs_hdr(dump_buf,
2495  					false, 0, SPLIT_TYPE_NONE, 0, "REGS");
2496  
2497  	/* Write Storm stall status registers */
2498  	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
2499  	     storm_id++) {
2500  		struct storm_defs *storm = &s_storm_defs[storm_id];
2501  		u32 addr;
2502  
2503  		if (dev_data->block_in_reset[storm->sem_block_id] && dump)
2504  			continue;
2505  
2506  		addr =
2507  		    BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
2508  				    SEM_FAST_REG_STALLED);
2509  		offset += qed_grc_dump_reg_entry(p_hwfn,
2510  						 p_ptt,
2511  						 dump_buf + offset,
2512  						 dump,
2513  						 addr,
2514  						 1,
2515  						 false, SPLIT_TYPE_NONE, 0);
2516  		num_reg_entries++;
2517  	}
2518  
2519  	/* Overwrite header for stall registers */
2520  	if (dump)
2521  		qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
2522  				      true,
2523  				      num_reg_entries,
2524  				      SPLIT_TYPE_NONE, 0, "REGS");
2525  
2526  	return offset;
2527  }
2528  
2529  /* Dumps registers that can't be represented in the debug arrays */
2530  static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2531  				     struct qed_ptt *p_ptt,
2532  				     u32 *dump_buf, bool dump)
2533  {
2534  	u32 offset = 0, addr;
2535  
2536  	offset += qed_grc_dump_regs_hdr(dump_buf,
2537  					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2538  
2539  	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
2540  	 * skipped).
2541  	 */
2542  	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2543  	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2544  					      p_ptt,
2545  					      dump_buf + offset,
2546  					      dump,
2547  					      addr,
2548  					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2549  					      7,
2550  					      1);
2551  	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2552  	offset +=
2553  	    qed_grc_dump_reg_entry_skip(p_hwfn,
2554  					p_ptt,
2555  					dump_buf + offset,
2556  					dump,
2557  					addr,
2558  					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2559  					7,
2560  					1);
2561  
2562  	return offset;
2563  }
2564  
2565  /* Dumps a GRC memory header (section and params). Returns the dumped size in
2566   * dwords. The following parameters are dumped:
2567   * - name:	   dumped only if it's not NULL.
2568   * - addr:	   in dwords, dumped only if name is NULL.
2569   * - len:	   in dwords, always dumped.
2570   * - width:	   dumped if it's not zero.
2571   * - packed:	   dumped only if it's not false.
2572   * - mem_group:	   always dumped.
2573   * - storm_letter: if non-zero, the memory is associated with a Storm and
2574   *		   the dumped name and type are prefixed with its letter.
2575   *
2576   */
2577  static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2578  				u32 *dump_buf,
2579  				bool dump,
2580  				const char *name,
2581  				u32 addr,
2582  				u32 len,
2583  				u32 bit_width,
2584  				bool packed,
2585  				const char *mem_group, char storm_letter)
2586  {
2587  	u8 num_params = 3;
2588  	u32 offset = 0;
2589  	char buf[64];
2590  
2591  	if (!len)
2592  		DP_NOTICE(p_hwfn,
2593  			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2594  
2595  	if (bit_width)
2596  		num_params++;
2597  	if (packed)
2598  		num_params++;
2599  
2600  	/* Dump section header */
2601  	offset += qed_dump_section_hdr(dump_buf + offset,
2602  				       dump, "grc_mem", num_params);
2603  
2604  	if (name) {
2605  		/* Dump name */
2606  		if (storm_letter) {
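			/* Prefix the name with the Storm letter,
			 * e.g. "TSTORM_<name>"
			 */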
2607  			strcpy(buf, "?STORM_");
2608  			buf[0] = storm_letter;
2609  			strcpy(buf + strlen(buf), name);
2610  		} else {
2611  			strcpy(buf, name);
2612  		}
2613  
2614  		offset += qed_dump_str_param(dump_buf + offset,
2615  					     dump, "name", buf);
2616  	} else {
2617  		/* Dump address */
2618  		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2619  
2620  		offset += qed_dump_num_param(dump_buf + offset,
2621  					     dump, "addr", addr_in_bytes);
2622  	}
2623  
2624  	/* Dump len */
2625  	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2626  
2627  	/* Dump bit width */
2628  	if (bit_width)
2629  		offset += qed_dump_num_param(dump_buf + offset,
2630  					     dump, "width", bit_width);
2631  
2632  	/* Dump packed */
2633  	if (packed)
2634  		offset += qed_dump_num_param(dump_buf + offset,
2635  					     dump, "packed", 1);
2636  
2637  	/* Dump reg type */
2638  	if (storm_letter) {
2639  		strcpy(buf, "?STORM_");
2640  		buf[0] = storm_letter;
2641  		strcpy(buf + strlen(buf), mem_group);
2642  	} else {
2643  		strcpy(buf, mem_group);
2644  	}
2645  
2646  	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2647  
2648  	return offset;
2649  }
2650  
2651  /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2652   * Returns the dumped size in dwords.
2653   * The addr and len arguments are specified in dwords.
2654   */
2655  static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2656  			    struct qed_ptt *p_ptt,
2657  			    u32 *dump_buf,
2658  			    bool dump,
2659  			    const char *name,
2660  			    u32 addr,
2661  			    u32 len,
2662  			    bool wide_bus,
2663  			    u32 bit_width,
2664  			    bool packed,
2665  			    const char *mem_group, char storm_letter)
2666  {
2667  	u32 offset = 0;
2668  
2669  	offset += qed_grc_dump_mem_hdr(p_hwfn,
2670  				       dump_buf + offset,
2671  				       dump,
2672  				       name,
2673  				       addr,
2674  				       len,
2675  				       bit_width,
2676  				       packed, mem_group, storm_letter);
2677  	offset += qed_grc_dump_addr_range(p_hwfn,
2678  					  p_ptt,
2679  					  dump_buf + offset,
2680  					  dump, addr, len, wide_bus,
2681  					  SPLIT_TYPE_NONE, 0);
2682  
2683  	return offset;
2684  }
2685  
2686  /* Dumps GRC memory entries. Returns the dumped size in dwords. */
2687  static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2688  				    struct qed_ptt *p_ptt,
2689  				    struct virt_mem_desc input_mems_arr,
2690  				    u32 *dump_buf, bool dump)
2691  {
2692  	u32 i, offset = 0, input_offset = 0;
2693  	bool mode_match = true;
2694  
2695  	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
2696  		const struct dbg_dump_cond_hdr *cond_hdr;
2697  		u16 modes_buf_offset;
2698  		u32 num_entries;
2699  		bool eval_mode;
2700  
2701  		cond_hdr =
2702  		    (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
2703  		    input_offset++;
2704  		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2705  
2706  		/* Check required mode */
2707  		eval_mode = GET_FIELD(cond_hdr->mode.data,
2708  				      DBG_MODE_HDR_EVAL_MODE) > 0;
2709  		if (eval_mode) {
2710  			modes_buf_offset =
2711  				GET_FIELD(cond_hdr->mode.data,
2712  					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2713  			mode_match = qed_is_mode_match(p_hwfn,
2714  						       &modes_buf_offset);
2715  		}
2716  
2717  		if (!mode_match) {
2718  			input_offset += cond_hdr->data_size;
2719  			continue;
2720  		}
2721  
2722  		for (i = 0; i < num_entries;
2723  		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2724  			const struct dbg_dump_mem *mem =
2725  			    (const struct dbg_dump_mem *)((u32 *)
2726  							  input_mems_arr.ptr
2727  							  + input_offset);
2728  			const struct dbg_block *block;
2729  			char storm_letter = 0;
2730  			u32 mem_addr, mem_len;
2731  			bool mem_wide_bus;
2732  			u8 mem_group_id;
2733  
2734  			mem_group_id = GET_FIELD(mem->dword0,
2735  						 DBG_DUMP_MEM_MEM_GROUP_ID);
2736  			if (mem_group_id >= MEM_GROUPS_NUM) {
2737  				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2738  				return 0;
2739  			}
2740  
2741  			if (!qed_grc_is_mem_included(p_hwfn,
2742  						     (enum block_id)
2743  						     cond_hdr->block_id,
2744  						     mem_group_id))
2745  				continue;
2746  
2747  			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
2748  			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
2749  			mem_wide_bus = GET_FIELD(mem->dword1,
2750  						 DBG_DUMP_MEM_WIDE_BUS);
2751  
2752  			block = get_dbg_block(p_hwfn,
2753  					      cond_hdr->block_id);
2754  
2755  			/* If memory is associated with Storm,
2756  			 * update storm details
2757  			 */
2758  			if (block->associated_storm_letter)
2759  				storm_letter = block->associated_storm_letter;
2760  
2761  			/* Dump memory */
2762  			offset += qed_grc_dump_mem(p_hwfn,
2763  						p_ptt,
2764  						dump_buf + offset,
2765  						dump,
2766  						NULL,
2767  						mem_addr,
2768  						mem_len,
2769  						mem_wide_bus,
2770  						0,
2771  						false,
2772  						s_mem_group_names[mem_group_id],
2773  						storm_letter);
2774  		}
2775  	}
2776  
2777  	return offset;
2778  }
2779  
2780  /* Dumps GRC memories according to the input array dump_mem.
2781   * Returns the dumped size in dwords.
2782   */
2783  static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2784  				 struct qed_ptt *p_ptt,
2785  				 u32 *dump_buf, bool dump)
2786  {
2787  	struct virt_mem_desc *dbg_buf =
2788  	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
2789  	u32 offset = 0, input_offset = 0;
2790  
2791  	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2792  		const struct dbg_dump_split_hdr *split_hdr;
2793  		struct virt_mem_desc curr_input_mems_arr;
2794  		enum init_split_types split_type;
2795  		u32 split_data_size;
2796  
2797  		split_hdr =
2798  		    (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
2799  		    input_offset++;
2800  		split_type = GET_FIELD(split_hdr->hdr,
2801  				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2802  		split_data_size = GET_FIELD(split_hdr->hdr,
2803  					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2804  		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
2805  		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
2806  
2807  		if (split_type == SPLIT_TYPE_NONE)
2808  			offset += qed_grc_dump_mem_entries(p_hwfn,
2809  							   p_ptt,
2810  							   curr_input_mems_arr,
2811  							   dump_buf + offset,
2812  							   dump);
2813  		else
2814  			DP_NOTICE(p_hwfn,
2815  				  "Dumping split memories is currently not supported\n");
2816  
2817  		input_offset += split_data_size;
2818  	}
2819  
2820  	return offset;
2821  }
2822  
2823  /* Dumps GRC context data for the specified Storm.
2824   * Returns the dumped size in dwords.
2825   * The lid_size argument is specified in quad-regs.
2826   */
2827  static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2828  				 struct qed_ptt *p_ptt,
2829  				 u32 *dump_buf,
2830  				 bool dump,
2831  				 const char *name,
2832  				 u32 num_lids,
2833  				 enum cm_ctx_types ctx_type, u8 storm_id)
2834  {
2835  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2836  	struct storm_defs *storm = &s_storm_defs[storm_id];
2837  	u32 i, lid, lid_size, total_size;
2838  	u32 rd_reg_addr, offset = 0;
2839  
2840  	/* Convert quad-regs to dwords */
2841  	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
2842  
2843  	if (!lid_size)
2844  		return 0;
2845  
2846  	total_size = num_lids * lid_size;
2847  
2848  	offset += qed_grc_dump_mem_hdr(p_hwfn,
2849  				       dump_buf + offset,
2850  				       dump,
2851  				       name,
2852  				       0,
2853  				       total_size,
2854  				       lid_size * 32,
2855  				       false, name, storm->letter);
2856  
2857  	if (!dump)
2858  		return offset + total_size;
2859  
2860  	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
2861  
2862  	/* Dump context data */
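	/* For each lid, program the Storm's CM context window with the dword
	 * index (upper bits) and the lid (lower bits), then read back a
	 * single context dword through the context read register.
	 */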
2863  	for (lid = 0; lid < num_lids; lid++) {
2864  		for (i = 0; i < lid_size; i++) {
2865  			qed_wr(p_hwfn,
2866  			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
2867  			offset += qed_grc_dump_addr_range(p_hwfn,
2868  							  p_ptt,
2869  							  dump_buf + offset,
2870  							  dump,
2871  							  rd_reg_addr,
2872  							  1,
2873  							  false,
2874  							  SPLIT_TYPE_NONE, 0);
2875  		}
2876  	}
2877  
2878  	return offset;
2879  }
2880  
2881  /* Dumps GRC contexts. Returns the dumped size in dwords. */
2882  static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2883  			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2884  {
2885  	u32 offset = 0;
2886  	u8 storm_id;
2887  
2888  	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2889  		if (!qed_grc_is_storm_included(p_hwfn,
2890  					       (enum dbg_storms)storm_id))
2891  			continue;
2892  
2893  		/* Dump Conn AG context */
2894  		offset += qed_grc_dump_ctx_data(p_hwfn,
2895  						p_ptt,
2896  						dump_buf + offset,
2897  						dump,
2898  						"CONN_AG_CTX",
2899  						NUM_OF_LCIDS,
2900  						CM_CTX_CONN_AG, storm_id);
2901  
2902  		/* Dump Conn ST context */
2903  		offset += qed_grc_dump_ctx_data(p_hwfn,
2904  						p_ptt,
2905  						dump_buf + offset,
2906  						dump,
2907  						"CONN_ST_CTX",
2908  						NUM_OF_LCIDS,
2909  						CM_CTX_CONN_ST, storm_id);
2910  
2911  		/* Dump Task AG context */
2912  		offset += qed_grc_dump_ctx_data(p_hwfn,
2913  						p_ptt,
2914  						dump_buf + offset,
2915  						dump,
2916  						"TASK_AG_CTX",
2917  						NUM_OF_LTIDS,
2918  						CM_CTX_TASK_AG, storm_id);
2919  
2920  		/* Dump Task ST context */
2921  		offset += qed_grc_dump_ctx_data(p_hwfn,
2922  						p_ptt,
2923  						dump_buf + offset,
2924  						dump,
2925  						"TASK_ST_CTX",
2926  						NUM_OF_LTIDS,
2927  						CM_CTX_TASK_ST, storm_id);
2928  	}
2929  
2930  	return offset;
2931  }
2932  
2933  #define VFC_STATUS_RESP_READY_BIT	0
2934  #define VFC_STATUS_BUSY_BIT		1
2935  #define VFC_STATUS_SENDING_CMD_BIT	2
2936  
2937  #define VFC_POLLING_DELAY_MS	1
2938  #define VFC_POLLING_COUNT		20
2939  
2940  /* Reads data from VFC. Returns the number of dwords read (0 on error).
2941   * Sizes are specified in dwords.
2942   */
2943  static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
2944  				      struct qed_ptt *p_ptt,
2945  				      struct storm_defs *storm,
2946  				      u32 *cmd_data,
2947  				      u32 cmd_size,
2948  				      u32 *addr_data,
2949  				      u32 addr_size,
2950  				      u32 resp_size, u32 *dump_buf)
2951  {
2952  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2953  	u32 vfc_status, polling_ms, polling_count = 0, i;
2954  	u32 reg_addr, sem_base;
2955  	bool is_ready = false;
2956  
2957  	sem_base = storm->sem_fast_mem_addr;
2958  	polling_ms = VFC_POLLING_DELAY_MS *
2959  	    s_hw_type_defs[dev_data->hw_type].delay_factor;
2960  
2961  	/* Write VFC command */
2962  	ARR_REG_WR(p_hwfn,
2963  		   p_ptt,
2964  		   sem_base + SEM_FAST_REG_VFC_DATA_WR,
2965  		   cmd_data, cmd_size);
2966  
2967  	/* Write VFC address */
2968  	ARR_REG_WR(p_hwfn,
2969  		   p_ptt,
2970  		   sem_base + SEM_FAST_REG_VFC_ADDR,
2971  		   addr_data, addr_size);
2972  
2973  	/* Read response */
2974  	for (i = 0; i < resp_size; i++) {
2975  		/* Poll until ready */
2976  		do {
2977  			reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
2978  			qed_grc_dump_addr_range(p_hwfn,
2979  						p_ptt,
2980  						&vfc_status,
2981  						true,
2982  						BYTES_TO_DWORDS(reg_addr),
2983  						1,
2984  						false, SPLIT_TYPE_NONE, 0);
2985  			is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
2986  
2987  			if (!is_ready) {
2988  				if (polling_count++ == VFC_POLLING_COUNT)
2989  					return 0;
2990  
2991  				msleep(polling_ms);
2992  			}
2993  		} while (!is_ready);
2994  
2995  		reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
2996  		qed_grc_dump_addr_range(p_hwfn,
2997  					p_ptt,
2998  					dump_buf + i,
2999  					true,
3000  					BYTES_TO_DWORDS(reg_addr),
3001  					1, false, SPLIT_TYPE_NONE, 0);
3002  	}
3003  
3004  	return resp_size;
3005  }
3006  
3007  /* Dump VFC CAM. Returns the dumped size in dwords. */
3008  static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3009  				struct qed_ptt *p_ptt,
3010  				u32 *dump_buf, bool dump, u8 storm_id)
3011  {
3012  	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3013  	struct storm_defs *storm = &s_storm_defs[storm_id];
3014  	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3015  	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3016  	u32 row, offset = 0;
3017  
3018  	offset += qed_grc_dump_mem_hdr(p_hwfn,
3019  				       dump_buf + offset,
3020  				       dump,
3021  				       "vfc_cam",
3022  				       0,
3023  				       total_size,
3024  				       256,
3025  				       false, "vfc_cam", storm->letter);
3026  
3027  	if (!dump)
3028  		return offset + total_size;
3029  
3030  	/* Prepare CAM address */
3031  	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3032  
3033  	/* Read VFC CAM data */
3034  	for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
3035  		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3036  		offset += qed_grc_dump_read_from_vfc(p_hwfn,
3037  						     p_ptt,
3038  						     storm,
3039  						     cam_cmd,
3040  						     VFC_CAM_CMD_DWORDS,
3041  						     cam_addr,
3042  						     VFC_CAM_ADDR_DWORDS,
3043  						     VFC_CAM_RESP_DWORDS,
3044  						     dump_buf + offset);
3045  	}
3046  
3047  	return offset;
3048  }
3049  
3050  /* Dump VFC RAM. Returns the dumped size in dwords. */
3051  static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3052  				struct qed_ptt *p_ptt,
3053  				u32 *dump_buf,
3054  				bool dump,
3055  				u8 storm_id, struct vfc_ram_defs *ram_defs)
3056  {
3057  	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3058  	struct storm_defs *storm = &s_storm_defs[storm_id];
3059  	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3060  	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3061  	u32 row, offset = 0;
3062  
3063  	offset += qed_grc_dump_mem_hdr(p_hwfn,
3064  				       dump_buf + offset,
3065  				       dump,
3066  				       ram_defs->mem_name,
3067  				       0,
3068  				       total_size,
3069  				       256,
3070  				       false,
3071  				       ram_defs->type_name,
3072  				       storm->letter);
3073  
3074  	if (!dump)
3075  		return offset + total_size;
3076  
3077  	/* Prepare RAM address */
3078  	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3079  
3080  	/* Read VFC RAM data */
3081  	for (row = ram_defs->base_row;
3082  	     row < ram_defs->base_row + ram_defs->num_rows; row++) {
3083  		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3084  		offset += qed_grc_dump_read_from_vfc(p_hwfn,
3085  						     p_ptt,
3086  						     storm,
3087  						     ram_cmd,
3088  						     VFC_RAM_CMD_DWORDS,
3089  						     ram_addr,
3090  						     VFC_RAM_ADDR_DWORDS,
3091  						     VFC_RAM_RESP_DWORDS,
3092  						     dump_buf + offset);
3093  	}
3094  
3095  	return offset;
3096  }
3097  
3098  /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3099  static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3100  			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3101  {
3102  	u8 storm_id, i;
3103  	u32 offset = 0;
3104  
3105  	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3106  		if (!qed_grc_is_storm_included(p_hwfn,
3107  					       (enum dbg_storms)storm_id) ||
3108  		    !s_storm_defs[storm_id].has_vfc)
3109  			continue;
3110  
3111  		/* Read CAM */
3112  		offset += qed_grc_dump_vfc_cam(p_hwfn,
3113  					       p_ptt,
3114  					       dump_buf + offset,
3115  					       dump, storm_id);
3116  
3117  		/* Read RAM */
3118  		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3119  			offset += qed_grc_dump_vfc_ram(p_hwfn,
3120  						       p_ptt,
3121  						       dump_buf + offset,
3122  						       dump,
3123  						       storm_id,
3124  						       &s_vfc_ram_defs[i]);
3125  	}
3126  
3127  	return offset;
3128  }
3129  
3130  /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3131  static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3132  			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3133  {
3134  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3135  	u32 offset = 0;
3136  	u8 rss_mem_id;
3137  
3138  	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3139  		u32 rss_addr, num_entries, total_dwords;
3140  		struct rss_mem_defs *rss_defs;
3141  		u32 addr, num_dwords_to_read;
3142  		bool packed;
3143  
3144  		rss_defs = &s_rss_mem_defs[rss_mem_id];
3145  		rss_addr = rss_defs->addr;
3146  		num_entries = rss_defs->num_entries[dev_data->chip_id];
3147  		total_dwords = (num_entries * rss_defs->entry_width) / 32;
3148  		packed = (rss_defs->entry_width == 16);
3149  
3150  		offset += qed_grc_dump_mem_hdr(p_hwfn,
3151  					       dump_buf + offset,
3152  					       dump,
3153  					       rss_defs->mem_name,
3154  					       0,
3155  					       total_dwords,
3156  					       rss_defs->entry_width,
3157  					       packed,
3158  					       rss_defs->type_name, 0);
3159  
3160  		/* Dump RSS data */
3161  		if (!dump) {
3162  			offset += total_dwords;
3163  			continue;
3164  		}
3165  
3166  		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
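		/* The RSS memory is read through an indirect interface:
		 * program the RAM address, read up to
		 * RSS_REG_RSS_RAM_DATA_SIZE dwords, then advance the address.
		 */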
3167  		while (total_dwords) {
3168  			num_dwords_to_read = min_t(u32,
3169  						   RSS_REG_RSS_RAM_DATA_SIZE,
3170  						   total_dwords);
3171  			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3172  			offset += qed_grc_dump_addr_range(p_hwfn,
3173  							  p_ptt,
3174  							  dump_buf + offset,
3175  							  dump,
3176  							  addr,
3177  							  num_dwords_to_read,
3178  							  false,
3179  							  SPLIT_TYPE_NONE, 0);
3180  			total_dwords -= num_dwords_to_read;
3181  			rss_addr++;
3182  		}
3183  	}
3184  
3185  	return offset;
3186  }
3187  
3188  /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3189  static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3190  				struct qed_ptt *p_ptt,
3191  				u32 *dump_buf, bool dump, u8 big_ram_id)
3192  {
3193  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3194  	u32 block_size, ram_size, offset = 0, reg_val, i;
3195  	char mem_name[12] = "???_BIG_RAM";
3196  	char type_name[8] = "???_RAM";
3197  	struct big_ram_defs *big_ram;
3198  
3199  	big_ram = &s_big_ram_defs[big_ram_id];
3200  	ram_size = big_ram->ram_size[dev_data->chip_id];
3201  
3202  	reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
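	/* A chip-specific bit reports whether this Big RAM is built from
	 * 256-byte or 128-byte blocks; the block size determines the bit
	 * width reported in the memory header below.
	 */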
3203  	block_size = reg_val &
3204  		     BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3205  									 : 128;
3206  
3207  	strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3208  	strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3209  
3210  	/* Dump memory header */
3211  	offset += qed_grc_dump_mem_hdr(p_hwfn,
3212  				       dump_buf + offset,
3213  				       dump,
3214  				       mem_name,
3215  				       0,
3216  				       ram_size,
3217  				       block_size * 8,
3218  				       false, type_name, 0);
3219  
3220  	/* Read and dump Big RAM data */
3221  	if (!dump)
3222  		return offset + ram_size;
3223  
3224  	/* Dump Big RAM */
3225  	for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3226  	     i++) {
3227  		u32 addr, len;
3228  
3229  		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3230  		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3231  		len = BRB_REG_BIG_RAM_DATA_SIZE;
3232  		offset += qed_grc_dump_addr_range(p_hwfn,
3233  						  p_ptt,
3234  						  dump_buf + offset,
3235  						  dump,
3236  						  addr,
3237  						  len,
3238  						  false, SPLIT_TYPE_NONE, 0);
3239  	}
3240  
3241  	return offset;
3242  }
3243  
3244  /* Dumps MCP scratchpad and other MCP-related data. Returns the dumped size in dwords. */
3245  static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3246  			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3247  {
3248  	bool block_enable[MAX_BLOCK_ID] = { 0 };
3249  	u32 offset = 0, addr;
3250  	bool halted = false;
3251  
3252  	/* Halt MCP */
3253  	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3254  		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3255  		if (!halted)
3256  			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3257  	}
3258  
3259  	/* Dump MCP scratchpad */
3260  	offset += qed_grc_dump_mem(p_hwfn,
3261  				   p_ptt,
3262  				   dump_buf + offset,
3263  				   dump,
3264  				   NULL,
3265  				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3266  				   MCP_REG_SCRATCH_SIZE,
3267  				   false, 0, false, "MCP", 0);
3268  
3269  	/* Dump MCP cpu_reg_file */
3270  	offset += qed_grc_dump_mem(p_hwfn,
3271  				   p_ptt,
3272  				   dump_buf + offset,
3273  				   dump,
3274  				   NULL,
3275  				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3276  				   MCP_REG_CPU_REG_FILE_SIZE,
3277  				   false, 0, false, "MCP", 0);
3278  
3279  	/* Dump MCP registers */
3280  	block_enable[BLOCK_MCP] = true;
3281  	offset += qed_grc_dump_registers(p_hwfn,
3282  					 p_ptt,
3283  					 dump_buf + offset,
3284  					 dump, block_enable, "MCP");
3285  
3286  	/* Dump required non-MCP registers */
3287  	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3288  					dump, 1, SPLIT_TYPE_NONE, 0,
3289  					"MCP");
3290  	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3291  	offset += qed_grc_dump_reg_entry(p_hwfn,
3292  					 p_ptt,
3293  					 dump_buf + offset,
3294  					 dump,
3295  					 addr,
3296  					 1,
3297  					 false, SPLIT_TYPE_NONE, 0);
3298  
3299  	/* Release MCP */
3300  	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3301  		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3302  
3303  	return offset;
3304  }
3305  
3306  /* Dumps the tbus indirect memory for all PHYs.
3307   * Returns the dumped size in dwords.
3308   */
3309  static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3310  			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3311  {
3312  	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3313  	char mem_name[32];
3314  	u8 phy_id;
3315  
3316  	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3317  		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3318  		struct phy_defs *phy_defs;
3319  		u8 *bytes_buf;
3320  
3321  		phy_defs = &s_phy_defs[phy_id];
3322  		addr_lo_addr = phy_defs->base_addr +
3323  			       phy_defs->tbus_addr_lo_addr;
3324  		addr_hi_addr = phy_defs->base_addr +
3325  			       phy_defs->tbus_addr_hi_addr;
3326  		data_lo_addr = phy_defs->base_addr +
3327  			       phy_defs->tbus_data_lo_addr;
3328  		data_hi_addr = phy_defs->base_addr +
3329  			       phy_defs->tbus_data_hi_addr;
3330  
3331  		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3332  			     phy_defs->phy_name) < 0)
3333  			DP_NOTICE(p_hwfn,
3334  				  "Unexpected debug error: invalid PHY memory name\n");
3335  
3336  		offset += qed_grc_dump_mem_hdr(p_hwfn,
3337  					       dump_buf + offset,
3338  					       dump,
3339  					       mem_name,
3340  					       0,
3341  					       PHY_DUMP_SIZE_DWORDS,
3342  					       16, true, mem_name, 0);
3343  
3344  		if (!dump) {
3345  			offset += PHY_DUMP_SIZE_DWORDS;
3346  			continue;
3347  		}
3348  
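		/* Sweep the tbus address space: for each (hi, lo) address
		 * pair, read one low data byte and one high data byte into
		 * the dump buffer.
		 */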
3349  		bytes_buf = (u8 *)(dump_buf + offset);
3350  		for (tbus_hi_offset = 0;
3351  		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3352  		     tbus_hi_offset++) {
3353  			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3354  			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3355  			     tbus_lo_offset++) {
3356  				qed_wr(p_hwfn,
3357  				       p_ptt, addr_lo_addr, tbus_lo_offset);
3358  				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3359  							    p_ptt,
3360  							    data_lo_addr);
3361  				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3362  							    p_ptt,
3363  							    data_hi_addr);
3364  			}
3365  		}
3366  
3367  		offset += PHY_DUMP_SIZE_DWORDS;
3368  	}
3369  
3370  	return offset;
3371  }
3372  
3373  /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
3374  static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3375  				    struct qed_ptt *p_ptt,
3376  				    u32 *dump_buf, bool dump)
3377  {
3378  	u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3379  	u32 hw_dump_size_dwords = 0, offset = 0;
3380  	enum dbg_status status;
3381  
3382  	/* Read HW dump image from NVRAM */
3383  	status = qed_find_nvram_image(p_hwfn,
3384  				      p_ptt,
3385  				      NVM_TYPE_HW_DUMP_OUT,
3386  				      &hw_dump_offset_bytes,
3387  				      &hw_dump_size_bytes,
3388  				      false);
3389  	if (status != DBG_STATUS_OK)
3390  		return 0;
3391  
3392  	hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3393  
3394  	/* Dump HW dump image section */
3395  	offset += qed_dump_section_hdr(dump_buf + offset,
3396  				       dump, "mcp_hw_dump", 1);
3397  	offset += qed_dump_num_param(dump_buf + offset,
3398  				     dump, "size", hw_dump_size_dwords);
3399  
3400  	/* Read MCP HW dump image into dump buffer */
3401  	if (dump && hw_dump_size_dwords) {
3402  		status = qed_nvram_read(p_hwfn,
3403  					p_ptt,
3404  					hw_dump_offset_bytes,
3405  					hw_dump_size_bytes,
3406  					dump_buf + offset,
3407  					false);
3408  		if (status != DBG_STATUS_OK) {
3409  			DP_NOTICE(p_hwfn,
3410  				  "Failed to read MCP HW Dump image from NVRAM\n");
3411  			return 0;
3412  		}
3413  	}
3414  	offset += hw_dump_size_dwords;
3415  
3416  	return offset;
3417  }
3418  
3419  /* Dumps Static Debug data. Returns the dumped size in dwords. */
3420  static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3421  				     struct qed_ptt *p_ptt,
3422  				     u32 *dump_buf, bool dump)
3423  {
3424  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3425  	u32 block_id, line_id, offset = 0, addr, len;
3426  
3427  	/* Don't dump static debug if a debug bus recording is in progress */
3428  	if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3429  		return 0;
3430  
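	/* Prepare the DBG block: reset it, select 8-HW framing mode with the
	 * internal buffer as the debug target, and enable it.
	 */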
3431  	if (dump) {
3432  		/* Disable debug bus in all blocks */
3433  		qed_bus_disable_blocks(p_hwfn, p_ptt);
3434  
3435  		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3436  		qed_wr(p_hwfn,
3437  		       p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
3438  		qed_wr(p_hwfn,
3439  		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3440  		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3441  		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3442  	}
3443  
3444  	/* Dump all static debug lines for each relevant block */
3445  	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3446  		const struct dbg_block_chip *block_per_chip;
3447  		const struct dbg_block *block;
3448  		bool is_removed, has_dbg_bus;
3449  		u16 modes_buf_offset;
3450  		u32 block_dwords;
3451  
3452  		block_per_chip =
3453  		    qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
3454  		is_removed = GET_FIELD(block_per_chip->flags,
3455  				       DBG_BLOCK_CHIP_IS_REMOVED);
3456  		has_dbg_bus = GET_FIELD(block_per_chip->flags,
3457  					DBG_BLOCK_CHIP_HAS_DBG_BUS);
3458  
3459  		if (!is_removed && has_dbg_bus &&
3460  		    GET_FIELD(block_per_chip->dbg_bus_mode.data,
3461  			      DBG_MODE_HDR_EVAL_MODE) > 0) {
3462  			modes_buf_offset =
3463  			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
3464  				      DBG_MODE_HDR_MODES_BUF_OFFSET);
3465  			if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
3466  				has_dbg_bus = false;
3467  		}
3468  
3469  		if (is_removed || !has_dbg_bus)
3470  			continue;
3471  
3472  		block_dwords = NUM_DBG_LINES(block_per_chip) *
3473  			       STATIC_DEBUG_LINE_DWORDS;
3474  
3475  		/* Dump static section params */
3476  		block = get_dbg_block(p_hwfn, (enum block_id)block_id);
3477  		offset += qed_grc_dump_mem_hdr(p_hwfn,
3478  					       dump_buf + offset,
3479  					       dump,
3480  					       block->name,
3481  					       0,
3482  					       block_dwords,
3483  					       32, false, "STATIC", 0);
3484  
3485  		if (!dump) {
3486  			offset += block_dwords;
3487  			continue;
3488  		}
3489  
3490  		/* If all lines are invalid - dump zeros */
3491  		if (dev_data->block_in_reset[block_id]) {
3492  			memset(dump_buf + offset, 0,
3493  			       DWORDS_TO_BYTES(block_dwords));
3494  			offset += block_dwords;
3495  			continue;
3496  		}
3497  
3498  		/* Enable block's client */
3499  		qed_bus_enable_clients(p_hwfn,
3500  				       p_ptt,
3501  				       BIT(block_per_chip->dbg_client_id));
3502  
3503  		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3504  		len = STATIC_DEBUG_LINE_DWORDS;
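		/* For each debug line, select it on the block's debug bus and
		 * read the resulting calendar output data.
		 */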
3505  		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
3506  		     line_id++) {
3507  			/* Configure debug line ID */
3508  			qed_bus_config_dbg_line(p_hwfn,
3509  						p_ptt,
3510  						(enum block_id)block_id,
3511  						(u8)line_id, 0xf, 0, 0, 0);
3512  
3513  			/* Read debug line info */
3514  			offset += qed_grc_dump_addr_range(p_hwfn,
3515  							  p_ptt,
3516  							  dump_buf + offset,
3517  							  dump,
3518  							  addr,
3519  							  len,
3520  							  true, SPLIT_TYPE_NONE,
3521  							  0);
3522  		}
3523  
3524  		/* Disable block's client and debug output */
3525  		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3526  		qed_bus_config_dbg_line(p_hwfn, p_ptt,
3527  					(enum block_id)block_id, 0, 0, 0, 0, 0);
3528  	}
3529  
3530  	if (dump) {
3531  		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3532  		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3533  	}
3534  
3535  	return offset;
3536  }
3537  
3538  /* Performs GRC Dump to the specified buffer.
3539   * Returns the dumped size in dwords.
3540   */
3541  static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3542  				    struct qed_ptt *p_ptt,
3543  				    u32 *dump_buf,
3544  				    bool dump, u32 *num_dumped_dwords)
3545  {
3546  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3547  	bool parities_masked = false;
3548  	u32 dwords_read, offset = 0;
3549  	u8 i;
3550  
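	/* When dump is false, the flow below only calculates the required
	 * dump buffer size in dwords.
	 */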
3551  	*num_dumped_dwords = 0;
3552  	dev_data->num_regs_read = 0;
3553  
3554  	/* Update reset state */
3555  	if (dump)
3556  		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3557  
3558  	/* Dump global params */
3559  	offset += qed_dump_common_global_params(p_hwfn,
3560  						p_ptt,
3561  						dump_buf + offset, dump, 4);
3562  	offset += qed_dump_str_param(dump_buf + offset,
3563  				     dump, "dump-type", "grc-dump");
3564  	offset += qed_dump_num_param(dump_buf + offset,
3565  				     dump,
3566  				     "num-lcids",
3567  				     NUM_OF_LCIDS);
3568  	offset += qed_dump_num_param(dump_buf + offset,
3569  				     dump,
3570  				     "num-ltids",
3571  				     NUM_OF_LTIDS);
3572  	offset += qed_dump_num_param(dump_buf + offset,
3573  				     dump, "num-ports", dev_data->num_ports);
3574  
3575  	/* Dump reset registers (dumped before taking blocks out of reset) */
3576  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3577  		offset += qed_grc_dump_reset_regs(p_hwfn,
3578  						  p_ptt,
3579  						  dump_buf + offset, dump);
3580  
3581  	/* Take all blocks out of reset (using reset registers) */
3582  	if (dump) {
3583  		qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3584  		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3585  	}
3586  
3587  	/* Disable all parities using MFW command */
3588  	if (dump &&
3589  	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3590  		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3591  		if (!parities_masked) {
3592  			DP_NOTICE(p_hwfn,
3593  				  "Failed to mask parities using MFW\n");
3594  			if (qed_grc_get_param
3595  			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3596  				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3597  		}
3598  	}
3599  
3600  	/* Dump modified registers (dumped before modifying them) */
3601  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3602  		offset += qed_grc_dump_modified_regs(p_hwfn,
3603  						     p_ptt,
3604  						     dump_buf + offset, dump);
3605  
3606  	/* Stall storms */
3607  	if (dump &&
3608  	    (qed_grc_is_included(p_hwfn,
3609  				 DBG_GRC_PARAM_DUMP_IOR) ||
3610  	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3611  		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3612  
3613  	/* Dump all regs */
3614  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3615  		bool block_enable[MAX_BLOCK_ID];
3616  
3617  		/* Dump all blocks except MCP */
3618  		for (i = 0; i < MAX_BLOCK_ID; i++)
3619  			block_enable[i] = true;
3620  		block_enable[BLOCK_MCP] = false;
3621  		offset += qed_grc_dump_registers(p_hwfn,
3622  						 p_ptt,
3623  						 dump_buf +
3624  						 offset,
3625  						 dump,
3626  						 block_enable, NULL);
3627  
3628  		/* Dump special registers */
3629  		offset += qed_grc_dump_special_regs(p_hwfn,
3630  						    p_ptt,
3631  						    dump_buf + offset, dump);
3632  	}
3633  
3634  	/* Dump memories */
3635  	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3636  
3637  	/* Dump MCP */
3638  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3639  		offset += qed_grc_dump_mcp(p_hwfn,
3640  					   p_ptt, dump_buf + offset, dump);
3641  
3642  	/* Dump context */
3643  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3644  		offset += qed_grc_dump_ctx(p_hwfn,
3645  					   p_ptt, dump_buf + offset, dump);
3646  
3647  	/* Dump RSS memories */
3648  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3649  		offset += qed_grc_dump_rss(p_hwfn,
3650  					   p_ptt, dump_buf + offset, dump);
3651  
3652  	/* Dump Big RAM */
3653  	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3654  		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3655  			offset += qed_grc_dump_big_ram(p_hwfn,
3656  						       p_ptt,
3657  						       dump_buf + offset,
3658  						       dump, i);
3659  
3660  	/* Dump VFC */
3661  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3662  		dwords_read = qed_grc_dump_vfc(p_hwfn,
3663  					       p_ptt, dump_buf + offset, dump);
3664  		offset += dwords_read;
3665  		if (!dwords_read)
3666  			return DBG_STATUS_VFC_READ_ERROR;
3667  	}
3668  
3669  	/* Dump PHY tbus */
3670  	if (qed_grc_is_included(p_hwfn,
3671  				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3672  	    CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3673  		offset += qed_grc_dump_phy(p_hwfn,
3674  					   p_ptt, dump_buf + offset, dump);
3675  
3676  	/* Dump MCP HW Dump */
3677  	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3678  	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
3679  		offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3680  						   p_ptt,
3681  						   dump_buf + offset, dump);
3682  
3683  	/* Dump static debug data (only if not during debug bus recording) */
3684  	if (qed_grc_is_included(p_hwfn,
3685  				DBG_GRC_PARAM_DUMP_STATIC) &&
3686  	    (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3687  		offset += qed_grc_dump_static_debug(p_hwfn,
3688  						    p_ptt,
3689  						    dump_buf + offset, dump);
3690  
3691  	/* Dump last section */
3692  	offset += qed_dump_last_section(dump_buf, offset, dump);
3693  
3694  	if (dump) {
3695  		/* Unstall storms */
3696  		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3697  			qed_grc_stall_storms(p_hwfn, p_ptt, false);
3698  
3699  		/* Clear parity status */
3700  		qed_grc_clear_all_prty(p_hwfn, p_ptt);
3701  
3702  		/* Enable all parities using MFW command */
3703  		if (parities_masked)
3704  			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3705  	}
3706  
3707  	*num_dumped_dwords = offset;
3708  
3709  	return DBG_STATUS_OK;
3710  }
3711  
3712  /* Writes the specified failing Idle Check rule to the specified buffer.
3713   * Returns the dumped size in dwords.
3714   */
3715  static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3716  				     struct qed_ptt *p_ptt,
3717  				     u32 *dump_buf,
3718  				     bool dump,
3719  				     u16 rule_id,
3720  				     const struct dbg_idle_chk_rule *rule,
3721  				     u16 fail_entry_id, u32 *cond_reg_values)
3722  {
3723  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3724  	const struct dbg_idle_chk_cond_reg *cond_regs;
3725  	const struct dbg_idle_chk_info_reg *info_regs;
3726  	u32 i, next_reg_offset = 0, offset = 0;
3727  	struct dbg_idle_chk_result_hdr *hdr;
3728  	const union dbg_idle_chk_reg *regs;
3729  	u8 reg_id;
3730  
3731  	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
3732  	regs = (const union dbg_idle_chk_reg *)
3733  		p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3734  		rule->reg_offset;
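	/* The rule's register array holds its condition registers first,
	 * followed by its info registers.
	 */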
3735  	cond_regs = &regs[0].cond_reg;
3736  	info_regs = &regs[rule->num_cond_regs].info_reg;
3737  
3738  	/* Dump rule data */
3739  	if (dump) {
3740  		memset(hdr, 0, sizeof(*hdr));
3741  		hdr->rule_id = rule_id;
3742  		hdr->mem_entry_id = fail_entry_id;
3743  		hdr->severity = rule->severity;
3744  		hdr->num_dumped_cond_regs = rule->num_cond_regs;
3745  	}
3746  
3747  	offset += IDLE_CHK_RESULT_HDR_DWORDS;
3748  
3749  	/* Dump condition register values */
3750  	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3751  		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3752  		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3753  
3754  		reg_hdr =
3755  		    (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
3756  
3757  		/* Write register header */
3758  		if (!dump) {
3759  			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3760  			    reg->entry_size;
3761  			continue;
3762  		}
3763  
3764  		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3765  		memset(reg_hdr, 0, sizeof(*reg_hdr));
3766  		reg_hdr->start_entry = reg->start_entry;
3767  		reg_hdr->size = reg->entry_size;
3768  		SET_FIELD(reg_hdr->data,
3769  			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3770  			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
3771  		SET_FIELD(reg_hdr->data,
3772  			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3773  
3774  		/* Write register values */
3775  		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
3776  			dump_buf[offset] = cond_reg_values[next_reg_offset];
3777  	}
3778  
3779  	/* Dump info register values */
3780  	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3781  		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3782  		u32 block_id;
3783  
3784  		/* Check if register's block is in reset */
3785  		if (!dump) {
3786  			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3787  			continue;
3788  		}
3789  
3790  		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3791  		if (block_id >= MAX_BLOCK_ID) {
3792  			DP_NOTICE(p_hwfn, "Invalid block_id\n");
3793  			return 0;
3794  		}
3795  
3796  		if (!dev_data->block_in_reset[block_id]) {
3797  			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3798  			bool wide_bus, eval_mode, mode_match = true;
3799  			u16 modes_buf_offset;
3800  			u32 addr;
3801  
3802  			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
3803  				  (dump_buf + offset);
3804  
3805  			/* Check mode */
3806  			eval_mode = GET_FIELD(reg->mode.data,
3807  					      DBG_MODE_HDR_EVAL_MODE) > 0;
3808  			if (eval_mode) {
3809  				modes_buf_offset =
3810  				    GET_FIELD(reg->mode.data,
3811  					      DBG_MODE_HDR_MODES_BUF_OFFSET);
3812  				mode_match =
3813  					qed_is_mode_match(p_hwfn,
3814  							  &modes_buf_offset);
3815  			}
3816  
3817  			if (!mode_match)
3818  				continue;
3819  
3820  			addr = GET_FIELD(reg->data,
3821  					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3822  			wide_bus = GET_FIELD(reg->data,
3823  					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
3824  
3825  			/* Write register header */
3826  			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3827  			hdr->num_dumped_info_regs++;
3828  			memset(reg_hdr, 0, sizeof(*reg_hdr));
3829  			reg_hdr->size = reg->size;
3830  			SET_FIELD(reg_hdr->data,
3831  				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3832  				  rule->num_cond_regs + reg_id);
3833  
3834  			/* Write register values */
3835  			offset += qed_grc_dump_addr_range(p_hwfn,
3836  							  p_ptt,
3837  							  dump_buf + offset,
3838  							  dump,
3839  							  addr,
3840  							  reg->size, wide_bus,
3841  							  SPLIT_TYPE_NONE, 0);
3842  		}
3843  	}
3844  
3845  	return offset;
3846  }
3847  
3848  /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3849  static u32
3850  qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3851  			       u32 *dump_buf, bool dump,
3852  			       const struct dbg_idle_chk_rule *input_rules,
3853  			       u32 num_input_rules, u32 *num_failing_rules)
3854  {
3855  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3856  	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3857  	u32 i, offset = 0;
3858  	u16 entry_id;
3859  	u8 reg_id;
3860  
3861  	*num_failing_rules = 0;
3862  
3863  	for (i = 0; i < num_input_rules; i++) {
3864  		const struct dbg_idle_chk_cond_reg *cond_regs;
3865  		const struct dbg_idle_chk_rule *rule;
3866  		const union dbg_idle_chk_reg *regs;
3867  		u16 num_reg_entries = 1;
3868  		bool check_rule = true;
3869  		const u32 *imm_values;
3870  
3871  		rule = &input_rules[i];
3872  		regs = (const union dbg_idle_chk_reg *)
3873  			p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3874  			rule->reg_offset;
3875  		cond_regs = &regs[0].cond_reg;
3876  		imm_values =
3877  		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3878  		    rule->imm_offset;
3879  
3880  		/* Check if all condition register blocks are out of reset, and
3881  		 * find maximal number of entries (all condition registers that
3882  		 * are memories must have the same size, which is > 1).
3883  		 */
3884  		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3885  		     reg_id++) {
3886  			u32 block_id =
3887  				GET_FIELD(cond_regs[reg_id].data,
3888  					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3889  
3890  			if (block_id >= MAX_BLOCK_ID) {
3891  				DP_NOTICE(p_hwfn, "Invalid block_id\n");
3892  				return 0;
3893  			}
3894  
3895  			check_rule = !dev_data->block_in_reset[block_id];
3896  			if (cond_regs[reg_id].num_entries > num_reg_entries)
3897  				num_reg_entries = cond_regs[reg_id].num_entries;
3898  		}
3899  
3900  		if (!check_rule && dump)
3901  			continue;
3902  
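		/* In size-calculation mode, assume the worst case in which
		 * every entry of every rule fails, so a full failure record
		 * is accounted for each entry.
		 */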
3903  		if (!dump) {
3904  			u32 entry_dump_size =
3905  				qed_idle_chk_dump_failure(p_hwfn,
3906  							  p_ptt,
3907  							  dump_buf + offset,
3908  							  false,
3909  							  rule->rule_id,
3910  							  rule,
3911  							  0,
3912  							  NULL);
3913  
3914  			offset += num_reg_entries * entry_dump_size;
3915  			(*num_failing_rules) += num_reg_entries;
3916  			continue;
3917  		}
3918  
3919  		/* Go over all register entries (number of entries is the same
3920  		 * for all condition registers).
3921  		 */
3922  		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3923  			u32 next_reg_offset = 0;
3924  
3925  			/* Read current entry of all condition registers */
3926  			for (reg_id = 0; reg_id < rule->num_cond_regs;
3927  			     reg_id++) {
3928  				const struct dbg_idle_chk_cond_reg *reg =
3929  					&cond_regs[reg_id];
3930  				u32 padded_entry_size, addr;
3931  				bool wide_bus;
3932  
3933  				/* Find GRC address (if it's a memory, the
3934  				 * address of the specific entry is calculated).
3935  				 */
3936  				addr = GET_FIELD(reg->data,
3937  						 DBG_IDLE_CHK_COND_REG_ADDRESS);
3938  				wide_bus =
3939  				    GET_FIELD(reg->data,
3940  					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
3941  				if (reg->num_entries > 1 ||
3942  				    reg->start_entry > 0) {
3943  					padded_entry_size =
3944  					   reg->entry_size > 1 ?
3945  					   roundup_pow_of_two(reg->entry_size) :
3946  					   1;
3947  					addr += (reg->start_entry + entry_id) *
3948  						padded_entry_size;
3949  				}
3950  
3951  				/* Read registers */
3952  				if (next_reg_offset + reg->entry_size >=
3953  				    IDLE_CHK_MAX_ENTRIES_SIZE) {
3954  					DP_NOTICE(p_hwfn,
3955  						  "idle check registers entry is too large\n");
3956  					return 0;
3957  				}
3958  
3959  				next_reg_offset +=
3960  				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
3961  							    cond_reg_values +
3962  							    next_reg_offset,
3963  							    dump, addr,
3964  							    reg->entry_size,
3965  							    wide_bus,
3966  							    SPLIT_TYPE_NONE, 0);
3967  			}
3968  
3969  			/* Call rule condition function.
3970  			 * If returns true, it's a failure.
3971  			 */
3972  			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3973  							imm_values)) {
3974  				offset += qed_idle_chk_dump_failure(p_hwfn,
3975  							p_ptt,
3976  							dump_buf + offset,
3977  							dump,
3978  							rule->rule_id,
3979  							rule,
3980  							entry_id,
3981  							cond_reg_values);
3982  				(*num_failing_rules)++;
3983  			}
3984  		}
3985  	}
3986  
3987  	return offset;
3988  }
3989  
3990  /* Performs Idle Check Dump to the specified buffer.
3991   * Returns the dumped size in dwords.
3992   */
3993  static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3994  			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3995  {
3996  	struct virt_mem_desc *dbg_buf =
3997  	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
3998  	u32 num_failing_rules_offset, offset = 0,
3999  	    input_offset = 0, num_failing_rules = 0;
4000  
4001  	/* Dump global params - the value 1 must match the number of params below */
4002  	offset += qed_dump_common_global_params(p_hwfn,
4003  						p_ptt,
4004  						dump_buf + offset, dump, 1);
4005  	offset += qed_dump_str_param(dump_buf + offset,
4006  				     dump, "dump-type", "idle-chk");
4007  
4008  	/* Dump idle check section header with a single parameter */
4009  	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4010  	num_failing_rules_offset = offset;
4011  	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4012  
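	/* The rules binary buffer is a sequence of condition headers, each
	 * followed by its rule entries (data_size dwords).
	 */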
4013  	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
4014  		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4015  		    (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
4016  		    input_offset++;
4017  		bool eval_mode, mode_match = true;
4018  		u32 curr_failing_rules;
4019  		u16 modes_buf_offset;
4020  
4021  		/* Check mode */
4022  		eval_mode = GET_FIELD(cond_hdr->mode.data,
4023  				      DBG_MODE_HDR_EVAL_MODE) > 0;
4024  		if (eval_mode) {
4025  			modes_buf_offset =
4026  				GET_FIELD(cond_hdr->mode.data,
4027  					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4028  			mode_match = qed_is_mode_match(p_hwfn,
4029  						       &modes_buf_offset);
4030  		}
4031  
4032  		if (mode_match) {
4033  			const struct dbg_idle_chk_rule *rule =
4034  			    (const struct dbg_idle_chk_rule *)((u32 *)
4035  							       dbg_buf->ptr
4036  							       + input_offset);
4037  			u32 num_input_rules =
4038  				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
4039  			offset +=
4040  			    qed_idle_chk_dump_rule_entries(p_hwfn,
4041  							   p_ptt,
4042  							   dump_buf +
4043  							   offset,
4044  							   dump,
4045  							   rule,
4046  							   num_input_rules,
4047  							   &curr_failing_rules);
4048  			num_failing_rules += curr_failing_rules;
4049  		}
4050  
4051  		input_offset += cond_hdr->data_size;
4052  	}
4053  
4054  	/* Overwrite num_rules parameter */
4055  	if (dump)
4056  		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4057  				   dump, "num_rules", num_failing_rules);
4058  
4059  	/* Dump last section */
4060  	offset += qed_dump_last_section(dump_buf, offset, dump);
4061  
4062  	return offset;
4063  }
4064  
4065  /* Get info on the MCP Trace data in the scratchpad:
4066   * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4067   * - trace_data_size (OUT): trace data size in bytes (without the header)
4068   */
4069  static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4070  						   struct qed_ptt *p_ptt,
4071  						   u32 *trace_data_grc_addr,
4072  						   u32 *trace_data_size)
4073  {
4074  	u32 spad_trace_offsize, signature;
4075  
4076  	/* Read trace section offsize structure from MCP scratchpad */
4077  	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4078  
4079  	/* Extract trace section address from offsize (in scratchpad) */
4080  	*trace_data_grc_addr =
4081  		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4082  
4083  	/* Read signature from MCP trace section */
4084  	signature = qed_rd(p_hwfn, p_ptt,
4085  			   *trace_data_grc_addr +
4086  			   offsetof(struct mcp_trace, signature));
4087  
4088  	if (signature != MFW_TRACE_SIGNATURE)
4089  		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4090  
4091  	/* Read trace size from MCP trace section */
4092  	*trace_data_size = qed_rd(p_hwfn,
4093  				  p_ptt,
4094  				  *trace_data_grc_addr +
4095  				  offsetof(struct mcp_trace, size));
4096  
4097  	return DBG_STATUS_OK;
4098  }
4099  
4100  /* Reads MCP trace meta data image from NVRAM
4101   * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4102   * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4103   *			      loaded from file).
4104   * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4105   */
4106  static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4107  						   struct qed_ptt *p_ptt,
4108  						   u32 trace_data_size_bytes,
4109  						   u32 *running_bundle_id,
4110  						   u32 *trace_meta_offset,
4111  						   u32 *trace_meta_size)
4112  {
4113  	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4114  
4115  	/* Read MCP trace section offsize structure from MCP scratchpad */
4116  	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4117  
4118  	/* Find running bundle ID */
4119  	running_mfw_addr =
4120  		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4121  		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4122  	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4123  	if (*running_bundle_id > 1)
4124  		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4125  
4126  	/* Find image in NVRAM */
4127  	nvram_image_type =
4128  	    (*running_bundle_id ==
4129  	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4130  	return qed_find_nvram_image(p_hwfn,
4131  				    p_ptt,
4132  				    nvram_image_type,
4133  				    trace_meta_offset,
4134  				    trace_meta_size,
4135  				    true);
4136  }
4137  
4138  /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4139  static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4140  					       struct qed_ptt *p_ptt,
4141  					       u32 nvram_offset_in_bytes,
4142  					       u32 size_in_bytes, u32 *buf)
4143  {
4144  	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4145  	enum dbg_status status;
4146  	u32 signature;
4147  
4148  	/* Read meta data from NVRAM */
4149  	status = qed_nvram_read(p_hwfn,
4150  				p_ptt,
4151  				nvram_offset_in_bytes,
4152  				size_in_bytes,
4153  				buf,
4154  				true);
4155  	if (status != DBG_STATUS_OK)
4156  		return status;
4157  
4158  	/* Extract and check first signature */
4159  	signature = qed_read_unaligned_dword(byte_buf);
4160  	byte_buf += sizeof(signature);
4161  	if (signature != NVM_MAGIC_VALUE)
4162  		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4163  
4164  	/* Extract number of modules */
4165  	modules_num = *(byte_buf++);
4166  
4167  	/* Skip all modules */
4168  	for (i = 0; i < modules_num; i++) {
4169  		module_len = *(byte_buf++);
4170  		byte_buf += module_len;
4171  	}
4172  
4173  	/* Extract and check second signature */
4174  	signature = qed_read_unaligned_dword(byte_buf);
4175  	byte_buf += sizeof(signature);
4176  	if (signature != NVM_MAGIC_VALUE)
4177  		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4178  
4179  	return DBG_STATUS_OK;
4180  }
4181  
4182  /* Dump MCP Trace */
4183  static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4184  					  struct qed_ptt *p_ptt,
4185  					  u32 *dump_buf,
4186  					  bool dump, u32 *num_dumped_dwords)
4187  {
4188  	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4189  	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4190  	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4191  	enum dbg_status status;
4192  	int halted = 0;
4193  	bool use_mfw;
4194  
4195  	*num_dumped_dwords = 0;
4196  
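	/* use_mfw is false when the NO_MCP GRC parameter is set; in that case
	 * MCP is not halted and the running trace meta info is not queried.
	 */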
4197  	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4198  
4199  	/* Get trace data info */
4200  	status = qed_mcp_trace_get_data_info(p_hwfn,
4201  					     p_ptt,
4202  					     &trace_data_grc_addr,
4203  					     &trace_data_size_bytes);
4204  	if (status != DBG_STATUS_OK)
4205  		return status;
4206  
4207  	/* Dump global params */
4208  	offset += qed_dump_common_global_params(p_hwfn,
4209  						p_ptt,
4210  						dump_buf + offset, dump, 1);
4211  	offset += qed_dump_str_param(dump_buf + offset,
4212  				     dump, "dump-type", "mcp-trace");
4213  
4214  	/* Halt MCP while reading from the scratchpad so the read data is
4215  	 * consistent. If the halt fails, the MCP trace is taken anyway, with
4216  	 * a small risk that it may be corrupt.
4217  	 */
4218  	if (dump && use_mfw) {
4219  		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4220  		if (!halted)
4221  			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4222  	}
4223  
4224  	/* Find trace data size */
4225  	trace_data_size_dwords =
4226  	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4227  			 BYTES_IN_DWORD);
4228  
4229  	/* Dump trace data section header and param */
4230  	offset += qed_dump_section_hdr(dump_buf + offset,
4231  				       dump, "mcp_trace_data", 1);
4232  	offset += qed_dump_num_param(dump_buf + offset,
4233  				     dump, "size", trace_data_size_dwords);
4234  
4235  	/* Read trace data from scratchpad into dump buffer */
4236  	offset += qed_grc_dump_addr_range(p_hwfn,
4237  					  p_ptt,
4238  					  dump_buf + offset,
4239  					  dump,
4240  					  BYTES_TO_DWORDS(trace_data_grc_addr),
4241  					  trace_data_size_dwords, false,
4242  					  SPLIT_TYPE_NONE, 0);
4243  
4244  	/* Resume MCP (only if halt succeeded) */
4245  	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4246  		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4247  
4248  	/* Dump trace meta section header */
4249  	offset += qed_dump_section_hdr(dump_buf + offset,
4250  				       dump, "mcp_trace_meta", 1);
4251  
4252  	/* If MCP Trace meta size parameter was set, use it.
4253  	 * Otherwise, read trace meta.
4254  	 * trace_meta_size_bytes is dword-aligned.
4255  	 */
4256  	trace_meta_size_bytes =
4257  		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4258  	if ((!trace_meta_size_bytes || dump) && use_mfw)
4259  		status = qed_mcp_trace_get_meta_info(p_hwfn,
4260  						     p_ptt,
4261  						     trace_data_size_bytes,
4262  						     &running_bundle_id,
4263  						     &trace_meta_offset_bytes,
4264  						     &trace_meta_size_bytes);
4265  	if (status == DBG_STATUS_OK)
4266  		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4267  
4268  	/* Dump trace meta size param */
4269  	offset += qed_dump_num_param(dump_buf + offset,
4270  				     dump, "size", trace_meta_size_dwords);
4271  
4272  	/* Read trace meta image into dump buffer */
4273  	if (dump && trace_meta_size_dwords)
4274  		status = qed_mcp_trace_read_meta(p_hwfn,
4275  						 p_ptt,
4276  						 trace_meta_offset_bytes,
4277  						 trace_meta_size_bytes,
4278  						 dump_buf + offset);
4279  	if (status == DBG_STATUS_OK)
4280  		offset += trace_meta_size_dwords;
4281  
4282  	/* Dump last section */
4283  	offset += qed_dump_last_section(dump_buf, offset, dump);
4284  
4285  	*num_dumped_dwords = offset;
4286  
4287  	/* If no mcp access, indicate that the dump doesn't contain the meta
4288  	 * data from NVRAM.
4289  	 */
4290  	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4291  }
4292  
4293  /* Dump GRC FIFO */
4294  static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4295  					 struct qed_ptt *p_ptt,
4296  					 u32 *dump_buf,
4297  					 bool dump, u32 *num_dumped_dwords)
4298  {
4299  	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4300  	bool fifo_has_data;
4301  
4302  	*num_dumped_dwords = 0;
4303  
4304  	/* Dump global params */
4305  	offset += qed_dump_common_global_params(p_hwfn,
4306  						p_ptt,
4307  						dump_buf + offset, dump, 1);
4308  	offset += qed_dump_str_param(dump_buf + offset,
4309  				     dump, "dump-type", "reg-fifo");
4310  
4311  	/* Dump fifo data section header and param. The size param is 0 for
4312  	 * now, and is overwritten after reading the FIFO.
4313  	 */
4314  	offset += qed_dump_section_hdr(dump_buf + offset,
4315  				       dump, "reg_fifo_data", 1);
4316  	size_param_offset = offset;
4317  	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4318  
4319  	if (!dump) {
4320  		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4321  		 * test how much data is available, except for reading it.
4322  		 */
4323  		offset += REG_FIFO_DEPTH_DWORDS;
4324  		goto out;
4325  	}
4326  
4327  	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4328  			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4329  
4330  	/* Pull the available data from the FIFO. Use DMAE since this is
4331  	 * wide-bus memory and must be accessed atomically. Stop once
4332  	 * dwords_read reaches the buffer size, since more entries could be
4333  	 * added to the FIFO while we are emptying it.
4334  	 */
4335  	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4336  	len = REG_FIFO_ELEMENT_DWORDS;
4337  	for (dwords_read = 0;
4338  	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4339  	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4340  		offset += qed_grc_dump_addr_range(p_hwfn,
4341  						  p_ptt,
4342  						  dump_buf + offset,
4343  						  true,
4344  						  addr,
4345  						  len,
4346  						  true, SPLIT_TYPE_NONE,
4347  						  0);
4348  		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4349  				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4350  	}
4351  
4352  	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4353  			   dwords_read);
4354  out:
4355  	/* Dump last section */
4356  	offset += qed_dump_last_section(dump_buf, offset, dump);
4357  
4358  	*num_dumped_dwords = offset;
4359  
4360  	return DBG_STATUS_OK;
4361  }
4362  
4363  /* Dump IGU FIFO */
4364  static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4365  					 struct qed_ptt *p_ptt,
4366  					 u32 *dump_buf,
4367  					 bool dump, u32 *num_dumped_dwords)
4368  {
4369  	u32 dwords_read, size_param_offset, offset = 0, addr, len;
4370  	bool fifo_has_data;
4371  
4372  	*num_dumped_dwords = 0;
4373  
4374  	/* Dump global params */
4375  	offset += qed_dump_common_global_params(p_hwfn,
4376  						p_ptt,
4377  						dump_buf + offset, dump, 1);
4378  	offset += qed_dump_str_param(dump_buf + offset,
4379  				     dump, "dump-type", "igu-fifo");
4380  
4381  	/* Dump fifo data section header and param. The size param is 0 for
4382  	 * now, and is overwritten after reading the FIFO.
4383  	 */
4384  	offset += qed_dump_section_hdr(dump_buf + offset,
4385  				       dump, "igu_fifo_data", 1);
4386  	size_param_offset = offset;
4387  	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4388  
4389  	if (!dump) {
4390  		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4391  		 * test how much data is available, except for reading it.
4392  		 */
4393  		offset += IGU_FIFO_DEPTH_DWORDS;
4394  		goto out;
4395  	}
4396  
4397  	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4398  			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4399  
4400  	/* Pull the available data from the FIFO. Use DMAE since this is
4401  	 * wide-bus memory and must be accessed atomically. Stop once
4402  	 * dwords_read reaches the buffer size, since more entries could be
4403  	 * added to the FIFO while we are emptying it.
4404  	 */
4405  	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4406  	len = IGU_FIFO_ELEMENT_DWORDS;
4407  	for (dwords_read = 0;
4408  	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4409  	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4410  		offset += qed_grc_dump_addr_range(p_hwfn,
4411  						  p_ptt,
4412  						  dump_buf + offset,
4413  						  true,
4414  						  addr,
4415  						  len,
4416  						  true, SPLIT_TYPE_NONE,
4417  						  0);
4418  		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4419  				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4420  	}
4421  
4422  	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4423  			   dwords_read);
4424  out:
4425  	/* Dump last section */
4426  	offset += qed_dump_last_section(dump_buf, offset, dump);
4427  
4428  	*num_dumped_dwords = offset;
4429  
4430  	return DBG_STATUS_OK;
4431  }
4432  
4433  /* Protection Override dump */
4434  static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4435  						    struct qed_ptt *p_ptt,
4436  						    u32 *dump_buf,
4437  						    bool dump,
4438  						    u32 *num_dumped_dwords)
4439  {
4440  	u32 size_param_offset, override_window_dwords, offset = 0, addr;
4441  
4442  	*num_dumped_dwords = 0;
4443  
4444  	/* Dump global params */
4445  	offset += qed_dump_common_global_params(p_hwfn,
4446  						p_ptt,
4447  						dump_buf + offset, dump, 1);
4448  	offset += qed_dump_str_param(dump_buf + offset,
4449  				     dump, "dump-type", "protection-override");
4450  
4451  	/* Dump data section header and param. The size param is 0 for now,
4452  	 * and is overwritten after reading the data.
4453  	 */
4454  	offset += qed_dump_section_hdr(dump_buf + offset,
4455  				       dump, "protection_override_data", 1);
4456  	size_param_offset = offset;
4457  	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4458  
4459  	if (!dump) {
4460  		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4461  		goto out;
4462  	}
4463  
4464  	/* Add override window info to buffer */
4465  	override_window_dwords =
4466  		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4467  		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4468  	if (override_window_dwords) {
4469  		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4470  		offset += qed_grc_dump_addr_range(p_hwfn,
4471  						  p_ptt,
4472  						  dump_buf + offset,
4473  						  true,
4474  						  addr,
4475  						  override_window_dwords,
4476  						  true, SPLIT_TYPE_NONE, 0);
4477  		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4478  				   override_window_dwords);
4479  	}
4480  out:
4481  	/* Dump last section */
4482  	offset += qed_dump_last_section(dump_buf, offset, dump);
4483  
4484  	*num_dumped_dwords = offset;
4485  
4486  	return DBG_STATUS_OK;
4487  }
4488  
4489  /* Performs FW Asserts Dump to the specified buffer.
4490   * Returns the dumped size in dwords.
4491   */
4492  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4493  			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4494  {
4495  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4496  	struct fw_asserts_ram_section *asserts;
4497  	char storm_letter_str[2] = "?";
4498  	struct fw_info fw_info;
4499  	u32 offset = 0;
4500  	u8 storm_id;
4501  
4502  	/* Dump global params */
4503  	offset += qed_dump_common_global_params(p_hwfn,
4504  						p_ptt,
4505  						dump_buf + offset, dump, 1);
4506  	offset += qed_dump_str_param(dump_buf + offset,
4507  				     dump, "dump-type", "fw-asserts");
4508  
4509  	/* Find Storm dump size */
4510  	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4511  		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4512  		struct storm_defs *storm = &s_storm_defs[storm_id];
4513  		u32 last_list_idx, addr;
4514  
4515  		if (dev_data->block_in_reset[storm->sem_block_id])
4516  			continue;
4517  
4518  		/* Read FW info for the current Storm */
4519  		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4520  
4521  		asserts = &fw_info.fw_asserts_section;
4522  
4523  		/* Dump FW Asserts section header and params */
4524  		storm_letter_str[0] = storm->letter;
4525  		offset += qed_dump_section_hdr(dump_buf + offset,
4526  					       dump, "fw_asserts", 2);
4527  		offset += qed_dump_str_param(dump_buf + offset,
4528  					     dump, "storm", storm_letter_str);
4529  		offset += qed_dump_num_param(dump_buf + offset,
4530  					     dump,
4531  					     "size",
4532  					     asserts->list_element_dword_size);
4533  
4534  		/* Read and dump FW Asserts data */
4535  		if (!dump) {
4536  			offset += asserts->list_element_dword_size;
4537  			continue;
4538  		}
4539  
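		/* Compute the GRC address of the most recent assert list
		 * element in the Storm's internal RAM and dump it.
		 */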
4540  		addr = le16_to_cpu(asserts->section_ram_line_offset);
4541  		fw_asserts_section_addr = storm->sem_fast_mem_addr +
4542  					  SEM_FAST_REG_INT_RAM +
4543  					  RAM_LINES_TO_BYTES(addr);
4544  
4545  		next_list_idx_addr = fw_asserts_section_addr +
4546  			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4547  		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
4548  		last_list_idx = (next_list_idx > 0 ?
4549  				 next_list_idx :
4550  				 asserts->list_num_elements) - 1;
4551  		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4552  		       asserts->list_dword_offset +
4553  		       last_list_idx * asserts->list_element_dword_size;
4554  		offset +=
4555  		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4556  					    dump_buf + offset,
4557  					    dump, addr,
4558  					    asserts->list_element_dword_size,
4559  						  false, SPLIT_TYPE_NONE, 0);
4560  	}
4561  
4562  	/* Dump last section */
4563  	offset += qed_dump_last_section(dump_buf, offset, dump);
4564  
4565  	return offset;
4566  }
4567  
4568  /* Dumps the specified ILT pages to the specified buffer.
4569   * Returns the dumped size in dwords.
4570   */
4571  static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
4572  				    bool *dump, u32 start_page_id,
4573  				    u32 num_pages,
4574  				    struct phys_mem_desc *ilt_pages,
4575  				    bool dump_page_ids, u32 buf_size_in_dwords,
4576  				    u32 *given_actual_dump_size_in_dwords)
4577  {
4578  	u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
4579  	u32 page_id, end_page_id, offset = *given_offset;
4580  	struct phys_mem_desc *mem_desc = NULL;
4581  	bool continue_dump = *dump;
4582  	u32 partial_page_size = 0;
4583  
4584  	if (num_pages == 0)
4585  		return offset;
4586  
4587  	end_page_id = start_page_id + num_pages - 1;
4588  
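	/* Walk the requested ILT page range; pages with no virtual address
	 * (not allocated) are skipped.
	 */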
4589  	for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4590  		mem_desc = &ilt_pages[page_id];
4591  		if (!ilt_pages[page_id].virt_addr)
4592  			continue;
4593  
4594  		if (dump_page_ids) {
4595  			/* Copy page ID to dump buffer
4596  			 * (if dump is needed and buffer is not full)
4597  			 */
4598  			if ((continue_dump) &&
4599  			    (offset + 1 > buf_size_in_dwords)) {
4600  				continue_dump = false;
4601  				actual_dump_size_in_dwords = offset;
4602  			}
4603  			if (continue_dump)
4604  				*(dump_buf + offset) = page_id;
4605  			offset++;
4606  		} else {
4607  			/* Copy page memory to dump buffer */
4608  			if ((continue_dump) &&
4609  			    (offset + BYTES_TO_DWORDS(mem_desc->size) >
4610  			     buf_size_in_dwords)) {
4611  				if (offset + BYTES_TO_DWORDS(mem_desc->size) >
4612  				    buf_size_in_dwords) {
4613  					partial_page_size =
4614  					    buf_size_in_dwords - offset;
4615  					memcpy(dump_buf + offset,
4616  					       mem_desc->virt_addr,
4617  					       partial_page_size);
4618  					continue_dump = false;
4619  					actual_dump_size_in_dwords =
4620  					    offset + partial_page_size;
4621  				}
4622  			}
4623  
4624  			if (continue_dump)
4625  				memcpy(dump_buf + offset,
4626  				       mem_desc->virt_addr, mem_desc->size);
4627  			offset += BYTES_TO_DWORDS(mem_desc->size);
4628  		}
4629  	}
4630  
4631  	*dump = continue_dump;
4632  	*given_offset = offset;
4633  	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
4634  
4635  	return offset;
4636  }
4637  
4638  /* Dumps a section containing the dumped ILT pages.
4639   * Returns the dumped size in dwords.
4640   */
4641  static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
4642  				      u32 *dump_buf,
4643  				      u32 *given_offset,
4644  				      bool *dump,
4645  				      u32 valid_conn_pf_pages,
4646  				      u32 valid_conn_vf_pages,
4647  				      struct phys_mem_desc *ilt_pages,
4648  				      bool dump_page_ids,
4649  				      u32 buf_size_in_dwords,
4650  				      u32 *given_actual_dump_size_in_dwords)
4651  {
4652  	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4653  	u32 pf_start_line, start_page_id, offset = *given_offset;
4654  	u32 cdut_pf_init_pages, cdut_vf_init_pages;
4655  	u32 cdut_pf_work_pages, cdut_vf_work_pages;
4656  	u32 base_data_offset, size_param_offset;
4657  	u32 src_pages;
4658  	u32 section_header_and_param_size;
4659  	u32 cdut_pf_pages, cdut_vf_pages;
4660  	u32 actual_dump_size_in_dwords;
4661  	bool continue_dump = *dump;
4662  	bool update_size = *dump;
4663  	const char *section_name;
4664  	u32 i;
4665  
4666  	actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
4667  	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
4668  	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
4669  	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
4670  	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
4671  	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
4672  	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
4673  	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
4674  	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
4675  	section_header_and_param_size = qed_dump_section_hdr(NULL,
4676  							     false,
4677  							     section_name,
4678  							     1) +
4679  	qed_dump_num_param(NULL, false, "size", 0);
4680  
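	/* If there is no room left even for the section header, stop copying
	 * but keep advancing the offset so the full required size is still
	 * calculated.
	 */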
4681  	if ((continue_dump) &&
4682  	    (offset + section_header_and_param_size > buf_size_in_dwords)) {
4683  		continue_dump = false;
4684  		update_size = false;
4685  		actual_dump_size_in_dwords = offset;
4686  	}
4687  
4688  	offset += qed_dump_section_hdr(dump_buf + offset,
4689  				       continue_dump, section_name, 1);
4690  
4691  	/* Dump size parameter (0 for now, overwritten with real size later) */
4692  	size_param_offset = offset;
4693  	offset += qed_dump_num_param(dump_buf + offset,
4694  				     continue_dump, "size", 0);
4695  	base_data_offset = offset;
4696  
4697  	/* CDUC pages are ordered as follows:
4698  	 * - PF pages - valid section (included in PF connection type mapping)
4699  	 * - PF pages - invalid section (not dumped)
4700  	 * - For each VF in the PF:
4701  	 *   - VF pages - valid section (included in VF connection type mapping)
4702  	 *   - VF pages - invalid section (not dumped)
4703  	 */
4704  	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
4705  		/* Dump connection PF pages */
4706  		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
4707  		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
4708  					 start_page_id, valid_conn_pf_pages,
4709  					 ilt_pages, dump_page_ids,
4710  					 buf_size_in_dwords,
4711  					 &actual_dump_size_in_dwords);
4712  
4713  		/* Dump connection VF pages */
4714  		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
4715  		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4716  		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
4717  			qed_ilt_dump_pages_range(dump_buf, &offset,
4718  						 &continue_dump, start_page_id,
4719  						 valid_conn_vf_pages,
4720  						 ilt_pages, dump_page_ids,
4721  						 buf_size_in_dwords,
4722  						 &actual_dump_size_in_dwords);
4723  	}
4724  
4725  	/* CDUT pages are ordered as follows:
4726  	 * - PF init pages (not dumped)
4727  	 * - PF work pages
4728  	 * - For each VF in the PF:
4729  	 *   - VF init pages (not dumped)
4730  	 *   - VF work pages
4731  	 */
4732  	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
4733  		/* Dump task PF pages */
4734  		start_page_id = clients[ILT_CLI_CDUT].first.val +
4735  		    cdut_pf_init_pages - pf_start_line;
4736  		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
4737  					 start_page_id, cdut_pf_work_pages,
4738  					 ilt_pages, dump_page_ids,
4739  					 buf_size_in_dwords,
4740  					 &actual_dump_size_in_dwords);
4741  
4742  		/* Dump task VF pages */
4743  		start_page_id = clients[ILT_CLI_CDUT].first.val +
4744  		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
4745  		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4746  		     i++, start_page_id += cdut_vf_pages)
4747  			qed_ilt_dump_pages_range(dump_buf, &offset,
4748  						 &continue_dump, start_page_id,
4749  						 cdut_vf_work_pages, ilt_pages,
4750  						 dump_page_ids,
4751  						 buf_size_in_dwords,
4752  						 &actual_dump_size_in_dwords);
4753  	}
4754  
4755  	/* Dump Searcher pages */
4756  	if (clients[ILT_CLI_SRC].active) {
4757  		start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
4758  		src_pages = clients[ILT_CLI_SRC].last.val -
4759  		    clients[ILT_CLI_SRC].first.val + 1;
4760  		qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
4761  					 start_page_id, src_pages, ilt_pages,
4762  					 dump_page_ids, buf_size_in_dwords,
4763  					 &actual_dump_size_in_dwords);
4764  	}
4765  
4766  	/* Overwrite size param (use the actual size if the dump was truncated) */
4767  	if (update_size) {
4768  		u32 section_size = (*dump == continue_dump) ?
4769  		    offset - base_data_offset :
4770  		    actual_dump_size_in_dwords - base_data_offset;
4771  		if (section_size > 0)
4772  			qed_dump_num_param(dump_buf + size_param_offset,
4773  					   *dump, "size", section_size);
4774  		else if ((section_size == 0) && (*dump != continue_dump))
4775  			actual_dump_size_in_dwords -=
4776  			    section_header_and_param_size;
4777  	}
4778  
4779  	*dump = continue_dump;
4780  	*given_offset = offset;
4781  	*given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
4782  
4783  	return offset;
4784  }
4785  
4786  /* Dumps a section containing the global parameters.
4787   * Part of the ILT dump process.
4788   * Returns the dumped size in dwords.
4789   */
4790  static u32
4791  qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
4792  				       struct qed_ptt *p_ptt,
4793  				       u32 *dump_buf,
4794  				       bool dump,
4795  				       u32 cduc_page_size,
4796  				       u32 conn_ctx_size,
4797  				       u32 cdut_page_size,
4798  				       u32 *full_dump_size_param_offset,
4799  				       u32 *actual_dump_size_param_offset)
4800  {
4801  	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4802  	u32 offset = 0;
4803  
4804  	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
4805  						dump_buf + offset,
4806  						dump, 30);
4807  	offset += qed_dump_str_param(dump_buf + offset,
4808  				     dump,
4809  				     "dump-type", "ilt-dump");
4810  	offset += qed_dump_num_param(dump_buf + offset,
4811  				     dump,
4812  				     "cduc-page-size",
4813  				     cduc_page_size);
4814  	offset += qed_dump_num_param(dump_buf + offset,
4815  				     dump,
4816  				     "cduc-first-page-id",
4817  				     clients[ILT_CLI_CDUC].first.val);
4818  	offset += qed_dump_num_param(dump_buf + offset,
4819  				     dump,
4820  				     "cduc-last-page-id",
4821  				     clients[ILT_CLI_CDUC].last.val);
4822  	offset += qed_dump_num_param(dump_buf + offset,
4823  				     dump,
4824  				     "cduc-num-pf-pages",
4825  				     clients[ILT_CLI_CDUC].pf_total_lines);
4826  	offset += qed_dump_num_param(dump_buf + offset,
4827  				     dump,
4828  				     "cduc-num-vf-pages",
4829  				     clients[ILT_CLI_CDUC].vf_total_lines);
4830  	offset += qed_dump_num_param(dump_buf + offset,
4831  				     dump,
4832  				     "max-conn-ctx-size",
4833  				     conn_ctx_size);
4834  	offset += qed_dump_num_param(dump_buf + offset,
4835  				     dump,
4836  				     "cdut-page-size",
4837  				     cdut_page_size);
4838  	offset += qed_dump_num_param(dump_buf + offset,
4839  				     dump,
4840  				     "cdut-first-page-id",
4841  				     clients[ILT_CLI_CDUT].first.val);
4842  	offset += qed_dump_num_param(dump_buf + offset,
4843  				     dump,
4844  				     "cdut-last-page-id",
4845  				     clients[ILT_CLI_CDUT].last.val);
4846  	offset += qed_dump_num_param(dump_buf + offset,
4847  				     dump,
4848  				     "cdut-num-pf-init-pages",
4849  				     qed_get_cdut_num_pf_init_pages(p_hwfn));
4850  	offset += qed_dump_num_param(dump_buf + offset,
4851  				     dump,
4852  				     "cdut-num-vf-init-pages",
4853  				     qed_get_cdut_num_vf_init_pages(p_hwfn));
4854  	offset += qed_dump_num_param(dump_buf + offset,
4855  				     dump,
4856  				     "cdut-num-pf-work-pages",
4857  				     qed_get_cdut_num_pf_work_pages(p_hwfn));
4858  	offset += qed_dump_num_param(dump_buf + offset,
4859  				     dump,
4860  				     "cdut-num-vf-work-pages",
4861  				     qed_get_cdut_num_vf_work_pages(p_hwfn));
4862  	offset += qed_dump_num_param(dump_buf + offset,
4863  				     dump,
4864  				     "max-task-ctx-size",
4865  				     p_hwfn->p_cxt_mngr->task_ctx_size);
4866  	offset += qed_dump_num_param(dump_buf + offset,
4867  				     dump,
4868  				     "first-vf-id-in-pf",
4869  				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
4870  	offset += qed_dump_num_param(dump_buf + offset,
4871  				     dump,
4872  				     "num-vfs-in-pf",
4873  				     p_hwfn->p_cxt_mngr->vf_count);
4874  	offset += qed_dump_num_param(dump_buf + offset,
4875  				     dump,
4876  				     "ptr-size-bytes",
4877  				     sizeof(void *));
4878  	offset += qed_dump_num_param(dump_buf + offset,
4879  				     dump,
4880  				     "pf-start-line",
4881  				     p_hwfn->p_cxt_mngr->pf_start_line);
4882  	offset += qed_dump_num_param(dump_buf + offset,
4883  				     dump,
4884  				     "page-mem-desc-size-dwords",
4885  				     PAGE_MEM_DESC_SIZE_DWORDS);
4886  	offset += qed_dump_num_param(dump_buf + offset,
4887  				     dump,
4888  				     "ilt-shadow-size",
4889  				     p_hwfn->p_cxt_mngr->ilt_shadow_size);
4890  
4891  	*full_dump_size_param_offset = offset;
4892  
4893  	offset += qed_dump_num_param(dump_buf + offset,
4894  				     dump, "dump-size-full", 0);
4895  
4896  	*actual_dump_size_param_offset = offset;
4897  
4898  	offset += qed_dump_num_param(dump_buf + offset,
4899  				     dump,
4900  				     "dump-size-actual", 0);
4901  	offset += qed_dump_num_param(dump_buf + offset,
4902  				     dump,
4903  				     "iscsi_task_pages",
4904  				     p_hwfn->p_cxt_mngr->iscsi_task_pages);
4905  	offset += qed_dump_num_param(dump_buf + offset,
4906  				     dump,
4907  				     "fcoe_task_pages",
4908  				     p_hwfn->p_cxt_mngr->fcoe_task_pages);
4909  	offset += qed_dump_num_param(dump_buf + offset,
4910  				     dump,
4911  				     "roce_task_pages",
4912  				     p_hwfn->p_cxt_mngr->roce_task_pages);
4913  	offset += qed_dump_num_param(dump_buf + offset,
4914  				     dump,
4915  				     "eth_task_pages",
4916  				     p_hwfn->p_cxt_mngr->eth_task_pages);
4917  	offset += qed_dump_num_param(dump_buf + offset,
4918  				      dump,
4919  				      "src-first-page-id",
4920  				      clients[ILT_CLI_SRC].first.val);
4921  	offset += qed_dump_num_param(dump_buf + offset,
4922  				     dump,
4923  				     "src-last-page-id",
4924  				     clients[ILT_CLI_SRC].last.val);
4925  	offset += qed_dump_num_param(dump_buf + offset,
4926  				     dump,
4927  				     "src-is-active",
4928  				     clients[ILT_CLI_SRC].active);
4929  
4930  	/* Adding or removing parameters requires updating the count passed
4931  	 * to qed_dump_common_global_params()
4932  	 */
4933  
4934  	return offset;
4935  }
4936  
4937  /* Dump section containing number of PF CIDs per connection type.
4938   * Part of ilt dump process.
4939   * Returns the dumped size in dwords.
4940   */
4941  static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
4942  					 u32 *dump_buf,
4943  					 bool dump, u32 *valid_conn_pf_cids)
4944  {
4945  	u32 num_pf_cids = 0;
4946  	u32 offset = 0;
4947  	u8 conn_type;
4948  
4949  	offset += qed_dump_section_hdr(dump_buf + offset,
4950  				       dump, "num_pf_cids_per_conn_type", 1);
4951  	offset += qed_dump_num_param(dump_buf + offset,
4952  				     dump, "size", NUM_OF_CONNECTION_TYPES);
4953  	for (conn_type = 0, *valid_conn_pf_cids = 0;
4954  	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4955  		num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
4956  		if (dump)
4957  			*(dump_buf + offset) = num_pf_cids;
4958  		*valid_conn_pf_cids += num_pf_cids;
4959  	}
4960  
4961  	return offset;
4962  }
4963  
4964  /* Dump section containing number of VF CIDs per connection type.
4965   * Part of ilt dump process.
4966   * Returns the dumped size in dwords.
4967   */
4968  static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
4969  					 u32 *dump_buf,
4970  					 bool dump, u32 *valid_conn_vf_cids)
4971  {
4972  	u32 num_vf_cids = 0;
4973  	u32 offset = 0;
4974  	u8 conn_type;
4975  
4976  	offset += qed_dump_section_hdr(dump_buf + offset, dump,
4977  				       "num_vf_cids_per_conn_type", 1);
4978  	offset += qed_dump_num_param(dump_buf + offset,
4979  				     dump, "size", NUM_OF_CONNECTION_TYPES);
4980  	for (conn_type = 0, *valid_conn_vf_cids = 0;
4981  	     conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
4982  		num_vf_cids =
4983  		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
4984  		if (dump)
4985  			*(dump_buf + offset) = num_vf_cids;
4986  		*valid_conn_vf_cids += num_vf_cids;
4987  	}
4988  
4989  	return offset;
4990  }
4991  
4992  /* Performs ILT Dump to the specified buffer.
4993   * buf_size_in_dwords - The dumped buffer size.
4994   * Returns the dumped size in dwords.
4995   */
4996  static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
4997  			struct qed_ptt *p_ptt,
4998  			u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
4999  {
5000  #if ((!defined VMWARE) && (!defined UEFI))
5001  	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
5002  #endif
5003  	u32 valid_conn_vf_cids = 0,
5004  	    valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
5005  	u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
5006  	u32 num_cids_per_page, conn_ctx_size;
5007  	u32 cduc_page_size, cdut_page_size;
5008  	u32 actual_dump_size_in_dwords = 0;
5009  	struct phys_mem_desc *ilt_pages;
5010  	u32 actul_dump_off = 0;
5011  	u32 last_section_size;
5012  	u32 full_dump_off = 0;
5013  	u32 section_size = 0;
5014  	bool continue_dump;
5015  	u32 page_id;
5016  
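	/* Size of the trailing CRC section, computed without writing it */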
5017  	last_section_size = qed_dump_last_section(NULL, 0, false);
5018  	cduc_page_size = 1 <<
5019  	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
5020  	cdut_page_size = 1 <<
5021  	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
5022  	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
5023  	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
5024  	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
5025  	continue_dump = dump;
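	/* continue_dump starts as 'dump' and is cleared once the buffer runs
	 * out, so later sections only account for their size without writing.
	 */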
5026  
5027  	/* If dumping, reserve room for the last section
5028  	 * (the last section calculates a CRC over the dumped data)
5029  	 */
5030  	if (dump) {
5031  		if (buf_size_in_dwords >= last_section_size) {
5032  			buf_size_in_dwords -= last_section_size;
5033  		} else {
5034  			continue_dump = false;
5035  			actual_dump_size_in_dwords = offset;
5036  		}
5037  	}
5038  
5039  	/* Dump global params */
5040  
5041  	/* If dumping, first check that there is enough room in the dump
5042  	 * buffer for this section: calculate the size of this section
5043  	 * without dumping, and if there is not enough memory, stop the
5044  	 * dump.
5045  	 */
5046  	if (continue_dump) {
5047  		section_size =
5048  			qed_ilt_dump_dump_common_global_params(p_hwfn,
5049  							       p_ptt,
5050  							       NULL,
5051  							       false,
5052  							       cduc_page_size,
5053  							       conn_ctx_size,
5054  							       cdut_page_size,
5055  							       &full_dump_off,
5056  							       &actul_dump_off);
5057  		if (offset + section_size > buf_size_in_dwords) {
5058  			continue_dump = false;
5059  			actual_dump_size_in_dwords = offset;
5060  		}
5061  	}
5062  
5063  	offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
5064  							 p_ptt,
5065  							 dump_buf + offset,
5066  							 continue_dump,
5067  							 cduc_page_size,
5068  							 conn_ctx_size,
5069  							 cdut_page_size,
5070  							 &full_dump_off,
5071  							 &actul_dump_off);
5072  
5073  	/* Dump section containing number of PF CIDs per connection type
5074  	 * If dumping, first check that there is enough room in the dump
5075  	 * buffer for this section.
5076  	 */
5077  	if (continue_dump) {
5078  		section_size =
5079  			qed_ilt_dump_dump_num_pf_cids(p_hwfn,
5080  						      NULL,
5081  						      false,
5082  						      &valid_conn_pf_cids);
5083  		if (offset + section_size > buf_size_in_dwords) {
5084  			continue_dump = false;
5085  			actual_dump_size_in_dwords = offset;
5086  		}
5087  	}
5088  
5089  	offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
5090  						dump_buf + offset,
5091  						continue_dump,
5092  						&valid_conn_pf_cids);
5093  
5094  	/* Dump section containing number of VF CIDs per connection type
5095  	 * If dumping, first check that there is enough room in the dump
5096  	 * buffer for this section.
5097  	 */
5098  	if (continue_dump) {
5099  		section_size =
5100  			qed_ilt_dump_dump_num_vf_cids(p_hwfn,
5101  						      NULL,
5102  						      false,
5103  						      &valid_conn_vf_cids);
5104  		if (offset + section_size > buf_size_in_dwords) {
5105  			continue_dump = false;
5106  			actual_dump_size_in_dwords = offset;
5107  		}
5108  	}
5109  
5110  	offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
5111  						dump_buf + offset,
5112  						continue_dump,
5113  						&valid_conn_vf_cids);
5114  
5115  	/* Dump section containing physical memory descriptors for each
5116  	 * ILT page.
5117  	 */
5118  	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
5119  
5120  	/* If dumping, first check that there is enough room in the dump
5121  	 * buffer for the section header.
5122  	 */
5123  	if (continue_dump) {
5124  		section_size = qed_dump_section_hdr(NULL,
5125  						    false,
5126  						    "ilt_page_desc",
5127  						    1) +
5128  		    qed_dump_num_param(NULL,
5129  				       false,
5130  				       "size",
5131  				       num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
5132  		if (offset + section_size > buf_size_in_dwords) {
5133  			continue_dump = false;
5134  			actual_dump_size_in_dwords = offset;
5135  		}
5136  	}
5137  
5138  	offset += qed_dump_section_hdr(dump_buf + offset,
5139  				       continue_dump, "ilt_page_desc", 1);
5140  	offset += qed_dump_num_param(dump_buf + offset,
5141  				     continue_dump,
5142  				     "size",
5143  				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
5144  
5145  	/* Copy memory descriptors to dump buffer
5146  	 * If dumping, copy until the dump buffer is full
5147  	 */
5148  	if (continue_dump) {
5149  		for (page_id = 0; page_id < num_pages;
5150  		     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
5151  			if (continue_dump &&
5152  			    (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
5153  			     buf_size_in_dwords)) {
5154  				memcpy(dump_buf + offset,
5155  				       &ilt_pages[page_id],
5156  				       DWORDS_TO_BYTES
5157  				       (PAGE_MEM_DESC_SIZE_DWORDS));
5158  			} else {
5159  				if (continue_dump) {
5160  					continue_dump = false;
5161  					actual_dump_size_in_dwords = offset;
5162  				}
5163  			}
5164  		}
5165  	} else {
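		/* Not dumping - just account for the descriptors' size */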
5166  		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
5167  	}
5168  
5169  	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
5170  					   num_cids_per_page);
5171  	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
5172  					   num_cids_per_page);
5173  
5174  	/* Dump ILT pages IDs */
5175  	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
5176  				   valid_conn_pf_pages, valid_conn_vf_pages,
5177  				   ilt_pages, true, buf_size_in_dwords,
5178  				   &actual_dump_size_in_dwords);
5179  
5180  	/* Dump ILT pages memory */
5181  	qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
5182  				   valid_conn_pf_pages, valid_conn_vf_pages,
5183  				   ilt_pages, false, buf_size_in_dwords,
5184  				   &actual_dump_size_in_dwords);
5185  
5186  	real_dumped_size =
5187  	    (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
5188  	qed_dump_num_param(dump_buf + full_dump_off, dump,
5189  			   "full-dump-size", offset + last_section_size);
5190  	qed_dump_num_param(dump_buf + actul_dump_off,
5191  			   dump,
5192  			   "actual-dump-size",
5193  			   real_dumped_size + last_section_size);
5194  
5195  	/* Dump last section */
5196  	real_dumped_size += qed_dump_last_section(dump_buf,
5197  						  real_dumped_size, dump);
5198  
5199  	return real_dumped_size;
5200  }
5201  
5202  /***************************** Public Functions *******************************/
5203  
5204  enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
5205  				    const u8 * const bin_ptr)
5206  {
5207  	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
5208  	u8 buf_id;
5209  
5210  	/* Convert binary data to debug arrays */
5211  	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
5212  		qed_set_dbg_bin_buf(p_hwfn,
5213  				    buf_id,
5214  				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
5215  				    buf_hdrs[buf_id].length);
5216  
5217  	return DBG_STATUS_OK;
5218  }
5219  
5220  static enum dbg_status qed_dbg_set_app_ver(u32 ver)
5221  {
5222  	if (ver < TOOLS_VERSION)
5223  		return DBG_STATUS_UNSUPPORTED_APP_VERSION;
5224  
5225  	s_app_ver = ver;
5226  
5227  	return DBG_STATUS_OK;
5228  }
5229  
5230  bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
5231  		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
5232  {
5233  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5234  	u8 storm_id;
5235  
5236  	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
5237  		struct storm_defs *storm = &s_storm_defs[storm_id];
5238  
5239  		/* Skip Storm if it's in reset */
5240  		if (dev_data->block_in_reset[storm->sem_block_id])
5241  			continue;
5242  
5243  		/* Read FW info for the current Storm */
5244  		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
5245  
5246  		return true;
5247  	}
5248  
5249  	return false;
5250  }
5251  
5252  enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
5253  				   enum dbg_grc_params grc_param, u32 val)
5254  {
5255  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5256  	enum dbg_status status;
5257  	int i;
5258  
5259  	DP_VERBOSE(p_hwfn,
5260  		   QED_MSG_DEBUG,
5261  		   "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
5262  
5263  	status = qed_dbg_dev_init(p_hwfn);
5264  	if (status != DBG_STATUS_OK)
5265  		return status;
5266  
5267  	/* Initializes the GRC parameters (if not initialized). Needed in order
5268  	 * to set the default parameter values for the first time.
5269  	 */
5270  	qed_dbg_grc_init_params(p_hwfn);
5271  
5272  	if (grc_param >= MAX_DBG_GRC_PARAMS)
5273  		return DBG_STATUS_INVALID_ARGS;
5274  	if (val < s_grc_param_defs[grc_param].min ||
5275  	    val > s_grc_param_defs[grc_param].max)
5276  		return DBG_STATUS_INVALID_ARGS;
5277  
5278  	if (s_grc_param_defs[grc_param].is_preset) {
5279  		/* Preset param */
5280  
5281  		/* Disabling a preset is not allowed. Call
5282  		 * dbg_grc_set_params_default instead.
5283  		 */
5284  		if (!val)
5285  			return DBG_STATUS_INVALID_ARGS;
5286  
5287  		/* Update all params with the preset values */
5288  		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
5289  			struct grc_param_defs *defs = &s_grc_param_defs[i];
5290  			u32 preset_val;
5291  			/* Skip persistent params */
5292  			if (defs->is_persistent)
5293  				continue;
5294  
5295  			/* Find preset value */
5296  			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
5297  				preset_val =
5298  				    defs->exclude_all_preset_val;
5299  			else if (grc_param == DBG_GRC_PARAM_CRASH)
5300  				preset_val =
5301  				    defs->crash_preset_val[dev_data->chip_id];
5302  			else
5303  				return DBG_STATUS_INVALID_ARGS;
5304  
5305  			qed_grc_set_param(p_hwfn, i, preset_val);
5306  		}
5307  	} else {
5308  		/* Regular param - set its value */
5309  		qed_grc_set_param(p_hwfn, grc_param, val);
5310  	}
5311  
5312  	return DBG_STATUS_OK;
5313  }
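
/* Illustrative example (not part of the original source): a caller could,
 * for instance, enable CDUT ILT dumping before requesting a dump:
 *
 *	qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT, 1);
 */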
5314  
5315  /* Assign default GRC param values */
5316  void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
5317  {
5318  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5319  	u32 i;
5320  
5321  	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5322  		if (!s_grc_param_defs[i].is_persistent)
5323  			dev_data->grc.param_val[i] =
5324  			    s_grc_param_defs[i].default_val[dev_data->chip_id];
5325  }
5326  
5327  enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5328  					      struct qed_ptt *p_ptt,
5329  					      u32 *buf_size)
5330  {
5331  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5332  
5333  	*buf_size = 0;
5334  
5335  	if (status != DBG_STATUS_OK)
5336  		return status;
5337  
5338  	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5339  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5340  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5341  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5342  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5343  		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5344  
5345  	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5346  }
5347  
5348  enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5349  				 struct qed_ptt *p_ptt,
5350  				 u32 *dump_buf,
5351  				 u32 buf_size_in_dwords,
5352  				 u32 *num_dumped_dwords)
5353  {
5354  	u32 needed_buf_size_in_dwords;
5355  	enum dbg_status status;
5356  
5357  	*num_dumped_dwords = 0;
5358  
5359  	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5360  					       p_ptt,
5361  					       &needed_buf_size_in_dwords);
5362  	if (status != DBG_STATUS_OK)
5363  		return status;
5364  
5365  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5366  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5367  
5368  	/* Doesn't do anything, needed for compile time asserts */
5369  	qed_static_asserts();
5370  
5371  	/* GRC Dump */
5372  	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5373  
5374  	/* Revert GRC params to their default */
5375  	qed_dbg_grc_set_params_default(p_hwfn);
5376  
5377  	return status;
5378  }
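
/* Illustrative usage sketch (not part of the original source): dumps follow
 * a query-size / allocate / dump pattern, roughly:
 *
 *	u32 size_dw, dumped_dw;
 *	u32 *buf;
 *
 *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) != DBG_STATUS_OK)
 *		return;
 *	buf = vzalloc(size_dw * sizeof(u32));
 *	if (buf) {
 *		qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw, &dumped_dw);
 *		vfree(buf);
 *	}
 */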
5379  
5380  enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5381  						   struct qed_ptt *p_ptt,
5382  						   u32 *buf_size)
5383  {
5384  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5385  	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
5386  	enum dbg_status status;
5387  
5388  	*buf_size = 0;
5389  
5390  	status = qed_dbg_dev_init(p_hwfn);
5391  	if (status != DBG_STATUS_OK)
5392  		return status;
5393  
5394  	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5395  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5396  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5397  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5398  		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5399  
5400  	if (!idle_chk->buf_size_set) {
5401  		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5402  						       p_ptt, NULL, false);
5403  		idle_chk->buf_size_set = true;
5404  	}
5405  
5406  	*buf_size = idle_chk->buf_size;
5407  
5408  	return DBG_STATUS_OK;
5409  }
5410  
5411  enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5412  				      struct qed_ptt *p_ptt,
5413  				      u32 *dump_buf,
5414  				      u32 buf_size_in_dwords,
5415  				      u32 *num_dumped_dwords)
5416  {
5417  	u32 needed_buf_size_in_dwords;
5418  	enum dbg_status status;
5419  
5420  	*num_dumped_dwords = 0;
5421  
5422  	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5423  						    p_ptt,
5424  						    &needed_buf_size_in_dwords);
5425  	if (status != DBG_STATUS_OK)
5426  		return status;
5427  
5428  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5429  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5430  
5431  	/* Update reset state */
5432  	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5433  	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5434  
5435  	/* Idle Check Dump */
5436  	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5437  
5438  	/* Revert GRC params to their default */
5439  	qed_dbg_grc_set_params_default(p_hwfn);
5440  
5441  	return DBG_STATUS_OK;
5442  }
5443  
5444  enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5445  						    struct qed_ptt *p_ptt,
5446  						    u32 *buf_size)
5447  {
5448  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5449  
5450  	*buf_size = 0;
5451  
5452  	if (status != DBG_STATUS_OK)
5453  		return status;
5454  
5455  	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5456  }
5457  
5458  enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5459  				       struct qed_ptt *p_ptt,
5460  				       u32 *dump_buf,
5461  				       u32 buf_size_in_dwords,
5462  				       u32 *num_dumped_dwords)
5463  {
5464  	u32 needed_buf_size_in_dwords;
5465  	enum dbg_status status;
5466  
5467  	status =
5468  		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5469  						    p_ptt,
5470  						    &needed_buf_size_in_dwords);
5471  	if (status != DBG_STATUS_OK && status !=
5472  	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5473  		return status;
5474  
5475  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5476  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5477  
5478  	/* Update reset state */
5479  	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5480  
5481  	/* Perform dump */
5482  	status = qed_mcp_trace_dump(p_hwfn,
5483  				    p_ptt, dump_buf, true, num_dumped_dwords);
5484  
5485  	/* Revert GRC params to their default */
5486  	qed_dbg_grc_set_params_default(p_hwfn);
5487  
5488  	return status;
5489  }
5490  
5491  enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5492  						   struct qed_ptt *p_ptt,
5493  						   u32 *buf_size)
5494  {
5495  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5496  
5497  	*buf_size = 0;
5498  
5499  	if (status != DBG_STATUS_OK)
5500  		return status;
5501  
5502  	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5503  }
5504  
5505  enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5506  				      struct qed_ptt *p_ptt,
5507  				      u32 *dump_buf,
5508  				      u32 buf_size_in_dwords,
5509  				      u32 *num_dumped_dwords)
5510  {
5511  	u32 needed_buf_size_in_dwords;
5512  	enum dbg_status status;
5513  
5514  	*num_dumped_dwords = 0;
5515  
5516  	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5517  						    p_ptt,
5518  						    &needed_buf_size_in_dwords);
5519  	if (status != DBG_STATUS_OK)
5520  		return status;
5521  
5522  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5523  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5524  
5525  	/* Update reset state */
5526  	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5527  
5528  	status = qed_reg_fifo_dump(p_hwfn,
5529  				   p_ptt, dump_buf, true, num_dumped_dwords);
5530  
5531  	/* Revert GRC params to their default */
5532  	qed_dbg_grc_set_params_default(p_hwfn);
5533  
5534  	return status;
5535  }
5536  
5537  enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5538  						   struct qed_ptt *p_ptt,
5539  						   u32 *buf_size)
5540  {
5541  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5542  
5543  	*buf_size = 0;
5544  
5545  	if (status != DBG_STATUS_OK)
5546  		return status;
5547  
5548  	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5549  }
5550  
5551  enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5552  				      struct qed_ptt *p_ptt,
5553  				      u32 *dump_buf,
5554  				      u32 buf_size_in_dwords,
5555  				      u32 *num_dumped_dwords)
5556  {
5557  	u32 needed_buf_size_in_dwords;
5558  	enum dbg_status status;
5559  
5560  	*num_dumped_dwords = 0;
5561  
5562  	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5563  						    p_ptt,
5564  						    &needed_buf_size_in_dwords);
5565  	if (status != DBG_STATUS_OK)
5566  		return status;
5567  
5568  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5569  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5570  
5571  	/* Update reset state */
5572  	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5573  
5574  	status = qed_igu_fifo_dump(p_hwfn,
5575  				   p_ptt, dump_buf, true, num_dumped_dwords);
5576  	/* Revert GRC params to their default */
5577  	qed_dbg_grc_set_params_default(p_hwfn);
5578  
5579  	return status;
5580  }
5581  
5582  enum dbg_status
5583  qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5584  					      struct qed_ptt *p_ptt,
5585  					      u32 *buf_size)
5586  {
5587  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5588  
5589  	*buf_size = 0;
5590  
5591  	if (status != DBG_STATUS_OK)
5592  		return status;
5593  
5594  	return qed_protection_override_dump(p_hwfn,
5595  					    p_ptt, NULL, false, buf_size);
5596  }
5597  
5598  enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5599  						 struct qed_ptt *p_ptt,
5600  						 u32 *dump_buf,
5601  						 u32 buf_size_in_dwords,
5602  						 u32 *num_dumped_dwords)
5603  {
5604  	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5605  	enum dbg_status status;
5606  
5607  	*num_dumped_dwords = 0;
5608  
5609  	status =
5610  		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5611  							      p_ptt,
5612  							      p_size);
5613  	if (status != DBG_STATUS_OK)
5614  		return status;
5615  
5616  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5617  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5618  
5619  	/* Update reset state */
5620  	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5621  
5622  	status = qed_protection_override_dump(p_hwfn,
5623  					      p_ptt,
5624  					      dump_buf,
5625  					      true, num_dumped_dwords);
5626  
5627  	/* Revert GRC params to their default */
5628  	qed_dbg_grc_set_params_default(p_hwfn);
5629  
5630  	return status;
5631  }
5632  
5633  enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5634  						     struct qed_ptt *p_ptt,
5635  						     u32 *buf_size)
5636  {
5637  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5638  
5639  	*buf_size = 0;
5640  
5641  	if (status != DBG_STATUS_OK)
5642  		return status;
5643  
5644  	/* Update reset state */
5645  	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5646  
5647  	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5648  
5649  	return DBG_STATUS_OK;
5650  }
5651  
5652  enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5653  					struct qed_ptt *p_ptt,
5654  					u32 *dump_buf,
5655  					u32 buf_size_in_dwords,
5656  					u32 *num_dumped_dwords)
5657  {
5658  	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5659  	enum dbg_status status;
5660  
5661  	*num_dumped_dwords = 0;
5662  
5663  	status =
5664  		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5665  						     p_ptt,
5666  						     p_size);
5667  	if (status != DBG_STATUS_OK)
5668  		return status;
5669  
5670  	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5671  		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5672  
5673  	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5674  
5675  	/* Revert GRC params to their default */
5676  	qed_dbg_grc_set_params_default(p_hwfn);
5677  
5678  	return DBG_STATUS_OK;
5679  }
5680  
5681  static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5682  						     struct qed_ptt *p_ptt,
5683  						     u32 *buf_size)
5684  {
5685  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5686  
5687  	*buf_size = 0;
5688  
5689  	if (status != DBG_STATUS_OK)
5690  		return status;
5691  
5692  	*buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
5693  
5694  	return DBG_STATUS_OK;
5695  }
5696  
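/* Note: unlike the other dump types, no minimum buffer size is enforced here;
 * qed_ilt_dump() truncates the dump to the given buffer and records both the
 * full and the actual dump size in the section parameters.
 */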
5697  static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
5698  					struct qed_ptt *p_ptt,
5699  					u32 *dump_buf,
5700  					u32 buf_size_in_dwords,
5701  					u32 *num_dumped_dwords)
5702  {
5703  	*num_dumped_dwords = qed_ilt_dump(p_hwfn,
5704  					  p_ptt,
5705  					  dump_buf, buf_size_in_dwords, true);
5706  
5707  	/* Revert GRC params to their default */
5708  	qed_dbg_grc_set_params_default(p_hwfn);
5709  
5710  	return DBG_STATUS_OK;
5711  }
5712  
5713  enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5714  				  struct qed_ptt *p_ptt,
5715  				  enum block_id block_id,
5716  				  enum dbg_attn_type attn_type,
5717  				  bool clear_status,
5718  				  struct dbg_attn_block_result *results)
5719  {
5720  	enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5721  	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5722  	const struct dbg_attn_reg *attn_reg_arr;
5723  
5724  	if (status != DBG_STATUS_OK)
5725  		return status;
5726  
5727  	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5728  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5729  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5730  		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5731  
5732  	attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
5733  					       block_id,
5734  					       attn_type, &num_attn_regs);
5735  
5736  	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5737  		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5738  		struct dbg_attn_reg_result *reg_result;
5739  		u32 sts_addr, sts_val;
5740  		u16 modes_buf_offset;
5741  		bool eval_mode;
5742  
5743  		/* Check mode */
5744  		eval_mode = GET_FIELD(reg_data->mode.data,
5745  				      DBG_MODE_HDR_EVAL_MODE) > 0;
5746  		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5747  					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5748  		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5749  			continue;
5750  
5751  		/* Mode match - read attention status register */
5752  		sts_addr = DWORDS_TO_BYTES(clear_status ?
5753  					   reg_data->sts_clr_address :
5754  					   GET_FIELD(reg_data->data,
5755  						     DBG_ATTN_REG_STS_ADDRESS));
5756  		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5757  		if (!sts_val)
5758  			continue;
5759  
5760  		/* Non-zero attention status - add to results */
5761  		reg_result = &results->reg_results[num_result_regs];
5762  		SET_FIELD(reg_result->data,
5763  			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5764  		SET_FIELD(reg_result->data,
5765  			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5766  			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5767  		reg_result->block_attn_offset = reg_data->block_attn_offset;
5768  		reg_result->sts_val = sts_val;
5769  		reg_result->mask_val = qed_rd(p_hwfn,
5770  					      p_ptt,
5771  					      DWORDS_TO_BYTES
5772  					      (reg_data->mask_address));
5773  		num_result_regs++;
5774  	}
5775  
5776  	results->block_id = (u8)block_id;
5777  	results->names_offset =
5778  	    qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
5779  	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5780  	SET_FIELD(results->data,
5781  		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5782  
5783  	return DBG_STATUS_OK;
5784  }
5785  
5786  /******************************* Data Types **********************************/
5787  
5788  /* REG fifo element */
5789  struct reg_fifo_element {
5790  	u64 data;
5791  #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5792  #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5793  #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5794  #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5795  #define REG_FIFO_ELEMENT_PF_SHIFT		24
5796  #define REG_FIFO_ELEMENT_PF_MASK		0xf
5797  #define REG_FIFO_ELEMENT_VF_SHIFT		28
5798  #define REG_FIFO_ELEMENT_VF_MASK		0xff
5799  #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5800  #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5801  #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5802  #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5803  #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5804  #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5805  #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5806  #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5807  #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5808  #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5809  };
5810  
5811  /* REG fifo error element */
5812  struct reg_fifo_err {
5813  	u32 err_code;
5814  	const char *err_msg;
5815  };
5816  
5817  /* IGU fifo element */
5818  struct igu_fifo_element {
5819  	u32 dword0;
5820  #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5821  #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5822  #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5823  #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5824  #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5825  #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5826  #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5827  #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5828  #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5829  #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5830  	u32 dword1;
5831  	u32 dword2;
5832  #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5833  #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5834  #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5835  #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5836  	u32 reserved;
5837  };
5838  
5839  struct igu_fifo_wr_data {
5840  	u32 data;
5841  #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5842  #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5843  #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5844  #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5845  #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5846  #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5847  #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5848  #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5849  #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5850  #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5851  #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5852  #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5853  };
5854  
5855  struct igu_fifo_cleanup_wr_data {
5856  	u32 data;
5857  #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5858  #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5859  #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5860  #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5861  #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5862  #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5863  #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5864  #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5865  };
5866  
5867  /* Protection override element */
5868  struct protection_override_element {
5869  	u64 data;
5870  #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5871  #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5872  #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5873  #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5874  #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5875  #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5876  #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5877  #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5878  #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5879  #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5880  #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5881  #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5882  };
5883  
5884  enum igu_fifo_sources {
5885  	IGU_SRC_PXP0,
5886  	IGU_SRC_PXP1,
5887  	IGU_SRC_PXP2,
5888  	IGU_SRC_PXP3,
5889  	IGU_SRC_PXP4,
5890  	IGU_SRC_PXP5,
5891  	IGU_SRC_PXP6,
5892  	IGU_SRC_PXP7,
5893  	IGU_SRC_CAU,
5894  	IGU_SRC_ATTN,
5895  	IGU_SRC_GRC
5896  };
5897  
5898  enum igu_fifo_addr_types {
5899  	IGU_ADDR_TYPE_MSIX_MEM,
5900  	IGU_ADDR_TYPE_WRITE_PBA,
5901  	IGU_ADDR_TYPE_WRITE_INT_ACK,
5902  	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5903  	IGU_ADDR_TYPE_READ_INT,
5904  	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5905  	IGU_ADDR_TYPE_RESERVED
5906  };
5907  
5908  struct igu_fifo_addr_data {
5909  	u16 start_addr;
5910  	u16 end_addr;
5911  	char *desc;
5912  	char *vf_desc;
5913  	enum igu_fifo_addr_types type;
5914  };
5915  
5916  /******************************** Constants **********************************/
5917  
5918  #define MAX_MSG_LEN				1024
5919  
5920  #define MCP_TRACE_MAX_MODULE_LEN		8
5921  #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5922  #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5923  	(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
5924  
5925  #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5926  #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5927  
5928  #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5929  
5930  /***************************** Constant Arrays *******************************/
5931  
5932  /* Status string array */
5933  static const char * const s_status_str[] = {
5934  	/* DBG_STATUS_OK */
5935  	"Operation completed successfully",
5936  
5937  	/* DBG_STATUS_APP_VERSION_NOT_SET */
5938  	"Debug application version wasn't set",
5939  
5940  	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5941  	"Unsupported debug application version",
5942  
5943  	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5944  	"The debug block wasn't reset since the last recording",
5945  
5946  	/* DBG_STATUS_INVALID_ARGS */
5947  	"Invalid arguments",
5948  
5949  	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5950  	"The debug output was already set",
5951  
5952  	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5953  	"Invalid PCI buffer size",
5954  
5955  	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5956  	"PCI buffer allocation failed",
5957  
5958  	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5959  	"A PCI buffer wasn't allocated",
5960  
5961  	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
5962  	"The filter/trigger constraint dword offsets are not enabled for recording",
5963  	/* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
5964  	"No matching framing mode",
5965  
5966  	/* DBG_STATUS_VFC_READ_ERROR */
5967  	"Error reading from VFC",
5968  
5969  	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5970  	"The Storm was already enabled",
5971  
5972  	/* DBG_STATUS_STORM_NOT_ENABLED */
5973  	"The specified Storm wasn't enabled",
5974  
5975  	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5976  	"The block was already enabled",
5977  
5978  	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5979  	"The specified block wasn't enabled",
5980  
5981  	/* DBG_STATUS_NO_INPUT_ENABLED */
5982  	"No input was enabled for recording",
5983  
5984  	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
5985  	"Filters and triggers are not allowed in E4 256-bit mode",
5986  
5987  	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5988  	"The filter was already enabled",
5989  
5990  	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5991  	"The trigger was already enabled",
5992  
5993  	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5994  	"The trigger wasn't enabled",
5995  
5996  	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5997  	"A constraint can be added only after a filter was enabled or a trigger state was added",
5998  
5999  	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
6000  	"Cannot add more than 3 trigger states",
6001  
6002  	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
6003  	"Cannot add more than 4 constraints per filter or trigger state",
6004  
6005  	/* DBG_STATUS_RECORDING_NOT_STARTED */
6006  	"The recording wasn't started",
6007  
6008  	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
6009  	"A trigger was configured, but it didn't trigger",
6010  
6011  	/* DBG_STATUS_NO_DATA_RECORDED */
6012  	"No data was recorded",
6013  
6014  	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
6015  	"Dump buffer is too small",
6016  
6017  	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
6018  	"Dumped data is not aligned to chunks",
6019  
6020  	/* DBG_STATUS_UNKNOWN_CHIP */
6021  	"Unknown chip",
6022  
6023  	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
6024  	"Failed allocating virtual memory",
6025  
6026  	/* DBG_STATUS_BLOCK_IN_RESET */
6027  	"The input block is in reset",
6028  
6029  	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
6030  	"Invalid MCP trace signature found in NVRAM",
6031  
6032  	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
6033  	"Invalid bundle ID found in NVRAM",
6034  
6035  	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
6036  	"Failed getting NVRAM image",
6037  
6038  	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
6039  	"NVRAM image is not dword-aligned",
6040  
6041  	/* DBG_STATUS_NVRAM_READ_FAILED */
6042  	"Failed reading from NVRAM",
6043  
6044  	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
6045  	"Idle check parsing failed",
6046  
6047  	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
6048  	"MCP Trace data is corrupt",
6049  
6050  	/* DBG_STATUS_MCP_TRACE_NO_META */
6051  	"Dump doesn't contain meta data - it must be provided in image file",
6052  
6053  	/* DBG_STATUS_MCP_COULD_NOT_HALT */
6054  	"Failed to halt MCP",
6055  
6056  	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
6057  	"Failed to resume MCP after halt",
6058  
6059  	/* DBG_STATUS_RESERVED0 */
6060  	"",
6061  
6062  	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
6063  	"Failed to empty SEMI sync FIFO",
6064  
6065  	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
6066  	"IGU FIFO data is corrupt",
6067  
6068  	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
6069  	"MCP failed to mask parities",
6070  
6071  	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
6072  	"FW Asserts parsing failed",
6073  
6074  	/* DBG_STATUS_REG_FIFO_BAD_DATA */
6075  	"GRC FIFO data is corrupt",
6076  
6077  	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
6078  	"Protection Override data is corrupt",
6079  
6080  	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
6081  	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
6082  
6083  	/* DBG_STATUS_RESERVED1 */
6084  	"",
6085  
6086  	/* DBG_STATUS_NON_MATCHING_LINES */
6087  	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
6088  
6089  	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
6090  	"Insufficient HW IDs. Try to record less Storms/blocks",
6091  
6092  	/* DBG_STATUS_DBG_BUS_IN_USE */
6093  	"The debug bus is in use",
6094  
6095  	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
6096  	"The storm debug mode is not supported in the current chip",
6097  
6098  	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
6099  	"Other engine is supported only in BB",
6100  
6101  	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
6102  	"The configured filter mode requires a single Storm/block input",
6103  
6104  	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
6105  	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
6106  
6107  	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
6108  	"When triggering on Storm data, the Storm to trigger on must be specified",
6109  
6110  	/* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
6111  	"Failed to request MDUMP2 Offsize",
6112  
6113  	/* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
6114  	"Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
6115  
6116  	/* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
6117  	"Invalid Signature found at start of MDUMP2",
6118  
6119  	/* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
6120  	"Invalid Log Size of MDUMP2",
6121  
6122  	/* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
6123  	"Invalid Log Header of MDUMP2",
6124  
6125  	/* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
6126  	"Invalid Log Data of MDUMP2",
6127  
6128  	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
6129  	"Could not extract number of ports from regval buf of MDUMP2",
6130  
6131  	/* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
6132  	"Could not extract MFW (link) status from regval buf of MDUMP2",
6133  
6134  	/* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
6135  	"Could not display linkdump of MDUMP2",
6136  
6137  	/* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
6138  	"Could not read PHY CFG of MDUMP2",
6139  
6140  	/* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
6141  	"Could not read PLL Mode of MDUMP2",
6142  
6143  	/* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
6144  	"Could not read TSCF/TSCE Lane Regs of MDUMP2",
6145  
6146  	/* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
6147  	"Could not allocate MDUMP2 reg-val internal buffer"
6148  };
6149  
6150  /* Idle check severity names array */
6151  static const char * const s_idle_chk_severity_str[] = {
6152  	"Error",
6153  	"Error if no traffic",
6154  	"Warning"
6155  };
6156  
6157  /* MCP Trace level names array */
6158  static const char * const s_mcp_trace_level_str[] = {
6159  	"ERROR",
6160  	"TRACE",
6161  	"DEBUG"
6162  };
6163  
6164  /* Access type names array */
6165  static const char * const s_access_strs[] = {
6166  	"read",
6167  	"write"
6168  };
6169  
6170  /* Privilege type names array */
6171  static const char * const s_privilege_strs[] = {
6172  	"VF",
6173  	"PDA",
6174  	"HV",
6175  	"UA"
6176  };
6177  
6178  /* Protection type names array */
6179  static const char * const s_protection_strs[] = {
6180  	"(default)",
6181  	"(default)",
6182  	"(default)",
6183  	"(default)",
6184  	"override VF",
6185  	"override PDA",
6186  	"override HV",
6187  	"override UA"
6188  };
6189  
6190  /* Master type names array */
6191  static const char * const s_master_strs[] = {
6192  	"???",
6193  	"pxp",
6194  	"mcp",
6195  	"msdm",
6196  	"psdm",
6197  	"ysdm",
6198  	"usdm",
6199  	"tsdm",
6200  	"xsdm",
6201  	"dbu",
6202  	"dmae",
6203  	"jdap",
6204  	"???",
6205  	"???",
6206  	"???",
6207  	"???"
6208  };
6209  
6210  /* REG FIFO error messages array */
6211  static struct reg_fifo_err s_reg_fifo_errors[] = {
6212  	{1, "grc timeout"},
6213  	{2, "address doesn't belong to any block"},
6214  	{4, "reserved address in block or write to read-only address"},
6215  	{8, "privilege/protection mismatch"},
6216  	{16, "path isolation error"},
6217  	{17, "RSL error"}
6218  };
6219  
6220  /* IGU FIFO sources array */
6221  static const char * const s_igu_fifo_source_strs[] = {
6222  	"TSTORM",
6223  	"MSTORM",
6224  	"USTORM",
6225  	"XSTORM",
6226  	"YSTORM",
6227  	"PSTORM",
6228  	"PCIE",
6229  	"NIG_QM_PBF",
6230  	"CAU",
6231  	"ATTN",
6232  	"GRC",
6233  };
6234  
6235  /* IGU FIFO error messages */
6236  static const char * const s_igu_fifo_error_strs[] = {
6237  	"no error",
6238  	"length error",
6239  	"function disabled",
6240  	"VF sent command to attention address",
6241  	"host sent prod update command",
6242  	"read of during interrupt register while in MIMD mode",
6243  	"access to PXP BAR reserved address",
6244  	"producer update command to attention index",
6245  	"unknown error",
6246  	"SB index not valid",
6247  	"SB relative index and FID not found",
6248  	"FID not match",
6249  	"command with error flag asserted (PCI error or CAU discard)",
6250  	"VF sent cleanup and RF cleanup is disabled",
6251  	"cleanup command on type bigger than 4"
6252  };
6253  
6254  /* IGU FIFO address data */
6255  static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
6256  	{0x0, 0x101, "MSI-X Memory", NULL,
6257  	 IGU_ADDR_TYPE_MSIX_MEM},
6258  	{0x102, 0x1ff, "reserved", NULL,
6259  	 IGU_ADDR_TYPE_RESERVED},
6260  	{0x200, 0x200, "Write PBA[0:63]", NULL,
6261  	 IGU_ADDR_TYPE_WRITE_PBA},
6262  	{0x201, 0x201, "Write PBA[64:127]", "reserved",
6263  	 IGU_ADDR_TYPE_WRITE_PBA},
6264  	{0x202, 0x202, "Write PBA[128]", "reserved",
6265  	 IGU_ADDR_TYPE_WRITE_PBA},
6266  	{0x203, 0x3ff, "reserved", NULL,
6267  	 IGU_ADDR_TYPE_RESERVED},
6268  	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
6269  	 IGU_ADDR_TYPE_WRITE_INT_ACK},
6270  	{0x5f0, 0x5f0, "Attention bits update", NULL,
6271  	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6272  	{0x5f1, 0x5f1, "Attention bits set", NULL,
6273  	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6274  	{0x5f2, 0x5f2, "Attention bits clear", NULL,
6275  	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
6276  	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
6277  	 IGU_ADDR_TYPE_READ_INT},
6278  	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
6279  	 IGU_ADDR_TYPE_READ_INT},
6280  	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6281  	 IGU_ADDR_TYPE_READ_INT},
6282  	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6283  	 IGU_ADDR_TYPE_READ_INT},
6284  	{0x5f7, 0x5ff, "reserved", NULL,
6285  	 IGU_ADDR_TYPE_RESERVED},
6286  	{0x600, 0x7ff, "Producer update", NULL,
6287  	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6288  };
6289  
6290  /******************************** Variables **********************************/
6291  
6292  /* Temporary buffer, used for print size calculations */
6293  static char s_temp_buf[MAX_MSG_LEN];
6294  
6295  /**************************** Private Functions ******************************/
6296  
6297  static void qed_user_static_asserts(void)
6298  {
6299  }
6300  
6301  static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6302  {
6303  	return (a + b) % size;
6304  }
6305  
6306  static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6307  {
6308  	return (size + a - b) % size;
6309  }
6310  
6311  /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6312   * bytes) and returns them as a dword value. The specified buffer offset is
6313   * updated.
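 * For example, with buf_size 8 and *offset 7, reading 2 bytes returns buf[7]
 * in the low byte and buf[0] in the next byte, and leaves *offset at 1.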
6314   */
6315  static u32 qed_read_from_cyclic_buf(void *buf,
6316  				    u32 *offset,
6317  				    u32 buf_size, u8 num_bytes_to_read)
6318  {
6319  	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6320  	u32 val = 0;
6321  
6322  	val_ptr = (u8 *)&val;
6323  
6324  	/* Assume running on a LITTLE ENDIAN and the buffer is network order
6325  	 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
6326  	 * (BIG ENDIAN), as high-order bytes are placed at lower memory addresses.
6327  	for (i = 0; i < num_bytes_to_read; i++) {
6328  		val_ptr[i] = bytes_buf[*offset];
6329  		*offset = qed_cyclic_add(*offset, 1, buf_size);
6330  	}
6331  
6332  	return val;
6333  }
6334  
6335  /* Reads and returns the next byte from the specified buffer.
6336   * The specified buffer offset is updated.
6337   */
6338  static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6339  {
6340  	return ((u8 *)buf)[(*offset)++];
6341  }
6342  
6343  /* Reads and returns the next dword from the specified buffer.
6344   * The specified buffer offset is updated.
6345   */
6346  static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6347  {
6348  	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6349  
6350  	*offset += 4;
6351  
6352  	return dword_val;
6353  }
6354  
6355  /* Reads the next string from the specified buffer, and copies it to the
6356   * specified pointer. The specified buffer offset is updated.
6357   */
6358  static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6359  {
6360  	const char *source_str = &((const char *)buf)[*offset];
6361  
6362  	strncpy(dest, source_str, size);
6363  	dest[size - 1] = '\0';
6364  	*offset += size;
6365  }
6366  
6367  /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6368   * If the specified buffer in NULL, a temporary buffer pointer is returned.
6369   * If the specified buffer is NULL, a temporary buffer pointer is returned.
6370  static char *qed_get_buf_ptr(void *buf, u32 offset)
6371  {
6372  	return buf ? (char *)buf + offset : s_temp_buf;
6373  }
6374  
6375  /* Reads a param from the specified buffer. Returns the number of dwords read.
6376   * If the returned param_str_val is NULL, the param is numeric and its
6377   * value is returned in param_num_val.
6378   * Otherwise, the param is a string and its pointer is returned in param_str_val.
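 * The on-buffer encoding (as parsed below) is: a NUL-terminated param name,
 * a type byte (non-zero for a string param), then either a NUL-terminated
 * string padded to a dword boundary or a dword-aligned u32 value.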
6379   */
6380  static u32 qed_read_param(u32 *dump_buf,
6381  			  const char **param_name,
6382  			  const char **param_str_val, u32 *param_num_val)
6383  {
6384  	char *char_buf = (char *)dump_buf;
6385  	size_t offset = 0;
6386  
6387  	/* Extract param name */
6388  	*param_name = char_buf;
6389  	offset += strlen(*param_name) + 1;
6390  
6391  	/* Check param type */
6392  	if (*(char_buf + offset++)) {
6393  		/* String param */
6394  		*param_str_val = char_buf + offset;
6395  		*param_num_val = 0;
6396  		offset += strlen(*param_str_val) + 1;
6397  		if (offset & 0x3)
6398  			offset += (4 - (offset & 0x3));
6399  	} else {
6400  		/* Numeric param */
6401  		*param_str_val = NULL;
6402  		if (offset & 0x3)
6403  			offset += (4 - (offset & 0x3));
6404  		*param_num_val = *(u32 *)(char_buf + offset);
6405  		offset += 4;
6406  	}
6407  
6408  	return (u32)offset / 4;
6409  }
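
/* Encoding consumed above: a NUL-terminated name string, one type byte
 * (non-zero for a string param), then either a NUL-terminated string value or
 * a dword-aligned 32-bit value. For example, a numeric param "size" = 7 is
 * laid out as:
 *
 *	bytes 0..4:  's' 'i' 'z' 'e' '\0'
 *	byte  5:     0x00 (numeric type)
 *	bytes 6..7:  padding up to the next dword boundary
 *	bytes 8..11: the value 7, stored as a native-endian u32
 *
 * i.e. 12 bytes in total, so qed_read_param() returns 3 (dwords).
 */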
6410  
6411  /* Reads a section header from the specified buffer.
6412   * Returns the number of dwords read.
6413   */
qed_read_section_hdr(u32 * dump_buf,const char ** section_name,u32 * num_section_params)6414  static u32 qed_read_section_hdr(u32 *dump_buf,
6415  				const char **section_name,
6416  				u32 *num_section_params)
6417  {
6418  	const char *param_str_val;
6419  
6420  	return qed_read_param(dump_buf,
6421  			      section_name, &param_str_val, num_section_params);
6422  }
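
/* A dump buffer is thus a sequence of sections: a "global_params" section
 * followed by one or more feature-specific sections (e.g. "idle_chk",
 * "mcp_trace_data"). Each section header is itself encoded as a numeric param
 * whose name is the section name and whose value is the number of params that
 * follow in that section.
 */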
6423  
6424  /* Reads section params from the specified buffer and prints them to the results
6425   * buffer. Returns the number of dwords read.
6426   */
qed_print_section_params(u32 * dump_buf,u32 num_section_params,char * results_buf,u32 * num_chars_printed)6427  static u32 qed_print_section_params(u32 *dump_buf,
6428  				    u32 num_section_params,
6429  				    char *results_buf, u32 *num_chars_printed)
6430  {
6431  	u32 i, dump_offset = 0, results_offset = 0;
6432  
6433  	for (i = 0; i < num_section_params; i++) {
6434  		const char *param_name, *param_str_val;
6435  		u32 param_num_val = 0;
6436  
6437  		dump_offset += qed_read_param(dump_buf + dump_offset,
6438  					      &param_name,
6439  					      &param_str_val, &param_num_val);
6440  
6441  		if (param_str_val)
6442  			results_offset +=
6443  				sprintf(qed_get_buf_ptr(results_buf,
6444  							results_offset),
6445  					"%s: %s\n", param_name, param_str_val);
6446  		else if (strcmp(param_name, "fw-timestamp"))
6447  			results_offset +=
6448  				sprintf(qed_get_buf_ptr(results_buf,
6449  							results_offset),
6450  					"%s: %d\n", param_name, param_num_val);
6451  	}
6452  
6453  	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6454  				  "\n");
6455  
6456  	*num_chars_printed = results_offset;
6457  
6458  	return dump_offset;
6459  }
6460  
6461  /* Returns the block name that matches the specified block ID,
6462   * or NULL if not found.
6463   */
qed_dbg_get_block_name(struct qed_hwfn * p_hwfn,enum block_id block_id)6464  static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6465  					  enum block_id block_id)
6466  {
6467  	const struct dbg_block_user *block =
6468  	    (const struct dbg_block_user *)
6469  	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6470  
6471  	return (const char *)block->name;
6472  }
6473  
qed_dbg_get_user_data(struct qed_hwfn * p_hwfn)6474  static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6475  							 *p_hwfn)
6476  {
6477  	return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6478  }
6479  
6480  /* Parses the idle check rules and returns the number of characters printed.
6481   * In case of parsing error, returns 0.
6482   */
qed_parse_idle_chk_dump_rules(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 * dump_buf_end,u32 num_rules,bool print_fw_idle_chk,char * results_buf,u32 * num_errors,u32 * num_warnings)6483  static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6484  					 u32 *dump_buf,
6485  					 u32 *dump_buf_end,
6486  					 u32 num_rules,
6487  					 bool print_fw_idle_chk,
6488  					 char *results_buf,
6489  					 u32 *num_errors, u32 *num_warnings)
6490  {
6491  	/* Offset in results_buf in bytes */
6492  	u32 results_offset = 0;
6493  
6494  	u32 rule_idx;
6495  	u16 i, j;
6496  
6497  	*num_errors = 0;
6498  	*num_warnings = 0;
6499  
6500  	/* Go over dumped results */
6501  	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6502  	     rule_idx++) {
6503  		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6504  		struct dbg_idle_chk_result_hdr *hdr;
6505  		const char *parsing_str, *lsi_msg;
6506  		u32 parsing_str_offset;
6507  		bool has_fw_msg;
6508  		u8 curr_reg_id;
6509  
6510  		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6511  		rule_parsing_data =
6512  		    (const struct dbg_idle_chk_rule_parsing_data *)
6513  		    p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
6514  		    hdr->rule_id;
6515  		parsing_str_offset =
6516  		    GET_FIELD(rule_parsing_data->data,
6517  			      DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6518  		has_fw_msg =
6519  		    GET_FIELD(rule_parsing_data->data,
6520  			      DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6521  		parsing_str = (const char *)
6522  		    p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
6523  		    parsing_str_offset;
6524  		lsi_msg = parsing_str;
6525  		curr_reg_id = 0;
6526  
6527  		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6528  			return 0;
6529  
6530  		/* Skip rule header */
6531  		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6532  
6533  		/* Update errors/warnings count */
6534  		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6535  		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6536  			(*num_errors)++;
6537  		else
6538  			(*num_warnings)++;
6539  
6540  		/* Print rule severity */
6541  		results_offset +=
6542  		    sprintf(qed_get_buf_ptr(results_buf,
6543  					    results_offset), "%s: ",
6544  			    s_idle_chk_severity_str[hdr->severity]);
6545  
6546  		/* Print rule message */
6547  		if (has_fw_msg)
6548  			parsing_str += strlen(parsing_str) + 1;
6549  		results_offset +=
6550  		    sprintf(qed_get_buf_ptr(results_buf,
6551  					    results_offset), "%s.",
6552  			    has_fw_msg &&
6553  			    print_fw_idle_chk ? parsing_str : lsi_msg);
6554  		parsing_str += strlen(parsing_str) + 1;
6555  
6556  		/* Print register values */
6557  		results_offset +=
6558  		    sprintf(qed_get_buf_ptr(results_buf,
6559  					    results_offset), " Registers:");
6560  		for (i = 0;
6561  		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6562  		     i++) {
6563  			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6564  			bool is_mem;
6565  			u8 reg_id;
6566  
6567  			reg_hdr =
6568  				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6569  			is_mem = GET_FIELD(reg_hdr->data,
6570  					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6571  			reg_id = GET_FIELD(reg_hdr->data,
6572  					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6573  
6574  			/* Skip reg header */
6575  			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6576  
6577  			/* Skip register names until the required reg_id is
6578  			 * reached.
6579  			 */
6580  			for (; reg_id > curr_reg_id; curr_reg_id++)
6581  				parsing_str += strlen(parsing_str) + 1;
6582  
6583  			results_offset +=
6584  			    sprintf(qed_get_buf_ptr(results_buf,
6585  						    results_offset), " %s",
6586  				    parsing_str);
6587  			if (i < hdr->num_dumped_cond_regs && is_mem)
6588  				results_offset +=
6589  				    sprintf(qed_get_buf_ptr(results_buf,
6590  							    results_offset),
6591  					    "[%d]", hdr->mem_entry_id +
6592  					    reg_hdr->start_entry);
6593  			results_offset +=
6594  			    sprintf(qed_get_buf_ptr(results_buf,
6595  						    results_offset), "=");
6596  			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6597  				results_offset +=
6598  				    sprintf(qed_get_buf_ptr(results_buf,
6599  							    results_offset),
6600  					    "0x%x", *dump_buf);
6601  				if (j < reg_hdr->size - 1)
6602  					results_offset +=
6603  					    sprintf(qed_get_buf_ptr
6604  						    (results_buf,
6605  						     results_offset), ",");
6606  			}
6607  		}
6608  
6609  		results_offset +=
6610  		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6611  	}
6612  
6613  	/* Check if end of dump buffer was exceeded */
6614  	if (dump_buf > dump_buf_end)
6615  		return 0;
6616  
6617  	return results_offset;
6618  }
6619  
6620  /* Parses an idle check dump buffer.
6621   * If results_buf is not NULL, the idle check results are printed to it.
6622   * In any case, the required results buffer size is assigned to
6623   * parsed_results_bytes.
6624   * The parsing status is returned.
6625   */
qed_parse_idle_chk_dump(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf,u32 * parsed_results_bytes,u32 * num_errors,u32 * num_warnings)6626  static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6627  					       u32 *dump_buf,
6628  					       u32 num_dumped_dwords,
6629  					       char *results_buf,
6630  					       u32 *parsed_results_bytes,
6631  					       u32 *num_errors,
6632  					       u32 *num_warnings)
6633  {
6634  	u32 num_section_params = 0, num_rules, num_rules_not_dumped;
6635  	const char *section_name, *param_name, *param_str_val;
6636  	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6637  
6638  	/* Offset in results_buf in bytes */
6639  	u32 results_offset = 0;
6640  
6641  	*parsed_results_bytes = 0;
6642  	*num_errors = 0;
6643  	*num_warnings = 0;
6644  
6645  	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6646  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6647  		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6648  
6649  	/* Read global_params section */
6650  	dump_buf += qed_read_section_hdr(dump_buf,
6651  					 &section_name, &num_section_params);
6652  	if (strcmp(section_name, "global_params"))
6653  		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6654  
6655  	/* Print global params */
6656  	dump_buf += qed_print_section_params(dump_buf,
6657  					     num_section_params,
6658  					     results_buf, &results_offset);
6659  
6660  	/* Read idle_chk section
6661  	 * There may be 1 or 2 idle_chk section parameters:
6662  	 * - 1st is "num_rules"
6663  	 * - 2nd is "num_rules_not_dumped" (optional)
6664  	 */
6665  
6666  	dump_buf += qed_read_section_hdr(dump_buf,
6667  					 &section_name, &num_section_params);
6668  	if (strcmp(section_name, "idle_chk") ||
6669  	    (num_section_params != 2 && num_section_params != 1))
6670  		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6671  	dump_buf += qed_read_param(dump_buf,
6672  				   &param_name, &param_str_val, &num_rules);
6673  	if (strcmp(param_name, "num_rules"))
6674  		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6675  	if (num_section_params > 1) {
6676  		dump_buf += qed_read_param(dump_buf,
6677  					   &param_name,
6678  					   &param_str_val,
6679  					   &num_rules_not_dumped);
6680  		if (strcmp(param_name, "num_rules_not_dumped"))
6681  			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6682  	} else {
6683  		num_rules_not_dumped = 0;
6684  	}
6685  
6686  	if (num_rules) {
6687  		u32 rules_print_size;
6688  
6689  		/* Print FW output */
6690  		results_offset +=
6691  		    sprintf(qed_get_buf_ptr(results_buf,
6692  					    results_offset),
6693  			    "FW_IDLE_CHECK:\n");
6694  		rules_print_size =
6695  			qed_parse_idle_chk_dump_rules(p_hwfn,
6696  						      dump_buf,
6697  						      dump_buf_end,
6698  						      num_rules,
6699  						      true,
6700  						      results_buf ?
6701  						      results_buf +
6702  						      results_offset :
6703  						      NULL,
6704  						      num_errors,
6705  						      num_warnings);
6706  		results_offset += rules_print_size;
6707  		if (!rules_print_size)
6708  			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6709  
6710  		/* Print LSI output */
6711  		results_offset +=
6712  		    sprintf(qed_get_buf_ptr(results_buf,
6713  					    results_offset),
6714  			    "\nLSI_IDLE_CHECK:\n");
6715  		rules_print_size =
6716  			qed_parse_idle_chk_dump_rules(p_hwfn,
6717  						      dump_buf,
6718  						      dump_buf_end,
6719  						      num_rules,
6720  						      false,
6721  						      results_buf ?
6722  						      results_buf +
6723  						      results_offset :
6724  						      NULL,
6725  						      num_errors,
6726  						      num_warnings);
6727  		results_offset += rules_print_size;
6728  		if (!rules_print_size)
6729  			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6730  	}
6731  
6732  	/* Print errors/warnings count */
6733  	if (*num_errors)
6734  		results_offset +=
6735  		    sprintf(qed_get_buf_ptr(results_buf,
6736  					    results_offset),
6737  			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6738  			    *num_errors, *num_warnings);
6739  	else if (*num_warnings)
6740  		results_offset +=
6741  		    sprintf(qed_get_buf_ptr(results_buf,
6742  					    results_offset),
6743  			    "\nIdle Check completed successfully (with %d warnings)\n",
6744  			    *num_warnings);
6745  	else
6746  		results_offset +=
6747  		    sprintf(qed_get_buf_ptr(results_buf,
6748  					    results_offset),
6749  			    "\nIdle Check completed successfully\n");
6750  
6751  	if (num_rules_not_dumped)
6752  		results_offset +=
6753  		    sprintf(qed_get_buf_ptr(results_buf,
6754  					    results_offset),
6755  			    "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
6756  			    num_rules_not_dumped);
6757  
6758  	/* Add 1 for string NULL termination */
6759  	*parsed_results_bytes = results_offset + 1;
6760  
6761  	return DBG_STATUS_OK;
6762  }
6763  
6764  /* Allocates and fills MCP Trace meta data based on the specified meta data
6765   * dump buffer.
6766   * Returns debug status code.
6767   */
6768  static enum dbg_status
qed_mcp_trace_alloc_meta_data(struct qed_hwfn * p_hwfn,const u32 * meta_buf)6769  qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6770  			      const u32 *meta_buf)
6771  {
6772  	struct dbg_tools_user_data *dev_user_data;
6773  	u32 offset = 0, signature, i;
6774  	struct mcp_trace_meta *meta;
6775  	u8 *meta_buf_bytes;
6776  
6777  	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6778  	meta = &dev_user_data->mcp_trace_meta;
6779  	meta_buf_bytes = (u8 *)meta_buf;
6780  
6781  	/* Free the previous meta before loading a new one. */
6782  	if (meta->is_allocated)
6783  		qed_mcp_trace_free_meta_data(p_hwfn);
6784  
6785  	memset(meta, 0, sizeof(*meta));
6786  
6787  	/* Read first signature */
6788  	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6789  	if (signature != NVM_MAGIC_VALUE)
6790  		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6791  
6792  	/* Read no. of modules and allocate memory for their pointers */
6793  	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6794  	meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6795  				GFP_KERNEL);
6796  	if (!meta->modules)
6797  		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6798  
6799  	/* Allocate and read all module strings */
6800  	for (i = 0; i < meta->modules_num; i++) {
6801  		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6802  
6803  		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6804  		if (!(*(meta->modules + i))) {
6805  			/* Update number of modules to be released */
6806  			meta->modules_num = i ? i - 1 : 0;
6807  			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6808  		}
6809  
6810  		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6811  				      *(meta->modules + i));
6812  		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6813  			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6814  	}
6815  
6816  	/* Read second signature */
6817  	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6818  	if (signature != NVM_MAGIC_VALUE)
6819  		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6820  
6821  	/* Read number of formats and allocate memory for all formats */
6822  	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6823  	meta->formats = kcalloc(meta->formats_num,
6824  				sizeof(struct mcp_trace_format),
6825  				GFP_KERNEL);
6826  	if (!meta->formats)
6827  		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6828  
6829  	/* Allocate and read all strings */
6830  	for (i = 0; i < meta->formats_num; i++) {
6831  		struct mcp_trace_format *format_ptr = &meta->formats[i];
6832  		u8 format_len;
6833  
6834  		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6835  							   &offset);
6836  		format_len = GET_MFW_FIELD(format_ptr->data,
6837  					   MCP_TRACE_FORMAT_LEN);
6838  		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6839  		if (!format_ptr->format_str) {
6840  			/* Update number of modules to be released */
6841  			meta->formats_num = i ? i - 1 : 0;
6842  			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6843  		}
6844  
6845  		qed_read_str_from_buf(meta_buf_bytes,
6846  				      &offset,
6847  				      format_len, format_ptr->format_str);
6848  	}
6849  
6850  	meta->is_allocated = true;
6851  	return DBG_STATUS_OK;
6852  }
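
/* Meta data image layout, as parsed above (multi-byte fields are read with the
 * byte/dword helpers):
 *
 *	u32 signature (NVM_MAGIC_VALUE)
 *	u8  modules_num, then per module: u8 length followed by the module
 *	    name string of that length
 *	u32 signature (NVM_MAGIC_VALUE)
 *	u32 formats_num, then per format: u32 format data dword followed by
 *	    the format string, whose length is taken from the data dword
 */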
6853  
6854  /* Parses an MCP trace buffer. If parsed_buf is not NULL, the MCP Trace results
6855   * are printed to it. The parsing status is returned.
6856   * Arguments:
6857   * trace_buf - MCP trace cyclic buffer
6858   * trace_buf_size - MCP trace cyclic buffer size in bytes
6859   * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6860   *		 buffer.
6861   * data_size - size in bytes of data to parse.
6862   * parsed_buf - destination buffer for parsed data.
6863   * parsed_results_bytes - size of parsed data in bytes.
6864   */
qed_parse_mcp_trace_buf(struct qed_hwfn * p_hwfn,u8 * trace_buf,u32 trace_buf_size,u32 data_offset,u32 data_size,char * parsed_buf,u32 * parsed_results_bytes)6865  static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6866  					       u8 *trace_buf,
6867  					       u32 trace_buf_size,
6868  					       u32 data_offset,
6869  					       u32 data_size,
6870  					       char *parsed_buf,
6871  					       u32 *parsed_results_bytes)
6872  {
6873  	struct dbg_tools_user_data *dev_user_data;
6874  	struct mcp_trace_meta *meta;
6875  	u32 param_mask, param_shift;
6876  	enum dbg_status status;
6877  
6878  	dev_user_data = qed_dbg_get_user_data(p_hwfn);
6879  	meta = &dev_user_data->mcp_trace_meta;
6880  	*parsed_results_bytes = 0;
6881  
6882  	if (!meta->is_allocated)
6883  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6884  
6885  	status = DBG_STATUS_OK;
6886  
6887  	while (data_size) {
6888  		struct mcp_trace_format *format_ptr;
6889  		u8 format_level, format_module;
6890  		u32 params[3] = { 0, 0, 0 };
6891  		u32 header, format_idx, i;
6892  
6893  		if (data_size < MFW_TRACE_ENTRY_SIZE)
6894  			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6895  
6896  		header = qed_read_from_cyclic_buf(trace_buf,
6897  						  &data_offset,
6898  						  trace_buf_size,
6899  						  MFW_TRACE_ENTRY_SIZE);
6900  		data_size -= MFW_TRACE_ENTRY_SIZE;
6901  		format_idx = header & MFW_TRACE_EVENTID_MASK;
6902  
6903  		/* Skip message if its index doesn't exist in the meta data */
6904  		if (format_idx >= meta->formats_num) {
6905  			u8 format_size = (u8)GET_MFW_FIELD(header,
6906  							   MFW_TRACE_PRM_SIZE);
6907  
6908  			if (data_size < format_size)
6909  				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6910  
6911  			data_offset = qed_cyclic_add(data_offset,
6912  						     format_size,
6913  						     trace_buf_size);
6914  			data_size -= format_size;
6915  			continue;
6916  		}
6917  
6918  		format_ptr = &meta->formats[format_idx];
6919  
6920  		for (i = 0,
6921  		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6922  		     MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6923  		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6924  		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6925  		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6926  			/* Extract param size (0..3) */
6927  			u8 param_size = (u8)((format_ptr->data & param_mask) >>
6928  					     param_shift);
6929  
6930  			/* If the param size is zero, there are no other
6931  			 * parameters.
6932  			 */
6933  			if (!param_size)
6934  				break;
6935  
6936  			/* Size is encoded using 2 bits, where 3 is used to
6937  			 * encode 4.
6938  			 */
6939  			if (param_size == 3)
6940  				param_size = 4;
6941  
6942  			if (data_size < param_size)
6943  				return DBG_STATUS_MCP_TRACE_BAD_DATA;
6944  
6945  			params[i] = qed_read_from_cyclic_buf(trace_buf,
6946  							     &data_offset,
6947  							     trace_buf_size,
6948  							     param_size);
6949  			data_size -= param_size;
6950  		}
6951  
6952  		format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6953  						 MCP_TRACE_FORMAT_LEVEL);
6954  		format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6955  						  MCP_TRACE_FORMAT_MODULE);
6956  		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6957  			return DBG_STATUS_MCP_TRACE_BAD_DATA;
6958  
6959  		/* Print current message to results buffer */
6960  		*parsed_results_bytes +=
6961  			sprintf(qed_get_buf_ptr(parsed_buf,
6962  						*parsed_results_bytes),
6963  				"%s %-8s: ",
6964  				s_mcp_trace_level_str[format_level],
6965  				meta->modules[format_module]);
6966  		*parsed_results_bytes +=
6967  		    sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6968  			    format_ptr->format_str,
6969  			    params[0], params[1], params[2]);
6970  	}
6971  
6972  	/* Add string NULL terminator */
6973  	(*parsed_results_bytes)++;
6974  
6975  	return status;
6976  }
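
/* Each of the (up to) three params has a 2-bit size field in the format data
 * dword: 0 terminates the param list, 1 and 2 mean a 1-byte or 2-byte param,
 * and 3 encodes a 4-byte param, as handled above.
 */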
6977  
6978  /* Parses an MCP Trace dump buffer.
6979   * If results_buf is not NULL, the MCP Trace results are printed to it.
6980   * In any case, the required results buffer size is assigned to
6981   * parsed_results_bytes.
6982   * The parsing status is returned.
6983   */
qed_parse_mcp_trace_dump(struct qed_hwfn * p_hwfn,u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes,bool free_meta_data)6984  static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6985  						u32 *dump_buf,
6986  						char *results_buf,
6987  						u32 *parsed_results_bytes,
6988  						bool free_meta_data)
6989  {
6990  	const char *section_name, *param_name, *param_str_val;
6991  	u32 data_size, trace_data_dwords, trace_meta_dwords;
6992  	u32 offset, results_offset, results_buf_bytes;
6993  	u32 param_num_val, num_section_params;
6994  	struct mcp_trace *trace;
6995  	enum dbg_status status;
6996  	const u32 *meta_buf;
6997  	u8 *trace_buf;
6998  
6999  	*parsed_results_bytes = 0;
7000  
7001  	/* Read global_params section */
7002  	dump_buf += qed_read_section_hdr(dump_buf,
7003  					 &section_name, &num_section_params);
7004  	if (strcmp(section_name, "global_params"))
7005  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7006  
7007  	/* Print global params */
7008  	dump_buf += qed_print_section_params(dump_buf,
7009  					     num_section_params,
7010  					     results_buf, &results_offset);
7011  
7012  	/* Read trace_data section */
7013  	dump_buf += qed_read_section_hdr(dump_buf,
7014  					 &section_name, &num_section_params);
7015  	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
7016  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7017  	dump_buf += qed_read_param(dump_buf,
7018  				   &param_name, &param_str_val, &param_num_val);
7019  	if (strcmp(param_name, "size"))
7020  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7021  	trace_data_dwords = param_num_val;
7022  
7023  	/* Prepare trace info */
7024  	trace = (struct mcp_trace *)dump_buf;
7025  	if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
7026  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7027  
7028  	trace_buf = (u8 *)dump_buf + sizeof(*trace);
7029  	offset = trace->trace_oldest;
7030  	data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
7031  	dump_buf += trace_data_dwords;
7032  
7033  	/* Read meta_data section */
7034  	dump_buf += qed_read_section_hdr(dump_buf,
7035  					 &section_name, &num_section_params);
7036  	if (strcmp(section_name, "mcp_trace_meta"))
7037  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7038  	dump_buf += qed_read_param(dump_buf,
7039  				   &param_name, &param_str_val, &param_num_val);
7040  	if (strcmp(param_name, "size"))
7041  		return DBG_STATUS_MCP_TRACE_BAD_DATA;
7042  	trace_meta_dwords = param_num_val;
7043  
7044  	/* Choose meta data buffer */
7045  	if (!trace_meta_dwords) {
7046  		/* Dump doesn't include meta data */
7047  		struct dbg_tools_user_data *dev_user_data =
7048  			qed_dbg_get_user_data(p_hwfn);
7049  
7050  		if (!dev_user_data->mcp_trace_user_meta_buf)
7051  			return DBG_STATUS_MCP_TRACE_NO_META;
7052  
7053  		meta_buf = dev_user_data->mcp_trace_user_meta_buf;
7054  	} else {
7055  		/* Dump includes meta data */
7056  		meta_buf = dump_buf;
7057  	}
7058  
7059  	/* Allocate meta data memory */
7060  	status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
7061  	if (status != DBG_STATUS_OK)
7062  		return status;
7063  
7064  	status = qed_parse_mcp_trace_buf(p_hwfn,
7065  					 trace_buf,
7066  					 trace->size,
7067  					 offset,
7068  					 data_size,
7069  					 results_buf ?
7070  					 results_buf + results_offset :
7071  					 NULL,
7072  					 &results_buf_bytes);
7073  	if (status != DBG_STATUS_OK)
7074  		return status;
7075  
7076  	if (free_meta_data)
7077  		qed_mcp_trace_free_meta_data(p_hwfn);
7078  
7079  	*parsed_results_bytes = results_offset + results_buf_bytes;
7080  
7081  	return DBG_STATUS_OK;
7082  }
7083  
7084  /* Parses a Reg FIFO dump buffer.
7085   * If results_buf is not NULL, the Reg FIFO results are printed to it.
7086   * In any case, the required results buffer size is assigned to
7087   * parsed_results_bytes.
7088   * The parsing status is returned.
7089   */
qed_parse_reg_fifo_dump(u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes)7090  static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
7091  					       char *results_buf,
7092  					       u32 *parsed_results_bytes)
7093  {
7094  	const char *section_name, *param_name, *param_str_val;
7095  	u32 param_num_val, num_section_params, num_elements;
7096  	struct reg_fifo_element *elements;
7097  	u8 i, j, err_code, vf_val;
7098  	u32 results_offset = 0;
7099  	char vf_str[4];
7100  
7101  	/* Read global_params section */
7102  	dump_buf += qed_read_section_hdr(dump_buf,
7103  					 &section_name, &num_section_params);
7104  	if (strcmp(section_name, "global_params"))
7105  		return DBG_STATUS_REG_FIFO_BAD_DATA;
7106  
7107  	/* Print global params */
7108  	dump_buf += qed_print_section_params(dump_buf,
7109  					     num_section_params,
7110  					     results_buf, &results_offset);
7111  
7112  	/* Read reg_fifo_data section */
7113  	dump_buf += qed_read_section_hdr(dump_buf,
7114  					 &section_name, &num_section_params);
7115  	if (strcmp(section_name, "reg_fifo_data"))
7116  		return DBG_STATUS_REG_FIFO_BAD_DATA;
7117  	dump_buf += qed_read_param(dump_buf,
7118  				   &param_name, &param_str_val, &param_num_val);
7119  	if (strcmp(param_name, "size"))
7120  		return DBG_STATUS_REG_FIFO_BAD_DATA;
7121  	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
7122  		return DBG_STATUS_REG_FIFO_BAD_DATA;
7123  	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
7124  	elements = (struct reg_fifo_element *)dump_buf;
7125  
7126  	/* Decode elements */
7127  	for (i = 0; i < num_elements; i++) {
7128  		const char *err_msg = NULL;
7129  
7130  		/* Discover if element belongs to a VF or a PF */
7131  		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
7132  		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
7133  			sprintf(vf_str, "%s", "N/A");
7134  		else
7135  			sprintf(vf_str, "%d", vf_val);
7136  
7137  		/* Find error message */
7138  		err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
7139  		for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
7140  			if (err_code == s_reg_fifo_errors[j].err_code)
7141  				err_msg = s_reg_fifo_errors[j].err_msg;
7142  
7143  		/* Add parsed element to parsed buffer */
7144  		results_offset +=
7145  		    sprintf(qed_get_buf_ptr(results_buf,
7146  					    results_offset),
7147  			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
7148  			    elements[i].data,
7149  			    (u32)GET_FIELD(elements[i].data,
7150  					   REG_FIFO_ELEMENT_ADDRESS) *
7151  			    REG_FIFO_ELEMENT_ADDR_FACTOR,
7152  			    s_access_strs[GET_FIELD(elements[i].data,
7153  						    REG_FIFO_ELEMENT_ACCESS)],
7154  			    (u32)GET_FIELD(elements[i].data,
7155  					   REG_FIFO_ELEMENT_PF),
7156  			    vf_str,
7157  			    (u32)GET_FIELD(elements[i].data,
7158  					   REG_FIFO_ELEMENT_PORT),
7159  			    s_privilege_strs[GET_FIELD(elements[i].data,
7160  						REG_FIFO_ELEMENT_PRIVILEGE)],
7161  			    s_protection_strs[GET_FIELD(elements[i].data,
7162  						REG_FIFO_ELEMENT_PROTECTION)],
7163  			    s_master_strs[GET_FIELD(elements[i].data,
7164  						    REG_FIFO_ELEMENT_MASTER)],
7165  			    err_msg ? err_msg : "unknown error code");
7166  	}
7167  
7168  	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7169  						  results_offset),
7170  				  "fifo contained %d elements", num_elements);
7171  
7172  	/* Add 1 for string NULL termination */
7173  	*parsed_results_bytes = results_offset + 1;
7174  
7175  	return DBG_STATUS_OK;
7176  }
7177  
qed_parse_igu_fifo_element(struct igu_fifo_element * element,char * results_buf,u32 * results_offset)7178  static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
7179  						  *element, char
7180  						  *results_buf,
7181  						  u32 *results_offset)
7182  {
7183  	const struct igu_fifo_addr_data *found_addr = NULL;
7184  	u8 source, err_type, i, is_cleanup;
7185  	char parsed_addr_data[32];
7186  	char parsed_wr_data[256];
7187  	u32 wr_data, prod_cons;
7188  	bool is_wr_cmd, is_pf;
7189  	u16 cmd_addr;
7190  	u64 dword12;
7191  
7192  	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
7193  	 * FIFO element.
7194  	 */
7195  	dword12 = ((u64)element->dword2 << 32) | element->dword1;
7196  	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
7197  	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
7198  	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
7199  	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
7200  	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
7201  
7202  	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
7203  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7204  	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
7205  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7206  
7207  	/* Find address data */
7208  	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
7209  		const struct igu_fifo_addr_data *curr_addr =
7210  			&s_igu_fifo_addr_data[i];
7211  
7212  		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
7213  		    curr_addr->end_addr)
7214  			found_addr = curr_addr;
7215  	}
7216  
7217  	if (!found_addr)
7218  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7219  
7220  	/* Prepare parsed address data */
7221  	switch (found_addr->type) {
7222  	case IGU_ADDR_TYPE_MSIX_MEM:
7223  		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
7224  		break;
7225  	case IGU_ADDR_TYPE_WRITE_INT_ACK:
7226  	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
7227  		sprintf(parsed_addr_data,
7228  			" SB = 0x%x", cmd_addr - found_addr->start_addr);
7229  		break;
7230  	default:
7231  		parsed_addr_data[0] = '\0';
7232  	}
7233  
7234  	if (!is_wr_cmd) {
7235  		parsed_wr_data[0] = '\0';
7236  		goto out;
7237  	}
7238  
7239  	/* Prepare parsed write data */
7240  	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
7241  	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
7242  	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
7243  
7244  	if (source == IGU_SRC_ATTN) {
7245  		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
7246  	} else {
7247  		if (is_cleanup) {
7248  			u8 cleanup_val, cleanup_type;
7249  
7250  			cleanup_val =
7251  				GET_FIELD(wr_data,
7252  					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
7253  			cleanup_type =
7254  			    GET_FIELD(wr_data,
7255  				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
7256  
7257  			sprintf(parsed_wr_data,
7258  				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
7259  				cleanup_val ? "set" : "clear",
7260  				cleanup_type);
7261  		} else {
7262  			u8 update_flag, en_dis_int_for_sb, segment;
7263  			u8 timer_mask;
7264  
7265  			update_flag = GET_FIELD(wr_data,
7266  						IGU_FIFO_WR_DATA_UPDATE_FLAG);
7267  			en_dis_int_for_sb =
7268  				GET_FIELD(wr_data,
7269  					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
7270  			segment = GET_FIELD(wr_data,
7271  					    IGU_FIFO_WR_DATA_SEGMENT);
7272  			timer_mask = GET_FIELD(wr_data,
7273  					       IGU_FIFO_WR_DATA_TIMER_MASK);
7274  
7275  			sprintf(parsed_wr_data,
7276  				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
7277  				prod_cons,
7278  				update_flag ? "update" : "nop",
7279  				en_dis_int_for_sb ?
7280  				(en_dis_int_for_sb == 1 ? "disable" : "nop") :
7281  				"enable",
7282  				segment ? "attn" : "regular",
7283  				timer_mask);
7284  		}
7285  	}
7286  out:
7287  	/* Add parsed element to parsed buffer */
7288  	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
7289  						   *results_offset),
7290  				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
7291  				   element->dword2, element->dword1,
7292  				   element->dword0,
7293  				   is_pf ? "pf" : "vf",
7294  				   GET_FIELD(element->dword0,
7295  					     IGU_FIFO_ELEMENT_DWORD0_FID),
7296  				   s_igu_fifo_source_strs[source],
7297  				   is_wr_cmd ? "wr" : "rd",
7298  				   cmd_addr,
7299  				   (!is_pf && found_addr->vf_desc)
7300  				   ? found_addr->vf_desc
7301  				   : found_addr->desc,
7302  				   parsed_addr_data,
7303  				   parsed_wr_data,
7304  				   s_igu_fifo_error_strs[err_type]);
7305  
7306  	return DBG_STATUS_OK;
7307  }
7308  
7309  /* Parses an IGU FIFO dump buffer.
7310   * If results_buf is not NULL, the IGU FIFO results are printed to it.
7311   * In any case, the required results buffer size is assigned to
7312   * parsed_results_bytes.
7313   * The parsing status is returned.
7314   */
qed_parse_igu_fifo_dump(u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes)7315  static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
7316  					       char *results_buf,
7317  					       u32 *parsed_results_bytes)
7318  {
7319  	const char *section_name, *param_name, *param_str_val;
7320  	u32 param_num_val, num_section_params, num_elements;
7321  	struct igu_fifo_element *elements;
7322  	enum dbg_status status;
7323  	u32 results_offset = 0;
7324  	u8 i;
7325  
7326  	/* Read global_params section */
7327  	dump_buf += qed_read_section_hdr(dump_buf,
7328  					 &section_name, &num_section_params);
7329  	if (strcmp(section_name, "global_params"))
7330  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7331  
7332  	/* Print global params */
7333  	dump_buf += qed_print_section_params(dump_buf,
7334  					     num_section_params,
7335  					     results_buf, &results_offset);
7336  
7337  	/* Read igu_fifo_data section */
7338  	dump_buf += qed_read_section_hdr(dump_buf,
7339  					 &section_name, &num_section_params);
7340  	if (strcmp(section_name, "igu_fifo_data"))
7341  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7342  	dump_buf += qed_read_param(dump_buf,
7343  				   &param_name, &param_str_val, &param_num_val);
7344  	if (strcmp(param_name, "size"))
7345  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7346  	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7347  		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7348  	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7349  	elements = (struct igu_fifo_element *)dump_buf;
7350  
7351  	/* Decode elements */
7352  	for (i = 0; i < num_elements; i++) {
7353  		status = qed_parse_igu_fifo_element(&elements[i],
7354  						    results_buf,
7355  						    &results_offset);
7356  		if (status != DBG_STATUS_OK)
7357  			return status;
7358  	}
7359  
7360  	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7361  						  results_offset),
7362  				  "fifo contained %d elements", num_elements);
7363  
7364  	/* Add 1 for string NULL termination */
7365  	*parsed_results_bytes = results_offset + 1;
7366  
7367  	return DBG_STATUS_OK;
7368  }
7369  
7370  static enum dbg_status
qed_parse_protection_override_dump(u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes)7371  qed_parse_protection_override_dump(u32 *dump_buf,
7372  				   char *results_buf,
7373  				   u32 *parsed_results_bytes)
7374  {
7375  	const char *section_name, *param_name, *param_str_val;
7376  	u32 param_num_val, num_section_params, num_elements;
7377  	struct protection_override_element *elements;
7378  	u32 results_offset = 0;
7379  	u8 i;
7380  
7381  	/* Read global_params section */
7382  	dump_buf += qed_read_section_hdr(dump_buf,
7383  					 &section_name, &num_section_params);
7384  	if (strcmp(section_name, "global_params"))
7385  		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7386  
7387  	/* Print global params */
7388  	dump_buf += qed_print_section_params(dump_buf,
7389  					     num_section_params,
7390  					     results_buf, &results_offset);
7391  
7392  	/* Read protection_override_data section */
7393  	dump_buf += qed_read_section_hdr(dump_buf,
7394  					 &section_name, &num_section_params);
7395  	if (strcmp(section_name, "protection_override_data"))
7396  		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7397  	dump_buf += qed_read_param(dump_buf,
7398  				   &param_name, &param_str_val, &param_num_val);
7399  	if (strcmp(param_name, "size"))
7400  		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7401  	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7402  		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7403  	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7404  	elements = (struct protection_override_element *)dump_buf;
7405  
7406  	/* Decode elements */
7407  	for (i = 0; i < num_elements; i++) {
7408  		u32 address = GET_FIELD(elements[i].data,
7409  					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7410  			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7411  
7412  		results_offset +=
7413  		    sprintf(qed_get_buf_ptr(results_buf,
7414  					    results_offset),
7415  			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7416  			    i, address,
7417  			    (u32)GET_FIELD(elements[i].data,
7418  				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7419  			    (u32)GET_FIELD(elements[i].data,
7420  				      PROTECTION_OVERRIDE_ELEMENT_READ),
7421  			    (u32)GET_FIELD(elements[i].data,
7422  				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7423  			    s_protection_strs[GET_FIELD(elements[i].data,
7424  				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7425  			    s_protection_strs[GET_FIELD(elements[i].data,
7426  				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7427  	}
7428  
7429  	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7430  						  results_offset),
7431  				  "protection override contained %d elements",
7432  				  num_elements);
7433  
7434  	/* Add 1 for string NULL termination */
7435  	*parsed_results_bytes = results_offset + 1;
7436  
7437  	return DBG_STATUS_OK;
7438  }
7439  
7440  /* Parses a FW Asserts dump buffer.
7441   * If results_buf is not NULL, the FW Asserts results are printed to it.
7442   * In any case, the required results buffer size is assigned to
7443   * parsed_results_bytes.
7444   * The parsing status is returned.
7445   */
qed_parse_fw_asserts_dump(u32 * dump_buf,char * results_buf,u32 * parsed_results_bytes)7446  static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7447  						 char *results_buf,
7448  						 u32 *parsed_results_bytes)
7449  {
7450  	u32 num_section_params, param_num_val, i, results_offset = 0;
7451  	const char *param_name, *param_str_val, *section_name;
7452  	bool last_section_found = false;
7453  
7454  	*parsed_results_bytes = 0;
7455  
7456  	/* Read global_params section */
7457  	dump_buf += qed_read_section_hdr(dump_buf,
7458  					 &section_name, &num_section_params);
7459  	if (strcmp(section_name, "global_params"))
7460  		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7461  
7462  	/* Print global params */
7463  	dump_buf += qed_print_section_params(dump_buf,
7464  					     num_section_params,
7465  					     results_buf, &results_offset);
7466  
7467  	while (!last_section_found) {
7468  		dump_buf += qed_read_section_hdr(dump_buf,
7469  						 &section_name,
7470  						 &num_section_params);
7471  		if (!strcmp(section_name, "fw_asserts")) {
7472  			/* Extract params */
7473  			const char *storm_letter = NULL;
7474  			u32 storm_dump_size = 0;
7475  
7476  			for (i = 0; i < num_section_params; i++) {
7477  				dump_buf += qed_read_param(dump_buf,
7478  							   &param_name,
7479  							   &param_str_val,
7480  							   &param_num_val);
7481  				if (!strcmp(param_name, "storm"))
7482  					storm_letter = param_str_val;
7483  				else if (!strcmp(param_name, "size"))
7484  					storm_dump_size = param_num_val;
7485  				else
7486  					return
7487  					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7488  			}
7489  
7490  			if (!storm_letter || !storm_dump_size)
7491  				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7492  
7493  			/* Print data */
7494  			results_offset +=
7495  			    sprintf(qed_get_buf_ptr(results_buf,
7496  						    results_offset),
7497  				    "\n%sSTORM_ASSERT: size=%d\n",
7498  				    storm_letter, storm_dump_size);
7499  			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7500  				results_offset +=
7501  				    sprintf(qed_get_buf_ptr(results_buf,
7502  							    results_offset),
7503  					    "%08x\n", *dump_buf);
7504  		} else if (!strcmp(section_name, "last")) {
7505  			last_section_found = true;
7506  		} else {
7507  			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7508  		}
7509  	}
7510  
7511  	/* Add 1 for string NULL termination */
7512  	*parsed_results_bytes = results_offset + 1;
7513  
7514  	return DBG_STATUS_OK;
7515  }
7516  
7517  /***************************** Public Functions *******************************/
7518  
qed_dbg_user_set_bin_ptr(struct qed_hwfn * p_hwfn,const u8 * const bin_ptr)7519  enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7520  					 const u8 * const bin_ptr)
7521  {
7522  	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7523  	u8 buf_id;
7524  
7525  	/* Convert binary data to debug arrays */
7526  	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7527  		qed_set_dbg_bin_buf(p_hwfn,
7528  				    (enum bin_dbg_buffer_type)buf_id,
7529  				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7530  				    buf_hdrs[buf_id].length);
7531  
7532  	return DBG_STATUS_OK;
7533  }
7534  
qed_dbg_alloc_user_data(struct qed_hwfn * p_hwfn,void ** user_data_ptr)7535  enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7536  					void **user_data_ptr)
7537  {
7538  	*user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7539  				 GFP_KERNEL);
7540  	if (!(*user_data_ptr))
7541  		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7542  
7543  	return DBG_STATUS_OK;
7544  }
7545  
qed_dbg_get_status_str(enum dbg_status status)7546  const char *qed_dbg_get_status_str(enum dbg_status status)
7547  {
7548  	return (status <
7549  		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7550  }
7551  
qed_get_idle_chk_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7552  enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7553  						  u32 *dump_buf,
7554  						  u32 num_dumped_dwords,
7555  						  u32 *results_buf_size)
7556  {
7557  	u32 num_errors, num_warnings;
7558  
7559  	return qed_parse_idle_chk_dump(p_hwfn,
7560  				       dump_buf,
7561  				       num_dumped_dwords,
7562  				       NULL,
7563  				       results_buf_size,
7564  				       &num_errors, &num_warnings);
7565  }
7566  
qed_print_idle_chk_results(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf,u32 * num_errors,u32 * num_warnings)7567  enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7568  					   u32 *dump_buf,
7569  					   u32 num_dumped_dwords,
7570  					   char *results_buf,
7571  					   u32 *num_errors,
7572  					   u32 *num_warnings)
7573  {
7574  	u32 parsed_buf_size;
7575  
7576  	return qed_parse_idle_chk_dump(p_hwfn,
7577  				       dump_buf,
7578  				       num_dumped_dwords,
7579  				       results_buf,
7580  				       &parsed_buf_size,
7581  				       num_errors, num_warnings);
7582  }
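
/* A typical caller performs a sizing pass followed by a print pass, e.g.
 * (sketch only; the dump itself and error handling are omitted):
 *
 *	u32 size, errors, warnings;
 *	char *buf;
 *
 *	qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf, dwords, &size);
 *	buf = vmalloc(size);
 *	if (buf)
 *		qed_print_idle_chk_results(p_hwfn, dump_buf, dwords, buf,
 *					   &errors, &warnings);
 */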
7583  
qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn * p_hwfn,const u32 * meta_buf)7584  void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7585  				     const u32 *meta_buf)
7586  {
7587  	struct dbg_tools_user_data *dev_user_data =
7588  		qed_dbg_get_user_data(p_hwfn);
7589  
7590  	dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7591  }
7592  
qed_get_mcp_trace_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7593  enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7594  						   u32 *dump_buf,
7595  						   u32 num_dumped_dwords,
7596  						   u32 *results_buf_size)
7597  {
7598  	return qed_parse_mcp_trace_dump(p_hwfn,
7599  					dump_buf, NULL, results_buf_size, true);
7600  }
7601  
qed_print_mcp_trace_results(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7602  enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7603  					    u32 *dump_buf,
7604  					    u32 num_dumped_dwords,
7605  					    char *results_buf)
7606  {
7607  	u32 parsed_buf_size;
7608  
7609  	/* Doesn't do anything; needed only for compile-time asserts */
7610  	qed_user_static_asserts();
7611  
7612  	return qed_parse_mcp_trace_dump(p_hwfn,
7613  					dump_buf,
7614  					results_buf, &parsed_buf_size, true);
7615  }
7616  
qed_print_mcp_trace_results_cont(struct qed_hwfn * p_hwfn,u32 * dump_buf,char * results_buf)7617  enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7618  						 u32 *dump_buf,
7619  						 char *results_buf)
7620  {
7621  	u32 parsed_buf_size;
7622  
7623  	return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7624  					&parsed_buf_size, false);
7625  }
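
/* When the dump does not embed its meta data (e.g. when parsing the trace
 * continuously), the caller is expected to register a meta data image first
 * and may then parse repeatedly without it being freed, e.g. (sketch only):
 *
 *	qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_image);
 *	qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);
 */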
7626  
qed_print_mcp_trace_line(struct qed_hwfn * p_hwfn,u8 * dump_buf,u32 num_dumped_bytes,char * results_buf)7627  enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7628  					 u8 *dump_buf,
7629  					 u32 num_dumped_bytes,
7630  					 char *results_buf)
7631  {
7632  	u32 parsed_results_bytes;
7633  
7634  	return qed_parse_mcp_trace_buf(p_hwfn,
7635  				       dump_buf,
7636  				       num_dumped_bytes,
7637  				       0,
7638  				       num_dumped_bytes,
7639  				       results_buf, &parsed_results_bytes);
7640  }
7641  
7642  /* Frees the specified MCP Trace meta data */
qed_mcp_trace_free_meta_data(struct qed_hwfn * p_hwfn)7643  void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7644  {
7645  	struct dbg_tools_user_data *dev_user_data;
7646  	struct mcp_trace_meta *meta;
7647  	u32 i;
7648  
7649  	dev_user_data = qed_dbg_get_user_data(p_hwfn);
7650  	meta = &dev_user_data->mcp_trace_meta;
7651  	if (!meta->is_allocated)
7652  		return;
7653  
7654  	/* Release modules */
7655  	if (meta->modules) {
7656  		for (i = 0; i < meta->modules_num; i++)
7657  			kfree(meta->modules[i]);
7658  		kfree(meta->modules);
7659  	}
7660  
7661  	/* Release formats */
7662  	if (meta->formats) {
7663  		for (i = 0; i < meta->formats_num; i++)
7664  			kfree(meta->formats[i].format_str);
7665  		kfree(meta->formats);
7666  	}
7667  
7668  	meta->is_allocated = false;
7669  }
7670  
qed_get_reg_fifo_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7671  enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7672  						  u32 *dump_buf,
7673  						  u32 num_dumped_dwords,
7674  						  u32 *results_buf_size)
7675  {
7676  	return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7677  }
7678  
qed_print_reg_fifo_results(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7679  enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7680  					   u32 *dump_buf,
7681  					   u32 num_dumped_dwords,
7682  					   char *results_buf)
7683  {
7684  	u32 parsed_buf_size;
7685  
7686  	return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7687  }
7688  
qed_get_igu_fifo_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7689  enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7690  						  u32 *dump_buf,
7691  						  u32 num_dumped_dwords,
7692  						  u32 *results_buf_size)
7693  {
7694  	return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7695  }
7696  
qed_print_igu_fifo_results(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7697  enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7698  					   u32 *dump_buf,
7699  					   u32 num_dumped_dwords,
7700  					   char *results_buf)
7701  {
7702  	u32 parsed_buf_size;
7703  
7704  	return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7705  }
7706  
7707  enum dbg_status
qed_get_protection_override_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7708  qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7709  					     u32 *dump_buf,
7710  					     u32 num_dumped_dwords,
7711  					     u32 *results_buf_size)
7712  {
7713  	return qed_parse_protection_override_dump(dump_buf,
7714  						  NULL, results_buf_size);
7715  }
7716  
qed_print_protection_override_results(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7717  enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7718  						      u32 *dump_buf,
7719  						      u32 num_dumped_dwords,
7720  						      char *results_buf)
7721  {
7722  	u32 parsed_buf_size;
7723  
7724  	return qed_parse_protection_override_dump(dump_buf,
7725  						  results_buf,
7726  						  &parsed_buf_size);
7727  }
7728  
qed_get_fw_asserts_results_buf_size(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,u32 * results_buf_size)7729  enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7730  						    u32 *dump_buf,
7731  						    u32 num_dumped_dwords,
7732  						    u32 *results_buf_size)
7733  {
7734  	return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7735  }
7736  
qed_print_fw_asserts_results(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7737  enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7738  					     u32 *dump_buf,
7739  					     u32 num_dumped_dwords,
7740  					     char *results_buf)
7741  {
7742  	u32 parsed_buf_size;
7743  
7744  	return qed_parse_fw_asserts_dump(dump_buf,
7745  					 results_buf, &parsed_buf_size);
7746  }
7747  
qed_dbg_parse_attn(struct qed_hwfn * p_hwfn,struct dbg_attn_block_result * results)7748  enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7749  				   struct dbg_attn_block_result *results)
7750  {
7751  	const u32 *block_attn_name_offsets;
7752  	const char *attn_name_base;
7753  	const char *block_name;
7754  	enum dbg_attn_type attn_type;
7755  	u8 num_regs, i, j;
7756  
7757  	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7758  	attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7759  	block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
7760  	if (!block_name)
7761  		return DBG_STATUS_INVALID_ARGS;
7762  
7763  	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7764  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7765  	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7766  		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7767  
7768  	block_attn_name_offsets =
7769  	    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
7770  	    results->names_offset;
7771  
7772  	attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
7773  
7774  	/* Go over registers with a non-zero attention status */
7775  	for (i = 0; i < num_regs; i++) {
7776  		struct dbg_attn_bit_mapping *bit_mapping;
7777  		struct dbg_attn_reg_result *reg_result;
7778  		u8 num_reg_attn, bit_idx = 0;
7779  
7780  		reg_result = &results->reg_results[i];
7781  		num_reg_attn = GET_FIELD(reg_result->data,
7782  					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7783  		bit_mapping = (struct dbg_attn_bit_mapping *)
7784  		    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
7785  		    reg_result->block_attn_offset;
7786  
7787  		/* Go over attention status bits */
7788  		for (j = 0; j < num_reg_attn; j++) {
7789  			u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7790  						     DBG_ATTN_BIT_MAPPING_VAL);
7791  			const char *attn_name, *attn_type_str, *masked_str;
7792  			u32 attn_name_offset;
7793  			u32 sts_addr;
7794  
7795  			/* Check if bit mask should be advanced (due to unused
7796  			 * bits).
7797  			 */
7798  			if (GET_FIELD(bit_mapping[j].data,
7799  				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7800  				bit_idx += (u8)attn_idx_val;
7801  				continue;
7802  			}
7803  
7804  			/* Check current bit index */
7805  			if (reg_result->sts_val & BIT(bit_idx)) {
7806  				/* An attention bit with value=1 was found
7807  				 * Find attention name
7808  				 */
7809  				attn_name_offset =
7810  					block_attn_name_offsets[attn_idx_val];
7811  				attn_name = attn_name_base + attn_name_offset;
7812  				attn_type_str =
7813  					(attn_type ==
7814  					 ATTN_TYPE_INTERRUPT ? "Interrupt" :
7815  					 "Parity");
7816  				masked_str = reg_result->mask_val &
7817  					     BIT(bit_idx) ?
7818  					     " [masked]" : "";
7819  				sts_addr =
7820  				GET_FIELD(reg_result->data,
7821  					  DBG_ATTN_REG_RESULT_STS_ADDRESS);
7822  				DP_NOTICE(p_hwfn,
7823  					  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7824  					  block_name, attn_type_str, attn_name,
7825  					  sts_addr * 4, bit_idx, masked_str);
7826  			}
7827  
7828  			bit_idx++;
7829  		}
7830  	}
7831  
7832  	return DBG_STATUS_OK;
7833  }
7834  
7835  /* Wrapper for unifying the idle_chk and mcp_trace api */
7836  static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn * p_hwfn,u32 * dump_buf,u32 num_dumped_dwords,char * results_buf)7837  qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7838  				   u32 *dump_buf,
7839  				   u32 num_dumped_dwords,
7840  				   char *results_buf)
7841  {
7842  	u32 num_errors, num_warnings;
7843  
7844  	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7845  					  results_buf, &num_errors,
7846  					  &num_warnings);
7847  }
7848  
7849  static DEFINE_MUTEX(qed_dbg_lock);
7850  
7851  #define MAX_PHY_RESULT_BUFFER 9000
7852  
7853  /******************************** Feature Meta data section ******************/
7854  
7855  #define GRC_NUM_STR_FUNCS 2
7856  #define IDLE_CHK_NUM_STR_FUNCS 1
7857  #define MCP_TRACE_NUM_STR_FUNCS 1
7858  #define REG_FIFO_NUM_STR_FUNCS 1
7859  #define IGU_FIFO_NUM_STR_FUNCS 1
7860  #define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
7861  #define FW_ASSERTS_NUM_STR_FUNCS 1
7862  #define ILT_NUM_STR_FUNCS 1
7863  #define PHY_NUM_STR_FUNCS 20
7864  
7865  /* Feature meta data lookup table */
7866  static struct {
7867  	char *name;
7868  	u32 num_funcs;
7869  	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7870  				    struct qed_ptt *p_ptt, u32 *size);
7871  	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7872  					struct qed_ptt *p_ptt, u32 *dump_buf,
7873  					u32 buf_size, u32 *dumped_dwords);
7874  	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7875  					 u32 *dump_buf, u32 num_dumped_dwords,
7876  					 char *results_buf);
7877  	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7878  					    u32 *dump_buf,
7879  					    u32 num_dumped_dwords,
7880  					    u32 *results_buf_size);
7881  	const struct qed_func_lookup *hsi_func_lookup;
7882  } qed_features_lookup[] = {
7883  	{
7884  	"grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
7885  		    qed_dbg_grc_dump, NULL, NULL, NULL}, {
7886  	"idle_chk", IDLE_CHK_NUM_STR_FUNCS,
7887  		    qed_dbg_idle_chk_get_dump_buf_size,
7888  		    qed_dbg_idle_chk_dump,
7889  		    qed_print_idle_chk_results_wrapper,
7890  		    qed_get_idle_chk_results_buf_size,
7891  		    NULL}, {
7892  	"mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
7893  		    qed_dbg_mcp_trace_get_dump_buf_size,
7894  		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7895  		    qed_get_mcp_trace_results_buf_size,
7896  		    NULL}, {
7897  	"reg_fifo", REG_FIFO_NUM_STR_FUNCS,
7898  		    qed_dbg_reg_fifo_get_dump_buf_size,
7899  		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7900  		    qed_get_reg_fifo_results_buf_size,
7901  		    NULL}, {
7902  	"igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
7903  		    qed_dbg_igu_fifo_get_dump_buf_size,
7904  		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7905  		    qed_get_igu_fifo_results_buf_size,
7906  		    NULL}, {
7907  	"protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
7908  		    qed_dbg_protection_override_get_dump_buf_size,
7909  		    qed_dbg_protection_override_dump,
7910  		    qed_print_protection_override_results,
7911  		    qed_get_protection_override_results_buf_size,
7912  		    NULL}, {
7913  	"fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
7914  		    qed_dbg_fw_asserts_get_dump_buf_size,
7915  		    qed_dbg_fw_asserts_dump,
7916  		    qed_print_fw_asserts_results,
7917  		    qed_get_fw_asserts_results_buf_size,
7918  		    NULL}, {
7919  	"ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
7920  		    qed_dbg_ilt_dump, NULL, NULL, NULL},};
7921  
7922  static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7923  {
7924  	u32 i, precision = 80;
7925  
7926  	if (!p_text_buf)
7927  		return;
7928  
7929  	pr_notice("\n%.*s", precision, p_text_buf);
7930  	for (i = precision; i < text_size; i += precision)
7931  		pr_cont("%.*s", precision, p_text_buf + i);
7932  	pr_cont("\n");
7933  }
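
/* qed_dbg_print_feature() above emits the formatted text buffer to the
 * kernel log in chunks of at most 80 characters: pr_notice() for the first
 * chunk and pr_cont() for the remainder, terminated by a newline.
 */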
7934  
7935  #define QED_RESULTS_BUF_MIN_SIZE 16
7936  /* Generic function for decoding debug feature info */
7937  static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7938  				      enum qed_dbg_features feature_idx)
7939  {
7940  	struct qed_dbg_feature *feature =
7941  	    &p_hwfn->cdev->dbg_features[feature_idx];
7942  	u32 txt_size_bytes, null_char_pos, i;
7943  	u32 *dbuf, dwords;
7944  	enum dbg_status rc;
7945  	char *text_buf;
7946  
7947  	/* Check if feature supports formatting capability */
7948  	if (!qed_features_lookup[feature_idx].results_buf_size)
7949  		return DBG_STATUS_OK;
7950  
7951  	dbuf = (u32 *)feature->dump_buf;
7952  	dwords = feature->dumped_dwords;
7953  
7954  	/* Obtain size of formatted output */
7955  	rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
7956  							       dbuf,
7957  							       dwords,
7958  							       &txt_size_bytes);
7959  	if (rc != DBG_STATUS_OK)
7960  		return rc;
7961  
7962  	/* Make sure that the allocated size is a multiple of dword
7963  	 * (4 bytes).
7964  	 */
7965  	null_char_pos = txt_size_bytes - 1;
7966  	txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
7967  
7968  	if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7969  		DP_NOTICE(p_hwfn->cdev,
7970  			  "formatted size of feature was too small %d. Aborting\n",
7971  			  txt_size_bytes);
7972  		return DBG_STATUS_INVALID_ARGS;
7973  	}
7974  
7975  	/* allocate temp text buf */
7976  	text_buf = vzalloc(txt_size_bytes);
7977  	if (!text_buf) {
7978  		DP_NOTICE(p_hwfn->cdev,
7979  			  "failed to allocate text buffer. Aborting\n");
7980  		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7981  	}
7982  
7983  	/* Decode feature opcodes to string on temp buf */
7984  	rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
7985  							    dbuf,
7986  							    dwords,
7987  							    text_buf);
7988  	if (rc != DBG_STATUS_OK) {
7989  		vfree(text_buf);
7990  		return rc;
7991  	}
7992  
7993  	/* Replace the original null character with a '\n' character.
7994  	 * The bytes that were added as a result of the dword alignment are also
7995  	 * padded with '\n' characters.
7996  	 */
7997  	for (i = null_char_pos; i < txt_size_bytes; i++)
7998  		text_buf[i] = '\n';
7999  
8000  	/* Dump printable feature to log */
8001  	if (p_hwfn->cdev->print_dbg_data)
8002  		qed_dbg_print_feature(text_buf, txt_size_bytes);
8003  
8004  	/* Dump binary data as is to the output file */
8005  	if (p_hwfn->cdev->dbg_bin_dump) {
8006  		vfree(text_buf);
8007  		return rc;
8008  	}
8009  
8010  	/* Free the old dump_buf and point the dump_buf to the newly allocated
8011  	 * and formatted text buffer.
8012  	 */
8013  	vfree(feature->dump_buf);
8014  	feature->dump_buf = text_buf;
8015  	feature->buf_size = txt_size_bytes;
8016  	feature->dumped_dwords = txt_size_bytes / 4;
8017  
8018  	return rc;
8019  }
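
/* In short, format_feature() above: queries the size of the parsed output,
 * rounds it up to a dword multiple, decodes the raw dump into a temporary
 * text buffer, pads the tail (including the original null terminator) with
 * '\n', and - unless a binary dump was requested - replaces the feature's
 * raw dump_buf with the formatted text.
 */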
8020  
8021  #define MAX_DBG_FEATURE_SIZE_DWORDS	0x3FFFFFFF
8022  
8023  /* Generic function for performing the dump of a debug feature. */
8024  static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
8025  				    struct qed_ptt *p_ptt,
8026  				    enum qed_dbg_features feature_idx)
8027  {
8028  	struct qed_dbg_feature *feature =
8029  	    &p_hwfn->cdev->dbg_features[feature_idx];
8030  	u32 buf_size_dwords, *dbuf, *dwords;
8031  	enum dbg_status rc;
8032  
8033  	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
8034  		  qed_features_lookup[feature_idx].name);
8035  
8036  	/* If dump_buf was already allocated, free it (this can happen if a
8037  	 * dump was triggered but the file was never read).
8038  	 * We can't reuse the buffer as-is since its size may have changed.
8039  	 */
8040  	if (feature->dump_buf) {
8041  		vfree(feature->dump_buf);
8042  		feature->dump_buf = NULL;
8043  	}
8044  
8045  	/* Get buffer size from hsi, allocate accordingly, and perform the
8046  	 * dump.
8047  	 */
8048  	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
8049  						       &buf_size_dwords);
8050  	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
8051  		return rc;
8052  
8053  	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
8054  		feature->buf_size = 0;
8055  		DP_NOTICE(p_hwfn->cdev,
8056  			  "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
8057  			  qed_features_lookup[feature_idx].name,
8058  			  buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
8059  
8060  		return DBG_STATUS_OK;
8061  	}
8062  
8063  	feature->buf_size = buf_size_dwords * sizeof(u32);
8064  	feature->dump_buf = vmalloc(feature->buf_size);
8065  	if (!feature->dump_buf)
8066  		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
8067  
8068  	dbuf = (u32 *)feature->dump_buf;
8069  	dwords = &feature->dumped_dwords;
8070  	rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
8071  							   dbuf,
8072  							   feature->buf_size /
8073  							   sizeof(u32),
8074  							   dwords);
8075  
8076  	/* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
8077  	 * In this case the buffer holds valid binary data, but we won't be able
8078  	 * to parse it (since parsing relies on data in NVRAM which is only
8079  	 * accessible when the MFW is responsive). Skip the formatting but return
8080  	 * success so that the binary data is still provided.
8081  	 */
8082  	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
8083  		return DBG_STATUS_OK;
8084  
8085  	if (rc != DBG_STATUS_OK)
8086  		return rc;
8087  
8088  	/* Format output */
8089  	rc = format_feature(p_hwfn, feature_idx);
8090  	return rc;
8091  }
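
/* qed_dbg_dump() above is the common dump path for every feature in the
 * lookup table: size the buffer via get_size(), vmalloc() it, run
 * perform_dump(), then format the result via format_feature(). A
 * DBG_STATUS_NVRAM_GET_IMAGE_FAILED return is deliberately treated as
 * success so that the unparsed binary data is still handed to the caller.
 */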
8092  
8093  int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8094  {
8095  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
8096  }
8097  
8098  int qed_dbg_grc_size(struct qed_dev *cdev)
8099  {
8100  	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
8101  }
8102  
8103  int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8104  {
8105  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
8106  			       num_dumped_bytes);
8107  }
8108  
8109  int qed_dbg_idle_chk_size(struct qed_dev *cdev)
8110  {
8111  	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
8112  }
8113  
8114  int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8115  {
8116  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
8117  			       num_dumped_bytes);
8118  }
8119  
8120  int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
8121  {
8122  	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
8123  }
8124  
8125  int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8126  {
8127  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
8128  			       num_dumped_bytes);
8129  }
8130  
8131  int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
8132  {
8133  	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
8134  }
8135  
8136  static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
8137  				    enum qed_nvm_images image_id, u32 *length)
8138  {
8139  	struct qed_nvm_image_att image_att;
8140  	int rc;
8141  
8142  	*length = 0;
8143  	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
8144  	if (rc)
8145  		return rc;
8146  
8147  	*length = image_att.length;
8148  
8149  	return rc;
8150  }
8151  
8152  static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
8153  			     u32 *num_dumped_bytes,
8154  			     enum qed_nvm_images image_id)
8155  {
8156  	struct qed_hwfn *p_hwfn =
8157  		&cdev->hwfns[cdev->engine_for_debug];
8158  	u32 len_rounded;
8159  	int rc;
8160  
8161  	*num_dumped_bytes = 0;
8162  	rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
8163  	if (rc)
8164  		return rc;
8165  
8166  	DP_NOTICE(p_hwfn->cdev,
8167  		  "Collecting a debug feature [\"nvram image %d\"]\n",
8168  		  image_id);
8169  
8170  	len_rounded = roundup(len_rounded, sizeof(u32));
8171  	rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
8172  	if (rc)
8173  		return rc;
8174  
8175  	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
8176  	if (image_id != QED_NVM_IMAGE_NVM_META)
8177  		cpu_to_be32_array((__force __be32 *)buffer,
8178  				  (const u32 *)buffer,
8179  				  len_rounded / sizeof(u32));
8180  
8181  	*num_dumped_bytes = len_rounded;
8182  
8183  	return rc;
8184  }
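
/* Note that qed_dbg_nvm_image() above rounds the image length up to a
 * whole number of dwords and converts the image data to big-endian, one
 * dword at a time, for every image type except QED_NVM_IMAGE_NVM_META.
 */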
8185  
8186  int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
8187  				u32 *num_dumped_bytes)
8188  {
8189  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
8190  			       num_dumped_bytes);
8191  }
8192  
8193  int qed_dbg_protection_override_size(struct qed_dev *cdev)
8194  {
8195  	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
8196  }
8197  
8198  int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
8199  		       u32 *num_dumped_bytes)
8200  {
8201  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
8202  			       num_dumped_bytes);
8203  }
8204  
8205  int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
8206  {
8207  	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
8208  }
8209  
8210  int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
8211  {
8212  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
8213  }
8214  
8215  int qed_dbg_ilt_size(struct qed_dev *cdev)
8216  {
8217  	return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
8218  }
8219  
8220  int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
8221  		      u32 *num_dumped_bytes)
8222  {
8223  	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
8224  			       num_dumped_bytes);
8225  }
8226  
8227  int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
8228  {
8229  	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
8230  }
8231  
8232  /* Defines the number of bytes allocated for recording the length of a
8233   * debugfs feature buffer.
8234   */
8235  #define REGDUMP_HEADER_SIZE			sizeof(u32)
8236  #define REGDUMP_HEADER_SIZE_SHIFT		0
8237  #define REGDUMP_HEADER_SIZE_MASK		0xffffff
8238  #define REGDUMP_HEADER_FEATURE_SHIFT		24
8239  #define REGDUMP_HEADER_FEATURE_MASK		0x1f
8240  #define REGDUMP_HEADER_BIN_DUMP_SHIFT		29
8241  #define REGDUMP_HEADER_BIN_DUMP_MASK		0x1
8242  #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
8243  #define REGDUMP_HEADER_OMIT_ENGINE_MASK		0x1
8244  #define REGDUMP_HEADER_ENGINE_SHIFT		31
8245  #define REGDUMP_HEADER_ENGINE_MASK		0x1
8246  #define REGDUMP_MAX_SIZE			0x1000000
8247  #define ILT_DUMP_MAX_SIZE			(1024 * 1024 * 15)
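
/* Per the masks and shifts above, each feature in the "all data" dump is
 * preceded by a single dword header laid out as:
 *
 *   bits  0-23: feature size
 *   bits 24-28: feature id (enum debug_print_features)
 *   bit     29: binary dump flag
 *   bit     30: omit-engine flag
 *   bit     31: engine id
 */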
8248  
8249  enum debug_print_features {
8250  	OLD_MODE = 0,
8251  	IDLE_CHK = 1,
8252  	GRC_DUMP = 2,
8253  	MCP_TRACE = 3,
8254  	REG_FIFO = 4,
8255  	PROTECTION_OVERRIDE = 5,
8256  	IGU_FIFO = 6,
8257  	PHY = 7,
8258  	FW_ASSERTS = 8,
8259  	NVM_CFG1 = 9,
8260  	DEFAULT_CFG = 10,
8261  	NVM_META = 11,
8262  	MDUMP = 12,
8263  	ILT_DUMP = 13,
8264  };
8265  
8266  static u32 qed_calc_regdump_header(struct qed_dev *cdev,
8267  				   enum debug_print_features feature,
8268  				   int engine, u32 feature_size,
8269  				   u8 omit_engine, u8 dbg_bin_dump)
8270  {
8271  	u32 res = 0;
8272  
8273  	SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
8274  	if (res != feature_size)
8275  		DP_NOTICE(cdev,
8276  			  "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
8277  			  feature, feature_size);
8278  
8279  	SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
8280  	SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
8281  	SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
8282  	SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
8283  
8284  	return res;
8285  }
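
/* Worked example (illustrative only): an IDLE_CHK (feature id 1) blob of
 * size 0x100 dumped on engine 0 with omit_engine = 1 and dbg_bin_dump = 1
 * yields a header of 0x100 | (1 << 24) | (1 << 29) | (1 << 30) = 0x61000100.
 */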
8286  
8287  int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
8288  {
8289  	u8 cur_engine, omit_engine = 0, org_engine;
8290  	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8291  	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
8292  	int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
8293  	u32 offset = 0, feature_size;
8294  
8295  	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8296  		grc_params[i] = dev_data->grc.param_val[i];
8297  
8298  	if (!QED_IS_CMT(cdev))
8299  		omit_engine = 1;
8300  
8301  	cdev->dbg_bin_dump = 1;
8302  	mutex_lock(&qed_dbg_lock);
8303  
8304  	org_engine = qed_get_debug_engine(cdev);
8305  	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8306  		/* Collect idle_chks and grcDump for each hw function */
8307  		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8308  			   "obtaining idle_chk and grcdump for current engine\n");
8309  		qed_set_debug_engine(cdev, cur_engine);
8310  
8311  		/* First idle_chk */
8312  		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8313  				      REGDUMP_HEADER_SIZE, &feature_size);
8314  		if (!rc) {
8315  			*(u32 *)((u8 *)buffer + offset) =
8316  			    qed_calc_regdump_header(cdev, IDLE_CHK,
8317  						    cur_engine,
8318  						    feature_size,
8319  						    omit_engine,
8320  						    cdev->dbg_bin_dump);
8321  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8322  		} else {
8323  			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8324  		}
8325  
8326  		/* Second idle_chk */
8327  		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
8328  				      REGDUMP_HEADER_SIZE, &feature_size);
8329  		if (!rc) {
8330  			*(u32 *)((u8 *)buffer + offset) =
8331  			    qed_calc_regdump_header(cdev, IDLE_CHK,
8332  						    cur_engine,
8333  						    feature_size,
8334  						    omit_engine,
8335  						    cdev->dbg_bin_dump);
8336  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8337  		} else {
8338  			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
8339  		}
8340  
8341  		/* reg_fifo dump */
8342  		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
8343  				      REGDUMP_HEADER_SIZE, &feature_size);
8344  		if (!rc) {
8345  			*(u32 *)((u8 *)buffer + offset) =
8346  			    qed_calc_regdump_header(cdev, REG_FIFO,
8347  						    cur_engine,
8348  						    feature_size,
8349  						    omit_engine,
8350  						    cdev->dbg_bin_dump);
8351  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8352  		} else {
8353  			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
8354  		}
8355  
8356  		/* igu_fifo dump */
8357  		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
8358  				      REGDUMP_HEADER_SIZE, &feature_size);
8359  		if (!rc) {
8360  			*(u32 *)((u8 *)buffer + offset) =
8361  			    qed_calc_regdump_header(cdev, IGU_FIFO,
8362  						    cur_engine,
8363  						    feature_size,
8364  						    omit_engine,
8365  						    cdev->dbg_bin_dump);
8366  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8367  		} else {
8368  			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
8369  		}
8370  
8371  		/* protection_override dump */
8372  		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
8373  						 REGDUMP_HEADER_SIZE,
8374  						 &feature_size);
8375  		if (!rc) {
8376  			*(u32 *)((u8 *)buffer + offset) =
8377  			    qed_calc_regdump_header(cdev,
8378  						    PROTECTION_OVERRIDE,
8379  						    cur_engine,
8380  						    feature_size,
8381  						    omit_engine,
8382  						    cdev->dbg_bin_dump);
8383  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8384  		} else {
8385  			DP_ERR(cdev,
8386  			       "qed_dbg_protection_override failed. rc = %d\n",
8387  			       rc);
8388  		}
8389  
8390  		/* fw_asserts dump */
8391  		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
8392  					REGDUMP_HEADER_SIZE, &feature_size);
8393  		if (!rc) {
8394  			*(u32 *)((u8 *)buffer + offset) =
8395  			    qed_calc_regdump_header(cdev, FW_ASSERTS,
8396  						    cur_engine,
8397  						    feature_size,
8398  						    omit_engine,
8399  						    cdev->dbg_bin_dump);
8400  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8401  		} else {
8402  			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
8403  			       rc);
8404  		}
8405  
8406  		feature_size = qed_dbg_ilt_size(cdev);
8407  		if (!cdev->disable_ilt_dump && feature_size <
8408  		    ILT_DUMP_MAX_SIZE) {
8409  			rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
8410  					 REGDUMP_HEADER_SIZE, &feature_size);
8411  			if (!rc) {
8412  				*(u32 *)((u8 *)buffer + offset) =
8413  				    qed_calc_regdump_header(cdev, ILT_DUMP,
8414  							    cur_engine,
8415  							    feature_size,
8416  							    omit_engine,
8417  							    cdev->dbg_bin_dump);
8418  				offset += (feature_size + REGDUMP_HEADER_SIZE);
8419  			} else {
8420  				DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
8421  				       rc);
8422  			}
8423  		}
8424  
8425  		/* GRC dump - must be last because when the MCP is stuck it
8426  		 * would clutter the idle_chk, reg_fifo, ... dumps
8427  		 */
8428  		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
8429  			dev_data->grc.param_val[i] = grc_params[i];
8430  
8431  		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
8432  				 REGDUMP_HEADER_SIZE, &feature_size);
8433  		if (!rc) {
8434  			*(u32 *)((u8 *)buffer + offset) =
8435  			    qed_calc_regdump_header(cdev, GRC_DUMP,
8436  						    cur_engine,
8437  						    feature_size,
8438  						    omit_engine,
8439  						    cdev->dbg_bin_dump);
8440  			offset += (feature_size + REGDUMP_HEADER_SIZE);
8441  		} else {
8442  			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
8443  		}
8444  	}
8445  
8446  	qed_set_debug_engine(cdev, org_engine);
8447  
8448  	/* mcp_trace */
8449  	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
8450  			       REGDUMP_HEADER_SIZE, &feature_size);
8451  	if (!rc) {
8452  		*(u32 *)((u8 *)buffer + offset) =
8453  		    qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
8454  					    feature_size, omit_engine,
8455  					    cdev->dbg_bin_dump);
8456  		offset += (feature_size + REGDUMP_HEADER_SIZE);
8457  	} else {
8458  		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
8459  	}
8460  
8461  	/* nvm cfg1 */
8462  	rc = qed_dbg_nvm_image(cdev,
8463  			       (u8 *)buffer + offset +
8464  			       REGDUMP_HEADER_SIZE, &feature_size,
8465  			       QED_NVM_IMAGE_NVM_CFG1);
8466  	if (!rc) {
8467  		*(u32 *)((u8 *)buffer + offset) =
8468  		    qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
8469  					    feature_size, omit_engine,
8470  					    cdev->dbg_bin_dump);
8471  		offset += (feature_size + REGDUMP_HEADER_SIZE);
8472  	} else if (rc != -ENOENT) {
8473  		DP_ERR(cdev,
8474  		       "qed_dbg_nvm_image failed for image  %d (%s), rc = %d\n",
8475  		       QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
8476  		       rc);
8477  	}
8478  
8479  	/* nvm default */
8480  	rc = qed_dbg_nvm_image(cdev,
8481  			       (u8 *)buffer + offset +
8482  			       REGDUMP_HEADER_SIZE, &feature_size,
8483  			       QED_NVM_IMAGE_DEFAULT_CFG);
8484  	if (!rc) {
8485  		*(u32 *)((u8 *)buffer + offset) =
8486  		    qed_calc_regdump_header(cdev, DEFAULT_CFG,
8487  					    cur_engine, feature_size,
8488  					    omit_engine,
8489  					    cdev->dbg_bin_dump);
8490  		offset += (feature_size + REGDUMP_HEADER_SIZE);
8491  	} else if (rc != -ENOENT) {
8492  		DP_ERR(cdev,
8493  		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8494  		       QED_NVM_IMAGE_DEFAULT_CFG,
8495  		       "QED_NVM_IMAGE_DEFAULT_CFG", rc);
8496  	}
8497  
8498  	/* nvm meta */
8499  	rc = qed_dbg_nvm_image(cdev,
8500  			       (u8 *)buffer + offset +
8501  			       REGDUMP_HEADER_SIZE, &feature_size,
8502  			       QED_NVM_IMAGE_NVM_META);
8503  	if (!rc) {
8504  		*(u32 *)((u8 *)buffer + offset) =
8505  		    qed_calc_regdump_header(cdev, NVM_META, cur_engine,
8506  					    feature_size, omit_engine,
8507  					    cdev->dbg_bin_dump);
8508  		offset += (feature_size + REGDUMP_HEADER_SIZE);
8509  	} else if (rc != -ENOENT) {
8510  		DP_ERR(cdev,
8511  		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8512  		       QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
8513  		       rc);
8514  	}
8515  
8516  	/* nvm mdump */
8517  	rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
8518  			       REGDUMP_HEADER_SIZE, &feature_size,
8519  			       QED_NVM_IMAGE_MDUMP);
8520  	if (!rc) {
8521  		*(u32 *)((u8 *)buffer + offset) =
8522  		    qed_calc_regdump_header(cdev, MDUMP, cur_engine,
8523  					    feature_size, omit_engine,
8524  					    cdev->dbg_bin_dump);
8525  		offset += (feature_size + REGDUMP_HEADER_SIZE);
8526  	} else if (rc != -ENOENT) {
8527  		DP_ERR(cdev,
8528  		       "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8529  		       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
8530  	}
8531  
8532  	mutex_unlock(&qed_dbg_lock);
8533  	cdev->dbg_bin_dump = 0;
8534  
8535  	return 0;
8536  }
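
/* The buffer produced by qed_dbg_all_data() above is therefore a sequence
 * of header + payload pairs: per engine - idle_chk (twice), reg_fifo,
 * igu_fifo, protection_override, fw_asserts, optionally ilt, and grc -
 * followed by the engine-common mcp_trace, nvm cfg1, default cfg, nvm meta
 * and mdump images.
 */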
8537  
8538  int qed_dbg_all_data_size(struct qed_dev *cdev)
8539  {
8540  	u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8541  	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8542  	u8 cur_engine, org_engine;
8543  
8544  	cdev->disable_ilt_dump = false;
8545  	org_engine = qed_get_debug_engine(cdev);
8546  	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8547  		/* Engine specific */
8548  		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8549  			   "calculating idle_chk and grcdump register length for current engine\n");
8550  		qed_set_debug_engine(cdev, cur_engine);
8551  		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8552  		    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8553  		    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8554  		    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8555  		    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8556  		    REGDUMP_HEADER_SIZE +
8557  		    qed_dbg_protection_override_size(cdev) +
8558  		    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
8559  		ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8560  		if (ilt_len < ILT_DUMP_MAX_SIZE) {
8561  			total_ilt_len += ilt_len;
8562  			regs_len += ilt_len;
8563  		}
8564  	}
8565  
8566  	qed_set_debug_engine(cdev, org_engine);
8567  
8568  	/* Engine common */
8569  	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
8570  	    REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
8571  	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8572  	if (image_len)
8573  		regs_len += REGDUMP_HEADER_SIZE + image_len;
8574  	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8575  	if (image_len)
8576  		regs_len += REGDUMP_HEADER_SIZE + image_len;
8577  	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8578  	if (image_len)
8579  		regs_len += REGDUMP_HEADER_SIZE + image_len;
8580  	qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8581  	if (image_len)
8582  		regs_len += REGDUMP_HEADER_SIZE + image_len;
8583  
8584  	if (regs_len > REGDUMP_MAX_SIZE) {
8585  		DP_VERBOSE(cdev, QED_MSG_DEBUG,
8586  			   "Dump exceeds max size 0x%x, disable ILT dump\n",
8587  			   REGDUMP_MAX_SIZE);
8588  		cdev->disable_ilt_dump = true;
8589  		regs_len -= total_ilt_len;
8590  	}
8591  
8592  	return regs_len;
8593  }
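
/* qed_dbg_all_data_size() mirrors the layout produced by qed_dbg_all_data(),
 * so callers are expected to size the buffer first and then fill it.
 * Minimal usage sketch (illustrative only; error handling trimmed, and
 * consume() is a hypothetical consumer such as an ethtool regs handler):
 *
 *	int len = qed_dbg_all_data_size(cdev);
 *	void *buf = vzalloc(len);
 *
 *	if (buf && !qed_dbg_all_data(cdev, buf))
 *		consume(buf, len);
 *	vfree(buf);
 */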
8594  
8595  int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8596  		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
8597  {
8598  	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8599  	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8600  	enum dbg_status dbg_rc;
8601  	struct qed_ptt *p_ptt;
8602  	int rc = 0;
8603  
8604  	/* Acquire ptt */
8605  	p_ptt = qed_ptt_acquire(p_hwfn);
8606  	if (!p_ptt)
8607  		return -EINVAL;
8608  
8609  	/* Get dump */
8610  	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8611  	if (dbg_rc != DBG_STATUS_OK) {
8612  		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8613  			   qed_dbg_get_status_str(dbg_rc));
8614  		*num_dumped_bytes = 0;
8615  		rc = -EINVAL;
8616  		goto out;
8617  	}
8618  
8619  	DP_VERBOSE(cdev, QED_MSG_DEBUG,
8620  		   "copying debugfs feature to external buffer\n");
8621  	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8622  	*num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
8623  			    4;
8624  
8625  out:
8626  	qed_ptt_release(p_hwfn, p_ptt);
8627  	return rc;
8628  }
8629  
8630  int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8631  {
8632  	struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8633  	struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
8634  	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8635  	u32 buf_size_dwords;
8636  	enum dbg_status rc;
8637  
8638  	if (!p_ptt)
8639  		return -EINVAL;
8640  
8641  	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8642  						   &buf_size_dwords);
8643  	if (rc != DBG_STATUS_OK)
8644  		buf_size_dwords = 0;
8645  
8646  	/* Feature will not be dumped if it exceeds maximum size */
8647  	if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8648  		buf_size_dwords = 0;
8649  
8650  	qed_ptt_release(p_hwfn, p_ptt);
8651  	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8652  	return qed_feature->buf_size;
8653  }
8654  
8655  int qed_dbg_phy_size(struct qed_dev *cdev)
8656  {
8657  	/* Return the max size of the phy info plus the phy mac_stat
8658  	 * multiplied by the number of ports.
8659  	 */
8660  	return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
8661  }
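
/* For example, on a 2-port device this reserves 9000 * (1 + 2) = 27000
 * bytes: one MAX_PHY_RESULT_BUFFER for the phy info plus one per port for
 * the mac_stat results.
 */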
8662  
8663  u8 qed_get_debug_engine(struct qed_dev *cdev)
8664  {
8665  	return cdev->engine_for_debug;
8666  }
8667  
8668  void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8669  {
8670  	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8671  		   engine_number);
8672  	cdev->engine_for_debug = engine_number;
8673  }
8674  
8675  void qed_dbg_pf_init(struct qed_dev *cdev)
8676  {
8677  	const u8 *dbg_values = NULL;
8678  	int i;
8679  
8680  	/* Sync ver with debugbus qed code */
8681  	qed_dbg_set_app_ver(TOOLS_VERSION);
8682  
8683  	/* Debug values are after init values.
8684  	 * The offset is the first dword of the file.
8685  	 */
8686  	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8687  
8688  	for_each_hwfn(cdev, i) {
8689  		qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8690  		qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8691  	}
8692  
8693  	/* Set the hwfn to be 0 as default */
8694  	cdev->engine_for_debug = 0;
8695  }
8696  
8697  void qed_dbg_pf_exit(struct qed_dev *cdev)
8698  {
8699  	struct qed_dbg_feature *feature = NULL;
8700  	enum qed_dbg_features feature_idx;
8701  
8702  	/* Debug features' buffers may still be allocated if a debug feature
8703  	 * was used but dump wasn't called.
8704  	 */
8705  	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8706  		feature = &cdev->dbg_features[feature_idx];
8707  		if (feature->dump_buf) {
8708  			vfree(feature->dump_buf);
8709  			feature->dump_buf = NULL;
8710  		}
8711  	}
8712  }
8713