xref: /openbmc/linux/drivers/net/ethernet/qlogic/qed/qed_int.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1  // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2  /* QLogic qed NIC Driver
3   * Copyright (c) 2015-2017  QLogic Corporation
4   * Copyright (c) 2019-2020 Marvell International Ltd.
5   */
6  
7  #include <linux/types.h>
8  #include <asm/byteorder.h>
9  #include <linux/io.h>
10  #include <linux/bitops.h>
11  #include <linux/delay.h>
12  #include <linux/dma-mapping.h>
13  #include <linux/errno.h>
14  #include <linux/interrupt.h>
15  #include <linux/kernel.h>
16  #include <linux/pci.h>
17  #include <linux/slab.h>
18  #include <linux/string.h>
19  #include "qed.h"
20  #include "qed_hsi.h"
21  #include "qed_hw.h"
22  #include "qed_init_ops.h"
23  #include "qed_int.h"
24  #include "qed_mcp.h"
25  #include "qed_reg_addr.h"
26  #include "qed_sp.h"
27  #include "qed_sriov.h"
28  #include "qed_vf.h"
29  
30  struct qed_pi_info {
31  	qed_int_comp_cb_t	comp_cb;
32  	void			*cookie;
33  };
34  
35  struct qed_sb_sp_info {
36  	struct qed_sb_info sb_info;
37  
38  	/* per protocol index data */
39  	struct qed_pi_info pi_info_arr[PIS_PER_SB];
40  };
41  
42  enum qed_attention_type {
43  	QED_ATTN_TYPE_ATTN,
44  	QED_ATTN_TYPE_PARITY,
45  };
46  
47  #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
48  	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
49  
50  struct aeu_invert_reg_bit {
51  	char bit_name[30];
52  
53  #define ATTENTION_PARITY                (1 << 0)
54  
55  #define ATTENTION_LENGTH_MASK           (0x00000ff0)
56  #define ATTENTION_LENGTH_SHIFT          (4)
57  #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
58  					 ATTENTION_LENGTH_SHIFT)
59  #define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
60  #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
61  #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
62  					 ATTENTION_PARITY)
63  
64  /* Multiple bits start with this offset */
65  #define ATTENTION_OFFSET_MASK           (0x000ff000)
66  #define ATTENTION_OFFSET_SHIFT          (12)
67  
68  #define ATTENTION_BB_MASK               (0x00700000)
69  #define ATTENTION_BB_SHIFT              (20)
70  #define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
71  #define ATTENTION_BB_DIFFERENT          BIT(23)
72  
73  #define ATTENTION_CLEAR_ENABLE          BIT(28)
74  	unsigned int flags;
75  
76  	/* Callback to invoke when the attention is triggered */
77  	int (*cb)(struct qed_hwfn *p_hwfn);
78  
79  	enum block_id block_index;
80  };
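/* The flags encoding, illustrated: bits [11:4] of 'flags' hold the number of
 * consecutive AEU bits an entry spans, and bit 0 marks the first of those
 * bits as a parity bit:
 *
 *   ATTENTION_SINGLE  = 1 << 4                       one interrupt bit
 *   ATTENTION_PAR     = ATTENTION_SINGLE | PARITY    one bit, a parity bit
 *   ATTENTION_PAR_INT = (2 << 4) | PARITY            parity bit + interrupt
 *
 * ATTENTION_LENGTH(flags) recovers the span, which lets the deassertion
 * logic below walk a 32-bit AEU register entry by entry.
 */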
81  
82  struct aeu_invert_reg {
83  	struct aeu_invert_reg_bit bits[32];
84  };
85  
86  #define MAX_ATTN_GRPS           (8)
87  #define NUM_ATTN_REGS           (9)
88  
89  /* Specific HW attention callbacks */
90  static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
91  {
92  	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
93  
94  	/* This might occur on certain instances; log it once, then mask it */
95  	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
96  		tmp);
97  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
98  	       0xffffffff);
99  
100  	return 0;
101  }
102  
103  #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
104  #define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
105  #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
106  #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
107  #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
108  #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
109  #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
110  #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
111  #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
112  #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
113  #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
114  #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
115  #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
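/* The _MASK/_SHIFT pairs above follow the qed GET_FIELD() convention from
 * qed.h, assumed here to expand roughly to:
 *
 *   #define GET_FIELD(value, name) (((value) >> name##_SHIFT) & name##_MASK)
 *
 * i.e. the mask is applied after the shift, so each mask describes the field
 * width at bit 0 rather than its in-register position.
 */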
116  static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
117  {
118  	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
119  			 PSWHST_REG_INCORRECT_ACCESS_VALID);
120  
121  	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
122  		u32 addr, data, length;
123  
124  		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
125  			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
126  		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
127  			      PSWHST_REG_INCORRECT_ACCESS_DATA);
128  		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
129  				PSWHST_REG_INCORRECT_ACCESS_LENGTH);
130  
131  		DP_INFO(p_hwfn->cdev,
132  			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
133  			addr, length,
134  			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
135  			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
136  			(u8) GET_FIELD(data,
137  				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
138  			(u8) GET_FIELD(data,
139  				       ATTENTION_INCORRECT_ACCESS_CLIENT),
140  			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
141  			(u8) GET_FIELD(data,
142  				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
143  			data);
144  	}
145  
146  	return 0;
147  }
148  
149  #define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
150  #define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
151  #define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
152  #define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
153  #define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
154  #define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
155  #define QED_GRC_ATTENTION_PF_MASK	(0xf)
156  #define QED_GRC_ATTENTION_PF_SHIFT	(0)
157  #define QED_GRC_ATTENTION_VF_MASK	(0xff)
158  #define QED_GRC_ATTENTION_VF_SHIFT	(4)
159  #define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
160  #define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
161  #define QED_GRC_ATTENTION_PRIV_VF	(0)
162  static const char *attn_master_to_str(u8 master)
163  {
164  	switch (master) {
165  	case 1: return "PXP";
166  	case 2: return "MCP";
167  	case 3: return "MSDM";
168  	case 4: return "PSDM";
169  	case 5: return "YSDM";
170  	case 6: return "USDM";
171  	case 7: return "TSDM";
172  	case 8: return "XSDM";
173  	case 9: return "DBU";
174  	case 10: return "DMAE";
175  	default:
176  		return "Unknown";
177  	}
178  }
179  
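/* GRC timeout handling: DATA_0 carries the offending address and master,
 * DATA_1 the PF/VF identity. The address field is presumably stored with
 * dword granularity, hence the "<< 2" below when it is printed as a byte
 * address.
 */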
180  static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
181  {
182  	u32 tmp, tmp2;
183  
184  	/* We've already cleared the timeout interrupt register, so we learn
185  	 * of interrupts via the validity register
186  	 */
187  	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
188  		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
189  	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
190  		goto out;
191  
192  	/* Read the GRC timeout information */
193  	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
194  		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
195  	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
196  		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
197  
198  	DP_INFO(p_hwfn->cdev,
199  		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
200  		tmp2, tmp,
201  		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
202  		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
203  		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
204  		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
205  		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
206  		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
207  		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
208  
209  out:
210  	/* Regardless of anything else, clean the validity bit */
211  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
212  	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
213  	return 0;
214  }
215  
216  #define PGLUE_ATTENTION_VALID			(1 << 29)
217  #define PGLUE_ATTENTION_RD_VALID		(1 << 26)
218  #define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
219  #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
220  #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
221  #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
222  #define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
223  #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
224  #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
225  #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
226  #define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
227  #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
228  #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
229  #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
230  #define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
231  #define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
232  #define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
233  
234  int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
235  				bool hw_init)
236  {
237  	char msg[256];
238  	u32 tmp;
239  
240  	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
241  	if (tmp & PGLUE_ATTENTION_VALID) {
242  		u32 addr_lo, addr_hi, details;
243  
244  		addr_lo = qed_rd(p_hwfn, p_ptt,
245  				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
246  		addr_hi = qed_rd(p_hwfn, p_ptt,
247  				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
248  		details = qed_rd(p_hwfn, p_ptt,
249  				 PGLUE_B_REG_TX_ERR_WR_DETAILS);
250  
251  		snprintf(msg, sizeof(msg),
252  			 "Illegal write by chip to [%08x:%08x] blocked.\n"
253  			 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
254  			 "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
255  			 addr_hi, addr_lo, details,
256  			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
257  			 (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
258  			 !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
259  			 tmp,
260  			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
261  			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
262  			 !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));
263  
264  		if (hw_init)
265  			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
266  		else
267  			DP_NOTICE(p_hwfn, "%s\n", msg);
268  	}
269  
270  	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
271  	if (tmp & PGLUE_ATTENTION_RD_VALID) {
272  		u32 addr_lo, addr_hi, details;
273  
274  		addr_lo = qed_rd(p_hwfn, p_ptt,
275  				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
276  		addr_hi = qed_rd(p_hwfn, p_ptt,
277  				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
278  		details = qed_rd(p_hwfn, p_ptt,
279  				 PGLUE_B_REG_TX_ERR_RD_DETAILS);
280  
281  		DP_NOTICE(p_hwfn,
282  			  "Illegal read by chip from [%08x:%08x] blocked.\n"
283  			  "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
284  			  "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
285  			  addr_hi, addr_lo, details,
286  			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
287  			  (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
288  			  GET_FIELD(details,
289  				    PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
290  			  tmp,
291  			  GET_FIELD(tmp,
292  				    PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
293  			  GET_FIELD(tmp,
294  				    PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
295  			  GET_FIELD(tmp,
296  				    PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
297  	}
298  
299  	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
300  	if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
301  		snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);
302  
303  		if (hw_init)
304  			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
305  		else
306  			DP_NOTICE(p_hwfn, "%s\n", msg);
307  	}
308  
309  	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
310  	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
311  		u32 addr_hi, addr_lo;
312  
313  		addr_lo = qed_rd(p_hwfn, p_ptt,
314  				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
315  		addr_hi = qed_rd(p_hwfn, p_ptt,
316  				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
317  
318  		DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
319  			  tmp, addr_hi, addr_lo);
320  	}
321  
322  	tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
323  	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
324  		u32 addr_hi, addr_lo, details;
325  
326  		addr_lo = qed_rd(p_hwfn, p_ptt,
327  				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
328  		addr_hi = qed_rd(p_hwfn, p_ptt,
329  				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
330  		details = qed_rd(p_hwfn, p_ptt,
331  				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
332  
333  		DP_NOTICE(p_hwfn,
334  			  "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
335  			  details, tmp, addr_hi, addr_lo);
336  	}
337  
338  	/* Clear the indications */
339  	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));
340  
341  	return 0;
342  }
343  
344  static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
345  {
346  	return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
347  }
348  
349  static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
350  {
351  	qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
352  			  "FW assertion!\n");
353  
354  	/* Clear assert indications */
355  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0);
356  
357  	return -EINVAL;
358  }
359  
360  static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
361  {
362  	DP_INFO(p_hwfn, "General attention 35!\n");
363  
364  	return 0;
365  }
366  
367  #define QED_DORQ_ATTENTION_REASON_MASK  (0xfffff)
368  #define QED_DORQ_ATTENTION_OPAQUE_MASK  (0xffff)
369  #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
370  #define QED_DORQ_ATTENTION_SIZE_MASK            (0x7f)
371  #define QED_DORQ_ATTENTION_SIZE_SHIFT           (16)
372  
373  #define QED_DB_REC_COUNT                        1000
374  #define QED_DB_REC_INTERVAL                     100
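/* Worst-case wait in qed_db_rec_flush_queue() below:
 * QED_DB_REC_COUNT * QED_DB_REC_INTERVAL = 1000 * 100us = 100ms of polling
 * before giving up on the doorbell queue.
 */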
375  
376  static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
377  				  struct qed_ptt *p_ptt)
378  {
379  	u32 count = QED_DB_REC_COUNT;
380  	u32 usage = 1;
381  
382  	/* Flush any pending (e)dpms as they may never arrive */
383  	qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
384  
385  	/* Wait for usage to zero or count to run out. This is necessary since
386  	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
387  	 * can "split" over the PCI bus. Possibly, the doorbell drop can happen
388  	 * with half an EDPM in the queue and the other half dropped. Another
389  	 * EDPM doorbell to the same address (from the doorbell recovery
390  	 * mechanism or from the doorbelling entity) could have its first half
391  	 * dropped and its second half interpreted as a continuation of the
392  	 * first. To prevent such malformed doorbells from reaching the device,
393  	 * flush the queue before releasing the overflow sticky indication.
394  	 */
395  	while (count-- && usage) {
396  		usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
397  		udelay(QED_DB_REC_INTERVAL);
398  	}
399  
400  	/* should have been depleted by now */
401  	if (usage) {
402  		DP_NOTICE(p_hwfn->cdev,
403  			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
404  			  QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
405  		return -EBUSY;
406  	}
407  
408  	return 0;
409  }
410  
411  int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
412  {
413  	u32 attn_ovfl, cur_ovfl;
414  	int rc;
415  
416  	attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
417  				       &p_hwfn->db_recovery_info.overflow);
418  	cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
419  	if (!cur_ovfl && !attn_ovfl)
420  		return 0;
421  
422  	DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
423  		  attn_ovfl, cur_ovfl);
424  
425  	if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
426  		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
427  		if (rc)
428  			return rc;
429  	}
430  
431  	/* Release overflow sticky indication (stop silently dropping everything) */
432  	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
433  
434  	/* Repeat all last doorbells (doorbell drop recovery) */
435  	qed_db_recovery_execute(p_hwfn);
436  
437  	return 0;
438  }
439  
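/* Doorbell recovery flow (a sketch based on the code above and below):
 * qed_dorq_attn_overflow() latches QED_OVERFLOW_BIT and schedules the
 * periodic handler via qed_periodic_db_rec_start(); qed_db_rec_handler()
 * then flushes any half-sent EDPMs, releases the PF overflow sticky
 * indication and replays all registered doorbells through
 * qed_db_recovery_execute().
 */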
440  static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
441  {
442  	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
443  	u32 overflow;
444  	int rc;
445  
446  	overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
447  	if (!overflow)
448  		goto out;
449  
450  	/* Run PF doorbell recovery in next periodic handler */
451  	set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
452  
453  	if (!p_hwfn->db_bar_no_edpm) {
454  		rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
455  		if (rc)
456  			goto out;
457  	}
458  
459  	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
460  out:
461  	/* Schedule the handler even if overflow was not detected */
462  	qed_periodic_db_rec_start(p_hwfn);
463  }
464  
465  static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
466  {
467  	u32 int_sts, first_drop_reason, details, address, all_drops_reason;
468  	struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
469  
470  	int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
471  	if (int_sts == 0xdeadbeaf) {
472  		DP_NOTICE(p_hwfn->cdev,
473  			  "DORQ is being reset, skipping int_sts handler\n");
474  
475  		return 0;
476  	}
477  
478  	/* int_sts may be zero since all PFs were interrupted for doorbell
479  	 * overflow but another one already handled it, so we can abort here.
480  	 * If this PF also requires overflow recovery, we will be interrupted
481  	 * again. The masked almost-full indication may also be set; ignore it.
482  	 */
483  	if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
484  		return 0;
485  
486  	DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
487  
488  	/* check if db_drop or overflow happened */
489  	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
490  		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
491  		/* Obtain data about db drop/overflow */
492  		first_drop_reason = qed_rd(p_hwfn, p_ptt,
493  					   DORQ_REG_DB_DROP_REASON) &
494  		    QED_DORQ_ATTENTION_REASON_MASK;
495  		details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
496  		address = qed_rd(p_hwfn, p_ptt,
497  				 DORQ_REG_DB_DROP_DETAILS_ADDRESS);
498  		all_drops_reason = qed_rd(p_hwfn, p_ptt,
499  					  DORQ_REG_DB_DROP_DETAILS_REASON);
500  
501  		/* Log info */
502  		DP_NOTICE(p_hwfn->cdev,
503  			  "Doorbell drop occurred\n"
504  			  "Address\t\t0x%08x\t(second BAR address)\n"
505  			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
506  			  "Size\t\t0x%04x\t\t(in bytes)\n"
507  			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
508  			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
509  			  address,
510  			  GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
511  			  GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
512  			  first_drop_reason, all_drops_reason);
513  
514  		/* Clear the doorbell drop details and prepare for next drop */
515  		qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
516  
517  		/* Mark interrupt as handled (note: we mark it handled even if the
518  		 * drop was due to a reason other than overflow)
519  		 */
520  		qed_wr(p_hwfn,
521  		       p_ptt,
522  		       DORQ_REG_INT_STS_WR,
523  		       DORQ_REG_INT_STS_DB_DROP |
524  		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
525  
526  		/* If there are no indications other than drop indications, success */
527  		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
528  				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
529  				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
530  			return 0;
531  	}
532  
533  	/* Some other indication was present - non-recoverable */
534  	DP_INFO(p_hwfn, "DORQ fatal attention\n");
535  
536  	return -EINVAL;
537  }
538  
539  static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
540  {
541  	if (p_hwfn->cdev->recov_in_prog)
542  		return 0;
543  
544  	p_hwfn->db_recovery_info.dorq_attn = true;
545  	qed_dorq_attn_overflow(p_hwfn);
546  
547  	return qed_dorq_attn_int_sts(p_hwfn);
548  }
549  
550  static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
551  {
552  	if (p_hwfn->db_recovery_info.dorq_attn)
553  		goto out;
554  
555  	/* Call DORQ callback if the attention was missed */
556  	qed_dorq_attn_cb(p_hwfn);
557  out:
558  	p_hwfn->db_recovery_info.dorq_attn = false;
559  }
560  
561  /* Instead of major changes to the data-structure, we have some 'special'
562   * identifiers for sources that changed meaning between adapters.
563   */
564  enum aeu_invert_reg_special_type {
565  	AEU_INVERT_REG_SPECIAL_CNIG_0,
566  	AEU_INVERT_REG_SPECIAL_CNIG_1,
567  	AEU_INVERT_REG_SPECIAL_CNIG_2,
568  	AEU_INVERT_REG_SPECIAL_CNIG_3,
569  	AEU_INVERT_REG_SPECIAL_MAX,
570  };
571  
572  static struct aeu_invert_reg_bit
573  aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
574  	{"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
575  	{"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
576  	{"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
577  	{"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
578  };
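/* On BB adapters, the AEU bits flagged ATTENTION_BB_DIFFERENT in aeu_descs
 * below are remapped to the four entries above by qed_int_aeu_translate().
 */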
579  
580  /* Note that aeu_invert_reg must be defined in the same bit order as the HW */
581  static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
582  	{
583  		{       /* After Invert 1 */
584  			{"GPIO0 function%d",
585  			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
586  		}
587  	},
588  
589  	{
590  		{       /* After Invert 2 */
591  			{"PGLUE config_space", ATTENTION_SINGLE,
592  			 NULL, MAX_BLOCK_ID},
593  			{"PGLUE misc_flr", ATTENTION_SINGLE,
594  			 NULL, MAX_BLOCK_ID},
595  			{"PGLUE B RBC", ATTENTION_PAR_INT,
596  			 qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
597  			{"PGLUE misc_mctp", ATTENTION_SINGLE,
598  			 NULL, MAX_BLOCK_ID},
599  			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
600  			{"SMB event", ATTENTION_SINGLE,	NULL, MAX_BLOCK_ID},
601  			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
602  			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
603  					  (1 << ATTENTION_OFFSET_SHIFT),
604  			 NULL, MAX_BLOCK_ID},
605  			{"PCIE glue/PXP VPD %d",
606  			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
607  		}
608  	},
609  
610  	{
611  		{       /* After Invert 3 */
612  			{"General Attention %d",
613  			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
614  		}
615  	},
616  
617  	{
618  		{       /* After Invert 4 */
619  			{"General Attention 32", ATTENTION_SINGLE |
620  			 ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
621  			 MAX_BLOCK_ID},
622  			{"General Attention %d",
623  			 (2 << ATTENTION_LENGTH_SHIFT) |
624  			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
625  			{"General Attention 35", ATTENTION_SINGLE |
626  			 ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
627  			 MAX_BLOCK_ID},
628  			{"NWS Parity",
629  			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
630  			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
631  			 NULL, BLOCK_NWS},
632  			{"NWS Interrupt",
633  			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
634  			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
635  			 NULL, BLOCK_NWS},
636  			{"NWM Parity",
637  			 ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
638  			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
639  			 NULL, BLOCK_NWM},
640  			{"NWM Interrupt",
641  			 ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
642  			 ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
643  			 NULL, BLOCK_NWM},
644  			{"MCP CPU", ATTENTION_SINGLE,
645  			 qed_mcp_attn_cb, MAX_BLOCK_ID},
646  			{"MCP Watchdog timer", ATTENTION_SINGLE,
647  			 NULL, MAX_BLOCK_ID},
648  			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
649  			{"AVS stop status ready", ATTENTION_SINGLE,
650  			 NULL, MAX_BLOCK_ID},
651  			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
652  			{"MSTAT per-path", ATTENTION_PAR_INT,
653  			 NULL, MAX_BLOCK_ID},
654  			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
655  			 NULL, MAX_BLOCK_ID},
656  			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
657  			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
658  			{"BTB",	ATTENTION_PAR_INT, NULL, BLOCK_BTB},
659  			{"BRB",	ATTENTION_PAR_INT, NULL, BLOCK_BRB},
660  			{"PRS",	ATTENTION_PAR_INT, NULL, BLOCK_PRS},
661  		}
662  	},
663  
664  	{
665  		{       /* After Invert 5 */
666  			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
667  			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
668  			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
669  			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
670  			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
671  			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
672  			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
673  			{"MCM",  ATTENTION_PAR_INT, NULL, BLOCK_MCM},
674  			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
675  			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
676  			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
677  			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
678  			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
679  			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
680  			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
681  			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
682  		}
683  	},
684  
685  	{
686  		{       /* After Invert 6 */
687  			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
688  			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
689  			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
690  			{"XCM",	ATTENTION_PAR_INT, NULL, BLOCK_XCM},
691  			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
692  			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
693  			{"YCM",	ATTENTION_PAR_INT, NULL, BLOCK_YCM},
694  			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
695  			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
696  			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
697  			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
698  			{"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
699  			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
700  			{"DORQ", ATTENTION_PAR_INT,
701  			 qed_dorq_attn_cb, BLOCK_DORQ},
702  			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
703  			{"IPC",	ATTENTION_PAR_INT, NULL, BLOCK_IPC},
704  		}
705  	},
706  
707  	{
708  		{       /* After Invert 7 */
709  			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
710  			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
711  			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
712  			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
713  			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
714  			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
715  			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
716  			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
717  			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
718  			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
719  			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
720  			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
721  			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
722  			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
723  			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
724  			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
725  			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
726  		}
727  	},
728  
729  	{
730  		{       /* After Invert 8 */
731  			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
732  			 NULL, BLOCK_PSWRQ2},
733  			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
734  			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
735  			 NULL, BLOCK_PSWWR2},
736  			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
737  			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
738  			 NULL, BLOCK_PSWRD2},
739  			{"PSWHST", ATTENTION_PAR_INT,
740  			 qed_pswhst_attn_cb, BLOCK_PSWHST},
741  			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
742  			 NULL, BLOCK_PSWHST2},
743  			{"GRC",	ATTENTION_PAR_INT,
744  			 qed_grc_attn_cb, BLOCK_GRC},
745  			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
746  			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
747  			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
748  			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
749  			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
750  			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
751  			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
752  			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
753  			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
754  			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
755  			 NULL, BLOCK_PGLCS},
756  			{"PERST_B assertion", ATTENTION_SINGLE,
757  			 NULL, MAX_BLOCK_ID},
758  			{"PERST_B deassertion", ATTENTION_SINGLE,
759  			 NULL, MAX_BLOCK_ID},
760  			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
761  			 NULL, MAX_BLOCK_ID},
762  		}
763  	},
764  
765  	{
766  		{       /* After Invert 9 */
767  			{"MCP Latched memory", ATTENTION_PAR,
768  			 NULL, MAX_BLOCK_ID},
769  			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
770  			 NULL, MAX_BLOCK_ID},
771  			{"MCP Latched ump_tx", ATTENTION_PAR,
772  			 NULL, MAX_BLOCK_ID},
773  			{"MCP Latched scratchpad", ATTENTION_PAR,
774  			 NULL, MAX_BLOCK_ID},
775  			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
776  			 NULL, MAX_BLOCK_ID},
777  		}
778  	},
779  };
780  
781  static struct aeu_invert_reg_bit *
782  qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
783  		      struct aeu_invert_reg_bit *p_bit)
784  {
785  	if (!QED_IS_BB(p_hwfn->cdev))
786  		return p_bit;
787  
788  	if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
789  		return p_bit;
790  
791  	return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
792  				  ATTENTION_BB_SHIFT];
793  }
794  
795  static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
796  				   struct aeu_invert_reg_bit *p_bit)
797  {
798  	return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
799  		   ATTENTION_PARITY);
800  }
801  
802  #define ATTN_STATE_BITS         (0xfff)
803  #define ATTN_BITS_MASKABLE      (0x3ff)
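/* Of the 12 attention state bits (ATTN_STATE_BITS), only the lower 10 can be
 * masked in the IGU (ATTN_BITS_MASKABLE). Bit 8 (0x100) is the MFW/MCP
 * attention, tested explicitly in qed_int_assertion() and
 * qed_int_attentions() below.
 */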
804  struct qed_sb_attn_info {
805  	/* Virtual & Physical address of the SB */
806  	struct atten_status_block       *sb_attn;
807  	dma_addr_t			sb_phys;
808  
809  	/* Last seen running index */
810  	u16				index;
811  
812  	/* A mask of the AEU bits resulting in a parity error */
813  	u32				parity_mask[NUM_ATTN_REGS];
814  
815  	/* A pointer to the attention description structure */
816  	struct aeu_invert_reg		*p_aeu_desc;
817  
818  	/* Previously asserted attentions, which are still unasserted */
819  	u16				known_attn;
820  
821  	/* Cleanup address for the link's general hw attention */
822  	u32				mfw_attn_addr;
823  };
824  
825  static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
826  				      struct qed_sb_attn_info *p_sb_desc)
827  {
828  	u16 rc = 0, index;
829  
830  	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
831  	if (p_sb_desc->index != index) {
832  		p_sb_desc->index	= index;
833  		rc		      = QED_SB_ATT_IDX;
834  	}
835  
836  	return rc;
837  }
838  
839  /**
840   * qed_int_assertion() - Handle asserted attention bits.
841   *
842   * @p_hwfn: HW device data.
843   * @asserted_bits: Newly asserted bits.
844   *
845   * Return: Zero value.
846   */
847  static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
848  {
849  	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
850  	u32 igu_mask;
851  
852  	/* Mask the source of the attention in the IGU */
853  	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
854  	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
855  		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
856  	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
857  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
858  
859  	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
860  		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
861  		   sb_attn_sw->known_attn,
862  		   sb_attn_sw->known_attn | asserted_bits);
863  	sb_attn_sw->known_attn |= asserted_bits;
864  
865  	/* Handle MCP events */
866  	if (asserted_bits & 0x100) {
867  		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
868  		/* Clean the MCP attention */
869  		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
870  		       sb_attn_sw->mfw_attn_addr, 0);
871  	}
872  
873  	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
874  		      GTT_BAR0_MAP_REG_IGU_CMD +
875  		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
876  			IGU_CMD_INT_ACK_BASE) << 3),
877  		      (u32)asserted_bits);
878  
879  	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
880  		   asserted_bits);
881  
882  	return 0;
883  }
884  
885  static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
886  			       enum block_id id,
887  			       enum dbg_attn_type type, bool b_clear)
888  {
889  	struct dbg_attn_block_result attn_results;
890  	enum dbg_status status;
891  
892  	memset(&attn_results, 0, sizeof(attn_results));
893  
894  	status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
895  				   b_clear, &attn_results);
896  	if (status != DBG_STATUS_OK)
897  		DP_NOTICE(p_hwfn,
898  			  "Failed to parse attention information [status: %s]\n",
899  			  qed_dbg_get_status_str(status));
900  	else
901  		qed_dbg_parse_attn(p_hwfn, &attn_results);
902  }
903  
904  /**
905   * qed_int_deassertion_aeu_bit() - Handles the effects of a single
906   * cause of the attention.
907   *
908   * @p_hwfn: HW device data.
909   * @p_aeu: Descriptor of an AEU bit which caused the attention.
910   * @aeu_en_reg: Register offset of the AEU enable reg. which configured
911   *              this bit to this group.
912   * @p_bit_name: AEU bit description for logging purposes.
913   * @bitmask: Mask of this source's asserted bits within aeu_en_reg.
914   *
915   * Return: Zero on success, negative errno otherwise.
916   */
917  static int
918  qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
919  			    struct aeu_invert_reg_bit *p_aeu,
920  			    u32 aeu_en_reg,
921  			    const char *p_bit_name, u32 bitmask)
922  {
923  	bool b_fatal = false;
924  	int rc = -EINVAL;
925  	u32 val;
926  
927  	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
928  		p_bit_name, bitmask);
929  
930  	/* Call callback before clearing the interrupt status */
931  	if (p_aeu->cb) {
932  		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
933  			p_bit_name);
934  		rc = p_aeu->cb(p_hwfn);
935  	}
936  
937  	if (rc)
938  		b_fatal = true;
939  
940  	/* Print HW block interrupt registers */
941  	if (p_aeu->block_index != MAX_BLOCK_ID)
942  		qed_int_attn_print(p_hwfn, p_aeu->block_index,
943  				   ATTN_TYPE_INTERRUPT, !b_fatal);
944  
945  	/* Report the HW error if the attention is fatal */
946  	if (b_fatal)
947  		qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
948  				  "`%s': Fatal attention\n",
949  				  p_bit_name);
950  	else /* If the attention is benign, no need to prevent it */
951  		goto out;
952  
953  	/* Prevent this Attention from being asserted in the future */
954  	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
955  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
956  	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
957  		p_bit_name);
958  
959  	/* Re-enable FW assertion (Gen 32) interrupts */
960  	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
961  		     MISC_REG_AEU_ENABLE4_IGU_OUT_0);
962  	val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32;
963  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
964  	       MISC_REG_AEU_ENABLE4_IGU_OUT_0, val);
965  
966  out:
967  	return rc;
968  }
969  
970  /**
971   * qed_int_deassertion_parity() - Handle a single parity AEU source.
972   *
973   * @p_hwfn: HW device data.
974   * @p_aeu: Descriptor of an AEU bit which caused the parity.
975   * @aeu_en_reg: Address of the AEU enable register.
976   * @bit_index: Index (0-31) of an AEU bit.
977   */
978  static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
979  				       struct aeu_invert_reg_bit *p_aeu,
980  				       u32 aeu_en_reg, u8 bit_index)
981  {
982  	u32 block_id = p_aeu->block_index, mask, val;
983  
984  	DP_NOTICE(p_hwfn->cdev,
985  		  "%s parity attention is set [address 0x%08x, bit %d]\n",
986  		  p_aeu->bit_name, aeu_en_reg, bit_index);
987  
988  	if (block_id != MAX_BLOCK_ID) {
989  		qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
990  
991  		/* In BB, there's a single parity bit for several blocks */
992  		if (block_id == BLOCK_BTB) {
993  			qed_int_attn_print(p_hwfn, BLOCK_OPTE,
994  					   ATTN_TYPE_PARITY, false);
995  			qed_int_attn_print(p_hwfn, BLOCK_MCP,
996  					   ATTN_TYPE_PARITY, false);
997  		}
998  	}
999  
1000  	/* Prevent this parity error from being re-asserted */
1001  	mask = ~BIT(bit_index);
1002  	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
1003  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
1004  	DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
1005  		p_aeu->bit_name);
1006  }
1007  
1008  /**
1009   * qed_int_deassertion() - Handle deassertion of previously asserted
1010   * attentions.
1011   *
1012   * @p_hwfn: HW device data.
1013   * @deasserted_bits: newly deasserted bits.
1014   *
1015   * Return: Zero value.
1016   */
1017  static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
1018  			       u16 deasserted_bits)
1019  {
1020  	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
1021  	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
1022  	u8 i, j, k, bit_idx;
1023  	int rc = 0;
1024  
1025  	/* Read the attention registers in the AEU */
1026  	for (i = 0; i < NUM_ATTN_REGS; i++) {
1027  		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1028  					MISC_REG_AEU_AFTER_INVERT_1_IGU +
1029  					i * 0x4);
1030  		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1031  			   "Deasserted bits [%d]: %08x\n",
1032  			   i, aeu_inv_arr[i]);
1033  	}
1034  
1035  	/* Find parity attentions first */
1036  	for (i = 0; i < NUM_ATTN_REGS; i++) {
1037  		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
1038  		u32 parities;
1039  
1040  		aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
1041  		en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1042  
1043  		/* Skip register in which no parity bit is currently set */
1044  		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1045  		if (!parities)
1046  			continue;
1047  
1048  		for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
1049  			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1050  
1051  			if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
1052  			    !!(parities & BIT(bit_idx)))
1053  				qed_int_deassertion_parity(p_hwfn, p_bit,
1054  							   aeu_en, bit_idx);
1055  
1056  			bit_idx += ATTENTION_LENGTH(p_bit->flags);
1057  		}
1058  	}
1059  
1060  	/* Find non-parity cause for attention and act */
1061  	for (k = 0; k < MAX_ATTN_GRPS; k++) {
1062  		struct aeu_invert_reg_bit *p_aeu;
1063  
1064  		/* Handle only groups whose attention is currently deasserted */
1065  		if (!(deasserted_bits & (1 << k)))
1066  			continue;
1067  
1068  		for (i = 0; i < NUM_ATTN_REGS; i++) {
1069  			u32 bits;
1070  
1071  			aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1072  				 i * sizeof(u32) +
1073  				 k * sizeof(u32) * NUM_ATTN_REGS;
1074  
1075  			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1076  			bits = aeu_inv_arr[i] & en;
1077  
1078  			/* Skip if no bit from this group is currently set */
1079  			if (!bits)
1080  				continue;
1081  
1082  			/* Find all set bits from current register which belong
1083  			 * to current group, making them responsible for the
1084  			 * previous assertion.
1085  			 */
1086  			for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
1087  				unsigned long bitmask;
1088  				u8 bit, bit_len;
1089  
1090  				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1091  				p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
1092  
1093  				bit = bit_idx;
1094  				bit_len = ATTENTION_LENGTH(p_aeu->flags);
1095  				if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
1096  					/* Skip Parity */
1097  					bit++;
1098  					bit_len--;
1099  				}
1100  
1101  				bitmask = bits & (((1 << bit_len) - 1) << bit);
1102  				bitmask >>= bit;
1103  
1104  				if (bitmask) {
1105  					u32 flags = p_aeu->flags;
1106  					char bit_name[30];
1107  					u8 num;
1108  
1109  					num = (u8)find_first_bit(&bitmask,
1110  								 bit_len);
1111  
1112  					/* Some bits represent more than a
1113  					 * single interrupt. Correctly print
1114  					 * their name.
1115  					 */
1116  					if (ATTENTION_LENGTH(flags) > 2 ||
1117  					    ((flags & ATTENTION_PAR_INT) &&
1118  					     ATTENTION_LENGTH(flags) > 1))
1119  						snprintf(bit_name, 30,
1120  							 p_aeu->bit_name, num);
1121  					else
1122  						strscpy(bit_name,
1123  							p_aeu->bit_name, 30);
1124  
1125  					/* We now need to pass bitmask in its
1126  					 * correct position.
1127  					 */
1128  					bitmask <<= bit;
1129  
1130  					/* Handle source of the attention */
1131  					qed_int_deassertion_aeu_bit(p_hwfn,
1132  								    p_aeu,
1133  								    aeu_en,
1134  								    bit_name,
1135  								    bitmask);
1136  				}
1137  
1138  				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1139  			}
1140  		}
1141  	}
1142  
1143  	/* Handle missed DORQ attention */
1144  	qed_dorq_attn_handler(p_hwfn);
1145  
1146  	/* Clear IGU indication for the deasserted bits */
1147  	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
1148  				    GTT_BAR0_MAP_REG_IGU_CMD +
1149  				    ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1150  				      IGU_CMD_INT_ACK_BASE) << 3),
1151  				    ~((u32)deasserted_bits));
1152  
1153  	/* Unmask deasserted attentions in IGU */
1154  	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
1155  	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1156  	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1157  
1158  	/* Clear deassertion from inner state */
1159  	sb_attn_sw->known_attn &= ~deasserted_bits;
1160  
1161  	return rc;
1162  }
1163  
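/* Note on the traversal above: j indexes descriptor entries while bit_idx
 * tracks the actual AEU bit position, advancing by ATTENTION_LENGTH() per
 * entry. A multi-bit entry such as "General Attention %d" (length 32)
 * consumes a whole register, and for *_PAR_INT entries the leading parity
 * bit is skipped before interrupt bits are matched.
 */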
1164  static int qed_int_attentions(struct qed_hwfn *p_hwfn)
1165  {
1166  	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
1167  	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
1168  	u32 attn_bits = 0, attn_acks = 0;
1169  	u16 asserted_bits, deasserted_bits;
1170  	__le16 index;
1171  	int rc = 0;
1172  
1173  	/* Read current attention bits/acks - safeguard against attentions
1174  	 * by guaranteeing work on a synchronized timeframe
1175  	 */
1176  	do {
1177  		index = p_sb_attn->sb_index;
1178  		/* finish reading index before the loop condition */
1179  		dma_rmb();
1180  		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
1181  		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
1182  	} while (index != p_sb_attn->sb_index);
1183  	p_sb_attn->sb_index = index;
1184  
1185  	/* Assertion / deassertion are meaningful (and in correct state)
1186  	 * only when they differ and are consistent with the known state -
1187  	 * deassertion when there was a previous attention & a current ack,
1188  	 * and assertion when there is a current attention with no previous one
1189  	 */
1190  	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1191  		~p_sb_attn_sw->known_attn;
1192  	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1193  		p_sb_attn_sw->known_attn;
1194  
1195  	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
1196  		DP_INFO(p_hwfn,
1197  			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1198  			index, attn_bits, attn_acks, asserted_bits,
1199  			deasserted_bits, p_sb_attn_sw->known_attn);
1200  	} else if (asserted_bits == 0x100) {
1201  		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1202  			   "MFW indication via attention\n");
1203  	} else {
1204  		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1205  			   "MFW indication [deassertion]\n");
1206  	}
1207  
1208  	if (asserted_bits) {
1209  		rc = qed_int_assertion(p_hwfn, asserted_bits);
1210  		if (rc)
1211  			return rc;
1212  	}
1213  
1214  	if (deasserted_bits)
1215  		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
1216  
1217  	return rc;
1218  }
1219  
1220  static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
1221  			    void __iomem *igu_addr, u32 ack_cons)
1222  {
1223  	u32 igu_ack;
1224  
1225  	igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1226  		   (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1227  		   (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1228  		   (IGU_SEG_ACCESS_ATTN <<
1229  		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1230  
1231  	DIRECT_REG_WR(igu_addr, igu_ack);
1232  
1233  	/* Both segments (interrupts & acks) are written to the same address;
1234  	 * Need to guarantee all commands will be received (in-order) by HW.
1235  	 */
1236  	barrier();
1237  }
1238  
1239  void qed_int_sp_dpc(struct tasklet_struct *t)
1240  {
1241  	struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
1242  	struct qed_pi_info *pi_info = NULL;
1243  	struct qed_sb_attn_info *sb_attn;
1244  	struct qed_sb_info *sb_info;
1245  	int arr_size;
1246  	u16 rc = 0;
1247  
1248  	if (!p_hwfn->p_sp_sb) {
1249  		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
1250  		return;
1251  	}
1252  
1253  	sb_info = &p_hwfn->p_sp_sb->sb_info;
1254  	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1255  	if (!sb_info) {
1256  		DP_ERR(p_hwfn->cdev,
1257  		       "Status block is NULL - cannot ack interrupts\n");
1258  		return;
1259  	}
1260  
1261  	if (!p_hwfn->p_sb_attn) {
1262  		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
1263  		return;
1264  	}
1265  	sb_attn = p_hwfn->p_sb_attn;
1266  
1267  	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1268  		   p_hwfn, p_hwfn->my_id);
1269  
1270  	/* Disable ack for the default status block. Required both for MSI-X
1271  	 * and INTa in non-mask mode; for INTa it does no harm.
1272  	 */
1273  	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1274  
1275  	/* Gather Interrupts/Attentions information */
1276  	if (!sb_info->sb_virt) {
1277  		DP_ERR(p_hwfn->cdev,
1278  		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1279  	} else {
1280  		u32 tmp_index = sb_info->sb_ack;
1281  
1282  		rc = qed_sb_update_sb_idx(sb_info);
1283  		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1284  			   "Interrupt indices: 0x%08x --> 0x%08x\n",
1285  			   tmp_index, sb_info->sb_ack);
1286  	}
1287  
1288  	if (!sb_attn || !sb_attn->sb_attn) {
1289  		DP_ERR(p_hwfn->cdev,
1290  		       "Attentions Status block is NULL - cannot check for new attentions!\n");
1291  	} else {
1292  		u16 tmp_index = sb_attn->index;
1293  
1294  		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
1295  		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1296  			   "Attention indices: 0x%08x --> 0x%08x\n",
1297  			   tmp_index, sb_attn->index);
1298  	}
1299  
1300  	/* Check if we expect interrupts at this time; if not, just ack them */
1301  	if (!(rc & QED_SB_EVENT_MASK)) {
1302  		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1303  		return;
1304  	}
1305  
1306  	/* Check the validity of the DPC PTT; if invalid, ack interrupts and return */
1307  	if (!p_hwfn->p_dpc_ptt) {
1308  		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
1309  		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1310  		return;
1311  	}
1312  
1313  	if (rc & QED_SB_ATT_IDX)
1314  		qed_int_attentions(p_hwfn);
1315  
1316  	if (rc & QED_SB_IDX) {
1317  		int pi;
1318  
1319  		/* Invoke the completion callback of every protocol index */
1320  		for (pi = 0; pi < arr_size; pi++) {
1321  			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1322  			if (pi_info->comp_cb)
1323  				pi_info->comp_cb(p_hwfn, pi_info->cookie);
1324  		}
1325  	}
1326  
1327  	if (sb_attn && (rc & QED_SB_ATT_IDX))
1328  		/* This should be done before the interrupts are enabled,
1329  		 * since otherwise a new attention will be generated.
1330  		 */
1331  		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1332  
1333  	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1334  }
1335  
1336  static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
1337  {
1338  	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1339  
1340  	if (!p_sb)
1341  		return;
1342  
1343  	if (p_sb->sb_attn)
1344  		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1345  				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
1346  				  p_sb->sb_attn, p_sb->sb_phys);
1347  	kfree(p_sb);
1348  	p_hwfn->p_sb_attn = NULL;
1349  }
1350  
1351  static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
1352  				  struct qed_ptt *p_ptt)
1353  {
1354  	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1355  
1356  	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1357  
1358  	sb_info->index = 0;
1359  	sb_info->known_attn = 0;
1360  
1361  	/* Configure Attention Status Block in IGU */
1362  	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1363  	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
1364  	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1365  	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
1366  }
1367  
1368  static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
1369  				 struct qed_ptt *p_ptt,
1370  				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1371  {
1372  	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1373  	int i, j, k;
1374  
1375  	sb_info->sb_attn = sb_virt_addr;
1376  	sb_info->sb_phys = sb_phy_addr;
1377  
1378  	/* Set the pointer to the AEU descriptors */
1379  	sb_info->p_aeu_desc = aeu_descs;
1380  
1381  	/* Calculate Parity Masks */
1382  	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1383  	for (i = 0; i < NUM_ATTN_REGS; i++) {
1384  		/* j is array index, k is bit index */
1385  		for (j = 0, k = 0; k < 32 && j < 32; j++) {
1386  			struct aeu_invert_reg_bit *p_aeu;
1387  
1388  			p_aeu = &aeu_descs[i].bits[j];
1389  			if (qed_int_is_parity_flag(p_hwfn, p_aeu))
1390  				sb_info->parity_mask[i] |= 1 << k;
1391  
1392  			k += ATTENTION_LENGTH(p_aeu->flags);
1393  		}
1394  		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1395  			   "Attn Mask [Reg %d]: 0x%08x\n",
1396  			   i, sb_info->parity_mask[i]);
1397  	}
1398  
1399  	/* Set the address of cleanup for the mcp attention */
1400  	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1401  				 MISC_REG_AEU_GENERAL_ATTN_0;
1402  
1403  	qed_int_sb_attn_setup(p_hwfn, p_ptt);
1404  }
1405  
1406  static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
1407  				 struct qed_ptt *p_ptt)
1408  {
1409  	struct qed_dev *cdev = p_hwfn->cdev;
1410  	struct qed_sb_attn_info *p_sb;
1411  	dma_addr_t p_phys = 0;
1412  	void *p_virt;
1413  
1414  	/* SB struct */
1415  	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1416  	if (!p_sb)
1417  		return -ENOMEM;
1418  
1419  	/* SB ring  */
1420  	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1421  				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
1422  				    &p_phys, GFP_KERNEL);
1423  
1424  	if (!p_virt) {
1425  		kfree(p_sb);
1426  		return -ENOMEM;
1427  	}
1428  
1429  	/* Attention setup */
1430  	p_hwfn->p_sb_attn = p_sb;
1431  	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1432  
1433  	return 0;
1434  }
1435  
1436  /* coalescing timeout = timeset << (timer_res + 1) */
1437  #define QED_CAU_DEF_RX_USECS 24
1438  #define QED_CAU_DEF_TX_USECS 48
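/* A worked example of the resolution selection used below: with
 * rx_coalesce_usecs = 200, 0x7F < 200 <= 0xFF gives timer_res = 1, and
 * qed_int_cau_conf_sb() later derives timeset = 200 >> 1 = 100, keeping
 * timeset within its 7-bit field.
 */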
1439  
1440  void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
1441  			   struct cau_sb_entry *p_sb_entry,
1442  			   u8 pf_id, u16 vf_number, u8 vf_valid)
1443  {
1444  	struct qed_dev *cdev = p_hwfn->cdev;
1445  	u32 cau_state, params = 0, data = 0;
1446  	u8 timer_res;
1447  
1448  	memset(p_sb_entry, 0, sizeof(*p_sb_entry));
1449  
1450  	SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1451  	SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1452  	SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1453  	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1454  	SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1455  
1456  	cau_state = CAU_HC_DISABLE_STATE;
1457  
1458  	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1459  		cau_state = CAU_HC_ENABLE_STATE;
1460  		if (!cdev->rx_coalesce_usecs)
1461  			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
1462  		if (!cdev->tx_coalesce_usecs)
1463  			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
1464  	}
1465  
1466  	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1467  	if (cdev->rx_coalesce_usecs <= 0x7F)
1468  		timer_res = 0;
1469  	else if (cdev->rx_coalesce_usecs <= 0xFF)
1470  		timer_res = 1;
1471  	else
1472  		timer_res = 2;
1473  
1474  	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1475  
1476  	if (cdev->tx_coalesce_usecs <= 0x7F)
1477  		timer_res = 0;
1478  	else if (cdev->tx_coalesce_usecs <= 0xFF)
1479  		timer_res = 1;
1480  	else
1481  		timer_res = 2;
1482  
1483  	SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1484  	p_sb_entry->params = cpu_to_le32(params);
1485  
1486  	SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
1487  	SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
1488  	p_sb_entry->data = cpu_to_le32(data);
1489  }
1490  
1491  static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
1492  				struct qed_ptt *p_ptt,
1493  				u16 igu_sb_id,
1494  				u32 pi_index,
1495  				enum qed_coalescing_fsm coalescing_fsm,
1496  				u8 timeset)
1497  {
1498  	u32 sb_offset, pi_offset;
1499  	u32 prod = 0;
1500  
1501  	if (IS_VF(p_hwfn->cdev))
1502  		return;
1503  
1504  	SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1505  	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
1506  		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
1507  	else
1508  		SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
1509  
1510  	sb_offset = igu_sb_id * PIS_PER_SB;
1511  	pi_offset = sb_offset + pi_index;
1512  
1513  	if (p_hwfn->hw_init_done)
1514  		qed_wr(p_hwfn, p_ptt,
1515  		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
1516  	else
1517  		STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1518  			     prod);
1519  }
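/* CAU PI memory is laid out as one 32-bit entry per protocol index, grouped
 * per status block: entry = igu_sb_id * PIS_PER_SB + pi_index. Before HW init
 * completes, the value is staged in the runtime array (STORE_RT_REG) to be
 * programmed during the init flow rather than written directly.
 */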
1520  
1521  void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
1522  			 struct qed_ptt *p_ptt,
1523  			 dma_addr_t sb_phys,
1524  			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
1525  {
1526  	struct cau_sb_entry sb_entry;
1527  
1528  	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1529  			      vf_number, vf_valid);
1530  
1531  	if (p_hwfn->hw_init_done) {
1532  		/* Wide-bus, initialize via DMAE */
1533  		u64 phys_addr = (u64)sb_phys;
1534  
1535  		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
1536  				  CAU_REG_SB_ADDR_MEMORY +
1537  				  igu_sb_id * sizeof(u64), 2, NULL);
1538  		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
1539  				  CAU_REG_SB_VAR_MEMORY +
1540  				  igu_sb_id * sizeof(u64), 2, NULL);
1541  	} else {
1542  		/* Initialize Status Block Address */
1543  		STORE_RT_REG_AGG(p_hwfn,
1544  				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1545  				 igu_sb_id * 2,
1546  				 sb_phys);
1547  
1548  		STORE_RT_REG_AGG(p_hwfn,
1549  				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1550  				 igu_sb_id * 2,
1551  				 sb_entry);
1552  	}
1553  
1554  	/* Configure PI coalescing if enabled */
1555  	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1556  		u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1557  		u8 timeset, timer_res;
1558  		u8 i;
1559  
1560  		/* timeset = (coalesce >> timer-res), timeset is 7 bits wide */
1561  		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
1562  			timer_res = 0;
1563  		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
1564  			timer_res = 1;
1565  		else
1566  			timer_res = 2;
1567  		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
1568  		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1569  				    QED_COAL_RX_STATE_MACHINE, timeset);
1570  
1571  		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
1572  			timer_res = 0;
1573  		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
1574  			timer_res = 1;
1575  		else
1576  			timer_res = 2;
1577  		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
1578  		for (i = 0; i < num_tc; i++) {
1579  			qed_int_cau_conf_pi(p_hwfn, p_ptt,
1580  					    igu_sb_id, TX_PI(i),
1581  					    QED_COAL_TX_STATE_MACHINE,
1582  					    timeset);
1583  		}
1584  	}
1585  }
1586  
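/*
 * Companion sketch for the timeset computation above: once timer_res is
 * chosen, timeset is simply the microsecond value shifted right by it.
 * ex_timeset() is a hypothetical helper; values above 0x1FF usecs no
 * longer fit the 7-bit field and would be truncated by the field mask.
 */
#include <stdint.h>

static uint8_t ex_timeset(uint32_t usecs)
{
	uint8_t timer_res = (usecs <= 0x7F) ? 0 : (usecs <= 0xFF) ? 1 : 2;

	return (uint8_t)(usecs >> timer_res);
}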
1587  void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
1588  		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
1589  {
1590  	/* zero status block and ack counter */
1591  	sb_info->sb_ack = 0;
1592  	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1593  
1594  	if (IS_PF(p_hwfn->cdev))
1595  		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1596  				    sb_info->igu_sb_id, 0, 0);
1597  }
1598  
1599  struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
1600  {
1601  	struct qed_igu_block *p_block;
1602  	u16 igu_id;
1603  
1604  	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1605  	     igu_id++) {
1606  		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1607  
1608  		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1609  		    !(p_block->status & QED_IGU_STATUS_FREE))
1610  			continue;
1611  
1612  		if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
1613  			return p_block;
1614  	}
1615  
1616  	return NULL;
1617  }
1618  
1619  static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
1620  {
1621  	struct qed_igu_block *p_block;
1622  	u16 igu_id;
1623  
1624  	for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1625  	     igu_id++) {
1626  		p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1627  
1628  		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1629  		    !p_block->is_pf ||
1630  		    p_block->vector_number != vector_id)
1631  			continue;
1632  
1633  		return igu_id;
1634  	}
1635  
1636  	return QED_SB_INVALID_IDX;
1637  }
1638  
1639  u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
1640  {
1641  	u16 igu_sb_id;
1642  
1643  	/* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1644  	if (sb_id == QED_SP_SB_ID)
1645  		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1646  	else if (IS_PF(p_hwfn->cdev))
1647  		igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1648  	else
1649  		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
1650  
1651  	if (sb_id == QED_SP_SB_ID)
1652  		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1653  			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1654  	else
1655  		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1656  			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1657  
1658  	return igu_sb_id;
1659  }
1660  
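/*
 * Pure-logic sketch of the mapping policy in qed_get_igu_sb_id(): the
 * slowpath SB resolves to the default SB, while PF fastpath SB n maps
 * to the IGU block carrying vector n + 1, since vector 0 is reserved
 * for the default SB. EX_SP_SB_ID is an assumed sentinel standing in
 * for QED_SP_SB_ID.
 */
#include <stdint.h>

#define EX_SP_SB_ID	0xffff	/* assumed sentinel */

static uint16_t ex_vector_for_sb(uint16_t sb_id)
{
	return (sb_id == EX_SP_SB_ID) ? 0 : (uint16_t)(sb_id + 1);
}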
1661  int qed_int_sb_init(struct qed_hwfn *p_hwfn,
1662  		    struct qed_ptt *p_ptt,
1663  		    struct qed_sb_info *sb_info,
1664  		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
1665  {
1666  	sb_info->sb_virt = sb_virt_addr;
1667  	sb_info->sb_phys = sb_phy_addr;
1668  
1669  	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
1670  
1671  	if (sb_id != QED_SP_SB_ID) {
1672  		if (IS_PF(p_hwfn->cdev)) {
1673  			struct qed_igu_info *p_info;
1674  			struct qed_igu_block *p_block;
1675  
1676  			p_info = p_hwfn->hw_info.p_igu_info;
1677  			p_block = &p_info->entry[sb_info->igu_sb_id];
1678  
1679  			p_block->sb_info = sb_info;
1680  			p_block->status &= ~QED_IGU_STATUS_FREE;
1681  			p_info->usage.free_cnt--;
1682  		} else {
1683  			qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1684  		}
1685  	}
1686  
1687  	sb_info->cdev = p_hwfn->cdev;
1688  
1689  	/* The IGU address will hold the absolute address that needs to be
1690  	 * written to for a specific status block
1691  	 */
1692  	if (IS_PF(p_hwfn->cdev)) {
1693  		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1694  						  GTT_BAR0_MAP_REG_IGU_CMD +
1695  						  (sb_info->igu_sb_id << 3);
1696  	} else {
1697  		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1698  						  PXP_VF_BAR0_START_IGU +
1699  						  ((IGU_CMD_INT_ACK_BASE +
1700  						    sb_info->igu_sb_id) << 3);
1701  	}
1702  
1703  	sb_info->flags |= QED_SB_INFO_INIT;
1704  
1705  	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
1706  
1707  	return 0;
1708  }
1709  
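/*
 * Sketch of the igu_addr computation above: IGU command registers are
 * spaced 8 bytes apart in BAR0, hence the "<< 3". EX_IGU_CMD_BASE is a
 * placeholder, not the real GTT_BAR0_MAP_REG_IGU_CMD offset.
 */
#include <stdint.h>

#define EX_IGU_CMD_BASE	0x10000	/* placeholder base offset */

static uintptr_t ex_pf_igu_ack_offset(uint16_t igu_sb_id)
{
	return EX_IGU_CMD_BASE + ((uintptr_t)igu_sb_id << 3);
}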
1710  int qed_int_sb_release(struct qed_hwfn *p_hwfn,
1711  		       struct qed_sb_info *sb_info, u16 sb_id)
1712  {
1713  	struct qed_igu_block *p_block;
1714  	struct qed_igu_info *p_info;
1715  
1716  	if (!sb_info)
1717  		return 0;
1718  
1719  	/* zero status block and ack counter */
1720  	sb_info->sb_ack = 0;
1721  	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1722  
1723  	if (IS_VF(p_hwfn->cdev)) {
1724  		qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
1725  		return 0;
1726  	}
1727  
1728  	p_info = p_hwfn->hw_info.p_igu_info;
1729  	p_block = &p_info->entry[sb_info->igu_sb_id];
1730  
1731  	/* Vector 0 is reserved for the default SB */
1732  	if (!p_block->vector_number) {
1733  		DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
1734  		return -EINVAL;
1735  	}
1736  
1737  	/* Lose reference to client's SB info, and fix counters */
1738  	p_block->sb_info = NULL;
1739  	p_block->status |= QED_IGU_STATUS_FREE;
1740  	p_info->usage.free_cnt++;
1741  
1742  	return 0;
1743  }
1744  
1745  static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
1746  {
1747  	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1748  
1749  	if (!p_sb)
1750  		return;
1751  
1752  	if (p_sb->sb_info.sb_virt)
1753  		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1754  				  SB_ALIGNED_SIZE(p_hwfn),
1755  				  p_sb->sb_info.sb_virt,
1756  				  p_sb->sb_info.sb_phys);
1757  	kfree(p_sb);
1758  	p_hwfn->p_sp_sb = NULL;
1759  }
1760  
1761  static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1762  {
1763  	struct qed_sb_sp_info *p_sb;
1764  	dma_addr_t p_phys = 0;
1765  	void *p_virt;
1766  
1767  	/* SB struct */
1768  	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1769  	if (!p_sb)
1770  		return -ENOMEM;
1771  
1772  	/* SB ring  */
1773  	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1774  				    SB_ALIGNED_SIZE(p_hwfn),
1775  				    &p_phys, GFP_KERNEL);
1776  	if (!p_virt) {
1777  		kfree(p_sb);
1778  		return -ENOMEM;
1779  	}
1780  
1781  	/* Status Block setup */
1782  	p_hwfn->p_sp_sb = p_sb;
1783  	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
1784  			p_phys, QED_SP_SB_ID);
1785  
1786  	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1787  
1788  	return 0;
1789  }
1790  
1791  int qed_int_register_cb(struct qed_hwfn *p_hwfn,
1792  			qed_int_comp_cb_t comp_cb,
1793  			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
1794  {
1795  	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1796  	int rc = -ENOMEM;
1797  	u8 pi;
1798  
1799  	/* Look for a free index */
1800  	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1801  		if (p_sp_sb->pi_info_arr[pi].comp_cb)
1802  			continue;
1803  
1804  		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1805  		p_sp_sb->pi_info_arr[pi].cookie = cookie;
1806  		*sb_idx = pi;
1807  		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1808  		rc = 0;
1809  		break;
1810  	}
1811  
1812  	return rc;
1813  }
1814  
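/*
 * Hedged usage sketch for qed_int_register_cb(): a client hands in a
 * completion handler of type qed_int_comp_cb_t plus a cookie, and gets
 * back the PI slot index and a pointer to the firmware-updated consumer
 * for that slot. ex_attach_client() is a hypothetical caller.
 */
static int ex_attach_client(struct qed_hwfn *p_hwfn,
			    qed_int_comp_cb_t cb, void *cookie,
			    u8 *pi_idx, __le16 **fw_cons)
{
	int rc = qed_int_register_cb(p_hwfn, cb, cookie, pi_idx, fw_cons);

	if (rc)
		return rc;	/* -ENOMEM: all PI slots are taken */

	/* *pi_idx is the slot to pass to qed_int_unregister_cb() on
	 * teardown; *fw_cons tracks the firmware consumer to poll.
	 */
	return 0;
}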
1815  int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
1816  {
1817  	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1818  
1819  	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
1820  		return -ENOMEM;
1821  
1822  	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
1823  	p_sp_sb->pi_info_arr[pi].cookie = NULL;
1824  
1825  	return 0;
1826  }
1827  
1828  u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
1829  {
1830  	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1831  }
1832  
1833  void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
1834  			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1835  {
1836  	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1837  
1838  	p_hwfn->cdev->int_mode = int_mode;
1839  	switch (p_hwfn->cdev->int_mode) {
1840  	case QED_INT_MODE_INTA:
1841  		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1842  		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1843  		break;
1844  
1845  	case QED_INT_MODE_MSI:
1846  		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1847  		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1848  		break;
1849  
1850  	case QED_INT_MODE_MSIX:
1851  		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1852  		break;
1853  	case QED_INT_MODE_POLL:
1854  		break;
1855  	}
1856  
1857  	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1858  }
1859  
1860  static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
1861  				    struct qed_ptt *p_ptt)
1862  {
1863  
1864  	/* Configure AEU signal change to produce attentions */
1865  	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1866  	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1867  	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1868  	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1869  
1870  	/* Unmask AEU signals toward IGU */
1871  	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1872  }
1873  
1874  int
1875  qed_int_igu_enable(struct qed_hwfn *p_hwfn,
1876  		   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1877  {
1878  	int rc = 0;
1879  
1880  	qed_int_igu_enable_attn(p_hwfn, p_ptt);
1881  
1882  	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1883  		rc = qed_slowpath_irq_req(p_hwfn);
1884  		if (rc) {
1885  			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
1886  			return -EINVAL;
1887  		}
1888  		p_hwfn->b_int_requested = true;
1889  	}
1890  	/* Enable interrupt Generation */
1891  	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1892  	p_hwfn->b_int_enabled = 1;
1893  
1894  	return rc;
1895  }
1896  
1897  void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1898  {
1899  	p_hwfn->b_int_enabled = 0;
1900  
1901  	if (IS_VF(p_hwfn->cdev))
1902  		return;
1903  
1904  	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1905  }
1906  
1907  #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
1908  static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
1909  				   struct qed_ptt *p_ptt,
1910  				   u16 igu_sb_id,
1911  				   bool cleanup_set, u16 opaque_fid)
1912  {
1913  	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1914  	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1915  	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1916  
1917  	/* Set the data field */
1918  	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1919  	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
1920  	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1921  
1922  	/* Set the control register */
1923  	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1924  	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1925  	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1926  
1927  	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1928  
1929  	barrier();
1930  
1931  	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1932  
1933  	/* calculate where to read the status bit from */
1934  	sb_bit = 1 << (igu_sb_id % 32);
1935  	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
1936  
1937  	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
1938  
1939  	/* Now wait for the command to complete */
1940  	do {
1941  		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
1942  
1943  		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1944  			break;
1945  
1946  		usleep_range(5000, 10000);
1947  	} while (--sleep_cnt);
1948  
1949  	if (!sleep_cnt)
1950  		DP_NOTICE(p_hwfn,
1951  			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1952  			  val, igu_sb_id);
1953  }
1954  
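/*
 * Sketch of the cleanup-status polling math above: every IGU SB owns
 * one bit in a row of 32-bit IGU_REG_CLEANUP_STATUS_* registers, so
 * both the bit mask and the register byte offset derive from igu_sb_id
 * alone. ex_cleanup_status_location() is a hypothetical helper.
 */
#include <stdint.h>

static void ex_cleanup_status_location(uint16_t igu_sb_id,
				       uint32_t *bit_mask,
				       uint32_t *byte_off)
{
	*bit_mask = 1u << (igu_sb_id % 32);		/* bit in register */
	*byte_off = (igu_sb_id / 32) * (uint32_t)sizeof(uint32_t);
}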
1955  void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
1956  				     struct qed_ptt *p_ptt,
1957  				     u16 igu_sb_id, u16 opaque, bool b_set)
1958  {
1959  	struct qed_igu_block *p_block;
1960  	int pi, i;
1961  
1962  	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
1963  	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1964  		   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%x\n",
1965  		   igu_sb_id,
1966  		   p_block->function_id,
1967  		   p_block->is_pf, p_block->vector_number);
1968  
1969  	/* Set */
1970  	if (b_set)
1971  		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
1972  
1973  	/* Clear */
1974  	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
1975  
1976  	/* Wait for the IGU SB to cleanup */
1977  	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1978  		u32 val;
1979  
1980  		val = qed_rd(p_hwfn, p_ptt,
1981  			     IGU_REG_WRITE_DONE_PENDING +
1982  			     ((igu_sb_id / 32) * 4));
1983  		if (val & BIT((igu_sb_id % 32)))
1984  			usleep_range(10, 20);
1985  		else
1986  			break;
1987  	}
1988  	if (i == IGU_CLEANUP_SLEEP_LENGTH)
1989  		DP_NOTICE(p_hwfn,
1990  			  "Cleanup failed - SB [0x%08x] still appears in WRITE_DONE_PENDING\n",
1991  			  igu_sb_id);
1992  
1993  	/* Clear the CAU for the SB */
1994  	for (pi = 0; pi < 12; pi++)
1995  		qed_wr(p_hwfn, p_ptt,
1996  		       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
1997  }
1998  
1999  void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
2000  			      struct qed_ptt *p_ptt,
2001  			      bool b_set, bool b_slowpath)
2002  {
2003  	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2004  	struct qed_igu_block *p_block;
2005  	u16 igu_sb_id = 0;
2006  	u32 val = 0;
2007  
2008  	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2009  	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2010  	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2011  	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2012  
2013  	for (igu_sb_id = 0;
2014  	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2015  		p_block = &p_info->entry[igu_sb_id];
2016  
2017  		if (!(p_block->status & QED_IGU_STATUS_VALID) ||
2018  		    !p_block->is_pf ||
2019  		    (p_block->status & QED_IGU_STATUS_DSB))
2020  			continue;
2021  
2022  		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
2023  						p_hwfn->hw_info.opaque_fid,
2024  						b_set);
2025  	}
2026  
2027  	if (b_slowpath)
2028  		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2029  						p_info->igu_dsb_id,
2030  						p_hwfn->hw_info.opaque_fid,
2031  						b_set);
2032  }
2033  
2034  int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2035  {
2036  	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2037  	struct qed_igu_block *p_block;
2038  	int pf_sbs, vf_sbs;
2039  	u16 igu_sb_id;
2040  	u32 val, rval;
2041  
2042  	if (!RESC_NUM(p_hwfn, QED_SB)) {
2043  		p_info->b_allow_pf_vf_change = false;
2044  	} else {
2045  		/* Use the numbers the MFW has provided -
2046  		 * don't forget MFW accounts for the default SB as well.
2047  		 */
2048  		p_info->b_allow_pf_vf_change = true;
2049  
2050  		if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
2051  			DP_INFO(p_hwfn,
2052  				"MFW reports 0x%04x PF SBs; IGU indicates only 0x%04x\n",
2053  				RESC_NUM(p_hwfn, QED_SB) - 1,
2054  				p_info->usage.cnt);
2055  			p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
2056  		}
2057  
2058  		if (IS_PF_SRIOV(p_hwfn)) {
2059  			u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
2060  
2061  			if (vfs != p_info->usage.iov_cnt)
2062  				DP_VERBOSE(p_hwfn,
2063  					   NETIF_MSG_INTR,
2064  					   "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
2065  					   p_info->usage.iov_cnt, vfs);
2066  
2067  			/* At this point we know the total number of SBs in the
2068  			 * IGU and the number of PF SBs, so we can validate
2069  			 * that enough are left for the VFs.
2070  			 */
2071  			if (vfs > p_info->usage.free_cnt +
2072  			    p_info->usage.free_cnt_iov - p_info->usage.cnt) {
2073  				DP_NOTICE(p_hwfn,
2074  					  "Not enough SBs for VFs - 0x%04x SBs free, of which 0x%04x serve the PF and 0x%04x are required for VFs\n",
2075  					  p_info->usage.free_cnt +
2076  					  p_info->usage.free_cnt_iov,
2077  					  p_info->usage.cnt, vfs);
2078  				return -EINVAL;
2079  			}
2080  
2081  			/* Currently cap the number of VF SBs by the
2082  			 * number of VFs.
2083  			 */
2084  			p_info->usage.iov_cnt = vfs;
2085  		}
2086  	}
2087  
2088  	/* Mark all SBs as free, now with the right PF/VF division */
2089  	p_info->usage.free_cnt = p_info->usage.cnt;
2090  	p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
2091  	p_info->usage.orig = p_info->usage.cnt;
2092  	p_info->usage.iov_orig = p_info->usage.iov_cnt;
2093  
2094  	/* We now proceed to re-configure the IGU CAM to reflect the initial
2095  	 * configuration. We can start with the Default SB.
2096  	 */
2097  	pf_sbs = p_info->usage.cnt;
2098  	vf_sbs = p_info->usage.iov_cnt;
2099  
2100  	for (igu_sb_id = p_info->igu_dsb_id;
2101  	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2102  		p_block = &p_info->entry[igu_sb_id];
2103  		val = 0;
2104  
2105  		if (!(p_block->status & QED_IGU_STATUS_VALID))
2106  			continue;
2107  
2108  		if (p_block->status & QED_IGU_STATUS_DSB) {
2109  			p_block->function_id = p_hwfn->rel_pf_id;
2110  			p_block->is_pf = 1;
2111  			p_block->vector_number = 0;
2112  			p_block->status = QED_IGU_STATUS_VALID |
2113  					  QED_IGU_STATUS_PF |
2114  					  QED_IGU_STATUS_DSB;
2115  		} else if (pf_sbs) {
2116  			pf_sbs--;
2117  			p_block->function_id = p_hwfn->rel_pf_id;
2118  			p_block->is_pf = 1;
2119  			p_block->vector_number = p_info->usage.cnt - pf_sbs;
2120  			p_block->status = QED_IGU_STATUS_VALID |
2121  					  QED_IGU_STATUS_PF |
2122  					  QED_IGU_STATUS_FREE;
2123  		} else if (vf_sbs) {
2124  			p_block->function_id =
2125  			    p_hwfn->cdev->p_iov_info->first_vf_in_pf +
2126  			    p_info->usage.iov_cnt - vf_sbs;
2127  			p_block->is_pf = 0;
2128  			p_block->vector_number = 0;
2129  			p_block->status = QED_IGU_STATUS_VALID |
2130  					  QED_IGU_STATUS_FREE;
2131  			vf_sbs--;
2132  		} else {
2133  			p_block->function_id = 0;
2134  			p_block->is_pf = 0;
2135  			p_block->vector_number = 0;
2136  		}
2137  
2138  		SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
2139  			  p_block->function_id);
2140  		SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
2141  		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
2142  			  p_block->vector_number);
2143  
2144  		/* VF entries would be enabled when the VF is initialized */
2145  		SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
2146  
2147  		rval = qed_rd(p_hwfn, p_ptt,
2148  			      IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2149  
2150  		if (rval != val) {
2151  			qed_wr(p_hwfn, p_ptt,
2152  			       IGU_REG_MAPPING_MEMORY +
2153  			       sizeof(u32) * igu_sb_id, val);
2154  
2155  			DP_VERBOSE(p_hwfn,
2156  				   NETIF_MSG_INTR,
2157  				   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
2158  				   igu_sb_id,
2159  				   p_block->function_id,
2160  				   p_block->is_pf,
2161  				   p_block->vector_number, rval, val);
2162  		}
2163  	}
2164  
2165  	return 0;
2166  }
2167  
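/*
 * Standalone sketch of composing one IGU CAM line as written above.
 * The EX_* field positions are placeholders for illustration; the real
 * layout comes from the IGU_MAPPING_LINE_* definitions, whose values
 * are not reproduced here.
 */
#include <stdint.h>

#define EX_FUNC_SHIFT	0		/* placeholder positions */
#define EX_PF_VALID_BIT	(1u << 8)
#define EX_VEC_SHIFT	9
#define EX_VALID_BIT	(1u << 16)

static uint32_t ex_encode_cam_line(uint8_t func_id, int is_pf,
				   uint8_t vector)
{
	uint32_t val = (uint32_t)func_id << EX_FUNC_SHIFT;

	val |= (uint32_t)vector << EX_VEC_SHIFT;
	if (is_pf)	/* VF lines stay invalid until the VF is initialized */
		val |= EX_PF_VALID_BIT | EX_VALID_BIT;

	return val;
}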
2168  static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
2169  				       struct qed_ptt *p_ptt, u16 igu_sb_id)
2170  {
2171  	u32 val = qed_rd(p_hwfn, p_ptt,
2172  			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2173  	struct qed_igu_block *p_block;
2174  
2175  	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2176  
2177  	/* Fill the block information */
2178  	p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2179  	p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2180  	p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2181  	p_block->igu_sb_id = igu_sb_id;
2182  }
2183  
2184  int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2185  {
2186  	struct qed_igu_info *p_igu_info;
2187  	struct qed_igu_block *p_block;
2188  	u32 min_vf = 0, max_vf = 0;
2189  	u16 igu_sb_id;
2190  
2191  	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
2192  	if (!p_hwfn->hw_info.p_igu_info)
2193  		return -ENOMEM;
2194  
2195  	p_igu_info = p_hwfn->hw_info.p_igu_info;
2196  
2197  	/* Distinguish between existent and non-existent default SB */
2198  	p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
2199  
2200  	/* Find the range of VF ids whose SBs belong to this PF */
2201  	if (p_hwfn->cdev->p_iov_info) {
2202  		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
2203  
2204  		min_vf	= p_iov->first_vf_in_pf;
2205  		max_vf	= p_iov->first_vf_in_pf + p_iov->total_vfs;
2206  	}
2207  
2208  	for (igu_sb_id = 0;
2209  	     igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2210  		/* Read the current entry; note it might not belong to this PF */
2211  		qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2212  		p_block = &p_igu_info->entry[igu_sb_id];
2213  
2214  		if ((p_block->is_pf) &&
2215  		    (p_block->function_id == p_hwfn->rel_pf_id)) {
2216  			p_block->status = QED_IGU_STATUS_PF |
2217  					  QED_IGU_STATUS_VALID |
2218  					  QED_IGU_STATUS_FREE;
2219  
2220  			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
2221  				p_igu_info->usage.cnt++;
2222  		} else if (!(p_block->is_pf) &&
2223  			   (p_block->function_id >= min_vf) &&
2224  			   (p_block->function_id < max_vf)) {
2225  			/* Available for VFs of this PF */
2226  			p_block->status = QED_IGU_STATUS_VALID |
2227  					  QED_IGU_STATUS_FREE;
2228  
2229  			if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
2230  				p_igu_info->usage.iov_cnt++;
2231  		}
2232  
2233  		/* Mark the first entry belonging to the PF or its VFs
2234  		 * as the default SB [we'll reset IGU prior to first usage].
2235  		 */
2236  		if ((p_block->status & QED_IGU_STATUS_VALID) &&
2237  		    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
2238  			p_igu_info->igu_dsb_id = igu_sb_id;
2239  			p_block->status |= QED_IGU_STATUS_DSB;
2240  		}
2241  
2242  		/* Limit the number of prints by having each PF print only
2243  		 * its own entries, with the exception of PF0, which prints
2244  		 * everything.
2245  		 */
2246  		if ((p_block->status & QED_IGU_STATUS_VALID) ||
2247  		    (p_hwfn->abs_pf_id == 0)) {
2248  			DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2249  				   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2250  				   igu_sb_id, p_block->function_id,
2251  				   p_block->is_pf, p_block->vector_number);
2252  		}
2253  	}
2254  
2255  	if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
2256  		DP_NOTICE(p_hwfn,
2257  			  "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2258  			  p_igu_info->igu_dsb_id);
2259  		return -EINVAL;
2260  	}
2261  
2262  	/* All non-default SBs are considered free at this point */
2263  	p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2264  	p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2265  
2266  	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2267  		   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2268  		   p_igu_info->igu_dsb_id,
2269  		   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
2270  
2271  	return 0;
2272  }
2273  
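/*
 * Sketch of the VF ownership test used in the CAM scan above: a non-PF
 * entry belongs to this PF iff its function_id falls in the half-open
 * range [first_vf_in_pf, first_vf_in_pf + total_vfs).
 */
#include <stdbool.h>
#include <stdint.h>

static bool ex_vf_belongs_to_pf(uint32_t func_id, uint32_t first_vf_in_pf,
				uint32_t total_vfs)
{
	return func_id >= first_vf_in_pf &&
	       func_id < first_vf_in_pf + total_vfs;
}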
2274  /**
2275   * qed_int_igu_init_rt() - Initialize IGU runtime registers.
2276   *
2277   * @p_hwfn: HW device data.
2278   */
2279  void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
2280  {
2281  	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2282  
2283  	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2284  }
2285  
2286  u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
2287  {
2288  	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
2289  			       IGU_CMD_INT_ACK_BASE;
2290  	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
2291  			       IGU_CMD_INT_ACK_BASE;
2292  	u32 intr_status_hi = 0, intr_status_lo = 0;
2293  	u64 intr_status = 0;
2294  
2295  	intr_status_lo = REG_RD(p_hwfn,
2296  				GTT_BAR0_MAP_REG_IGU_CMD +
2297  				lsb_igu_cmd_addr * 8);
2298  	intr_status_hi = REG_RD(p_hwfn,
2299  				GTT_BAR0_MAP_REG_IGU_CMD +
2300  				msb_igu_cmd_addr * 8);
2301  	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2302  
2303  	return intr_status;
2304  }
2305  
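/*
 * Sketch of the 64-bit status assembly above: two 32-bit BAR reads are
 * combined MSB:LSB. Plain C stand-in with no device access.
 */
#include <stdint.h>

static uint64_t ex_combine_status(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}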
2306  static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
2307  {
2308  	tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
2309  	p_hwfn->b_sp_dpc_enabled = true;
2310  }
2311  
2312  int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2313  {
2314  	int rc = 0;
2315  
2316  	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
2317  	if (rc)
2318  		return rc;
2319  
2320  	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
2321  
2322  	return rc;
2323  }
2324  
2325  void qed_int_free(struct qed_hwfn *p_hwfn)
2326  {
2327  	qed_int_sp_sb_free(p_hwfn);
2328  	qed_int_sb_attn_free(p_hwfn);
2329  }
2330  
2331  void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2332  {
2333  	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2334  	qed_int_sb_attn_setup(p_hwfn, p_ptt);
2335  	qed_int_sp_dpc_setup(p_hwfn);
2336  }
2337  
2338  void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
2339  			 struct qed_sb_cnt_info *p_sb_cnt_info)
2340  {
2341  	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
2342  
2343  	if (!info || !p_sb_cnt_info)
2344  		return;
2345  
2346  	memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
2347  }
2348  
2349  void qed_int_disable_post_isr_release(struct qed_dev *cdev)
2350  {
2351  	int i;
2352  
2353  	for_each_hwfn(cdev, i)
2354  		cdev->hwfns[i].b_int_requested = false;
2355  }
2356  
2357  void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
2358  {
2359  	cdev->attn_clr_en = clr_enable;
2360  }
2361  
2362  int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2363  			  u8 timer_res, u16 sb_id, bool tx)
2364  {
2365  	struct cau_sb_entry sb_entry;
2366  	u32 params;
2367  	int rc;
2368  
2369  	if (!p_hwfn->hw_init_done) {
2370  		DP_ERR(p_hwfn, "hardware not initialized yet\n");
2371  		return -EINVAL;
2372  	}
2373  
2374  	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2375  			       sb_id * sizeof(u64),
2376  			       (u64)(uintptr_t)&sb_entry, 2, NULL);
2377  	if (rc) {
2378  		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2379  		return rc;
2380  	}
2381  
2382  	params = le32_to_cpu(sb_entry.params);
2383  
2384  	if (tx)
2385  		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2386  	else
2387  		SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2388  
2389  	sb_entry.params = cpu_to_le32(params);
2390  
2391  	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
2392  			       (u64)(uintptr_t)&sb_entry,
2393  			       CAU_REG_SB_VAR_MEMORY +
2394  			       sb_id * sizeof(u64), 2, NULL);
2395  	if (rc) {
2396  		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2397  		return rc;
2398  	}
2399  
2400  	return rc;
2401  }
2402  
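/*
 * Hedged usage sketch for qed_int_set_timer_res(): retuning both the RX
 * and TX timer resolution of one status block after init. Assumes a
 * valid p_hwfn/p_ptt pair obtained elsewhere; ex_retune_sb() is a
 * hypothetical caller.
 */
static int ex_retune_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 sb_id, u8 timer_res)
{
	int rc;

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id,
				   false /* RX */);
	if (rc)
		return rc;

	return qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id,
				     true /* TX */);
}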
2403  int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2404  		       struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
2405  {
2406  	u16 sbid = p_sb->igu_sb_id;
2407  	u32 i;
2408  
2409  	if (IS_VF(p_hwfn->cdev))
2410  		return -EINVAL;
2411  
2412  	if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
2413  		return -EINVAL;
2414  
2415  	p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4);
2416  	p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4);
2417  
2418  	for (i = 0; i < PIS_PER_SB; i++)
2419  		p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
2420  					    CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4);
2421  
2422  	return 0;
2423  }
2424