1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/io.h>
36 #include <linux/bitops.h>
37 #include <linux/delay.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/errno.h>
40 #include <linux/interrupt.h>
41 #include <linux/kernel.h>
42 #include <linux/pci.h>
43 #include <linux/slab.h>
44 #include <linux/string.h>
45 #include "qed.h"
46 #include "qed_hsi.h"
47 #include "qed_hw.h"
48 #include "qed_init_ops.h"
49 #include "qed_int.h"
50 #include "qed_mcp.h"
51 #include "qed_reg_addr.h"
52 #include "qed_sp.h"
53 #include "qed_sriov.h"
54 #include "qed_vf.h"
55 
56 struct qed_pi_info {
57 	qed_int_comp_cb_t	comp_cb;
58 	void			*cookie;
59 };
60 
61 struct qed_sb_sp_info {
62 	struct qed_sb_info	sb_info;
63 
64 	/* per protocol index data */
65 	struct qed_pi_info	pi_info_arr[PIS_PER_SB];
66 };
67 
68 enum qed_attention_type {
69 	QED_ATTN_TYPE_ATTN,
70 	QED_ATTN_TYPE_PARITY,
71 };
72 
73 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
74 	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
75 
76 struct aeu_invert_reg_bit {
77 	char bit_name[30];
78 
79 #define ATTENTION_PARITY                (1 << 0)
80 
81 #define ATTENTION_LENGTH_MASK           (0x00000ff0)
82 #define ATTENTION_LENGTH_SHIFT          (4)
83 #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
84 					 ATTENTION_LENGTH_SHIFT)
85 #define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
86 #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
87 #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
88 					 ATTENTION_PARITY)
89 
90 /* Multiple bits start at this offset */
91 #define ATTENTION_OFFSET_MASK           (0x000ff000)
92 #define ATTENTION_OFFSET_SHIFT          (12)
93 	unsigned int flags;
94 
95 	/* Callback to invoke when this attention is triggered */
96 	int (*cb)(struct qed_hwfn *p_hwfn);
97 
98 	enum block_id block_index;
99 };
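/* How 'flags' above is interpreted (illustrative summary, derived from the
 * macros and the aeu_descs table further below): bits [11:4]
 * (ATTENTION_LENGTH) hold the number of consecutive AEU bits the entry
 * covers, so ATTENTION_SINGLE marks a one-bit entry, ATTENTION_PAR a one-bit
 * parity indication, and ATTENTION_PAR_INT a two-bit entry carrying both a
 * parity and an interrupt indication. For example, the aeu_descs entry
 *     {"PGLUE B RBC", ATTENTION_PAR_INT, qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}
 * describes such a parity/interrupt pair, handled by qed_pglub_rbc_attn_cb()
 * on behalf of the PGLUE_B block.
 */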
100 
101 struct aeu_invert_reg {
102 	struct aeu_invert_reg_bit bits[32];
103 };
104 
105 #define MAX_ATTN_GRPS           (8)
106 #define NUM_ATTN_REGS           (9)
107 
108 /* HW Attention register */
109 struct attn_hw_reg {
110 	u16 reg_idx;             /* Index of this register in its block */
111 	u16 num_of_bits;         /* Number of valid attention bits */
112 	u32 sts_addr;            /* Address of the STS register */
113 	u32 sts_clr_addr;        /* Address of the STS_CLR register */
114 	u32 sts_wr_addr;         /* Address of the STS_WR register */
115 	u32 mask_addr;           /* Address of the MASK register */
116 };
117 
118 /* HW block attention registers */
119 struct attn_hw_regs {
120 	u16 num_of_int_regs;            /* Number of interrupt regs */
121 	u16 num_of_prty_regs;           /* Number of parity regs */
122 	struct attn_hw_reg **int_regs;  /* interrupt regs */
123 	struct attn_hw_reg **prty_regs; /* parity regs */
124 };
125 
126 /* A HW block and its attention registers */
127 struct attn_hw_block {
128 	const char *name;                 /* Block name */
129 	struct attn_hw_regs chip_regs[1];
130 };
131 
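/* Per-block attention register descriptors for the BB B0 chip revision
 * (hence the "_bb_b0" suffix). The initializers below are positional and
 * follow the field order of struct attn_hw_reg:
 *     { reg_idx, num_of_bits, sts_addr, sts_clr_addr, sts_wr_addr, mask_addr }
 * e.g. grc_int0_bb_b0 is GRC interrupt register 0 with 4 valid bits, its STS
 * register at 0x50180 and its MASK register at 0x50184.
 */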
132 static struct attn_hw_reg grc_int0_bb_b0 = {
133 	0, 4, 0x50180, 0x5018c, 0x50188, 0x50184};
134 
135 static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
136 	&grc_int0_bb_b0};
137 
138 static struct attn_hw_reg grc_prty1_bb_b0 = {
139 	0, 2, 0x50200, 0x5020c, 0x50208, 0x50204};
140 
141 static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
142 	&grc_prty1_bb_b0};
143 
144 static struct attn_hw_reg miscs_int0_bb_b0 = {
145 	0, 3, 0x9180, 0x918c, 0x9188, 0x9184};
146 
147 static struct attn_hw_reg miscs_int1_bb_b0 = {
148 	1, 11, 0x9190, 0x919c, 0x9198, 0x9194};
149 
150 static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
151 	&miscs_int0_bb_b0, &miscs_int1_bb_b0};
152 
153 static struct attn_hw_reg miscs_prty0_bb_b0 = {
154 	0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4};
155 
156 static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
157 	&miscs_prty0_bb_b0};
158 
159 static struct attn_hw_reg misc_int0_bb_b0 = {
160 	0, 1, 0x8180, 0x818c, 0x8188, 0x8184};
161 
162 static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
163 	&misc_int0_bb_b0};
164 
165 static struct attn_hw_reg pglue_b_int0_bb_b0 = {
166 	0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184};
167 
168 static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
169 	&pglue_b_int0_bb_b0};
170 
171 static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
172 	0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194};
173 
174 static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
175 	1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204};
176 
177 static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
178 	&pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0};
179 
180 static struct attn_hw_reg cnig_int0_bb_b0 = {
181 	0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec};
182 
183 static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
184 	&cnig_int0_bb_b0};
185 
186 static struct attn_hw_reg cnig_prty0_bb_b0 = {
187 	0, 2, 0x218348, 0x218354, 0x218350, 0x21834c};
188 
189 static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
190 	&cnig_prty0_bb_b0};
191 
192 static struct attn_hw_reg cpmu_int0_bb_b0 = {
193 	0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4};
194 
195 static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
196 	&cpmu_int0_bb_b0};
197 
198 static struct attn_hw_reg ncsi_int0_bb_b0 = {
199 	0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0};
200 
201 static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
202 	&ncsi_int0_bb_b0};
203 
204 static struct attn_hw_reg ncsi_prty1_bb_b0 = {
205 	0, 1, 0x40000, 0x4000c, 0x40008, 0x40004};
206 
207 static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
208 	&ncsi_prty1_bb_b0};
209 
210 static struct attn_hw_reg opte_prty1_bb_b0 = {
211 	0, 11, 0x53000, 0x5300c, 0x53008, 0x53004};
212 
213 static struct attn_hw_reg opte_prty0_bb_b0 = {
214 	1, 1, 0x53208, 0x53214, 0x53210, 0x5320c};
215 
216 static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
217 	&opte_prty1_bb_b0, &opte_prty0_bb_b0};
218 
219 static struct attn_hw_reg bmb_int0_bb_b0 = {
220 	0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4};
221 
222 static struct attn_hw_reg bmb_int1_bb_b0 = {
223 	1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc};
224 
225 static struct attn_hw_reg bmb_int2_bb_b0 = {
226 	2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4};
227 
228 static struct attn_hw_reg bmb_int3_bb_b0 = {
229 	3, 31, 0x540108, 0x540114, 0x540110, 0x54010c};
230 
231 static struct attn_hw_reg bmb_int4_bb_b0 = {
232 	4, 27, 0x540120, 0x54012c, 0x540128, 0x540124};
233 
234 static struct attn_hw_reg bmb_int5_bb_b0 = {
235 	5, 29, 0x540138, 0x540144, 0x540140, 0x54013c};
236 
237 static struct attn_hw_reg bmb_int6_bb_b0 = {
238 	6, 30, 0x540150, 0x54015c, 0x540158, 0x540154};
239 
240 static struct attn_hw_reg bmb_int7_bb_b0 = {
241 	7, 32, 0x540168, 0x540174, 0x540170, 0x54016c};
242 
243 static struct attn_hw_reg bmb_int8_bb_b0 = {
244 	8, 32, 0x540184, 0x540190, 0x54018c, 0x540188};
245 
246 static struct attn_hw_reg bmb_int9_bb_b0 = {
247 	9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0};
248 
249 static struct attn_hw_reg bmb_int10_bb_b0 = {
250 	10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8};
251 
252 static struct attn_hw_reg bmb_int11_bb_b0 = {
253 	11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0};
254 
255 static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
256 	&bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
257 	&bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
258 	&bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0};
259 
260 static struct attn_hw_reg bmb_prty0_bb_b0 = {
261 	0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0};
262 
263 static struct attn_hw_reg bmb_prty1_bb_b0 = {
264 	1, 31, 0x540400, 0x54040c, 0x540408, 0x540404};
265 
266 static struct attn_hw_reg bmb_prty2_bb_b0 = {
267 	2, 15, 0x540410, 0x54041c, 0x540418, 0x540414};
268 
269 static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
270 	&bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0};
271 
272 static struct attn_hw_reg pcie_prty1_bb_b0 = {
273 	0, 17, 0x54000, 0x5400c, 0x54008, 0x54004};
274 
275 static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
276 	&pcie_prty1_bb_b0};
277 
278 static struct attn_hw_reg mcp2_prty0_bb_b0 = {
279 	0, 1, 0x52040, 0x5204c, 0x52048, 0x52044};
280 
281 static struct attn_hw_reg mcp2_prty1_bb_b0 = {
282 	1, 12, 0x52204, 0x52210, 0x5220c, 0x52208};
283 
284 static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
285 	&mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0};
286 
287 static struct attn_hw_reg pswhst_int0_bb_b0 = {
288 	0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184};
289 
290 static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
291 	&pswhst_int0_bb_b0};
292 
293 static struct attn_hw_reg pswhst_prty0_bb_b0 = {
294 	0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194};
295 
296 static struct attn_hw_reg pswhst_prty1_bb_b0 = {
297 	1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204};
298 
299 static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
300 	&pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0};
301 
302 static struct attn_hw_reg pswhst2_int0_bb_b0 = {
303 	0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184};
304 
305 static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
306 	&pswhst2_int0_bb_b0};
307 
308 static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
309 	0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194};
310 
311 static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
312 	&pswhst2_prty0_bb_b0};
313 
314 static struct attn_hw_reg pswrd_int0_bb_b0 = {
315 	0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184};
316 
317 static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
318 	&pswrd_int0_bb_b0};
319 
320 static struct attn_hw_reg pswrd_prty0_bb_b0 = {
321 	0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194};
322 
323 static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
324 	&pswrd_prty0_bb_b0};
325 
326 static struct attn_hw_reg pswrd2_int0_bb_b0 = {
327 	0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184};
328 
329 static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
330 	&pswrd2_int0_bb_b0};
331 
332 static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
333 	0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194};
334 
335 static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
336 	1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204};
337 
338 static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
339 	2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214};
340 
341 static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
342 	&pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0};
343 
344 static struct attn_hw_reg pswwr_int0_bb_b0 = {
345 	0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184};
346 
347 static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
348 	&pswwr_int0_bb_b0};
349 
350 static struct attn_hw_reg pswwr_prty0_bb_b0 = {
351 	0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194};
352 
353 static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
354 	&pswwr_prty0_bb_b0};
355 
356 static struct attn_hw_reg pswwr2_int0_bb_b0 = {
357 	0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184};
358 
359 static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
360 	&pswwr2_int0_bb_b0};
361 
362 static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
363 	0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194};
364 
365 static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
366 	1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204};
367 
368 static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
369 	2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214};
370 
371 static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
372 	3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224};
373 
374 static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
375 	4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234};
376 
377 static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
378 	&pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
379 	&pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0};
380 
381 static struct attn_hw_reg pswrq_int0_bb_b0 = {
382 	0, 21, 0x280180, 0x28018c, 0x280188, 0x280184};
383 
384 static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
385 	&pswrq_int0_bb_b0};
386 
387 static struct attn_hw_reg pswrq_prty0_bb_b0 = {
388 	0, 1, 0x280190, 0x28019c, 0x280198, 0x280194};
389 
390 static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
391 	&pswrq_prty0_bb_b0};
392 
393 static struct attn_hw_reg pswrq2_int0_bb_b0 = {
394 	0, 15, 0x240180, 0x24018c, 0x240188, 0x240184};
395 
396 static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
397 	&pswrq2_int0_bb_b0};
398 
399 static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
400 	0, 9, 0x240200, 0x24020c, 0x240208, 0x240204};
401 
402 static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
403 	&pswrq2_prty1_bb_b0};
404 
405 static struct attn_hw_reg pglcs_int0_bb_b0 = {
406 	0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04};
407 
408 static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
409 	&pglcs_int0_bb_b0};
410 
411 static struct attn_hw_reg dmae_int0_bb_b0 = {
412 	0, 2, 0xc180, 0xc18c, 0xc188, 0xc184};
413 
414 static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
415 	&dmae_int0_bb_b0};
416 
417 static struct attn_hw_reg dmae_prty1_bb_b0 = {
418 	0, 3, 0xc200, 0xc20c, 0xc208, 0xc204};
419 
420 static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
421 	&dmae_prty1_bb_b0};
422 
423 static struct attn_hw_reg ptu_int0_bb_b0 = {
424 	0, 8, 0x560180, 0x56018c, 0x560188, 0x560184};
425 
426 static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
427 	&ptu_int0_bb_b0};
428 
429 static struct attn_hw_reg ptu_prty1_bb_b0 = {
430 	0, 18, 0x560200, 0x56020c, 0x560208, 0x560204};
431 
432 static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
433 	&ptu_prty1_bb_b0};
434 
435 static struct attn_hw_reg tcm_int0_bb_b0 = {
436 	0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184};
437 
438 static struct attn_hw_reg tcm_int1_bb_b0 = {
439 	1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194};
440 
441 static struct attn_hw_reg tcm_int2_bb_b0 = {
442 	2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4};
443 
444 static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
445 	&tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0};
446 
447 static struct attn_hw_reg tcm_prty1_bb_b0 = {
448 	0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204};
449 
450 static struct attn_hw_reg tcm_prty2_bb_b0 = {
451 	1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214};
452 
453 static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
454 	&tcm_prty1_bb_b0, &tcm_prty2_bb_b0};
455 
456 static struct attn_hw_reg mcm_int0_bb_b0 = {
457 	0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184};
458 
459 static struct attn_hw_reg mcm_int1_bb_b0 = {
460 	1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194};
461 
462 static struct attn_hw_reg mcm_int2_bb_b0 = {
463 	2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4};
464 
465 static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
466 	&mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0};
467 
468 static struct attn_hw_reg mcm_prty1_bb_b0 = {
469 	0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204};
470 
471 static struct attn_hw_reg mcm_prty2_bb_b0 = {
472 	1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214};
473 
474 static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
475 	&mcm_prty1_bb_b0, &mcm_prty2_bb_b0};
476 
477 static struct attn_hw_reg ucm_int0_bb_b0 = {
478 	0, 17, 0x1280180, 0x128018c, 0x1280188, 0x1280184};
479 
480 static struct attn_hw_reg ucm_int1_bb_b0 = {
481 	1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194};
482 
483 static struct attn_hw_reg ucm_int2_bb_b0 = {
484 	2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4};
485 
486 static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
487 	&ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0};
488 
489 static struct attn_hw_reg ucm_prty1_bb_b0 = {
490 	0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204};
491 
492 static struct attn_hw_reg ucm_prty2_bb_b0 = {
493 	1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214};
494 
495 static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
496 	&ucm_prty1_bb_b0, &ucm_prty2_bb_b0};
497 
498 static struct attn_hw_reg xcm_int0_bb_b0 = {
499 	0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184};
500 
501 static struct attn_hw_reg xcm_int1_bb_b0 = {
502 	1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194};
503 
504 static struct attn_hw_reg xcm_int2_bb_b0 = {
505 	2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4};
506 
507 static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
508 	&xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0};
509 
510 static struct attn_hw_reg xcm_prty1_bb_b0 = {
511 	0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204};
512 
513 static struct attn_hw_reg xcm_prty2_bb_b0 = {
514 	1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214};
515 
516 static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
517 	&xcm_prty1_bb_b0, &xcm_prty2_bb_b0};
518 
519 static struct attn_hw_reg ycm_int0_bb_b0 = {
520 	0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184};
521 
522 static struct attn_hw_reg ycm_int1_bb_b0 = {
523 	1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194};
524 
525 static struct attn_hw_reg ycm_int2_bb_b0 = {
526 	2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4};
527 
528 static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
529 	&ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0};
530 
531 static struct attn_hw_reg ycm_prty1_bb_b0 = {
532 	0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204};
533 
534 static struct attn_hw_reg ycm_prty2_bb_b0 = {
535 	1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214};
536 
537 static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
538 	&ycm_prty1_bb_b0, &ycm_prty2_bb_b0};
539 
540 static struct attn_hw_reg pcm_int0_bb_b0 = {
541 	0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184};
542 
543 static struct attn_hw_reg pcm_int1_bb_b0 = {
544 	1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194};
545 
546 static struct attn_hw_reg pcm_int2_bb_b0 = {
547 	2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4};
548 
549 static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
550 	&pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0};
551 
552 static struct attn_hw_reg pcm_prty1_bb_b0 = {
553 	0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204};
554 
555 static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
556 	&pcm_prty1_bb_b0};
557 
558 static struct attn_hw_reg qm_int0_bb_b0 = {
559 	0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184};
560 
561 static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
562 	&qm_int0_bb_b0};
563 
564 static struct attn_hw_reg qm_prty0_bb_b0 = {
565 	0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194};
566 
567 static struct attn_hw_reg qm_prty1_bb_b0 = {
568 	1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204};
569 
570 static struct attn_hw_reg qm_prty2_bb_b0 = {
571 	2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214};
572 
573 static struct attn_hw_reg qm_prty3_bb_b0 = {
574 	3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224};
575 
576 static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
577 	&qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0};
578 
579 static struct attn_hw_reg tm_int0_bb_b0 = {
580 	0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184};
581 
582 static struct attn_hw_reg tm_int1_bb_b0 = {
583 	1, 11, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194};
584 
585 static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
586 	&tm_int0_bb_b0, &tm_int1_bb_b0};
587 
588 static struct attn_hw_reg tm_prty1_bb_b0 = {
589 	0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204};
590 
591 static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
592 	&tm_prty1_bb_b0};
593 
594 static struct attn_hw_reg dorq_int0_bb_b0 = {
595 	0, 9, 0x100180, 0x10018c, 0x100188, 0x100184};
596 
597 static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
598 	&dorq_int0_bb_b0};
599 
600 static struct attn_hw_reg dorq_prty0_bb_b0 = {
601 	0, 1, 0x100190, 0x10019c, 0x100198, 0x100194};
602 
603 static struct attn_hw_reg dorq_prty1_bb_b0 = {
604 	1, 6, 0x100200, 0x10020c, 0x100208, 0x100204};
605 
606 static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
607 	&dorq_prty0_bb_b0, &dorq_prty1_bb_b0};
608 
609 static struct attn_hw_reg brb_int0_bb_b0 = {
610 	0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4};
611 
612 static struct attn_hw_reg brb_int1_bb_b0 = {
613 	1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc};
614 
615 static struct attn_hw_reg brb_int2_bb_b0 = {
616 	2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4};
617 
618 static struct attn_hw_reg brb_int3_bb_b0 = {
619 	3, 31, 0x340108, 0x340114, 0x340110, 0x34010c};
620 
621 static struct attn_hw_reg brb_int4_bb_b0 = {
622 	4, 27, 0x340120, 0x34012c, 0x340128, 0x340124};
623 
624 static struct attn_hw_reg brb_int5_bb_b0 = {
625 	5, 1, 0x340138, 0x340144, 0x340140, 0x34013c};
626 
627 static struct attn_hw_reg brb_int6_bb_b0 = {
628 	6, 8, 0x340150, 0x34015c, 0x340158, 0x340154};
629 
630 static struct attn_hw_reg brb_int7_bb_b0 = {
631 	7, 32, 0x340168, 0x340174, 0x340170, 0x34016c};
632 
633 static struct attn_hw_reg brb_int8_bb_b0 = {
634 	8, 17, 0x340184, 0x340190, 0x34018c, 0x340188};
635 
636 static struct attn_hw_reg brb_int9_bb_b0 = {
637 	9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0};
638 
639 static struct attn_hw_reg brb_int10_bb_b0 = {
640 	10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8};
641 
642 static struct attn_hw_reg brb_int11_bb_b0 = {
643 	11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0};
644 
645 static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
646 	&brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
647 	&brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
648 	&brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0};
649 
650 static struct attn_hw_reg brb_prty0_bb_b0 = {
651 	0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0};
652 
653 static struct attn_hw_reg brb_prty1_bb_b0 = {
654 	1, 31, 0x340400, 0x34040c, 0x340408, 0x340404};
655 
656 static struct attn_hw_reg brb_prty2_bb_b0 = {
657 	2, 14, 0x340410, 0x34041c, 0x340418, 0x340414};
658 
659 static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
660 	&brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0};
661 
662 static struct attn_hw_reg src_int0_bb_b0 = {
663 	0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4};
664 
665 static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
666 	&src_int0_bb_b0};
667 
668 static struct attn_hw_reg prs_int0_bb_b0 = {
669 	0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044};
670 
671 static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
672 	&prs_int0_bb_b0};
673 
674 static struct attn_hw_reg prs_prty0_bb_b0 = {
675 	0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054};
676 
677 static struct attn_hw_reg prs_prty1_bb_b0 = {
678 	1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208};
679 
680 static struct attn_hw_reg prs_prty2_bb_b0 = {
681 	2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218};
682 
683 static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
684 	&prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0};
685 
686 static struct attn_hw_reg tsdm_int0_bb_b0 = {
687 	0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044};
688 
689 static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
690 	&tsdm_int0_bb_b0};
691 
692 static struct attn_hw_reg tsdm_prty1_bb_b0 = {
693 	0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204};
694 
695 static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
696 	&tsdm_prty1_bb_b0};
697 
698 static struct attn_hw_reg msdm_int0_bb_b0 = {
699 	0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044};
700 
701 static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
702 	&msdm_int0_bb_b0};
703 
704 static struct attn_hw_reg msdm_prty1_bb_b0 = {
705 	0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204};
706 
707 static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
708 	&msdm_prty1_bb_b0};
709 
710 static struct attn_hw_reg usdm_int0_bb_b0 = {
711 	0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044};
712 
713 static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
714 	&usdm_int0_bb_b0};
715 
716 static struct attn_hw_reg usdm_prty1_bb_b0 = {
717 	0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204};
718 
719 static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
720 	&usdm_prty1_bb_b0};
721 
722 static struct attn_hw_reg xsdm_int0_bb_b0 = {
723 	0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044};
724 
725 static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
726 	&xsdm_int0_bb_b0};
727 
728 static struct attn_hw_reg xsdm_prty1_bb_b0 = {
729 	0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204};
730 
731 static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
732 	&xsdm_prty1_bb_b0};
733 
734 static struct attn_hw_reg ysdm_int0_bb_b0 = {
735 	0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044};
736 
737 static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
738 	&ysdm_int0_bb_b0};
739 
740 static struct attn_hw_reg ysdm_prty1_bb_b0 = {
741 	0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204};
742 
743 static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
744 	&ysdm_prty1_bb_b0};
745 
746 static struct attn_hw_reg psdm_int0_bb_b0 = {
747 	0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044};
748 
749 static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
750 	&psdm_int0_bb_b0};
751 
752 static struct attn_hw_reg psdm_prty1_bb_b0 = {
753 	0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204};
754 
755 static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
756 	&psdm_prty1_bb_b0};
757 
758 static struct attn_hw_reg tsem_int0_bb_b0 = {
759 	0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044};
760 
761 static struct attn_hw_reg tsem_int1_bb_b0 = {
762 	1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054};
763 
764 static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
765 	2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044};
766 
767 static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
768 	&tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0};
769 
770 static struct attn_hw_reg tsem_prty0_bb_b0 = {
771 	0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc};
772 
773 static struct attn_hw_reg tsem_prty1_bb_b0 = {
774 	1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204};
775 
776 static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
777 	2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204};
778 
779 static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
780 	&tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
781 	&tsem_fast_memory_vfc_config_prty1_bb_b0};
782 
783 static struct attn_hw_reg msem_int0_bb_b0 = {
784 	0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044};
785 
786 static struct attn_hw_reg msem_int1_bb_b0 = {
787 	1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054};
788 
789 static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
790 	2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044};
791 
792 static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
793 	&msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0};
794 
795 static struct attn_hw_reg msem_prty0_bb_b0 = {
796 	0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc};
797 
798 static struct attn_hw_reg msem_prty1_bb_b0 = {
799 	1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204};
800 
801 static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
802 	&msem_prty0_bb_b0, &msem_prty1_bb_b0};
803 
804 static struct attn_hw_reg usem_int0_bb_b0 = {
805 	0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044};
806 
807 static struct attn_hw_reg usem_int1_bb_b0 = {
808 	1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054};
809 
810 static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
811 	2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044};
812 
813 static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
814 	&usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0};
815 
816 static struct attn_hw_reg usem_prty0_bb_b0 = {
817 	0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc};
818 
819 static struct attn_hw_reg usem_prty1_bb_b0 = {
820 	1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204};
821 
822 static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
823 	&usem_prty0_bb_b0, &usem_prty1_bb_b0};
824 
825 static struct attn_hw_reg xsem_int0_bb_b0 = {
826 	0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044};
827 
828 static struct attn_hw_reg xsem_int1_bb_b0 = {
829 	1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054};
830 
831 static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
832 	2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044};
833 
834 static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
835 	&xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0};
836 
837 static struct attn_hw_reg xsem_prty0_bb_b0 = {
838 	0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc};
839 
840 static struct attn_hw_reg xsem_prty1_bb_b0 = {
841 	1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204};
842 
843 static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
844 	&xsem_prty0_bb_b0, &xsem_prty1_bb_b0};
845 
846 static struct attn_hw_reg ysem_int0_bb_b0 = {
847 	0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044};
848 
849 static struct attn_hw_reg ysem_int1_bb_b0 = {
850 	1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054};
851 
852 static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
853 	2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044};
854 
855 static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
856 	&ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0};
857 
858 static struct attn_hw_reg ysem_prty0_bb_b0 = {
859 	0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc};
860 
861 static struct attn_hw_reg ysem_prty1_bb_b0 = {
862 	1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204};
863 
864 static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
865 	&ysem_prty0_bb_b0, &ysem_prty1_bb_b0};
866 
867 static struct attn_hw_reg psem_int0_bb_b0 = {
868 	0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044};
869 
870 static struct attn_hw_reg psem_int1_bb_b0 = {
871 	1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054};
872 
873 static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
874 	2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044};
875 
876 static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
877 	&psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0};
878 
879 static struct attn_hw_reg psem_prty0_bb_b0 = {
880 	0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc};
881 
882 static struct attn_hw_reg psem_prty1_bb_b0 = {
883 	1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204};
884 
885 static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
886 	2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204};
887 
888 static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
889 	&psem_prty0_bb_b0, &psem_prty1_bb_b0,
890 	&psem_fast_memory_vfc_config_prty1_bb_b0};
891 
892 static struct attn_hw_reg rss_int0_bb_b0 = {
893 	0, 12, 0x238980, 0x23898c, 0x238988, 0x238984};
894 
895 static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
896 	&rss_int0_bb_b0};
897 
898 static struct attn_hw_reg rss_prty1_bb_b0 = {
899 	0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04};
900 
901 static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
902 	&rss_prty1_bb_b0};
903 
904 static struct attn_hw_reg tmld_int0_bb_b0 = {
905 	0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184};
906 
907 static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
908 	&tmld_int0_bb_b0};
909 
910 static struct attn_hw_reg tmld_prty1_bb_b0 = {
911 	0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204};
912 
913 static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
914 	&tmld_prty1_bb_b0};
915 
916 static struct attn_hw_reg muld_int0_bb_b0 = {
917 	0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184};
918 
919 static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
920 	&muld_int0_bb_b0};
921 
922 static struct attn_hw_reg muld_prty1_bb_b0 = {
923 	0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204};
924 
925 static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
926 	&muld_prty1_bb_b0};
927 
928 static struct attn_hw_reg yuld_int0_bb_b0 = {
929 	0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184};
930 
931 static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
932 	&yuld_int0_bb_b0};
933 
934 static struct attn_hw_reg yuld_prty1_bb_b0 = {
935 	0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204};
936 
937 static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
938 	&yuld_prty1_bb_b0};
939 
940 static struct attn_hw_reg xyld_int0_bb_b0 = {
941 	0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184};
942 
943 static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
944 	&xyld_int0_bb_b0};
945 
946 static struct attn_hw_reg xyld_prty1_bb_b0 = {
947 	0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204};
948 
949 static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
950 	&xyld_prty1_bb_b0};
951 
952 static struct attn_hw_reg prm_int0_bb_b0 = {
953 	0, 11, 0x230040, 0x23004c, 0x230048, 0x230044};
954 
955 static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
956 	&prm_int0_bb_b0};
957 
958 static struct attn_hw_reg prm_prty0_bb_b0 = {
959 	0, 1, 0x230050, 0x23005c, 0x230058, 0x230054};
960 
961 static struct attn_hw_reg prm_prty1_bb_b0 = {
962 	1, 24, 0x230200, 0x23020c, 0x230208, 0x230204};
963 
964 static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
965 	&prm_prty0_bb_b0, &prm_prty1_bb_b0};
966 
967 static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
968 	0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044};
969 
970 static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
971 	&pbf_pb1_int0_bb_b0};
972 
973 static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
974 	0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054};
975 
976 static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
977 	&pbf_pb1_prty0_bb_b0};
978 
979 static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
980 	0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044};
981 
982 static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
983 	&pbf_pb2_int0_bb_b0};
984 
985 static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
986 	0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054};
987 
988 static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
989 	&pbf_pb2_prty0_bb_b0};
990 
991 static struct attn_hw_reg rpb_int0_bb_b0 = {
992 	0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044};
993 
994 static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
995 	&rpb_int0_bb_b0};
996 
997 static struct attn_hw_reg rpb_prty0_bb_b0 = {
998 	0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054};
999 
1000 static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
1001 	&rpb_prty0_bb_b0};
1002 
1003 static struct attn_hw_reg btb_int0_bb_b0 = {
1004 	0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4};
1005 
1006 static struct attn_hw_reg btb_int1_bb_b0 = {
1007 	1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc};
1008 
1009 static struct attn_hw_reg btb_int2_bb_b0 = {
1010 	2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4};
1011 
1012 static struct attn_hw_reg btb_int3_bb_b0 = {
1013 	3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c};
1014 
1015 static struct attn_hw_reg btb_int4_bb_b0 = {
1016 	4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124};
1017 
1018 static struct attn_hw_reg btb_int5_bb_b0 = {
1019 	5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c};
1020 
1021 static struct attn_hw_reg btb_int6_bb_b0 = {
1022 	6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154};
1023 
1024 static struct attn_hw_reg btb_int8_bb_b0 = {
1025 	7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188};
1026 
1027 static struct attn_hw_reg btb_int9_bb_b0 = {
1028 	8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0};
1029 
1030 static struct attn_hw_reg btb_int10_bb_b0 = {
1031 	9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8};
1032 
1033 static struct attn_hw_reg btb_int11_bb_b0 = {
1034 	10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0};
1035 
1036 static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
1037 	&btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
1038 	&btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
1039 	&btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0};
1040 
1041 static struct attn_hw_reg btb_prty0_bb_b0 = {
1042 	0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0};
1043 
1044 static struct attn_hw_reg btb_prty1_bb_b0 = {
1045 	1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404};
1046 
1047 static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
1048 	&btb_prty0_bb_b0, &btb_prty1_bb_b0};
1049 
1050 static struct attn_hw_reg pbf_int0_bb_b0 = {
1051 	0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184};
1052 
1053 static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
1054 	&pbf_int0_bb_b0};
1055 
1056 static struct attn_hw_reg pbf_prty0_bb_b0 = {
1057 	0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194};
1058 
1059 static struct attn_hw_reg pbf_prty1_bb_b0 = {
1060 	1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204};
1061 
1062 static struct attn_hw_reg pbf_prty2_bb_b0 = {
1063 	2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214};
1064 
1065 static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
1066 	&pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0};
1067 
1068 static struct attn_hw_reg rdif_int0_bb_b0 = {
1069 	0, 8, 0x300180, 0x30018c, 0x300188, 0x300184};
1070 
1071 static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
1072 	&rdif_int0_bb_b0};
1073 
1074 static struct attn_hw_reg rdif_prty0_bb_b0 = {
1075 	0, 1, 0x300190, 0x30019c, 0x300198, 0x300194};
1076 
1077 static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
1078 	&rdif_prty0_bb_b0};
1079 
1080 static struct attn_hw_reg tdif_int0_bb_b0 = {
1081 	0, 8, 0x310180, 0x31018c, 0x310188, 0x310184};
1082 
1083 static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
1084 	&tdif_int0_bb_b0};
1085 
1086 static struct attn_hw_reg tdif_prty0_bb_b0 = {
1087 	0, 1, 0x310190, 0x31019c, 0x310198, 0x310194};
1088 
1089 static struct attn_hw_reg tdif_prty1_bb_b0 = {
1090 	1, 11, 0x310200, 0x31020c, 0x310208, 0x310204};
1091 
1092 static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
1093 	&tdif_prty0_bb_b0, &tdif_prty1_bb_b0};
1094 
1095 static struct attn_hw_reg cdu_int0_bb_b0 = {
1096 	0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc};
1097 
1098 static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
1099 	&cdu_int0_bb_b0};
1100 
1101 static struct attn_hw_reg cdu_prty1_bb_b0 = {
1102 	0, 5, 0x580200, 0x58020c, 0x580208, 0x580204};
1103 
1104 static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
1105 	&cdu_prty1_bb_b0};
1106 
1107 static struct attn_hw_reg ccfc_int0_bb_b0 = {
1108 	0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184};
1109 
1110 static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
1111 	&ccfc_int0_bb_b0};
1112 
1113 static struct attn_hw_reg ccfc_prty1_bb_b0 = {
1114 	0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204};
1115 
1116 static struct attn_hw_reg ccfc_prty0_bb_b0 = {
1117 	1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8};
1118 
1119 static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
1120 	&ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0};
1121 
1122 static struct attn_hw_reg tcfc_int0_bb_b0 = {
1123 	0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184};
1124 
1125 static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
1126 	&tcfc_int0_bb_b0};
1127 
1128 static struct attn_hw_reg tcfc_prty1_bb_b0 = {
1129 	0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204};
1130 
1131 static struct attn_hw_reg tcfc_prty0_bb_b0 = {
1132 	1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8};
1133 
1134 static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
1135 	&tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0};
1136 
1137 static struct attn_hw_reg igu_int0_bb_b0 = {
1138 	0, 11, 0x180180, 0x18018c, 0x180188, 0x180184};
1139 
1140 static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
1141 	&igu_int0_bb_b0};
1142 
1143 static struct attn_hw_reg igu_prty0_bb_b0 = {
1144 	0, 1, 0x180190, 0x18019c, 0x180198, 0x180194};
1145 
1146 static struct attn_hw_reg igu_prty1_bb_b0 = {
1147 	1, 31, 0x180200, 0x18020c, 0x180208, 0x180204};
1148 
1149 static struct attn_hw_reg igu_prty2_bb_b0 = {
1150 	2, 1, 0x180210, 0x18021c, 0x180218, 0x180214};
1151 
1152 static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
1153 	&igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0};
1154 
1155 static struct attn_hw_reg cau_int0_bb_b0 = {
1156 	0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0};
1157 
1158 static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
1159 	&cau_int0_bb_b0};
1160 
1161 static struct attn_hw_reg cau_prty1_bb_b0 = {
1162 	0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204};
1163 
1164 static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
1165 	&cau_prty1_bb_b0};
1166 
1167 static struct attn_hw_reg dbg_int0_bb_b0 = {
1168 	0, 1, 0x10180, 0x1018c, 0x10188, 0x10184};
1169 
1170 static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
1171 	&dbg_int0_bb_b0};
1172 
1173 static struct attn_hw_reg dbg_prty1_bb_b0 = {
1174 	0, 1, 0x10200, 0x1020c, 0x10208, 0x10204};
1175 
1176 static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
1177 	&dbg_prty1_bb_b0};
1178 
1179 static struct attn_hw_reg nig_int0_bb_b0 = {
1180 	0, 12, 0x500040, 0x50004c, 0x500048, 0x500044};
1181 
1182 static struct attn_hw_reg nig_int1_bb_b0 = {
1183 	1, 32, 0x500050, 0x50005c, 0x500058, 0x500054};
1184 
1185 static struct attn_hw_reg nig_int2_bb_b0 = {
1186 	2, 20, 0x500060, 0x50006c, 0x500068, 0x500064};
1187 
1188 static struct attn_hw_reg nig_int3_bb_b0 = {
1189 	3, 18, 0x500070, 0x50007c, 0x500078, 0x500074};
1190 
1191 static struct attn_hw_reg nig_int4_bb_b0 = {
1192 	4, 20, 0x500080, 0x50008c, 0x500088, 0x500084};
1193 
1194 static struct attn_hw_reg nig_int5_bb_b0 = {
1195 	5, 18, 0x500090, 0x50009c, 0x500098, 0x500094};
1196 
1197 static struct attn_hw_reg *nig_int_bb_b0_regs[6] = {
1198 	&nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
1199 	&nig_int4_bb_b0, &nig_int5_bb_b0};
1200 
1201 static struct attn_hw_reg nig_prty0_bb_b0 = {
1202 	0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4};
1203 
1204 static struct attn_hw_reg nig_prty1_bb_b0 = {
1205 	1, 31, 0x500200, 0x50020c, 0x500208, 0x500204};
1206 
1207 static struct attn_hw_reg nig_prty2_bb_b0 = {
1208 	2, 31, 0x500210, 0x50021c, 0x500218, 0x500214};
1209 
1210 static struct attn_hw_reg nig_prty3_bb_b0 = {
1211 	3, 31, 0x500220, 0x50022c, 0x500228, 0x500224};
1212 
1213 static struct attn_hw_reg nig_prty4_bb_b0 = {
1214 	4, 17, 0x500230, 0x50023c, 0x500238, 0x500234};
1215 
1216 static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = {
1217 	&nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0,
1218 	&nig_prty3_bb_b0, &nig_prty4_bb_b0};
1219 
1220 static struct attn_hw_reg ipc_int0_bb_b0 = {
1221 	0, 13, 0x2050c, 0x20518, 0x20514, 0x20510};
1222 
1223 static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
1224 	&ipc_int0_bb_b0};
1225 
1226 static struct attn_hw_reg ipc_prty0_bb_b0 = {
1227 	0, 1, 0x2051c, 0x20528, 0x20524, 0x20520};
1228 
1229 static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = {
1230 	&ipc_prty0_bb_b0};
1231 
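/* Aggregate of the register descriptors above, one entry per HW block. The
 * inner initializer fills chip_regs[0] in struct attn_hw_regs field order:
 *     { num_of_int_regs, num_of_prty_regs, int_regs, prty_regs }
 * Blocks with no attention sources (e.g. "dbu", "mcp") carry an empty
 * {0, 0, NULL, NULL} descriptor.
 */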
1232 static struct attn_hw_block attn_blocks[] = {
1233 	{"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } },
1234 	{"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } },
1235 	{"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } },
1236 	{"dbu", {{0, 0, NULL, NULL} } },
1237 	{"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs,
1238 		      pglue_b_prty_bb_b0_regs} } },
1239 	{"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } },
1240 	{"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } },
1241 	{"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } },
1242 	{"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } },
1243 	{"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } },
1244 	{"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } },
1245 	{"mcp", {{0, 0, NULL, NULL} } },
1246 	{"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } },
1247 	{"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } },
1248 	{"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs,
1249 		      pswhst2_prty_bb_b0_regs} } },
1250 	{"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } },
1251 	{"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } },
1252 	{"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } },
1253 	{"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } },
1254 	{"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } },
1255 	{"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } },
1256 	{"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } },
1257 	{"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } },
1258 	{"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } },
1259 	{"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } },
1260 	{"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } },
1261 	{"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } },
1262 	{"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } },
1263 	{"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } },
1264 	{"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } },
1265 	{"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } },
1266 	{"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } },
1267 	{"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } },
1268 	{"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } },
1269 	{"src", {{1, 0, src_int_bb_b0_regs, NULL} } },
1270 	{"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } },
1271 	{"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } },
1272 	{"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } },
1273 	{"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } },
1274 	{"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } },
1275 	{"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } },
1276 	{"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } },
1277 	{"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } },
1278 	{"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } },
1279 	{"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } },
1280 	{"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } },
1281 	{"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } },
1282 	{"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } },
1283 	{"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } },
1284 	{"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } },
1285 	{"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } },
1286 	{"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } },
1287 	{"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } },
1288 	{"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } },
1289 	{"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs,
1290 		      pbf_pb1_prty_bb_b0_regs} } },
1291 	{"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs,
1292 		      pbf_pb2_prty_bb_b0_regs} } },
1293 	{"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } },
1294 	{"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } },
1295 	{"pbf", { {1, 3, pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } },
1296 	{"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } },
1297 	{"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } },
1298 	{"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } },
1299 	{"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } },
1300 	{"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } },
1301 	{"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } },
1302 	{"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } },
1303 	{"umac", { {0, 0, NULL, NULL} } },
1304 	{"xmac", { {0, 0, NULL, NULL} } },
1305 	{"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } },
1306 	{"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } },
1307 	{"wol", { {0, 0, NULL, NULL} } },
1308 	{"bmbn", { {0, 0, NULL, NULL} } },
1309 	{"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } },
1310 	{"nwm", { {0, 0, NULL, NULL} } },
1311 	{"nws", { {0, 0, NULL, NULL} } },
1312 	{"ms", { {0, 0, NULL, NULL} } },
1313 	{"phy_pcie", { {0, 0, NULL, NULL} } },
1314 	{"misc_aeu", { {0, 0, NULL, NULL} } },
1315 	{"bar0_map", { {0, 0, NULL, NULL} } },};
1316 
1317 /* Specific HW attention callbacks */
1318 static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
1319 {
1320 	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
1321 
1322 	/* This might occur in certain instances; log it once, then mask it */
1323 	DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
1324 		tmp);
1325 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
1326 	       0xffffffff);
1327 
1328 	return 0;
1329 }
1330 
1331 #define QED_PSWHST_ATTENTION_INCORRECT_ACCESS		(0x1)
1332 #define ATTENTION_INCORRECT_ACCESS_WR_MASK		(0x1)
1333 #define ATTENTION_INCORRECT_ACCESS_WR_SHIFT		(0)
1334 #define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK		(0xf)
1335 #define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT		(1)
1336 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK	(0x1)
1337 #define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT	(5)
1338 #define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK		(0xff)
1339 #define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT		(6)
1340 #define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK		(0xf)
1341 #define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT		(14)
1342 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK		(0xff)
1343 #define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT	(18)
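/* The masks/shifts above decode PSWHST_REG_INCORRECT_ACCESS_DATA through the
 * GET_FIELD() helper (defined in qed.h). As a sketch, assuming that helper's
 * usual ((value >> NAME_SHIFT) & NAME_MASK) form,
 *     GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID)
 * evaluates to ((data >> 14) & 0xf), i.e. the PF behind the bad access.
 */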
1344 static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
1345 {
1346 	u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1347 			 PSWHST_REG_INCORRECT_ACCESS_VALID);
1348 
1349 	if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
1350 		u32 addr, data, length;
1351 
1352 		addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1353 			      PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
1354 		data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1355 			      PSWHST_REG_INCORRECT_ACCESS_DATA);
1356 		length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1357 				PSWHST_REG_INCORRECT_ACCESS_LENGTH);
1358 
1359 		DP_INFO(p_hwfn->cdev,
1360 			"Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
1361 			addr, length,
1362 			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
1363 			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
1364 			(u8) GET_FIELD(data,
1365 				       ATTENTION_INCORRECT_ACCESS_VF_VALID),
1366 			(u8) GET_FIELD(data,
1367 				       ATTENTION_INCORRECT_ACCESS_CLIENT),
1368 			(u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
1369 			(u8) GET_FIELD(data,
1370 				       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
1371 			data);
1372 	}
1373 
1374 	return 0;
1375 }
1376 
1377 #define QED_GRC_ATTENTION_VALID_BIT	(1 << 0)
1378 #define QED_GRC_ATTENTION_ADDRESS_MASK	(0x7fffff)
1379 #define QED_GRC_ATTENTION_ADDRESS_SHIFT	(0)
1380 #define QED_GRC_ATTENTION_RDWR_BIT	(1 << 23)
1381 #define QED_GRC_ATTENTION_MASTER_MASK	(0xf)
1382 #define QED_GRC_ATTENTION_MASTER_SHIFT	(24)
1383 #define QED_GRC_ATTENTION_PF_MASK	(0xf)
1384 #define QED_GRC_ATTENTION_PF_SHIFT	(0)
1385 #define QED_GRC_ATTENTION_VF_MASK	(0xff)
1386 #define QED_GRC_ATTENTION_VF_SHIFT	(4)
1387 #define QED_GRC_ATTENTION_PRIV_MASK	(0x3)
1388 #define QED_GRC_ATTENTION_PRIV_SHIFT	(14)
1389 #define QED_GRC_ATTENTION_PRIV_VF	(0)
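/* The GRC timeout attention latches two data words: DATA_0 carries the
 * ADDRESS field (printed shifted left by 2, i.e. dword-granular), the RDWR
 * bit and the MASTER field, while DATA_1 carries the PF/VF/PRIV fields.
 * attn_master_to_str() below maps the MASTER field to the name of the
 * initiating master.
 */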
1390 static const char *attn_master_to_str(u8 master)
1391 {
1392 	switch (master) {
1393 	case 1: return "PXP";
1394 	case 2: return "MCP";
1395 	case 3: return "MSDM";
1396 	case 4: return "PSDM";
1397 	case 5: return "YSDM";
1398 	case 6: return "USDM";
1399 	case 7: return "TSDM";
1400 	case 8: return "XSDM";
1401 	case 9: return "DBU";
1402 	case 10: return "DMAE";
1403 	default:
1404 		return "Unknown";
1405 	}
1406 }
1407 
1408 static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
1409 {
1410 	u32 tmp, tmp2;
1411 
1412 	/* We've already cleared the timeout interrupt register, so we learn
1413 	 * of interrupts via the validity register
1414 	 */
1415 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1416 		     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
1417 	if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
1418 		goto out;
1419 
1420 	/* Read the GRC timeout information */
1421 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1422 		     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
1423 	tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1424 		      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
1425 
1426 	DP_INFO(p_hwfn->cdev,
1427 		"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
1428 		tmp2, tmp,
1429 		(tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
1430 		GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
1431 		attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
1432 		GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
1433 		(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
1434 		 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
1435 		GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
1436 
1437 out:
1438 	/* Regardless of anything else, clear the validity bit */
1439 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
1440 	       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
1441 	return 0;
1442 }
1443 
1444 #define PGLUE_ATTENTION_VALID			(1 << 29)
1445 #define PGLUE_ATTENTION_RD_VALID		(1 << 26)
1446 #define PGLUE_ATTENTION_DETAILS_PFID_MASK	(0xf)
1447 #define PGLUE_ATTENTION_DETAILS_PFID_SHIFT	(20)
1448 #define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK	(0x1)
1449 #define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT	(19)
1450 #define PGLUE_ATTENTION_DETAILS_VFID_MASK	(0xff)
1451 #define PGLUE_ATTENTION_DETAILS_VFID_SHIFT	(24)
1452 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK	(0x1)
1453 #define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT	(21)
1454 #define PGLUE_ATTENTION_DETAILS2_BME_MASK	(0x1)
1455 #define PGLUE_ATTENTION_DETAILS2_BME_SHIFT	(22)
1456 #define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK	(0x1)
1457 #define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT	(23)
1458 #define PGLUE_ATTENTION_ICPL_VALID		(1 << 23)
1459 #define PGLUE_ATTENTION_ZLR_VALID		(1 << 25)
1460 #define PGLUE_ATTENTION_ILT_VALID		(1 << 23)
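/* qed_pglub_rbc_attn_cb() below walks the PGLUE_B error latches in turn -
 * blocked TX writes, blocked TX reads, ICPL, master ZLR and VF ILT errors -
 * logs whichever are marked valid, and finally clears the latched
 * indications through PGLUE_B_REG_LATCHED_ERRORS_CLR.
 */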
1461 static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
1462 {
1463 	u32 tmp;
1464 
1465 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1466 		     PGLUE_B_REG_TX_ERR_WR_DETAILS2);
1467 	if (tmp & PGLUE_ATTENTION_VALID) {
1468 		u32 addr_lo, addr_hi, details;
1469 
1470 		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1471 				 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
1472 		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1473 				 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
1474 		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1475 				 PGLUE_B_REG_TX_ERR_WR_DETAILS);
1476 
1477 		DP_INFO(p_hwfn,
1478 			"Illegal write by chip to [%08x:%08x] blocked.\n"
1479 			"Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
1480 			"Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
1481 			addr_hi, addr_lo, details,
1482 			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
1483 			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
1484 			GET_FIELD(details,
1485 				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
1486 			tmp,
1487 			GET_FIELD(tmp,
1488 				  PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
1489 			GET_FIELD(tmp,
1490 				  PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
1491 			GET_FIELD(tmp,
1492 				  PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
1493 	}
1494 
1495 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1496 		     PGLUE_B_REG_TX_ERR_RD_DETAILS2);
1497 	if (tmp & PGLUE_ATTENTION_RD_VALID) {
1498 		u32 addr_lo, addr_hi, details;
1499 
1500 		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1501 				 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
1502 		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1503 				 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
1504 		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1505 				 PGLUE_B_REG_TX_ERR_RD_DETAILS);
1506 
1507 		DP_INFO(p_hwfn,
1508 			"Illegal read by chip from [%08x:%08x] blocked.\n"
1509 			" Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
1510 			" Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
1511 			addr_hi, addr_lo, details,
1512 			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
1513 			(u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
1514 			GET_FIELD(details,
1515 				  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
1516 			tmp,
1517 			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
1518 									 : 0,
1519 			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
1520 			GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
1521 									: 0);
1522 	}
1523 
1524 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1525 		     PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
1526 	if (tmp & PGLUE_ATTENTION_ICPL_VALID)
		DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
1528 
1529 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1530 		     PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
1531 	if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
1532 		u32 addr_hi, addr_lo;
1533 
1534 		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1535 				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
1536 		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1537 				 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
1538 
		DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
1540 			tmp, addr_hi, addr_lo);
1541 	}
1542 
1543 	tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1544 		     PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
1545 	if (tmp & PGLUE_ATTENTION_ILT_VALID) {
1546 		u32 addr_hi, addr_lo, details;
1547 
1548 		addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1549 				 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
1550 		addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1551 				 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
1552 		details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1553 				 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
1554 
1555 		DP_INFO(p_hwfn,
1556 			"ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
1557 			details, tmp, addr_hi, addr_lo);
1558 	}
1559 
1560 	/* Clear the indications */
1561 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
1562 	       PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
1563 
1564 	return 0;
1565 }
1566 
1567 #define QED_DORQ_ATTENTION_REASON_MASK	(0xfffff)
#define QED_DORQ_ATTENTION_OPAQUE_MASK	(0xffff)
1569 #define QED_DORQ_ATTENTION_SIZE_MASK	(0x7f)
1570 #define QED_DORQ_ATTENTION_SIZE_SHIFT	(16)
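/**
 * @brief qed_dorq_attn_cb - handle a DORQ (doorbell queue) attention
 *
 * Reads the doorbell-drop reason and details registers and logs them.
 * Always returns -EINVAL, so the caller treats the attention as non-benign
 * and prevents it from being asserted again.
 *
 * @param p_hwfn
 * @return int
 */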
1571 static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
1572 {
1573 	u32 reason;
1574 
1575 	reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
1576 			QED_DORQ_ATTENTION_REASON_MASK;
1577 	if (reason) {
1578 		u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1579 				     DORQ_REG_DB_DROP_DETAILS);
1580 
1581 		DP_INFO(p_hwfn->cdev,
1582 			"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
1583 			qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1584 			       DORQ_REG_DB_DROP_DETAILS_ADDRESS),
1585 			(u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
1586 			GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
1587 			reason);
1588 	}
1589 
1590 	return -EINVAL;
1591 }
1592 
/* Note: aeu_invert_reg must be defined in the same bit order as the HW AEU registers */
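/* Each aeu_invert_reg below describes one 32-bit "after invert" register.
 * A single entry may cover several consecutive bits via ATTENTION_LENGTH();
 * both the parity-mask calculation and the deassertion walk rely on the
 * per-entry lengths summing to exactly 32 bits for every register.
 */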
1594 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
1595 	{
1596 		{       /* After Invert 1 */
1597 			{"GPIO0 function%d",
1598 			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
1599 		}
1600 	},
1601 
1602 	{
1603 		{       /* After Invert 2 */
1604 			{"PGLUE config_space", ATTENTION_SINGLE,
1605 			 NULL, MAX_BLOCK_ID},
1606 			{"PGLUE misc_flr", ATTENTION_SINGLE,
1607 			 NULL, MAX_BLOCK_ID},
1608 			{"PGLUE B RBC", ATTENTION_PAR_INT,
1609 			 qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
1610 			{"PGLUE misc_mctp", ATTENTION_SINGLE,
1611 			 NULL, MAX_BLOCK_ID},
1612 			{"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
1613 			{"SMB event", ATTENTION_SINGLE,	NULL, MAX_BLOCK_ID},
1614 			{"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
1615 			{"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
1616 					  (1 << ATTENTION_OFFSET_SHIFT),
1617 			 NULL, MAX_BLOCK_ID},
1618 			{"PCIE glue/PXP VPD %d",
1619 			 (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
1620 		}
1621 	},
1622 
1623 	{
1624 		{       /* After Invert 3 */
1625 			{"General Attention %d",
1626 			 (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
1627 		}
1628 	},
1629 
1630 	{
1631 		{       /* After Invert 4 */
1632 			{"General Attention 32", ATTENTION_SINGLE,
1633 			 NULL, MAX_BLOCK_ID},
1634 			{"General Attention %d",
1635 			 (2 << ATTENTION_LENGTH_SHIFT) |
1636 			 (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
1637 			{"General Attention 35", ATTENTION_SINGLE,
1638 			 NULL, MAX_BLOCK_ID},
1639 			{"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT),
1640 			 NULL, BLOCK_CNIG},
1641 			{"MCP CPU", ATTENTION_SINGLE,
1642 			 qed_mcp_attn_cb, MAX_BLOCK_ID},
1643 			{"MCP Watchdog timer", ATTENTION_SINGLE,
1644 			 NULL, MAX_BLOCK_ID},
1645 			{"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
1646 			{"AVS stop status ready", ATTENTION_SINGLE,
1647 			 NULL, MAX_BLOCK_ID},
1648 			{"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
1649 			{"MSTAT per-path", ATTENTION_PAR_INT,
1650 			 NULL, MAX_BLOCK_ID},
1651 			{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
1652 			 NULL, MAX_BLOCK_ID},
1653 			{"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
1654 			{"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
1655 			{"BTB",	ATTENTION_PAR_INT, NULL, BLOCK_BTB},
1656 			{"BRB",	ATTENTION_PAR_INT, NULL, BLOCK_BRB},
1657 			{"PRS",	ATTENTION_PAR_INT, NULL, BLOCK_PRS},
1658 		}
1659 	},
1660 
1661 	{
1662 		{       /* After Invert 5 */
1663 			{"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
1664 			{"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
1665 			{"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
1666 			{"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
1667 			{"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
1668 			{"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
1669 			{"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
1670 			{"MCM",  ATTENTION_PAR_INT, NULL, BLOCK_MCM},
1671 			{"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
1672 			{"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
1673 			{"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
1674 			{"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
1675 			{"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
1676 			{"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
1677 			{"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
1678 			{"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
1679 		}
1680 	},
1681 
1682 	{
1683 		{       /* After Invert 6 */
1684 			{"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
1685 			{"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
1686 			{"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
1687 			{"XCM",	ATTENTION_PAR_INT, NULL, BLOCK_XCM},
1688 			{"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
1689 			{"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
1690 			{"YCM",	ATTENTION_PAR_INT, NULL, BLOCK_YCM},
1691 			{"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
1692 			{"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
1693 			{"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
1694 			{"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
			{"MULD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
1696 			{"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
1697 			{"DORQ", ATTENTION_PAR_INT,
1698 			 qed_dorq_attn_cb, BLOCK_DORQ},
1699 			{"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
1700 			{"IPC",	ATTENTION_PAR_INT, NULL, BLOCK_IPC},
1701 		}
1702 	},
1703 
1704 	{
1705 		{       /* After Invert 7 */
1706 			{"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
1707 			{"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
1708 			{"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
1709 			{"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
1710 			{"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
1711 			{"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
1712 			{"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
1713 			{"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
1714 			{"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
1715 			{"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
1716 			{"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
1717 			{"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
1718 			{"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
1719 			{"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
1720 			{"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
1721 			{"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
1722 			{"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
1723 		}
1724 	},
1725 
1726 	{
1727 		{       /* After Invert 8 */
1728 			{"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
1729 			 NULL, BLOCK_PSWRQ2},
1730 			{"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
1731 			{"PSWWR (pci_clk)", ATTENTION_PAR_INT,
1732 			 NULL, BLOCK_PSWWR2},
1733 			{"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
1734 			{"PSWRD (pci_clk)", ATTENTION_PAR_INT,
1735 			 NULL, BLOCK_PSWRD2},
1736 			{"PSWHST", ATTENTION_PAR_INT,
1737 			 qed_pswhst_attn_cb, BLOCK_PSWHST},
1738 			{"PSWHST (pci_clk)", ATTENTION_PAR_INT,
1739 			 NULL, BLOCK_PSWHST2},
1740 			{"GRC",	ATTENTION_PAR_INT,
1741 			 qed_grc_attn_cb, BLOCK_GRC},
1742 			{"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
1743 			{"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
1744 			{"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
1745 			{"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
1746 			{"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
1747 			{"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
1748 			{"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
1749 			{"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
1750 			{"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
1751 			{"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
1752 			 NULL, BLOCK_PGLCS},
1753 			{"PERST_B assertion", ATTENTION_SINGLE,
1754 			 NULL, MAX_BLOCK_ID},
1755 			{"PERST_B deassertion", ATTENTION_SINGLE,
1756 			 NULL, MAX_BLOCK_ID},
1757 			{"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
1758 			 NULL, MAX_BLOCK_ID},
1759 		}
1760 	},
1761 
1762 	{
1763 		{       /* After Invert 9 */
1764 			{"MCP Latched memory", ATTENTION_PAR,
1765 			 NULL, MAX_BLOCK_ID},
1766 			{"MCP Latched scratchpad cache", ATTENTION_SINGLE,
1767 			 NULL, MAX_BLOCK_ID},
1768 			{"MCP Latched ump_tx", ATTENTION_PAR,
1769 			 NULL, MAX_BLOCK_ID},
1770 			{"MCP Latched scratchpad", ATTENTION_PAR,
1771 			 NULL, MAX_BLOCK_ID},
1772 			{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
1773 			 NULL, MAX_BLOCK_ID},
1774 		}
1775 	},
1776 };
1777 
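/* The attention status block tracks 12 attention state bits (ATTN_STATE_BITS);
 * only the lower 10 of them (ATTN_BITS_MASKABLE) are masked/unmasked in
 * IGU_REG_ATTENTION_ENABLE during assertion/deassertion handling.
 */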
1778 #define ATTN_STATE_BITS         (0xfff)
1779 #define ATTN_BITS_MASKABLE      (0x3ff)
1780 struct qed_sb_attn_info {
1781 	/* Virtual & Physical address of the SB */
1782 	struct atten_status_block       *sb_attn;
1783 	dma_addr_t			sb_phys;
1784 
1785 	/* Last seen running index */
1786 	u16				index;
1787 
1788 	/* A mask of the AEU bits resulting in a parity error */
1789 	u32				parity_mask[NUM_ATTN_REGS];
1790 
1791 	/* A pointer to the attention description structure */
1792 	struct aeu_invert_reg		*p_aeu_desc;
1793 
1794 	/* Previously asserted attentions, which are still unasserted */
1795 	u16				known_attn;
1796 
1797 	/* Cleanup address for the link's general hw attention */
1798 	u32				mfw_attn_addr;
1799 };
1800 
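/**
 * @brief qed_attn_update_idx - check for new attention information
 *
 * Compares the attention status block's running index against the last
 * value seen by the driver; if it advanced, records the new value and
 * reports QED_SB_ATT_IDX so the DPC will process the new attentions.
 *
 * @param p_hwfn
 * @param p_sb_desc
 * @return u16
 */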
1801 static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
1802 				      struct qed_sb_attn_info *p_sb_desc)
1803 {
1804 	u16 rc = 0, index;
1805 
	/* Make certain the HW write took effect */
1807 	mmiowb();
1808 
1809 	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
1810 	if (p_sb_desc->index != index) {
1811 		p_sb_desc->index	= index;
1812 		rc		      = QED_SB_ATT_IDX;
1813 	}
1814 
1815 	/* Make certain we got a consistent view with HW */
1816 	mmiowb();
1817 
1818 	return rc;
1819 }
1820 
1821 /**
1822  *  @brief qed_int_assertion - handles asserted attention bits
1823  *
1824  *  @param p_hwfn
1825  *  @param asserted_bits newly asserted bits
1826  *  @return int
1827  */
1828 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
1829 {
1830 	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
1831 	u32 igu_mask;
1832 
1833 	/* Mask the source of the attention in the IGU */
1834 	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
1835 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
1836 		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
1837 	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
1838 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
1839 
1840 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1841 		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
1842 		   sb_attn_sw->known_attn,
1843 		   sb_attn_sw->known_attn | asserted_bits);
1844 	sb_attn_sw->known_attn |= asserted_bits;
1845 
1846 	/* Handle MCP events */
1847 	if (asserted_bits & 0x100) {
1848 		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
1849 		/* Clean the MCP attention */
1850 		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
1851 		       sb_attn_sw->mfw_attn_addr, 0);
1852 	}
1853 
1854 	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
1855 		      GTT_BAR0_MAP_REG_IGU_CMD +
1856 		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
1857 			IGU_CMD_INT_ACK_BASE) << 3),
1858 		      (u32)asserted_bits);
1859 
1860 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
1861 		   asserted_bits);
1862 
1863 	return 0;
1864 }
1865 
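/**
 * @brief qed_int_deassertion_print_bit - log every set bit of a HW block's
 *        interrupt/parity status register, marking bits that are masked
 *
 * @param p_hwfn
 * @param p_reg_desc
 * @param p_block
 * @param type
 * @param val - value read from the status register
 * @param mask - value read from the corresponding mask register
 */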
1866 static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn,
1867 					  struct attn_hw_reg *p_reg_desc,
1868 					  struct attn_hw_block *p_block,
1869 					  enum qed_attention_type type,
1870 					  u32 val, u32 mask)
1871 {
1872 	int j;
1873 
1874 	for (j = 0; j < p_reg_desc->num_of_bits; j++) {
1875 		if (!(val & (1 << j)))
1876 			continue;
1877 
1878 		DP_NOTICE(p_hwfn,
1879 			  "%s (%s): reg %d [0x%08x], bit %d [%s]\n",
1880 			  p_block->name,
1881 			  type == QED_ATTN_TYPE_ATTN ? "Interrupt" :
1882 						       "Parity",
1883 			  p_reg_desc->reg_idx, p_reg_desc->sts_addr,
1884 			  j, (mask & (1 << j)) ? " [MASKED]" : "");
1885 	}
1886 }
1887 
1888 /**
1889  * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
1890  * cause of the attention
1891  *
1892  * @param p_hwfn
1893  * @param p_aeu - descriptor of an AEU bit which caused the attention
1894  * @param aeu_en_reg - register offset of the AEU enable reg. which configured
1895  *  this bit to this group.
 * @param bitmask - mask of the bit(s) within aeu_en_reg that caused the attention
1897  *
1898  * @return int
1899  */
1900 static int
1901 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
1902 			    struct aeu_invert_reg_bit *p_aeu,
1903 			    u32 aeu_en_reg,
1904 			    u32 bitmask)
1905 {
1906 	int rc = -EINVAL;
1907 	u32 val;
1908 
1909 	DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
1910 		p_aeu->bit_name, bitmask);
1911 
1912 	/* Call callback before clearing the interrupt status */
1913 	if (p_aeu->cb) {
1914 		DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
1915 			p_aeu->bit_name);
1916 		rc = p_aeu->cb(p_hwfn);
1917 	}
1918 
1919 	/* Handle HW block interrupt registers */
1920 	if (p_aeu->block_index != MAX_BLOCK_ID) {
1921 		struct attn_hw_block *p_block;
1922 		u32 mask;
1923 		int i;
1924 
1925 		p_block = &attn_blocks[p_aeu->block_index];
1926 
1927 		/* Handle each interrupt register */
1928 		for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) {
1929 			struct attn_hw_reg *p_reg_desc;
1930 			u32 sts_addr;
1931 
1932 			p_reg_desc = p_block->chip_regs[0].int_regs[i];
1933 
			/* In case of a fatal attention, don't clear the status
			 * so it will appear in the following idle check.
			 */
1937 			if (rc == 0)
1938 				sts_addr = p_reg_desc->sts_clr_addr;
1939 			else
1940 				sts_addr = p_reg_desc->sts_addr;
1941 
1942 			val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
1943 			mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1944 				      p_reg_desc->mask_addr);
1945 			qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
1946 						      p_block,
1947 						      QED_ATTN_TYPE_ATTN,
1948 						      val, mask);
1949 		}
1950 	}
1951 
1952 	/* If the attention is benign, no need to prevent it */
1953 	if (!rc)
1954 		goto out;
1955 
1956 	/* Prevent this Attention from being asserted in the future */
1957 	val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
1958 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
1959 	DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
1960 		p_aeu->bit_name);
1961 
1962 out:
1963 	return rc;
1964 }
1965 
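/**
 * @brief qed_int_parity_print - walk a HW block's parity status registers
 *        and log every asserted parity bit
 *
 * @param p_hwfn
 * @param p_aeu - descriptor of the AEU bit which caused the parity
 * @param p_block
 * @param bit_index
 */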
1966 static void qed_int_parity_print(struct qed_hwfn *p_hwfn,
1967 				 struct aeu_invert_reg_bit *p_aeu,
1968 				 struct attn_hw_block *p_block,
1969 				 u8 bit_index)
1970 {
1971 	int i;
1972 
1973 	for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) {
1974 		struct attn_hw_reg *p_reg_desc;
1975 		u32 val, mask;
1976 
1977 		p_reg_desc = p_block->chip_regs[0].prty_regs[i];
1978 
1979 		val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1980 			     p_reg_desc->sts_clr_addr);
1981 		mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1982 			      p_reg_desc->mask_addr);
1983 		qed_int_deassertion_print_bit(p_hwfn, p_reg_desc,
1984 					      p_block,
1985 					      QED_ATTN_TYPE_PARITY,
1986 					      val, mask);
1987 	}
1988 }
1989 
1990 /**
1991  * @brief qed_int_deassertion_parity - handle a single parity AEU source
1992  *
1993  * @param p_hwfn
1994  * @param p_aeu - descriptor of an AEU bit which caused the parity
1995  * @param bit_index
1996  */
1997 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
1998 				       struct aeu_invert_reg_bit *p_aeu,
1999 				       u8 bit_index)
2000 {
2001 	u32 block_id = p_aeu->block_index;
2002 
2003 	DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n",
2004 		p_aeu->bit_name, bit_index);
2005 
2006 	if (block_id != MAX_BLOCK_ID) {
2007 		qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
2008 				     bit_index);
2009 
2010 		/* In BB, there's a single parity bit for several blocks */
2011 		if (block_id == BLOCK_BTB) {
2012 			qed_int_parity_print(p_hwfn, p_aeu,
2013 					     &attn_blocks[BLOCK_OPTE],
2014 					     bit_index);
2015 			qed_int_parity_print(p_hwfn, p_aeu,
2016 					     &attn_blocks[BLOCK_MCP],
2017 					     bit_index);
2018 		}
2019 	}
2020 }
2021 
2022 /**
2023  * @brief - handles deassertion of previously asserted attentions.
2024  *
2025  * @param p_hwfn
2026  * @param deasserted_bits - newly deasserted bits
2027  * @return int
2028  *
2029  */
2030 static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
2031 			       u16 deasserted_bits)
2032 {
2033 	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
2034 	u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
2035 	u8 i, j, k, bit_idx;
2036 	int rc = 0;
2037 
2038 	/* Read the attention registers in the AEU */
2039 	for (i = 0; i < NUM_ATTN_REGS; i++) {
2040 		aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
2041 					MISC_REG_AEU_AFTER_INVERT_1_IGU +
2042 					i * 0x4);
2043 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2044 			   "Deasserted bits [%d]: %08x\n",
2045 			   i, aeu_inv_arr[i]);
2046 	}
2047 
2048 	/* Find parity attentions first */
2049 	for (i = 0; i < NUM_ATTN_REGS; i++) {
2050 		struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
2051 		u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
2052 				MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
2053 				i * sizeof(u32));
2054 		u32 parities;
2055 
2056 		/* Skip register in which no parity bit is currently set */
2057 		parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
2058 		if (!parities)
2059 			continue;
2060 
2061 		for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
2062 			struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
2063 
2064 			if ((p_bit->flags & ATTENTION_PARITY) &&
2065 			    !!(parities & BIT(bit_idx)))
2066 				qed_int_deassertion_parity(p_hwfn, p_bit,
2067 							   bit_idx);
2068 
2069 			bit_idx += ATTENTION_LENGTH(p_bit->flags);
2070 		}
2071 	}
2072 
2073 	/* Find non-parity cause for attention and act */
2074 	for (k = 0; k < MAX_ATTN_GRPS; k++) {
2075 		struct aeu_invert_reg_bit *p_aeu;
2076 
2077 		/* Handle only groups whose attention is currently deasserted */
2078 		if (!(deasserted_bits & (1 << k)))
2079 			continue;
2080 
2081 		for (i = 0; i < NUM_ATTN_REGS; i++) {
2082 			u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
2083 				     i * sizeof(u32) +
2084 				     k * sizeof(u32) * NUM_ATTN_REGS;
2085 			u32 en, bits;
2086 
2087 			en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
2088 			bits = aeu_inv_arr[i] & en;
2089 
2090 			/* Skip if no bit from this group is currently set */
2091 			if (!bits)
2092 				continue;
2093 
2094 			/* Find all set bits from current register which belong
2095 			 * to current group, making them responsible for the
2096 			 * previous assertion.
2097 			 */
2098 			for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
2099 				u8 bit, bit_len;
2100 				u32 bitmask;
2101 
2102 				p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
2103 
2104 				/* No need to handle parity-only bits */
2105 				if (p_aeu->flags == ATTENTION_PAR)
2106 					continue;
2107 
2108 				bit = bit_idx;
2109 				bit_len = ATTENTION_LENGTH(p_aeu->flags);
2110 				if (p_aeu->flags & ATTENTION_PAR_INT) {
2111 					/* Skip Parity */
2112 					bit++;
2113 					bit_len--;
2114 				}
2115 
2116 				bitmask = bits & (((1 << bit_len) - 1) << bit);
2117 				if (bitmask) {
2118 					/* Handle source of the attention */
2119 					qed_int_deassertion_aeu_bit(p_hwfn,
2120 								    p_aeu,
2121 								    aeu_en,
2122 								    bitmask);
2123 				}
2124 
2125 				bit_idx += ATTENTION_LENGTH(p_aeu->flags);
2126 			}
2127 		}
2128 	}
2129 
2130 	/* Clear IGU indication for the deasserted bits */
2131 	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
2132 				    GTT_BAR0_MAP_REG_IGU_CMD +
2133 				    ((IGU_CMD_ATTN_BIT_CLR_UPPER -
2134 				      IGU_CMD_INT_ACK_BASE) << 3),
2135 				    ~((u32)deasserted_bits));
2136 
2137 	/* Unmask deasserted attentions in IGU */
2138 	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
2139 	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
2140 	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
2141 
2142 	/* Clear deassertion from inner state */
2143 	sb_attn_sw->known_attn &= ~deasserted_bits;
2144 
2145 	return rc;
2146 }
2147 
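/**
 * @brief qed_int_attentions - handle attention state changes
 *
 * Takes a consistent snapshot of the attention bits/acks from the attention
 * status block, derives the newly asserted and deasserted bits relative to
 * the previously known state, and dispatches them to the assertion and
 * deassertion handlers.
 *
 * @param p_hwfn
 * @return int
 */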
2148 static int qed_int_attentions(struct qed_hwfn *p_hwfn)
2149 {
2150 	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
2151 	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
2152 	u32 attn_bits = 0, attn_acks = 0;
2153 	u16 asserted_bits, deasserted_bits;
2154 	__le16 index;
2155 	int rc = 0;
2156 
2157 	/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing we work on a synchronized timeframe
2159 	 */
2160 	do {
2161 		index = p_sb_attn->sb_index;
2162 		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
2163 		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
2164 	} while (index != p_sb_attn->sb_index);
2165 	p_sb_attn->sb_index = index;
2166 
2167 	/* Attention / Deassertion are meaningful (and in correct state)
2168 	 * only when they differ and consistent with known state - deassertion
2169 	 * when previous attention & current ack, and assertion when current
2170 	 * attention with no previous attention
2171 	 */
2172 	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
2173 		~p_sb_attn_sw->known_attn;
2174 	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
2175 		p_sb_attn_sw->known_attn;
2176 
2177 	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
2178 		DP_INFO(p_hwfn,
2179 			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
2180 			index, attn_bits, attn_acks, asserted_bits,
2181 			deasserted_bits, p_sb_attn_sw->known_attn);
2182 	} else if (asserted_bits == 0x100) {
2183 		DP_INFO(p_hwfn, "MFW indication via attention\n");
2184 	} else {
2185 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2186 			   "MFW indication [deassertion]\n");
2187 	}
2188 
2189 	if (asserted_bits) {
2190 		rc = qed_int_assertion(p_hwfn, asserted_bits);
2191 		if (rc)
2192 			return rc;
2193 	}
2194 
2195 	if (deasserted_bits)
2196 		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
2197 
2198 	return rc;
2199 }
2200 
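/**
 * @brief qed_sb_ack_attn - acknowledge the attention segment in the IGU
 *
 * Writes a producer/consumer update for the attention segment, advancing
 * the consumer to ack_cons without changing the interrupt enable state
 * (IGU_INT_NOP).
 *
 * @param p_hwfn
 * @param igu_addr - IGU command address for this status block
 * @param ack_cons - consumer value to acknowledge
 */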
2201 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
2202 			    void __iomem *igu_addr, u32 ack_cons)
2203 {
2204 	struct igu_prod_cons_update igu_ack = { 0 };
2205 
2206 	igu_ack.sb_id_and_flags =
2207 		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
2208 		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
2209 		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
2210 		 (IGU_SEG_ACCESS_ATTN <<
2211 		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
2212 
2213 	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
2214 
	/* Both segments (interrupts & acks) are written to the same address;
2216 	 * Need to guarantee all commands will be received (in-order) by HW.
2217 	 */
2218 	mmiowb();
2219 	barrier();
2220 }
2221 
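/**
 * @brief qed_int_sp_dpc - slowpath DPC (tasklet) body
 *
 * Disables the default status block ack, refreshes the interrupt and
 * attention running indices, processes pending attentions and protocol
 * index completion callbacks, and finally re-enables interrupts after
 * acknowledging the attention segment.
 *
 * @param hwfn_cookie - the hwfn pointer, cast to unsigned long
 */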
2222 void qed_int_sp_dpc(unsigned long hwfn_cookie)
2223 {
2224 	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
2225 	struct qed_pi_info *pi_info = NULL;
2226 	struct qed_sb_attn_info *sb_attn;
2227 	struct qed_sb_info *sb_info;
2228 	int arr_size;
2229 	u16 rc = 0;
2230 
2231 	if (!p_hwfn->p_sp_sb) {
2232 		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
2233 		return;
2234 	}
2235 
2236 	sb_info = &p_hwfn->p_sp_sb->sb_info;
2237 	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
2238 	if (!sb_info) {
2239 		DP_ERR(p_hwfn->cdev,
2240 		       "Status block is NULL - cannot ack interrupts\n");
2241 		return;
2242 	}
2243 
2244 	if (!p_hwfn->p_sb_attn) {
2245 		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
2246 		return;
2247 	}
2248 	sb_attn = p_hwfn->p_sb_attn;
2249 
2250 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
2251 		   p_hwfn, p_hwfn->my_id);
2252 
	/* Disable ack for the default status block. Required both for msix
	 * and for inta in non-mask mode; in inta it does no harm.
2255 	 */
2256 	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
2257 
2258 	/* Gather Interrupts/Attentions information */
2259 	if (!sb_info->sb_virt) {
2260 		DP_ERR(p_hwfn->cdev,
2261 		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
2262 	} else {
2263 		u32 tmp_index = sb_info->sb_ack;
2264 
2265 		rc = qed_sb_update_sb_idx(sb_info);
2266 		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
2267 			   "Interrupt indices: 0x%08x --> 0x%08x\n",
2268 			   tmp_index, sb_info->sb_ack);
2269 	}
2270 
2271 	if (!sb_attn || !sb_attn->sb_attn) {
2272 		DP_ERR(p_hwfn->cdev,
2273 		       "Attentions Status block is NULL - cannot check for new attentions!\n");
2274 	} else {
2275 		u16 tmp_index = sb_attn->index;
2276 
2277 		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
2278 		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
2279 			   "Attention indices: 0x%08x --> 0x%08x\n",
2280 			   tmp_index, sb_attn->index);
2281 	}
2282 
	/* Check if we expect interrupts at this time; if not, just ack them */
2284 	if (!(rc & QED_SB_EVENT_MASK)) {
2285 		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
2286 		return;
2287 	}
2288 
	/* Check the validity of the DPC ptt; if invalid, ack interrupts and return */
2290 	if (!p_hwfn->p_dpc_ptt) {
2291 		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
2292 		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
2293 		return;
2294 	}
2295 
2296 	if (rc & QED_SB_ATT_IDX)
2297 		qed_int_attentions(p_hwfn);
2298 
2299 	if (rc & QED_SB_IDX) {
2300 		int pi;
2301 
		/* Invoke the completion callback of every registered protocol index */
2303 		for (pi = 0; pi < arr_size; pi++) {
2304 			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
2305 			if (pi_info->comp_cb)
2306 				pi_info->comp_cb(p_hwfn, pi_info->cookie);
2307 		}
2308 	}
2309 
2310 	if (sb_attn && (rc & QED_SB_ATT_IDX))
2311 		/* This should be done before the interrupts are enabled,
2312 		 * since otherwise a new attention will be generated.
2313 		 */
2314 		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
2315 
2316 	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
2317 }
2318 
2319 static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
2320 {
2321 	struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
2322 
2323 	if (!p_sb)
2324 		return;
2325 
2326 	if (p_sb->sb_attn)
2327 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2328 				  SB_ATTN_ALIGNED_SIZE(p_hwfn),
2329 				  p_sb->sb_attn, p_sb->sb_phys);
2330 	kfree(p_sb);
2331 }
2332 
2333 static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
2334 				  struct qed_ptt *p_ptt)
2335 {
2336 	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
2337 
2338 	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
2339 
2340 	sb_info->index = 0;
2341 	sb_info->known_attn = 0;
2342 
2343 	/* Configure Attention Status Block in IGU */
2344 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
2345 	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
2346 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
2347 	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
2348 }
2349 
2350 static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
2351 				 struct qed_ptt *p_ptt,
2352 				 void *sb_virt_addr, dma_addr_t sb_phy_addr)
2353 {
2354 	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
2355 	int i, j, k;
2356 
2357 	sb_info->sb_attn = sb_virt_addr;
2358 	sb_info->sb_phys = sb_phy_addr;
2359 
2360 	/* Set the pointer to the AEU descriptors */
2361 	sb_info->p_aeu_desc = aeu_descs;
2362 
2363 	/* Calculate Parity Masks */
2364 	memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
2365 	for (i = 0; i < NUM_ATTN_REGS; i++) {
2366 		/* j is array index, k is bit index */
2367 		for (j = 0, k = 0; k < 32; j++) {
2368 			unsigned int flags = aeu_descs[i].bits[j].flags;
2369 
2370 			if (flags & ATTENTION_PARITY)
2371 				sb_info->parity_mask[i] |= 1 << k;
2372 
2373 			k += ATTENTION_LENGTH(flags);
2374 		}
2375 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2376 			   "Attn Mask [Reg %d]: 0x%08x\n",
2377 			   i, sb_info->parity_mask[i]);
2378 	}
2379 
2380 	/* Set the address of cleanup for the mcp attention */
2381 	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
2382 				 MISC_REG_AEU_GENERAL_ATTN_0;
2383 
2384 	qed_int_sb_attn_setup(p_hwfn, p_ptt);
2385 }
2386 
2387 static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
2388 				 struct qed_ptt *p_ptt)
2389 {
2390 	struct qed_dev *cdev = p_hwfn->cdev;
2391 	struct qed_sb_attn_info *p_sb;
2392 	dma_addr_t p_phys = 0;
2393 	void *p_virt;
2394 
2395 	/* SB struct */
2396 	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
2397 	if (!p_sb)
2398 		return -ENOMEM;
2399 
2400 	/* SB ring  */
2401 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
2402 				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
2403 				    &p_phys, GFP_KERNEL);
2404 
2405 	if (!p_virt) {
2406 		kfree(p_sb);
2407 		return -ENOMEM;
2408 	}
2409 
2410 	/* Attention setup */
2411 	p_hwfn->p_sb_attn = p_sb;
2412 	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
2413 
2414 	return 0;
2415 }
2416 
2417 /* coalescing timeout = timeset << (timer_res + 1) */
2418 #define QED_CAU_DEF_RX_USECS 24
2419 #define QED_CAU_DEF_TX_USECS 48
2420 
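/* The timer resolution / timeset split below keeps the requested number of
 * microseconds representable in the 7-bit timeset field. For example, with
 * the selection logic used in this file, 24us maps to timer_res 0 and
 * timeset 24, while 200us maps to timer_res 1 and timeset 100 (200 >> 1).
 */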
2421 void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
2422 			   struct cau_sb_entry *p_sb_entry,
2423 			   u8 pf_id, u16 vf_number, u8 vf_valid)
2424 {
2425 	struct qed_dev *cdev = p_hwfn->cdev;
2426 	u32 cau_state;
2427 	u8 timer_res;
2428 
2429 	memset(p_sb_entry, 0, sizeof(*p_sb_entry));
2430 
2431 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
2432 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
2433 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
2434 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
2435 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
2436 
2437 	cau_state = CAU_HC_DISABLE_STATE;
2438 
2439 	if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
2440 		cau_state = CAU_HC_ENABLE_STATE;
2441 		if (!cdev->rx_coalesce_usecs)
2442 			cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
2443 		if (!cdev->tx_coalesce_usecs)
2444 			cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
2445 	}
2446 
	/* Coalesce = (timeset << timer-res), timeset is 7-bit wide */
2448 	if (cdev->rx_coalesce_usecs <= 0x7F)
2449 		timer_res = 0;
2450 	else if (cdev->rx_coalesce_usecs <= 0xFF)
2451 		timer_res = 1;
2452 	else
2453 		timer_res = 2;
2454 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2455 
2456 	if (cdev->tx_coalesce_usecs <= 0x7F)
2457 		timer_res = 0;
2458 	else if (cdev->tx_coalesce_usecs <= 0xFF)
2459 		timer_res = 1;
2460 	else
2461 		timer_res = 2;
2462 	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2463 
2464 	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
2465 	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
2466 }
2467 
2468 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
2469 			 struct qed_ptt *p_ptt,
2470 			 dma_addr_t sb_phys,
2471 			 u16 igu_sb_id, u16 vf_number, u8 vf_valid)
2472 {
2473 	struct cau_sb_entry sb_entry;
2474 
2475 	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
2476 			      vf_number, vf_valid);
2477 
2478 	if (p_hwfn->hw_init_done) {
2479 		/* Wide-bus, initialize via DMAE */
2480 		u64 phys_addr = (u64)sb_phys;
2481 
2482 		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
2483 				  CAU_REG_SB_ADDR_MEMORY +
2484 				  igu_sb_id * sizeof(u64), 2, 0);
2485 		qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
2486 				  CAU_REG_SB_VAR_MEMORY +
2487 				  igu_sb_id * sizeof(u64), 2, 0);
2488 	} else {
2489 		/* Initialize Status Block Address */
2490 		STORE_RT_REG_AGG(p_hwfn,
2491 				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
2492 				 igu_sb_id * 2,
2493 				 sb_phys);
2494 
2495 		STORE_RT_REG_AGG(p_hwfn,
2496 				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
2497 				 igu_sb_id * 2,
2498 				 sb_entry);
2499 	}
2500 
2501 	/* Configure pi coalescing if set */
2502 	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
2503 		u8 timeset, timer_res;
2504 		u8 num_tc = 1, i;
2505 
		/* timeset = (coalesce >> timer-res), timeset is 7-bit wide */
2507 		if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
2508 			timer_res = 0;
2509 		else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
2510 			timer_res = 1;
2511 		else
2512 			timer_res = 2;
2513 		timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
2514 		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
2515 				    QED_COAL_RX_STATE_MACHINE, timeset);
2516 
2517 		if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
2518 			timer_res = 0;
2519 		else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
2520 			timer_res = 1;
2521 		else
2522 			timer_res = 2;
2523 		timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
2524 		for (i = 0; i < num_tc; i++) {
2525 			qed_int_cau_conf_pi(p_hwfn, p_ptt,
2526 					    igu_sb_id, TX_PI(i),
2527 					    QED_COAL_TX_STATE_MACHINE,
2528 					    timeset);
2529 		}
2530 	}
2531 }
2532 
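/**
 * @brief qed_int_cau_conf_pi - configure a single CAU protocol index entry
 *
 * Builds a cau_pi_entry with the given timeset and RX/TX state machine
 * selection and writes it to the CAU PI memory - directly via GRC when HW
 * init is already done, or through the runtime init array otherwise.
 * Does nothing for VFs.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param igu_sb_id
 * @param pi_index
 * @param coalescing_fsm
 * @param timeset
 */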
2533 void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
2534 			 struct qed_ptt *p_ptt,
2535 			 u16 igu_sb_id,
2536 			 u32 pi_index,
2537 			 enum qed_coalescing_fsm coalescing_fsm,
2538 			 u8 timeset)
2539 {
2540 	struct cau_pi_entry pi_entry;
2541 	u32 sb_offset, pi_offset;
2542 
2543 	if (IS_VF(p_hwfn->cdev))
2544 		return;
2545 
2546 	sb_offset = igu_sb_id * PIS_PER_SB;
2547 	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
2548 
2549 	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
2550 	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
2551 		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
2552 	else
2553 		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
2554 
2555 	pi_offset = sb_offset + pi_index;
2556 	if (p_hwfn->hw_init_done) {
2557 		qed_wr(p_hwfn, p_ptt,
2558 		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
2559 		       *((u32 *)&(pi_entry)));
2560 	} else {
2561 		STORE_RT_REG(p_hwfn,
2562 			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
2563 			     *((u32 *)&(pi_entry)));
2564 	}
2565 }
2566 
2567 void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
2568 		      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
2569 {
2570 	/* zero status block and ack counter */
2571 	sb_info->sb_ack = 0;
2572 	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
2573 
2574 	if (IS_PF(p_hwfn->cdev))
2575 		qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
2576 				    sb_info->igu_sb_id, 0, 0);
2577 }
2578 
2579 /**
2580  * @brief qed_get_igu_sb_id - given a sw sb_id return the
2581  *        igu_sb_id
2582  *
2583  * @param p_hwfn
2584  * @param sb_id
2585  *
2586  * @return u16
2587  */
2588 static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
2589 {
2590 	u16 igu_sb_id;
2591 
2592 	/* Assuming continuous set of IGU SBs dedicated for given PF */
2593 	if (sb_id == QED_SP_SB_ID)
2594 		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
2595 	else if (IS_PF(p_hwfn->cdev))
2596 		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
2597 	else
2598 		igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
2599 
2600 	if (sb_id == QED_SP_SB_ID)
2601 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2602 			   "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
2603 	else
2604 		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2605 			   "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
2606 
2607 	return igu_sb_id;
2608 }
2609 
2610 int qed_int_sb_init(struct qed_hwfn *p_hwfn,
2611 		    struct qed_ptt *p_ptt,
2612 		    struct qed_sb_info *sb_info,
2613 		    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
2614 {
2615 	sb_info->sb_virt = sb_virt_addr;
2616 	sb_info->sb_phys = sb_phy_addr;
2617 
2618 	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
2619 
2620 	if (sb_id != QED_SP_SB_ID) {
2621 		p_hwfn->sbs_info[sb_id] = sb_info;
2622 		p_hwfn->num_sbs++;
2623 	}
2624 
2625 	sb_info->cdev = p_hwfn->cdev;
2626 
2627 	/* The igu address will hold the absolute address that needs to be
2628 	 * written to for a specific status block
2629 	 */
2630 	if (IS_PF(p_hwfn->cdev)) {
2631 		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
2632 						  GTT_BAR0_MAP_REG_IGU_CMD +
2633 						  (sb_info->igu_sb_id << 3);
2634 	} else {
2635 		sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
2636 						  PXP_VF_BAR0_START_IGU +
2637 						  ((IGU_CMD_INT_ACK_BASE +
2638 						    sb_info->igu_sb_id) << 3);
2639 	}
2640 
2641 	sb_info->flags |= QED_SB_INFO_INIT;
2642 
2643 	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
2644 
2645 	return 0;
2646 }
2647 
2648 int qed_int_sb_release(struct qed_hwfn *p_hwfn,
2649 		       struct qed_sb_info *sb_info, u16 sb_id)
2650 {
2651 	if (sb_id == QED_SP_SB_ID) {
		DP_ERR(p_hwfn, "Do not free the slowpath SB using this function");
2653 		return -EINVAL;
2654 	}
2655 
2656 	/* zero status block and ack counter */
2657 	sb_info->sb_ack = 0;
2658 	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
2659 
2660 	if (p_hwfn->sbs_info[sb_id] != NULL) {
2661 		p_hwfn->sbs_info[sb_id] = NULL;
2662 		p_hwfn->num_sbs--;
2663 	}
2664 
2665 	return 0;
2666 }
2667 
2668 static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
2669 {
2670 	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
2671 
2672 	if (!p_sb)
2673 		return;
2674 
2675 	if (p_sb->sb_info.sb_virt)
2676 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2677 				  SB_ALIGNED_SIZE(p_hwfn),
2678 				  p_sb->sb_info.sb_virt,
2679 				  p_sb->sb_info.sb_phys);
2680 	kfree(p_sb);
2681 }
2682 
2683 static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2684 {
2685 	struct qed_sb_sp_info *p_sb;
2686 	dma_addr_t p_phys = 0;
2687 	void *p_virt;
2688 
2689 	/* SB struct */
2690 	p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
2691 	if (!p_sb)
2692 		return -ENOMEM;
2693 
2694 	/* SB ring  */
2695 	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2696 				    SB_ALIGNED_SIZE(p_hwfn),
2697 				    &p_phys, GFP_KERNEL);
2698 	if (!p_virt) {
2699 		kfree(p_sb);
2700 		return -ENOMEM;
2701 	}
2702 
2703 	/* Status Block setup */
2704 	p_hwfn->p_sp_sb = p_sb;
2705 	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
2706 			p_phys, QED_SP_SB_ID);
2707 
2708 	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
2709 
2710 	return 0;
2711 }
2712 
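/**
 * @brief qed_int_register_cb - register a slowpath completion callback
 *
 * Finds a free protocol index on the slowpath status block, stores the
 * callback and cookie there, and returns the chosen index and a pointer to
 * the firmware consumer for that index.
 *
 * An illustrative registration (the callback and cookie names below are
 * placeholders, not part of this driver) could look like:
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *	int rc;
 *
 *	rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
 *				 &sb_idx, &p_fw_cons);
 *
 * @param p_hwfn
 * @param comp_cb - callback to invoke from the slowpath DPC
 * @param cookie - opaque pointer passed back to the callback
 * @param sb_idx - out parameter, the allocated protocol index
 * @param p_fw_cons - out parameter, pointer to the firmware consumer
 * @return int - 0 on success, -ENOMEM if no free index exists
 */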
2713 int qed_int_register_cb(struct qed_hwfn *p_hwfn,
2714 			qed_int_comp_cb_t comp_cb,
2715 			void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
2716 {
2717 	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
2718 	int rc = -ENOMEM;
2719 	u8 pi;
2720 
2721 	/* Look for a free index */
2722 	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
2723 		if (p_sp_sb->pi_info_arr[pi].comp_cb)
2724 			continue;
2725 
2726 		p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
2727 		p_sp_sb->pi_info_arr[pi].cookie = cookie;
2728 		*sb_idx = pi;
2729 		*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
2730 		rc = 0;
2731 		break;
2732 	}
2733 
2734 	return rc;
2735 }
2736 
2737 int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
2738 {
2739 	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
2740 
2741 	if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
2742 		return -ENOMEM;
2743 
2744 	p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
2745 	p_sp_sb->pi_info_arr[pi].cookie = NULL;
2746 
2747 	return 0;
2748 }
2749 
2750 u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
2751 {
2752 	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
2753 }
2754 
2755 void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
2756 			    struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
2757 {
2758 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
2759 
2760 	p_hwfn->cdev->int_mode = int_mode;
2761 	switch (p_hwfn->cdev->int_mode) {
2762 	case QED_INT_MODE_INTA:
2763 		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
2764 		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
2765 		break;
2766 
2767 	case QED_INT_MODE_MSI:
2768 		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
2769 		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
2770 		break;
2771 
2772 	case QED_INT_MODE_MSIX:
2773 		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
2774 		break;
2775 	case QED_INT_MODE_POLL:
2776 		break;
2777 	}
2778 
2779 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
2780 }
2781 
2782 int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2783 		       enum qed_int_mode int_mode)
2784 {
2785 	int rc = 0;
2786 
2787 	/* Configure AEU signal change to produce attentions */
2788 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
2789 	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
2790 	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
2791 	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
2792 
2793 	/* Flush the writes to IGU */
2794 	mmiowb();
2795 
2796 	/* Unmask AEU signals toward IGU */
2797 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
2798 	if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
2799 		rc = qed_slowpath_irq_req(p_hwfn);
2800 		if (rc) {
2801 			DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
2802 			return -EINVAL;
2803 		}
2804 		p_hwfn->b_int_requested = true;
2805 	}
2806 	/* Enable interrupt Generation */
2807 	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
2808 	p_hwfn->b_int_enabled = 1;
2809 
2810 	return rc;
2811 }
2812 
2813 void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2814 {
2815 	p_hwfn->b_int_enabled = 0;
2816 
2817 	if (IS_VF(p_hwfn->cdev))
2818 		return;
2819 
2820 	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
2821 }
2822 
2823 #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
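/**
 * @brief qed_int_igu_cleanup_sb - issue an IGU cleanup command for an SB
 *
 * Writes a cleanup set/clear command for the given SB through the IGU
 * command registers on behalf of opaque_fid, then polls the relevant
 * IGU_REG_CLEANUP_STATUS bit until it reflects the requested state or the
 * retry budget is exhausted.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_id
 * @param cleanup_set - true to set the cleanup bit, false to clear it
 * @param opaque_fid
 */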
2824 static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
2825 				   struct qed_ptt *p_ptt,
2826 				   u32 sb_id, bool cleanup_set, u16 opaque_fid)
2827 {
2828 	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
2829 	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
2830 	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
2831 
2832 	/* Set the data field */
2833 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
2834 	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
2835 	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
2836 
2837 	/* Set the control register */
2838 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
2839 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
2840 	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
2841 
2842 	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
2843 
2844 	barrier();
2845 
2846 	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
2847 
2848 	/* Flush the write to IGU */
2849 	mmiowb();
2850 
2851 	/* calculate where to read the status bit from */
2852 	sb_bit = 1 << (sb_id % 32);
2853 	sb_bit_addr = sb_id / 32 * sizeof(u32);
2854 
2855 	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
2856 
2857 	/* Now wait for the command to complete */
2858 	do {
2859 		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
2860 
2861 		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
2862 			break;
2863 
2864 		usleep_range(5000, 10000);
2865 	} while (--sleep_cnt);
2866 
2867 	if (!sleep_cnt)
2868 		DP_NOTICE(p_hwfn,
2869 			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
2870 			  val, sb_id);
2871 }
2872 
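/**
 * @brief qed_int_igu_init_pure_rt_single - clean up a single IGU SB
 *
 * Optionally sets and then clears the cleanup indication for the SB,
 * waits for the SB to drop out of WRITE_DONE_PENDING, and zeroes its
 * CAU protocol index memory.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_id
 * @param opaque
 * @param b_set
 */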
2873 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
2874 				     struct qed_ptt *p_ptt,
2875 				     u32 sb_id, u16 opaque, bool b_set)
2876 {
2877 	int pi, i;
2878 
2879 	/* Set */
2880 	if (b_set)
2881 		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
2882 
2883 	/* Clear */
2884 	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
2885 
2886 	/* Wait for the IGU SB to cleanup */
2887 	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
2888 		u32 val;
2889 
2890 		val = qed_rd(p_hwfn, p_ptt,
2891 			     IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
2892 		if (val & (1 << (sb_id % 32)))
2893 			usleep_range(10, 20);
2894 		else
2895 			break;
2896 	}
2897 	if (i == IGU_CLEANUP_SLEEP_LENGTH)
2898 		DP_NOTICE(p_hwfn,
2899 			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
2900 			  sb_id);
2901 
2902 	/* Clear the CAU for the SB */
2903 	for (pi = 0; pi < 12; pi++)
2904 		qed_wr(p_hwfn, p_ptt,
2905 		       CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
2906 }
2907 
2908 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
2909 			      struct qed_ptt *p_ptt,
2910 			      bool b_set, bool b_slowpath)
2911 {
2912 	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
2913 	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
2914 	u32 sb_id = 0, val = 0;
2915 
2916 	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
2917 	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
2918 	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
2919 	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
2920 
2921 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2922 		   "IGU cleaning SBs [%d,...,%d]\n",
2923 		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
2924 
2925 	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
2926 		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
2927 						p_hwfn->hw_info.opaque_fid,
2928 						b_set);
2929 
2930 	if (!b_slowpath)
2931 		return;
2932 
2933 	sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
2934 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2935 		   "IGU cleaning slowpath SB [%d]\n", sb_id);
2936 	qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
2937 					p_hwfn->hw_info.opaque_fid, b_set);
2938 }
2939 
2940 static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
2941 				      struct qed_ptt *p_ptt, u16 sb_id)
2942 {
2943 	u32 val = qed_rd(p_hwfn, p_ptt,
2944 			 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
2945 	struct qed_igu_block *p_block;
2946 
2947 	p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
2948 
	/* Stop scanning when we hit the first invalid PF entry */
2950 	if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
2951 	    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
2952 		goto out;
2953 
2954 	/* Fill the block information */
2955 	p_block->status		= QED_IGU_STATUS_VALID;
2956 	p_block->function_id	= GET_FIELD(val,
2957 					    IGU_MAPPING_LINE_FUNCTION_NUMBER);
2958 	p_block->is_pf		= GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2959 	p_block->vector_number	= GET_FIELD(val,
2960 					    IGU_MAPPING_LINE_VECTOR_NUMBER);
2961 
2962 	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2963 		   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2964 		   sb_id, val, p_block->function_id,
2965 		   p_block->is_pf, p_block->vector_number);
2966 
2967 out:
2968 	return val;
2969 }
2970 
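/**
 * @brief qed_int_igu_read_cam - build the IGU mapping from the HW CAM
 *
 * Scans the IGU mapping memory, records the default (slowpath) SB, the
 * PF's non-default SBs and the SBs available to this PF's VFs, and then
 * sanitizes the VF SB count against the actual number of VFs.
 *
 * @param p_hwfn
 * @param p_ptt
 * @return int
 */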
2971 int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2972 {
2973 	struct qed_igu_info *p_igu_info;
2974 	u32 val, min_vf = 0, max_vf = 0;
2975 	u16 sb_id, last_iov_sb_id = 0;
2976 	struct qed_igu_block *blk;
2977 	u16 prev_sb_id = 0xFF;
2978 
2979 	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
2980 	if (!p_hwfn->hw_info.p_igu_info)
2981 		return -ENOMEM;
2982 
2983 	p_igu_info = p_hwfn->hw_info.p_igu_info;
2984 
2985 	/* Initialize base sb / sb cnt for PFs and VFs */
2986 	p_igu_info->igu_base_sb		= 0xffff;
2987 	p_igu_info->igu_sb_cnt		= 0;
2988 	p_igu_info->igu_dsb_id		= 0xffff;
2989 	p_igu_info->igu_base_sb_iov	= 0xffff;
2990 
2991 	if (p_hwfn->cdev->p_iov_info) {
2992 		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
2993 
2994 		min_vf	= p_iov->first_vf_in_pf;
2995 		max_vf	= p_iov->first_vf_in_pf + p_iov->total_vfs;
2996 	}
2997 
2998 	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
2999 	     sb_id++) {
3000 		blk = &p_igu_info->igu_map.igu_blocks[sb_id];
3001 
3002 		val	= qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
3003 
		/* Stop scanning when we hit the first invalid PF entry */
3005 		if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
3006 		    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
3007 			break;
3008 
3009 		if (blk->is_pf) {
3010 			if (blk->function_id == p_hwfn->rel_pf_id) {
3011 				blk->status |= QED_IGU_STATUS_PF;
3012 
3013 				if (blk->vector_number == 0) {
3014 					if (p_igu_info->igu_dsb_id == 0xffff)
3015 						p_igu_info->igu_dsb_id = sb_id;
3016 				} else {
3017 					if (p_igu_info->igu_base_sb ==
3018 					    0xffff) {
3019 						p_igu_info->igu_base_sb = sb_id;
3020 					} else if (prev_sb_id != sb_id - 1) {
3021 						DP_NOTICE(p_hwfn->cdev,
3022 							  "consecutive igu vectors for HWFN %x broken",
3023 							  p_hwfn->rel_pf_id);
3024 						break;
3025 					}
3026 					prev_sb_id = sb_id;
3027 					/* we don't count the default */
3028 					(p_igu_info->igu_sb_cnt)++;
3029 				}
3030 			}
3031 		} else {
3032 			if ((blk->function_id >= min_vf) &&
3033 			    (blk->function_id < max_vf)) {
3034 				/* Available for VFs of this PF */
3035 				if (p_igu_info->igu_base_sb_iov == 0xffff) {
3036 					p_igu_info->igu_base_sb_iov = sb_id;
3037 				} else if (last_iov_sb_id != sb_id - 1) {
3038 					if (!val) {
3039 						DP_VERBOSE(p_hwfn->cdev,
3040 							   NETIF_MSG_INTR,
3041 							   "First uninitialized IGU CAM entry at index 0x%04x\n",
3042 							   sb_id);
3043 					} else {
3044 						DP_NOTICE(p_hwfn->cdev,
3045 							  "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
3046 							  p_hwfn->rel_pf_id,
3047 							  last_iov_sb_id,
							  sb_id);
					}
3049 					break;
3050 				}
3051 				blk->status |= QED_IGU_STATUS_FREE;
3052 				p_hwfn->hw_info.p_igu_info->free_blks++;
3053 				last_iov_sb_id = sb_id;
3054 			}
3055 		}
3056 	}
3057 
3058 	/* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
3059 	 * the number of VF SBs [especially for first VF on engine, as we can't
	 * differentiate between empty entries and its entries].
3061 	 * Since we don't really support more SBs than VFs today, prevent any
3062 	 * such configuration by sanitizing the number of SBs to equal the
3063 	 * number of VFs.
3064 	 */
3065 	if (IS_PF_SRIOV(p_hwfn)) {
3066 		u16 total_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
3067 
3068 		if (total_vfs < p_igu_info->free_blks) {
3069 			DP_VERBOSE(p_hwfn,
3070 				   (NETIF_MSG_INTR | QED_MSG_IOV),
3071 				   "Limiting number of SBs for IOV - %04x --> %04x\n",
3072 				   p_igu_info->free_blks,
3073 				   p_hwfn->cdev->p_iov_info->total_vfs);
3074 			p_igu_info->free_blks = total_vfs;
3075 		} else if (total_vfs > p_igu_info->free_blks) {
3076 			DP_NOTICE(p_hwfn,
3077 				  "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
3078 				  p_igu_info->free_blks, total_vfs);
3079 			return -EINVAL;
3080 		}
3081 	}
3082 	p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
3083 
3084 	DP_VERBOSE(
3085 		p_hwfn,
3086 		NETIF_MSG_INTR,
3087 		"IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
3088 		p_igu_info->igu_base_sb,
3089 		p_igu_info->igu_base_sb_iov,
3090 		p_igu_info->igu_sb_cnt,
3091 		p_igu_info->igu_sb_cnt_iov,
3092 		p_igu_info->igu_dsb_id);
3093 
3094 	if (p_igu_info->igu_base_sb == 0xffff ||
3095 	    p_igu_info->igu_dsb_id == 0xffff ||
3096 	    p_igu_info->igu_sb_cnt == 0) {
3097 		DP_NOTICE(p_hwfn,
3098 			  "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
3099 			   p_igu_info->igu_base_sb,
3100 			   p_igu_info->igu_sb_cnt,
3101 			   p_igu_info->igu_dsb_id);
3102 		return -EINVAL;
3103 	}
3104 
3105 	return 0;
3106 }
3107 
3108 /**
3109  * @brief Initialize igu runtime registers
3110  *
3111  * @param p_hwfn
3112  */
3113 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
3114 {
3115 	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
3116 
3117 	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
3118 }
3119 
3120 u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
3121 {
3122 	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
3123 			       IGU_CMD_INT_ACK_BASE;
3124 	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
3125 			       IGU_CMD_INT_ACK_BASE;
3126 	u32 intr_status_hi = 0, intr_status_lo = 0;
3127 	u64 intr_status = 0;
3128 
3129 	intr_status_lo = REG_RD(p_hwfn,
3130 				GTT_BAR0_MAP_REG_IGU_CMD +
3131 				lsb_igu_cmd_addr * 8);
3132 	intr_status_hi = REG_RD(p_hwfn,
3133 				GTT_BAR0_MAP_REG_IGU_CMD +
3134 				msb_igu_cmd_addr * 8);
3135 	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
3136 
3137 	return intr_status;
3138 }
3139 
3140 static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
3141 {
3142 	tasklet_init(p_hwfn->sp_dpc,
3143 		     qed_int_sp_dpc, (unsigned long)p_hwfn);
3144 	p_hwfn->b_sp_dpc_enabled = true;
3145 }
3146 
3147 static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
3148 {
3149 	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
3150 	if (!p_hwfn->sp_dpc)
3151 		return -ENOMEM;
3152 
3153 	return 0;
3154 }
3155 
3156 static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
3157 {
3158 	kfree(p_hwfn->sp_dpc);
3159 }
3160 
3161 int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3162 {
3163 	int rc = 0;
3164 
3165 	rc = qed_int_sp_dpc_alloc(p_hwfn);
3166 	if (rc)
3167 		return rc;
3168 
3169 	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
3170 	if (rc)
3171 		return rc;
3172 
3173 	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
3174 
3175 	return rc;
3176 }
3177 
3178 void qed_int_free(struct qed_hwfn *p_hwfn)
3179 {
3180 	qed_int_sp_sb_free(p_hwfn);
3181 	qed_int_sb_attn_free(p_hwfn);
3182 	qed_int_sp_dpc_free(p_hwfn);
3183 }
3184 
3185 void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3186 {
3187 	qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
3188 	qed_int_sb_attn_setup(p_hwfn, p_ptt);
3189 	qed_int_sp_dpc_setup(p_hwfn);
3190 }
3191 
3192 void qed_int_get_num_sbs(struct qed_hwfn	*p_hwfn,
3193 			 struct qed_sb_cnt_info *p_sb_cnt_info)
3194 {
3195 	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
3196 
3197 	if (!info || !p_sb_cnt_info)
3198 		return;
3199 
3200 	p_sb_cnt_info->sb_cnt		= info->igu_sb_cnt;
3201 	p_sb_cnt_info->sb_iov_cnt	= info->igu_sb_cnt_iov;
3202 	p_sb_cnt_info->sb_free_blk	= info->free_blks;
3203 }
3204 
3205 u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
3206 {
3207 	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
3208 
3209 	/* Determine origin of SB id */
3210 	if ((sb_id >= p_info->igu_base_sb) &&
3211 	    (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
3212 		return sb_id - p_info->igu_base_sb;
3213 	} else if ((sb_id >= p_info->igu_base_sb_iov) &&
3214 		   (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
3215 		/* We want the first VF queue to be adjacent to the
3216 		 * last PF queue. Since L2 queues can be partial to
3217 		 * SBs, we'll use the feature instead.
3218 		 */
3219 		return sb_id - p_info->igu_base_sb_iov +
3220 		       FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
3221 	} else {
3222 		DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
3223 		return 0;
3224 	}
3225 }
3226 
3227 void qed_int_disable_post_isr_release(struct qed_dev *cdev)
3228 {
3229 	int i;
3230 
3231 	for_each_hwfn(cdev, i)
3232 		cdev->hwfns[i].b_int_requested = false;
3233 }
3234 
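/**
 * @brief qed_int_set_timer_res - change the RX/TX timer resolution of an SB
 *
 * Reads the SB's CAU entry through DMAE, updates the requested timer
 * resolution field and writes the entry back; only valid once HW init
 * has completed.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param timer_res
 * @param sb_id
 * @param tx - true to update the TX resolution, false for RX
 * @return int
 */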
3235 int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3236 			  u8 timer_res, u16 sb_id, bool tx)
3237 {
3238 	struct cau_sb_entry sb_entry;
3239 	int rc;
3240 
3241 	if (!p_hwfn->hw_init_done) {
3242 		DP_ERR(p_hwfn, "hardware not initialized yet\n");
3243 		return -EINVAL;
3244 	}
3245 
3246 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
3247 			       sb_id * sizeof(u64),
3248 			       (u64)(uintptr_t)&sb_entry, 2, 0);
3249 	if (rc) {
3250 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
3251 		return rc;
3252 	}
3253 
3254 	if (tx)
3255 		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
3256 	else
3257 		SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
3258 
3259 	rc = qed_dmae_host2grc(p_hwfn, p_ptt,
3260 			       (u64)(uintptr_t)&sb_entry,
3261 			       CAU_REG_SB_VAR_MEMORY +
3262 			       sb_id * sizeof(u64), 2, 0);
3263 	if (rc) {
3264 		DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
3265 		return rc;
3266 	}
3267 
3268 	return rc;
3269 }
3270