/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NITROX_CSR_H
#define __NITROX_CSR_H

#include <asm/byteorder.h>
#include <linux/types.h>

/* EMU clusters */
#define NR_CLUSTERS		4
#define AE_CORES_PER_CLUSTER	20
#define SE_CORES_PER_CLUSTER	16

/* BIST registers */
#define EMU_BIST_STATUSX(_i)	(0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS		0x12C0070
#define NPS_CORE_BIST_REG	0x10000E8
#define NPS_CORE_NPC_BIST_REG	0x1000128
#define NPS_PKT_SLC_BIST_REG	0x1040088
#define NPS_PKT_IN_BIST_REG	0x1040100
#define POM_BIST_REG		0x11C0100
#define BMI_BIST_REG		0x1140080
#define EFL_CORE_BIST_REGX(_i)	(0x1240100 + ((_i) * 0x400))
#define EFL_TOP_BIST_STAT	0x1241090
#define BMO_BIST_REG		0x1180080
#define LBC_BIST_STATUS		0x1200020
#define PEM_BIST_STATUSX(_i)	(0x1080468 | ((_i) << 18))

/* EMU registers */
#define EMU_SE_ENABLEX(_i)	(0x1400000 + ((_i) * 0x40000))
#define EMU_AE_ENABLEX(_i)	(0x1400008 + ((_i) * 0x40000))
#define EMU_WD_INT_ENA_W1SX(_i)	(0x1402318 + ((_i) * 0x40000))
#define EMU_GE_INT_ENA_W1SX(_i)	(0x1402518 + ((_i) * 0x40000))
#define EMU_FUSE_MAPX(_i)	(0x1402708 + ((_i) * 0x40000))

/* UCD registers */
#define UCD_UCODE_LOAD_BLOCK_NUM	0x12C0010
#define UCD_UCODE_LOAD_IDX_DATAX(_i)	(0x12C0018 + ((_i) * 0x20))
#define UCD_SE_EID_UCODE_BLOCK_NUMX(_i)	(0x12C0000 + ((_i) * 0x1000))

/* NPS core registers */
#define NPS_CORE_GBL_VFCFG	0x1000000
#define NPS_CORE_CONTROL	0x1000008
#define NPS_CORE_INT_ACTIVE	0x1000080
#define NPS_CORE_INT		0x10000A0
#define NPS_CORE_INT_ENA_W1S	0x10000B8
#define NPS_STATS_PKT_DMA_RD_CNT	0x1000180
#define NPS_STATS_PKT_DMA_WR_CNT	0x1000190

/* NPS packet registers */
#define NPS_PKT_INT				0x1040018
#define NPS_PKT_IN_RERR_HI		0x1040108
#define NPS_PKT_IN_RERR_HI_ENA_W1S	0x1040120
#define NPS_PKT_IN_RERR_LO		0x1040128
#define NPS_PKT_IN_RERR_LO_ENA_W1S	0x1040140
#define NPS_PKT_IN_ERR_TYPE		0x1040148
#define NPS_PKT_IN_ERR_TYPE_ENA_W1S	0x1040160
#define NPS_PKT_IN_INSTR_CTLX(_i)	(0x10060 + ((_i) * 0x40000))
#define NPS_PKT_IN_INSTR_BADDRX(_i)	(0x10068 + ((_i) * 0x40000))
#define NPS_PKT_IN_INSTR_RSIZEX(_i)	(0x10070 + ((_i) * 0x40000))
#define NPS_PKT_IN_DONE_CNTSX(_i)	(0x10080 + ((_i) * 0x40000))
#define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i)	(0x10078 + ((_i) * 0x40000))
#define NPS_PKT_IN_INT_LEVELSX(_i)		(0x10088 + ((_i) * 0x40000))

#define NPS_PKT_SLC_RERR_HI		0x1040208
#define NPS_PKT_SLC_RERR_HI_ENA_W1S	0x1040220
#define NPS_PKT_SLC_RERR_LO		0x1040228
#define NPS_PKT_SLC_RERR_LO_ENA_W1S	0x1040240
#define NPS_PKT_SLC_ERR_TYPE		0x1040248
#define NPS_PKT_SLC_ERR_TYPE_ENA_W1S	0x1040260
#define NPS_PKT_SLC_CTLX(_i)		(0x10000 + ((_i) * 0x40000))
#define NPS_PKT_SLC_CNTSX(_i)		(0x10008 + ((_i) * 0x40000))
#define NPS_PKT_SLC_INT_LEVELSX(_i)	(0x10010 + ((_i) * 0x40000))

/* POM registers */
#define POM_INT_ENA_W1S		0x11C0018
#define POM_GRP_EXECMASKX(_i)	(0x11C1100 | ((_i) * 8))
#define POM_INT		0x11C0000
#define POM_PERF_CTL	0x11CC400

/* BMI registers */
#define BMI_INT		0x1140000
#define BMI_CTL		0x1140020
#define BMI_INT_ENA_W1S	0x1140018
#define BMI_NPS_PKT_CNT	0x1140070

/* EFL registers */
#define EFL_CORE_INT_ENA_W1SX(_i)		(0x1240018 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT0X(_i)		(0x1240050 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT0_ENA_W1SX(_i)	(0x1240068 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT1X(_i)		(0x1240070 + ((_i) * 0x400))
#define EFL_CORE_VF_ERR_INT1_ENA_W1SX(_i)	(0x1240088 + ((_i) * 0x400))
#define EFL_CORE_SE_ERR_INTX(_i)		(0x12400A0 + ((_i) * 0x400))
#define EFL_RNM_CTL_STATUS			0x1241800
#define EFL_CORE_INTX(_i)			(0x1240000 + ((_i) * 0x400))

/* BMO registers */
#define BMO_CTL2		0x1180028
#define BMO_NPS_SLC_PKT_CNT	0x1180078

/* LBC registers */
#define LBC_INT			0x1200000
#define LBC_INVAL_CTL		0x1201010
#define LBC_PLM_VF1_64_INT	0x1202008
#define LBC_INVAL_STATUS	0x1202010
#define LBC_INT_ENA_W1S		0x1203000
#define LBC_PLM_VF1_64_INT_ENA_W1S	0x1205008
#define LBC_PLM_VF65_128_INT		0x1206008
#define LBC_ELM_VF1_64_INT		0x1208000
#define LBC_PLM_VF65_128_INT_ENA_W1S	0x1209008
#define LBC_ELM_VF1_64_INT_ENA_W1S	0x120B000
#define LBC_ELM_VF65_128_INT		0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S	0x120F000

/* PEM registers */
#define PEM0_INT 0x1080428

/**
 * struct emu_fuse_map - EMU Fuse Map Registers
 * @ae_fuse: Fuse settings for AE 19..0
 * @se_fuse: Fuse settings for SE 15..0
 *
 * A set bit indicates the unit is fuse disabled.
 */
union emu_fuse_map {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 valid : 1;
		u64 raz_52_62 : 11;
		u64 ae_fuse : 20;
		u64 raz_16_31 : 16;
		u64 se_fuse : 16;
#else
		u64 se_fuse : 16;
		u64 raz_16_31 : 16;
		u64 ae_fuse : 20;
		u64 raz_52_62 : 11;
		u64 valid : 1;
#endif
	} s;
};
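
/*
 * Illustrative helper (not part of the original header): derive the mask
 * of SE engines in one cluster that are not fuse-disabled from a raw
 * EMU_FUSE_MAPX(i) value.  A set fuse bit marks the engine as disabled,
 * and a clear [VALID] bit means the fuse data cannot be trusted, in which
 * case an empty mask is returned.
 */
static inline u64 emu_fuse_map_se_mask(u64 csr)
{
	union emu_fuse_map fuse;

	fuse.value = csr;
	if (!fuse.s.valid)
		return 0;
	/* complement of the fuse bits within the 16 SE positions */
	return ~fuse.s.se_fuse & ((1ULL << SE_CORES_PER_CLUSTER) - 1);
}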

/**
 * struct emu_se_enable - Symmetric Engine Enable Registers
 * @enable: Individual enables for each of the cluster's
 *   16 Symmetric Engines.
 */
union emu_se_enable {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz	: 48;
		u64 enable : 16;
#else
		u64 enable : 16;
		u64 raz	: 48;
#endif
	} s;
};

/**
 * struct emu_ae_enable - EMU Asymmetric engines.
 * @enable: Individual enables for each of the cluster's
 *   20 Asymmetric Engines.
 */
union emu_ae_enable {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz	: 44;
		u64 enable : 20;
#else
		u64 enable : 20;
		u64 raz	: 44;
#endif
	} s;
};

/**
 * struct emu_wd_int_ena_w1s - EMU Interrupt Enable Registers
 * @ae_wd: Reads or sets enable for EMU(0..3)_WD_INT[AE_WD]
 * @se_wd: Reads or sets enable for EMU(0..3)_WD_INT[SE_WD]
 */
union emu_wd_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 12;
		u64 ae_wd : 20;
		u64 raz1 : 16;
		u64 se_wd : 16;
#else
		u64 se_wd : 16;
		u64 raz1 : 16;
		u64 ae_wd : 20;
		u64 raz2 : 12;
#endif
	} s;
};

/**
 * struct emu_ge_int_ena_w1s - EMU Interrupt Enable set registers
 * @ae_ge: Reads or sets enable for EMU(0..3)_GE_INT[AE_GE]
 * @se_ge: Reads or sets enable for EMU(0..3)_GE_INT[SE_GE]
 */
union emu_ge_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_52_63 : 12;
		u64 ae_ge : 20;
		u64 raz_16_31: 16;
		u64 se_ge : 16;
#else
		u64 se_ge : 16;
		u64 raz_16_31: 16;
		u64 ae_ge : 20;
		u64 raz_52_63 : 12;
#endif
	} s;
};

/**
 * struct nps_pkt_slc_ctl - Solicited Packet Out Control Registers
 * @rh: Indicates whether to remove or include the response header
 *   1 = Include, 0 = Remove
 * @z: If set, 8 trailing 0x00 bytes will be added to the end of the
 *   outgoing packet.
 * @enb: Enable for this port.
 */
union nps_pkt_slc_ctl {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 raz : 61;
		u64 rh : 1;
		u64 z : 1;
		u64 enb : 1;
#else
		u64 enb : 1;
		u64 z : 1;
		u64 rh : 1;
		u64 raz : 61;
#endif
	} s;
};
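
/*
 * Illustrative helper (not part of the original header): build an
 * NPS_PKT_SLC_CTLX(i) value that enables the solicited output port.
 * Whether a driver wants the response header kept ([RH]) or the trailing
 * zero pad ([Z]) is a policy choice, so both are plain parameters here.
 */
static inline u64 nps_pkt_slc_ctl_val(bool rh, bool z)
{
	union nps_pkt_slc_ctl ctl;

	ctl.value = 0;
	ctl.s.enb = 1;	/* enable this port */
	ctl.s.rh = rh;	/* 1 = include the response header */
	ctl.s.z = z;	/* 1 = append 8 trailing 0x00 bytes */
	return ctl.value;
}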

/**
 * struct nps_pkt_slc_cnts - Solicited Packet Out Count Registers
 * @slc_int: Returns a 1 when:
 *   NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
 *   NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET].
 *   To clear the bit, the CNTS register must be written to clear.
 * @in_int: Returns a 1 when:
 *   NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT].
 *   To clear the bit, the DONE_CNTS register must be written to clear.
 * @mbox_int: Returns a 1 when:
 *   NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set. To clear the bit,
 *   write NPS_PKT_MBOX_PF_VF(i)_INT[INTR] with 1.
 * @timer: Timer, incremented every 2048 coprocessor clock cycles
 *   when [CNT] is not zero. The hardware clears both [TIMER] and
 *   [INT] when [CNT] goes to 0.
 * @cnt: Packet counter. Hardware adds to [CNT] as it sends packets out.
 *   On a write to this CSR, hardware subtracts the amount written to the
 *   [CNT] field from [CNT].
 */
union nps_pkt_slc_cnts {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 slc_int : 1;
		u64 uns_int : 1;
		u64 in_int : 1;
		u64 mbox_int : 1;
		u64 resend : 1;
		u64 raz : 5;
		u64 timer : 22;
		u64 cnt : 32;
#else
		u64 cnt	: 32;
		u64 timer : 22;
		u64 raz	: 5;
		u64 resend : 1;
		u64 mbox_int : 1;
		u64 in_int : 1;
		u64 uns_int : 1;
		u64 slc_int : 1;
#endif
	} s;
};

/**
 * struct nps_pkt_slc_int_levels - Solicited Packet Out Interrupt Levels
 *   Registers.
 * @bmode: Determines whether NPS_PKT_SLC_CNTS[CNT] is a byte or
 *   packet counter.
 * @timet: Output port counter time interrupt threshold.
 * @cnt: Output port counter interrupt threshold.
 */
union nps_pkt_slc_int_levels {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 bmode : 1;
		u64 raz	: 9;
		u64 timet : 22;
		u64 cnt	: 32;
#else
		u64 cnt : 32;
		u64 timet : 22;
		u64 raz : 9;
		u64 bmode : 1;
#endif
	} s;
};

/**
 * struct nps_pkt_int - NPS Packet Interrupt Register
 * @in_err: Set when any NPS_PKT_IN_RERR_HI/LO bit and
 *    corresponding NPS_PKT_IN_RERR_*_ENA_* bit are both set.
 * @uns_err: Set when any NPS_PKT_UNS_RERR_HI/LO bit and
 *    corresponding NPS_PKT_UNS_RERR_*_ENA_* bit are both set.
 * @slc_err: Set when any NPS_PKT_SLC_RERR_HI/LO bit and
 *    corresponding NPS_PKT_SLC_RERR_*_ENA_* bit are both set.
 */
union nps_pkt_int {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 raz	: 54;
		u64 uns_wto : 1;
		u64 in_err : 1;
		u64 uns_err : 1;
		u64 slc_err : 1;
		u64 in_dbe : 1;
		u64 in_sbe : 1;
		u64 uns_dbe : 1;
		u64 uns_sbe : 1;
		u64 slc_dbe : 1;
		u64 slc_sbe : 1;
#else
		u64 slc_sbe : 1;
		u64 slc_dbe : 1;
		u64 uns_sbe : 1;
		u64 uns_dbe : 1;
		u64 in_sbe : 1;
		u64 in_dbe : 1;
		u64 slc_err : 1;
		u64 uns_err : 1;
		u64 in_err : 1;
		u64 uns_wto : 1;
		u64 raz	: 54;
#endif
	} s;
};

/**
 * struct nps_pkt_in_done_cnts - Input instruction ring counts registers
 * @slc_int: Returns a 1 when:
 *    NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT], or
 *    NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]
 *    To clear the bit, the CNTS register must be
 *    written to clear the underlying condition
 * @uns_int: Returns a 1 when:
 *    NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT], or
 *    NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
 *    To clear the bit, the CNTS register must be
 *    written to clear the underlying condition
 * @in_int: Returns a 1 when:
 *    NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
 *    To clear the bit, the DONE_CNTS register
 *    must be written to clear the underlying condition
 * @mbox_int: Returns a 1 when:
 *    NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set.
 *    To clear the bit, write NPS_PKT_MBOX_PF_VF(i)_INT[INTR]
 *    with 1.
 * @resend: A write of 1 will resend an MSI-X interrupt message if any
 *    of the following conditions are true for this ring "i".
 *    NPS_PKT_SLC(i)_CNTS[CNT] > NPS_PKT_SLC(i)_INT_LEVELS[CNT]
 *    NPS_PKT_SLC(i)_CNTS[TIMER] > NPS_PKT_SLC(i)_INT_LEVELS[TIMET]
 *    NPS_PKT_UNS(i)_CNTS[CNT] > NPS_PKT_UNS(i)_INT_LEVELS[CNT]
 *    NPS_PKT_UNS(i)_CNTS[TIMER] > NPS_PKT_UNS(i)_INT_LEVELS[TIMET]
 *    NPS_PKT_IN(i)_DONE_CNTS[CNT] > NPS_PKT_IN(i)_INT_LEVELS[CNT]
 *    NPS_PKT_MBOX_PF_VF(i)_INT[INTR] is set
 * @cnt: Packet counter. Hardware adds to [CNT] as it reads
 *    packets. On a write to this CSR, hardware subtracts the
 *    amount written to the [CNT] field from [CNT], which will
 *    clear PKT_IN(i)_INT_STATUS[INTR] if [CNT] becomes <=
 *    NPS_PKT_IN(i)_INT_LEVELS[CNT]. This register should be
 *    cleared before enabling a ring by reading the current
 *    value and writing it back.
 */
union nps_pkt_in_done_cnts {
	u64 value;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u64 slc_int : 1;
		u64 uns_int : 1;
		u64 in_int : 1;
		u64 mbox_int : 1;
		u64 resend : 1;
		u64 raz : 27;
		u64 cnt	: 32;
#else
		u64 cnt	: 32;
		u64 raz	: 27;
		u64 resend : 1;
		u64 mbox_int : 1;
		u64 in_int : 1;
		u64 uns_int : 1;
		u64 slc_int : 1;
#endif
	} s;
};
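
/*
 * Usage sketch (not part of the original header): the description above
 * says NPS_PKT_IN_DONE_CNTSX(i) should be cleared before a ring is
 * enabled by reading the current value and writing it back.  Assuming
 * readq()/writeq() from <linux/io.h> and a hypothetical __iomem BAR base
 * pointer "bar_addr" (neither is provided by this header), that is:
 *
 *	union nps_pkt_in_done_cnts done_cnts;
 *
 *	done_cnts.value = readq(bar_addr + NPS_PKT_IN_DONE_CNTSX(ring));
 *	writeq(done_cnts.value, bar_addr + NPS_PKT_IN_DONE_CNTSX(ring));
 */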

/**
 * struct nps_pkt_in_instr_ctl - Input Instruction Ring Control Registers.
 * @is64b: If 1, the ring uses 64-byte instructions. If 0, the
 *   ring uses 32-byte instructions.
 * @enb: Enable for the input ring.
 */
union nps_pkt_in_instr_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz	: 62;
		u64 is64b : 1;
		u64 enb	: 1;
#else
		u64 enb	: 1;
		u64 is64b : 1;
		u64 raz : 62;
#endif
	} s;
};
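
/*
 * Illustrative helper (not part of the original header): build an
 * NPS_PKT_IN_INSTR_CTLX(i) value that enables the input ring and selects
 * the instruction size (64-byte when is64b is true, 32-byte otherwise).
 */
static inline u64 nps_pkt_in_instr_ctl_val(bool is64b)
{
	union nps_pkt_in_instr_ctl ctl;

	ctl.value = 0;
	ctl.s.is64b = is64b;
	ctl.s.enb = 1;
	return ctl.value;
}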

/**
 * struct nps_pkt_in_instr_rsize - Input instruction ring size registers
 * @rsize: Ring size (number of instructions)
 */
union nps_pkt_in_instr_rsize {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz	: 32;
		u64 rsize : 32;
#else
		u64 rsize : 32;
		u64 raz	: 32;
#endif
	} s;
};

/**
 * struct nps_pkt_in_instr_baoff_dbell - Input instruction ring
 *   base address offset and doorbell registers
 * @aoff: Address offset. The offset from the NPS_PKT_IN_INSTR_BADDR
 *   where the next pointer is read.
 * @dbell: Pointer list doorbell count. Write operations to this field
 *   increment the present value here. Read operations return the
 *   present value.
 */
union nps_pkt_in_instr_baoff_dbell {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 aoff : 32;
		u64 dbell : 32;
#else
		u64 dbell : 32;
		u64 aoff : 32;
#endif
	} s;
};
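
/*
 * Illustrative helper (not part of the original header): build a doorbell
 * write for NPS_PKT_IN_INSTR_BAOFF_DBELLX(i).  Per the description above,
 * a write to [DBELL] adds the written amount to the doorbell count, so
 * posting "count" new instructions is a write with only [DBELL] set.
 */
static inline u64 nps_pkt_in_dbell_val(u32 count)
{
	union nps_pkt_in_instr_baoff_dbell dbell;

	dbell.value = 0;
	dbell.s.dbell = count;
	return dbell.value;
}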

/**
 * struct nps_core_int_ena_w1s - NPS core interrupt enable set register
 * @host_nps_wr_err: Reads or sets enable for
 *   NPS_CORE_INT[HOST_NPS_WR_ERR].
 * @npco_dma_malform: Reads or sets enable for
 *   NPS_CORE_INT[NPCO_DMA_MALFORM].
 * @exec_wr_timeout: Reads or sets enable for
 *   NPS_CORE_INT[EXEC_WR_TIMEOUT].
 * @host_wr_timeout: Reads or sets enable for
 *   NPS_CORE_INT[HOST_WR_TIMEOUT].
 * @host_wr_err: Reads or sets enable for
 *   NPS_CORE_INT[HOST_WR_ERR]
 */
union nps_core_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz4 : 55;
		u64 host_nps_wr_err : 1;
		u64 npco_dma_malform : 1;
		u64 exec_wr_timeout : 1;
		u64 host_wr_timeout : 1;
		u64 host_wr_err : 1;
		u64 raz3 : 1;
		u64 raz2 : 1;
		u64 raz1 : 1;
		u64 raz0 : 1;
#else
		u64 raz0 : 1;
		u64 raz1 : 1;
		u64 raz2 : 1;
		u64 raz3 : 1;
		u64 host_wr_err	: 1;
		u64 host_wr_timeout : 1;
		u64 exec_wr_timeout : 1;
		u64 npco_dma_malform : 1;
		u64 host_nps_wr_err : 1;
		u64 raz4 : 55;
#endif
	} s;
};

/**
 * struct nps_core_gbl_vfcfg - Global VF Configuration Register.
 * @ilk_disable: When set, this bit indicates that the ILK interface has
 *    been disabled.
 * @obaf: BMO allocation control
 *    0 = allocate per queue
 *    1 = allocate per VF
 * @ibaf: BMI allocation control
 *    0 = allocate per queue
 *    1 = allocate per VF
 * @zaf: ZIP allocation control
 *    0 = allocate per queue
 *    1 = allocate per VF
 * @aeaf: AE allocation control
 *    0 = allocate per queue
 *    1 = allocate per VF
 * @seaf: SE allocation control
 *    0 = allocate per queue
 *    1 = allocate per VF
 * @cfg: VF/PF mode.
 */
union nps_core_gbl_vfcfg {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64  raz :55;
		u64  ilk_disable :1;
		u64  obaf :1;
		u64  ibaf :1;
		u64  zaf :1;
		u64  aeaf :1;
		u64  seaf :1;
		u64  cfg :3;
#else
		u64  cfg :3;
		u64  seaf :1;
		u64  aeaf :1;
		u64  zaf :1;
		u64  ibaf :1;
		u64  obaf :1;
		u64  ilk_disable :1;
		u64  raz :55;
#endif
	} s;
};
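
/*
 * Illustrative helper (not part of the original header): build an
 * NPS_CORE_GBL_VFCFG value for a given VF/PF mode.  The encodings of the
 * 3-bit [CFG] field are hardware-defined and not spelled out in this
 * header, so "mode" is passed through unchanged; leaving the allocation
 * control bits at their per-queue (0) setting is only an assumption of
 * this sketch.
 */
static inline u64 nps_core_gbl_vfcfg_val(u8 mode)
{
	union nps_core_gbl_vfcfg vfcfg;

	vfcfg.value = 0;
	vfcfg.s.cfg = mode;	/* VF/PF mode, hardware-defined encoding */
	return vfcfg.value;
}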

/**
 * struct nps_core_int_active - NPS Core Interrupt Active Register
 * @resend: Resend MSI-X interrupt if there are still interrupts to handle.
 *    Software can set this bit and then exit the ISR.
 * @ocla: Set when any OCLA(0)_INT and corresponding OCLA(0)_INT_ENA_W1C
 *    bits are set
 * @mbox: Set when any NPS_PKT_MBOX_INT_LO/HI and corresponding
 *    NPS_PKT_MBOX_INT_LO_ENA_W1C/HI_ENA_W1C bits are set
 * @emu: bit i is set in [EMU] when any EMU(i)_INT bit is set
 * @bmo: Set when any BMO_INT bit is set
 * @bmi: Set when any BMI_INT bit is set or when any non-RO
 *    BMI_INT and corresponding BMI_INT_ENA_W1C bits are both set
 * @aqm: Set when any AQM_INT bit is set
 * @zqm: Set when any ZQM_INT bit is set
 * @efl: Set when any EFL_INT RO bit is set or when any non-RO EFL_INT
 *    and corresponding EFL_INT_ENA_W1C bits are both set
 * @ilk: Set when any ILK_INT bit is set
 * @lbc: Set when any LBC_INT RO bit is set or when any non-RO LBC_INT
 *    and corresponding LBC_INT_ENA_W1C bits are both set
 * @pem: Set when any PEM(0)_INT RO bit is set or when any non-RO
 *    PEM(0)_INT and corresponding PEM(0)_INT_ENA_W1C bit are both set
 * @ucd: Set when any UCD_INT bit is set
 * @zctl: Set when any ZIP_INT RO bit is set or when any non-RO ZIP_INT
 *    and corresponding ZIP_INT_ENA_W1C bits are both set
 * @lbm: Set when any LBM_INT bit is set
 * @nps_pkt: Set when any NPS_PKT_INT bit is set
 * @nps_core: Set when any NPS_CORE_INT RO bit is set or when non-RO
 *    NPS_CORE_INT and corresponding NPS_CORE_INT_ENA_W1C bits are both set
 */
union nps_core_int_active {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 resend : 1;
		u64 raz	: 43;
		u64 ocla : 1;
		u64 mbox : 1;
		u64 emu	: 4;
		u64 bmo	: 1;
		u64 bmi	: 1;
		u64 aqm	: 1;
		u64 zqm	: 1;
		u64 efl	: 1;
		u64 ilk	: 1;
		u64 lbc	: 1;
		u64 pem	: 1;
		u64 pom	: 1;
		u64 ucd	: 1;
		u64 zctl : 1;
		u64 lbm	: 1;
		u64 nps_pkt : 1;
		u64 nps_core : 1;
#else
		u64 nps_core : 1;
		u64 nps_pkt : 1;
		u64 lbm	: 1;
		u64 zctl: 1;
		u64 ucd	: 1;
		u64 pom	: 1;
		u64 pem	: 1;
		u64 lbc	: 1;
		u64 ilk	: 1;
		u64 efl	: 1;
		u64 zqm	: 1;
		u64 aqm	: 1;
		u64 bmi	: 1;
		u64 bmo	: 1;
		u64 emu	: 4;
		u64 mbox : 1;
		u64 ocla : 1;
		u64 raz	: 43;
		u64 resend : 1;
#endif
	} s;
};
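
/*
 * Usage sketch (not part of the original header): per the [RESEND]
 * description above, an interrupt handler can read NPS_CORE_INT_ACTIVE,
 * service the reported sources, and then write the register back with
 * only [RESEND] set so that a still-pending source raises a fresh MSI-X
 * message.  "bar_addr" is a hypothetical __iomem base pointer and
 * readq()/writeq() come from <linux/io.h>:
 *
 *	union nps_core_int_active core_int;
 *
 *	core_int.value = readq(bar_addr + NPS_CORE_INT_ACTIVE);
 *	... service the sources reported in core_int.s ...
 *	core_int.value = 0;
 *	core_int.s.resend = 1;
 *	writeq(core_int.value, bar_addr + NPS_CORE_INT_ACTIVE);
 */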

/**
 * struct efl_core_int - EFL Interrupt Registers
 * @epci_decode_err: EPCI decoded a transaction that was unknown.
 *    This error should only occur when there is a microcode/SE error
 *    and should be considered fatal.
 * @ae_err: An AE uncorrectable error occurred.
 *    See EFL_CORE(0..3)_AE_ERR_INT
 * @se_err: An SE uncorrectable error occurred.
 *    See EFL_CORE(0..3)_SE_ERR_INT
 * @dbe: Double-bit error occurred in EFL
 * @sbe: Single-bit error occurred in EFL
 * @d_left: Asserted when new POM-Header-BMI-data is
 *    being sent to an Exec, and that Exec has not read all BMI
 *    data associated with the previous POM header
 * @len_ovr: Asserted when an Exec-Read is issued that is more than
 *    14 greater in length than the BMI data left to be read
 */
union efl_core_int {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz	: 57;
		u64 epci_decode_err : 1;
		u64 ae_err : 1;
		u64 se_err : 1;
		u64 dbe	: 1;
		u64 sbe	: 1;
		u64 d_left : 1;
		u64 len_ovr : 1;
#else
		u64 len_ovr : 1;
		u64 d_left : 1;
		u64 sbe	: 1;
		u64 dbe	: 1;
		u64 se_err : 1;
		u64 ae_err : 1;
		u64 epci_decode_err  : 1;
		u64 raz	: 57;
#endif
	} s;
};

/**
 * struct efl_core_int_ena_w1s - EFL core interrupt enable set register
 * @epci_decode_err: Reads or sets enable for
 *   EFL_CORE(0..3)_INT[EPCI_DECODE_ERR].
 * @d_left: Reads or sets enable for
 *   EFL_CORE(0..3)_INT[D_LEFT].
 * @len_ovr: Reads or sets enable for
 *   EFL_CORE(0..3)_INT[LEN_OVR].
 */
union efl_core_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_7_63 : 57;
		u64 epci_decode_err : 1;
		u64 raz_2_5 : 4;
		u64 d_left : 1;
		u64 len_ovr : 1;
#else
		u64 len_ovr : 1;
		u64 d_left : 1;
		u64 raz_2_5 : 4;
		u64 epci_decode_err : 1;
		u64 raz_7_63 : 57;
#endif
	} s;
};

/**
 * struct efl_rnm_ctl_status - RNM Control and Status Register
 * @ent_sel: Select input to RNM FIFO
 * @exp_ent: Exported entropy enable for random number generator
 * @rng_rst: Reset to RNG. Setting this bit to 1 cancels the generation
 *    of the current random number.
 * @rnm_rst: Reset the RNM. Setting this bit to 1 clears all stored numbers
 *    in the random number memory.
 * @rng_en: Enables the output of the RNG.
 * @ent_en: Entropy enable for random number generator.
 */
union efl_rnm_ctl_status {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_9_63 : 55;
		u64 ent_sel : 4;
		u64 exp_ent : 1;
		u64 rng_rst : 1;
		u64 rnm_rst : 1;
		u64 rng_en : 1;
		u64 ent_en : 1;
#else
		u64 ent_en : 1;
		u64 rng_en : 1;
		u64 rnm_rst : 1;
		u64 rng_rst : 1;
		u64 exp_ent : 1;
		u64 ent_sel : 4;
		u64 raz_9_63 : 55;
#endif
	} s;
};
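
/*
 * Illustrative helper (not part of the original header): build an
 * EFL_RNM_CTL_STATUS value that turns the random number unit on by
 * enabling the RNG output and its entropy source.  Programming of the
 * remaining fields is left untouched and is not assumed here.
 */
static inline u64 efl_rnm_enable_val(void)
{
	union efl_rnm_ctl_status rnm_ctl;

	rnm_ctl.value = 0;
	rnm_ctl.s.ent_en = 1;	/* entropy enable */
	rnm_ctl.s.rng_en = 1;	/* RNG output enable */
	return rnm_ctl.value;
}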

/**
 * struct bmi_ctl - BMI control register
 * @ilk_hdrq_thrsh: Maximum number of header queue locations
 *   that ILK packets may consume. When the threshold is
 *   exceeded ILK_XOFF is sent to the BMI_X2P_ARB.
 * @nps_hdrq_thrsh: Maximum number of header queue locations
 *   that NPS packets may consume. When the threshold is
 *   exceeded NPS_XOFF is sent to the BMI_X2P_ARB.
 * @totl_hdrq_thrsh: Maximum number of header queue locations
 *   that the sum of ILK and NPS packets may consume.
 * @ilk_free_thrsh: Maximum number of buffers that ILK packet
 *   flows may consume before ILK_XOFF is sent to the BMI_X2P_ARB.
 * @nps_free_thrsh: Maximum number of buffers that NPS packet
 *   flows may consume before NPS_XOFF is sent to the BMI_X2P_ARB.
 * @totl_free_thrsh: Maximum number of buffers that both ILK and NPS
 *   packet flows may consume before both NPS_XOFF and ILK_XOFF
 *   are asserted to the BMI_X2P_ARB.
 * @max_pkt_len: Maximum packet length, integral number of 256B
 *   buffers.
 */
union bmi_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_56_63 : 8;
		u64 ilk_hdrq_thrsh : 8;
		u64 nps_hdrq_thrsh : 8;
		u64 totl_hdrq_thrsh : 8;
		u64 ilk_free_thrsh : 8;
		u64 nps_free_thrsh : 8;
		u64 totl_free_thrsh : 8;
		u64 max_pkt_len : 8;
#else
		u64 max_pkt_len : 8;
		u64 totl_free_thrsh : 8;
		u64 nps_free_thrsh : 8;
		u64 ilk_free_thrsh : 8;
		u64 totl_hdrq_thrsh : 8;
		u64 nps_hdrq_thrsh : 8;
		u64 ilk_hdrq_thrsh : 8;
		u64 raz_56_63 : 8;
#endif
	} s;
};

/**
 * struct bmi_int_ena_w1s - BMI interrupt enable set register
 * @ilk_req_oflw: Reads or sets enable for
 *   BMI_INT[ILK_REQ_OFLW].
 * @nps_req_oflw: Reads or sets enable for
 *   BMI_INT[NPS_REQ_OFLW].
 * @fpf_undrrn: Reads or sets enable for
 *   BMI_INT[FPF_UNDRRN].
 * @eop_err_ilk: Reads or sets enable for
 *   BMI_INT[EOP_ERR_ILK].
 * @eop_err_nps: Reads or sets enable for
 *   BMI_INT[EOP_ERR_NPS].
 * @sop_err_ilk: Reads or sets enable for
 *   BMI_INT[SOP_ERR_ILK].
 * @sop_err_nps: Reads or sets enable for
 *   BMI_INT[SOP_ERR_NPS].
 * @pkt_rcv_err_ilk: Reads or sets enable for
 *   BMI_INT[PKT_RCV_ERR_ILK].
 * @pkt_rcv_err_nps: Reads or sets enable for
 *   BMI_INT[PKT_RCV_ERR_NPS].
 * @max_len_err_ilk: Reads or sets enable for
 *   BMI_INT[MAX_LEN_ERR_ILK].
 * @max_len_err_nps: Reads or sets enable for
 *   BMI_INT[MAX_LEN_ERR_NPS].
 */
union bmi_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_13_63	: 51;
		u64 ilk_req_oflw : 1;
		u64 nps_req_oflw : 1;
		u64 raz_10 : 1;
		u64 raz_9 : 1;
		u64 fpf_undrrn	: 1;
		u64 eop_err_ilk	: 1;
		u64 eop_err_nps	: 1;
		u64 sop_err_ilk	: 1;
		u64 sop_err_nps	: 1;
		u64 pkt_rcv_err_ilk : 1;
		u64 pkt_rcv_err_nps : 1;
		u64 max_len_err_ilk : 1;
		u64 max_len_err_nps : 1;
#else
		u64 max_len_err_nps : 1;
		u64 max_len_err_ilk : 1;
		u64 pkt_rcv_err_nps : 1;
		u64 pkt_rcv_err_ilk : 1;
		u64 sop_err_nps	: 1;
		u64 sop_err_ilk	: 1;
		u64 eop_err_nps	: 1;
		u64 eop_err_ilk	: 1;
		u64 fpf_undrrn	: 1;
		u64 raz_9 : 1;
		u64 raz_10 : 1;
		u64 nps_req_oflw : 1;
		u64 ilk_req_oflw : 1;
		u64 raz_13_63 : 51;
#endif
	} s;
};

/**
 * struct bmo_ctl2 - BMO Control2 Register
 * @arb_sel: Determines P2X Arbitration
 * @ilk_buf_thrsh: Maximum number of buffers that the
 *    ILK packet flows may consume before ILK XOFF is
 *    asserted to the POM.
 * @nps_slc_buf_thrsh: Maximum number of buffers that the
 *    NPS_SLC packet flow may consume before NPS_SLC XOFF is
 *    asserted to the POM.
 * @nps_uns_buf_thrsh: Maximum number of buffers that the
 *    NPS_UNS packet flow may consume before NPS_UNS XOFF is
 *    asserted to the POM.
 * @totl_buf_thrsh: Maximum number of buffers that ILK, NPS_UNS and
 *    NPS_SLC packet flows may consume before NPS_UNS XOFF, NPS_SLC XOFF
 *    and ILK XOFF are all asserted to the POM.
 */
union bmo_ctl2 {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 arb_sel : 1;
		u64 raz_32_62 : 31;
		u64 ilk_buf_thrsh : 8;
		u64 nps_slc_buf_thrsh : 8;
		u64 nps_uns_buf_thrsh : 8;
		u64 totl_buf_thrsh : 8;
#else
		u64 totl_buf_thrsh : 8;
		u64 nps_uns_buf_thrsh : 8;
		u64 nps_slc_buf_thrsh : 8;
		u64 ilk_buf_thrsh : 8;
		u64 raz_32_62 : 31;
		u64 arb_sel : 1;
#endif
	} s;
};

/**
 * struct pom_int_ena_w1s - POM interrupt enable set register
 * @illegal_intf: Reads or sets enable for POM_INT[ILLEGAL_INTF].
 * @illegal_dport: Reads or sets enable for POM_INT[ILLEGAL_DPORT].
 */
union pom_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 60;
		u64 illegal_intf : 1;
		u64 illegal_dport : 1;
		u64 raz1 : 1;
		u64 raz0 : 1;
#else
		u64 raz0 : 1;
		u64 raz1 : 1;
		u64 illegal_dport : 1;
		u64 illegal_intf : 1;
		u64 raz2 : 60;
#endif
	} s;
};

/**
 * struct lbc_inval_ctl - LBC invalidation control register
 * @wait_timer: Wait timer for wait state. [WAIT_TIMER] must
 *   always be written with its reset value.
 * @cam_inval_start: Software should write [CAM_INVAL_START]=1
 *   to initiate an LBC cache invalidation. After this, software
 *   should read LBC_INVAL_STATUS until LBC_INVAL_STATUS[DONE] is set.
 *   LBC hardware clears [CAM_INVAL_START] before software can
 *   observe LBC_INVAL_STATUS[DONE] to be set
 */
union lbc_inval_ctl {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz2 : 48;
		u64 wait_timer : 8;
		u64 raz1 : 6;
		u64 cam_inval_start : 1;
		u64 raz0 : 1;
#else
		u64 raz0 : 1;
		u64 cam_inval_start : 1;
		u64 raz1 : 6;
		u64 wait_timer : 8;
		u64 raz2 : 48;
#endif
	} s;
};
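
/*
 * Usage sketch (not part of the original header): the description above
 * gives the LBC invalidation sequence as writing [CAM_INVAL_START] = 1
 * and then polling LBC_INVAL_STATUS until [DONE] is set.  With a
 * hypothetical __iomem base pointer "bar_addr", readq()/writeq() from
 * <linux/io.h>, and union lbc_inval_status defined further down in this
 * file:
 *
 *	union lbc_inval_ctl ctl;
 *	union lbc_inval_status status;
 *
 *	ctl.value = readq(bar_addr + LBC_INVAL_CTL);
 *	ctl.s.cam_inval_start = 1;
 *	writeq(ctl.value, bar_addr + LBC_INVAL_CTL);
 *
 *	do {
 *		status.value = readq(bar_addr + LBC_INVAL_STATUS);
 *	} while (!status.s.done);
 */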

/**
 * struct lbc_int_ena_w1s - LBC interrupt enable set register
 * @cam_hard_err: Reads or sets enable for LBC_INT[CAM_HARD_ERR].
 * @cam_inval_abort: Reads or sets enable for LBC_INT[CAM_INVAL_ABORT].
 * @over_fetch_err: Reads or sets enable for LBC_INT[OVER_FETCH_ERR].
 * @cache_line_to_err: Reads or sets enable for
 *   LBC_INT[CACHE_LINE_TO_ERR].
 * @cam_soft_err: Reads or sets enable for
 *   LBC_INT[CAM_SOFT_ERR].
 * @dma_rd_err: Reads or sets enable for
 *   LBC_INT[DMA_RD_ERR].
 */
union lbc_int_ena_w1s {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;
		u64 cam_hard_err : 1;
		u64 cam_inval_abort : 1;
		u64 over_fetch_err : 1;
		u64 cache_line_to_err : 1;
		u64 raz_2_5 : 4;
		u64 cam_soft_err : 1;
		u64 dma_rd_err : 1;
#else
		u64 dma_rd_err : 1;
		u64 cam_soft_err : 1;
		u64 raz_2_5 : 4;
		u64 cache_line_to_err : 1;
		u64 over_fetch_err : 1;
		u64 cam_inval_abort : 1;
		u64 cam_hard_err : 1;
		u64 raz_10_63 : 54;
#endif
	} s;
};

/**
 * struct lbc_int - LBC interrupt summary register
 * @cam_hard_err: indicates a fatal hardware error.
 *   It requires system reset.
 *   When [CAM_HARD_ERR] is set, LBC stops logging any new information in
 *   LBC_POM_MISS_INFO_LOG,
 *   LBC_POM_MISS_ADDR_LOG,
 *   LBC_EFL_MISS_INFO_LOG, and
 *   LBC_EFL_MISS_ADDR_LOG.
 *   Software should sample them.
 * @cam_inval_abort: indicates a fatal hardware error.
 *   System reset is required.
 * @over_fetch_err: indicates a fatal hardware error.
 *   System reset is required.
 * @cache_line_to_err: is a debug feature.
 *   This timeout interrupt bit tells the software that
 *   a cacheline in LBC has non-zero usage and the context
 *   has not been used for greater than the
 *   LBC_TO_CNT[TO_CNT] time interval.
 * @sbe: Memory SBE error. This is recoverable via ECC.
 *   See LBC_ECC_INT for more details.
 * @dbe: Memory DBE error. This is fatal and requires a
 *   system reset.
 * @pref_dat_len_mismatch_err: Summary bit for context length
 *   mismatch errors.
 * @rd_dat_len_mismatch_err: Summary bit for SE read data length
 *   greater than data prefetch length errors.
 * @cam_soft_err: is recoverable. Software must complete a
 *   LBC_INVAL_CTL[CAM_INVAL_START] invalidation sequence and
 *   then clear [CAM_SOFT_ERR].
 * @dma_rd_err: A context prefetch read of host memory returned with
 *   a read error.
 */
union lbc_int {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz_10_63 : 54;
		u64 cam_hard_err : 1;
		u64 cam_inval_abort : 1;
		u64 over_fetch_err : 1;
		u64 cache_line_to_err : 1;
		u64 sbe : 1;
		u64 dbe	: 1;
		u64 pref_dat_len_mismatch_err : 1;
		u64 rd_dat_len_mismatch_err : 1;
		u64 cam_soft_err : 1;
		u64 dma_rd_err : 1;
#else
		u64 dma_rd_err : 1;
		u64 cam_soft_err : 1;
		u64 rd_dat_len_mismatch_err : 1;
		u64 pref_dat_len_mismatch_err : 1;
		u64 dbe	: 1;
		u64 sbe	: 1;
		u64 cache_line_to_err : 1;
		u64 over_fetch_err : 1;
		u64 cam_inval_abort : 1;
		u64 cam_hard_err : 1;
		u64 raz_10_63 : 54;
#endif
	} s;
};

/**
 * struct lbc_inval_status - LBC Invalidation status register
 * @cam_clean_entry_complete_cnt: The number of entries that are
 *   cleaned up successfully.
 * @cam_clean_entry_cnt: The number of entries that have the CAM
 *   inval command issued.
 * @cam_inval_state: cam invalidation FSM state
 * @cam_inval_abort: cam invalidation abort
 * @cam_rst_rdy: lbc_cam reset ready
 * @done: LBC clears [DONE] when
 *   LBC_INVAL_CTL[CAM_INVAL_START] is written with a one,
 *   and sets [DONE] when it completes the invalidation
 *   sequence.
 */
union lbc_inval_status {
	u64 value;
	struct {
#if (defined(__BIG_ENDIAN_BITFIELD))
		u64 raz3 : 23;
		u64 cam_clean_entry_complete_cnt : 9;
		u64 raz2 : 7;
		u64 cam_clean_entry_cnt : 9;
		u64 raz1 : 5;
		u64 cam_inval_state : 3;
		u64 raz0 : 5;
		u64 cam_inval_abort : 1;
		u64 cam_rst_rdy	: 1;
		u64 done : 1;
#else
		u64 done : 1;
		u64 cam_rst_rdy : 1;
		u64 cam_inval_abort : 1;
		u64 raz0 : 5;
		u64 cam_inval_state : 3;
		u64 raz1 : 5;
		u64 cam_clean_entry_cnt : 9;
		u64 raz2 : 7;
		u64 cam_clean_entry_complete_cnt : 9;
		u64 raz3 : 23;
#endif
	} s;
};

#endif /* __NITROX_CSR_H */