// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"

/**
 * emu_enable_cores - Enable EMU cluster cores.
 * @ndev: N5 device
 */
static void emu_enable_cores(struct nitrox_device *ndev)
{
	union emu_se_enable emu_se;
	union emu_ae_enable emu_ae;
	int i;

	/* 20 AE cores per cluster */
	emu_ae.value = 0;
	emu_ae.s.enable = 0xfffff;

	/* 16 SE cores per cluster */
	emu_se.value = 0;
	emu_se.s.enable = 0xffff;

	/* enable the cores in each cluster */
	for (i = 0; i < NR_CLUSTERS; i++) {
		nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
		nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
	}
}

/**
 * nitrox_config_emu_unit - configure EMU unit.
 * @ndev: N5 device
 */
void nitrox_config_emu_unit(struct nitrox_device *ndev)
{
	union emu_wd_int_ena_w1s emu_wd_int;
	union emu_ge_int_ena_w1s emu_ge_int;
	u64 offset;
	int i;

	/* enable cores */
	emu_enable_cores(ndev);

	/* enable general error and watchdog interrupts */
	emu_ge_int.value = 0;
	emu_ge_int.s.se_ge = 0xffff;
	emu_ge_int.s.ae_ge = 0xfffff;
	emu_wd_int.value = 0;
	emu_wd_int.s.se_wd = 1;

	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_WD_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_wd_int.value);
		offset = EMU_GE_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_ge_int.value);
	}
}

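/**
 * reset_pkt_input_ring - disable a packet input ring and clear its counters
 * @ndev: N5 device
 * @ring: packet input ring number
 */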
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
	union nps_pkt_in_done_cnts pkt_in_cnts;
	u64 offset;

	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	/* disable the ring */
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
	usleep_range(100, 150);

	/* wait for [ENB] to clear */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	} while (pkt_in_ctl.s.enb);

	/* clear doorbell counts */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
	pkt_in_dbell.value = 0;
	pkt_in_dbell.s.dbell = 0xffffffff;
	nitrox_write_csr(ndev, offset, pkt_in_dbell.value);

	/* clear done counts */
	offset = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
	usleep_range(50, 100);
}

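/**
 * enable_pkt_input_ring - enable a packet input ring with 64-byte instructions
 * @ndev: N5 device
 * @ring: packet input ring number
 */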
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	u64 offset;

	/* 64-byte instruction size */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.is64b = 1;
	pkt_in_ctl.s.enb = 1;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	} while (!pkt_in_ctl.s.enb);
}

/**
 * nitrox_config_pkt_input_rings - configure Packet Input Rings
 * @ndev: N5 device
 */
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		u64 offset;

		reset_pkt_input_ring(ndev, i);

		/* configure the 16-byte aligned ring base address,
		 * ring size and interrupt threshold
		 */
		offset = NPS_PKT_IN_INSTR_BADDRX(i);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* configure ring size */
		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
		pkt_in_rsize.value = 0;
		pkt_in_rsize.s.rsize = ndev->qlen;
		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);

		/* set high threshold for pkt input ring interrupts */
		offset = NPS_PKT_IN_INT_LEVELSX(i);
		nitrox_write_csr(ndev, offset, 0xffffffff);

		enable_pkt_input_ring(ndev, i);
	}
}

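/**
 * reset_pkt_solicit_port - disable a packet solicit port and clear its counters
 * @ndev: N5 device
 * @port: packet solicit port number
 */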
static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	u64 offset;

	/* disable the SLC port */
	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
	usleep_range(100, 150);

	/* wait for [ENB] to clear */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	} while (pkt_slc_ctl.s.enb);

	/* clear SLC counters */
	offset = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
	usleep_range(50, 100);
}

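/**
 * enable_pkt_solicit_port - enable a packet solicit port
 * @ndev: N5 device
 * @port: packet solicit port number
 */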
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	u64 offset;

	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = 0;
	pkt_slc_ctl.s.enb = 1;

	/*
	 * 8 trailing 0x00 bytes will be added
	 * to the end of the outgoing packet.
	 */
	pkt_slc_ctl.s.z = 1;
	/* enable response header */
	pkt_slc_ctl.s.rh = 1;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	} while (!pkt_slc_ctl.s.enb);
}

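/**
 * config_single_pkt_solicit_port - reset and configure a packet solicit port
 * @ndev: N5 device
 * @port: packet solicit port number
 */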
static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
					   int port)
{
	union nps_pkt_slc_int_levels pkt_slc_int;
	u64 offset;

	reset_pkt_solicit_port(ndev, port);

	offset = NPS_PKT_SLC_INT_LEVELSX(port);
	pkt_slc_int.value = 0;
	/* time interrupt threshold */
	pkt_slc_int.s.timet = 0x3fffff;
	nitrox_write_csr(ndev, offset, pkt_slc_int.value);

	enable_pkt_solicit_port(ndev, port);
}

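/**
 * nitrox_config_pkt_solicit_ports - configure all Packet Solicit Ports
 * @ndev: N5 device
 */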
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++)
		config_single_pkt_solicit_port(ndev, i);
}

/**
 * enable_nps_interrupts - enable NPS interrupts
 * @ndev: N5 device
 *
 * This includes NPS core, packet input ring and SLC port interrupts.
 */
static void enable_nps_interrupts(struct nitrox_device *ndev)
{
	union nps_core_int_ena_w1s core_int;

	/* NPS core interrupts */
	core_int.value = 0;
	core_int.s.host_wr_err = 1;
	core_int.s.host_wr_timeout = 1;
	core_int.s.exec_wr_timeout = 1;
	core_int.s.npco_dma_malform = 1;
	core_int.s.host_nps_wr_err = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);

	/* NPS packet in ring interrupts */
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
	/* NPS packet slc port interrupts */
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0ULL));
}

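/**
 * nitrox_config_nps_unit - configure NPS unit
 * @ndev: N5 device
 */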
void nitrox_config_nps_unit(struct nitrox_device *ndev)
{
	union nps_core_gbl_vfcfg core_gbl_vfcfg;

	/* endian control information */
	nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);

	/* disable ILK interface */
	core_gbl_vfcfg.value = 0;
	core_gbl_vfcfg.s.ilk_disable = 1;
	core_gbl_vfcfg.s.cfg = PF_MODE;
	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);

	/* configure input rings and solicit ports */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* enable interrupts */
	enable_nps_interrupts(ndev);
}

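/**
 * nitrox_config_pom_unit - configure POM unit
 * @ndev: N5 device
 */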
void nitrox_config_pom_unit(struct nitrox_device *ndev)
{
	union pom_int_ena_w1s pom_int;
	int i;

	/* enable POM interrupts */
	pom_int.value = 0;
	pom_int.s.illegal_dport = 1;
	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);

	/* enable perf counters */
	for (i = 0; i < ndev->hw.se_cores; i++)
		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
}

/**
 * nitrox_config_rand_unit - enable N5 random number unit
 * @ndev: N5 device
 */
void nitrox_config_rand_unit(struct nitrox_device *ndev)
{
	union efl_rnm_ctl_status efl_rnm_ctl;
	u64 offset;

	offset = EFL_RNM_CTL_STATUS;
	efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
	efl_rnm_ctl.s.ent_en = 1;
	efl_rnm_ctl.s.rng_en = 1;
	nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
}

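/**
 * nitrox_config_efl_unit - configure EFL unit
 * @ndev: N5 device
 */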
void nitrox_config_efl_unit(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int_ena_w1s efl_core_int;
		u64 offset;

		/* EFL core interrupts */
		offset = EFL_CORE_INT_ENA_W1SX(i);
		efl_core_int.value = 0;
		efl_core_int.s.len_ovr = 1;
		efl_core_int.s.d_left = 1;
		efl_core_int.s.epci_decode_err = 1;
		nitrox_write_csr(ndev, offset, efl_core_int.value);

		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
	}
}

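/**
 * nitrox_config_bmi_unit - configure BMI unit
 * @ndev: N5 device
 */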
void nitrox_config_bmi_unit(struct nitrox_device *ndev)
{
	union bmi_ctl bmi_ctl;
	union bmi_int_ena_w1s bmi_int_ena;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMI_CTL;
	bmi_ctl.value = nitrox_read_csr(ndev, offset);
	bmi_ctl.s.max_pkt_len = 0xff;
	bmi_ctl.s.nps_free_thrsh = 0xff;
	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
	nitrox_write_csr(ndev, offset, bmi_ctl.value);

	/* enable interrupts */
	offset = BMI_INT_ENA_W1S;
	bmi_int_ena.value = 0;
	bmi_int_ena.s.max_len_err_nps = 1;
	bmi_int_ena.s.pkt_rcv_err_nps = 1;
	bmi_int_ena.s.fpf_undrrn = 1;
	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
}

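/**
 * nitrox_config_bmo_unit - configure BMO unit
 * @ndev: N5 device
 */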
void nitrox_config_bmo_unit(struct nitrox_device *ndev)
{
	union bmo_ctl2 bmo_ctl2;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMO_CTL2;
	bmo_ctl2.value = nitrox_read_csr(ndev, offset);
	bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
	nitrox_write_csr(ndev, offset, bmo_ctl2.value);
}

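/**
 * invalidate_lbc - invalidate LBC contents and wait for completion
 * @ndev: N5 device
 */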
void invalidate_lbc(struct nitrox_device *ndev)
{
	union lbc_inval_ctl lbc_ctl;
	union lbc_inval_status lbc_stat;
	u64 offset;

	/* invalidate LBC */
	offset = LBC_INVAL_CTL;
	lbc_ctl.value = nitrox_read_csr(ndev, offset);
	lbc_ctl.s.cam_inval_start = 1;
	nitrox_write_csr(ndev, offset, lbc_ctl.value);

	offset = LBC_INVAL_STATUS;

	/* wait for the invalidate to complete */
	do {
		lbc_stat.value = nitrox_read_csr(ndev, offset);
	} while (!lbc_stat.s.done);
}

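/**
 * nitrox_config_lbc_unit - configure LBC unit
 * @ndev: N5 device
 */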
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
{
	union lbc_int_ena_w1s lbc_int_ena;
	u64 offset;

	invalidate_lbc(ndev);

	/* enable interrupts */
	offset = LBC_INT_ENA_W1S;
	lbc_int_ena.value = 0;
	lbc_int_ena.s.dma_rd_err = 1;
	lbc_int_ena.s.over_fetch_err = 1;
	lbc_int_ena.s.cam_inval_abort = 1;
	lbc_int_ena.s.cam_hard_err = 1;
	nitrox_write_csr(ndev, offset, lbc_int_ena.value);

	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));

	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
}