// SPDX-License-Identifier: GPL-2.0
#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"

/* PLL reference clock, used to derive the core frequency */
#define PLL_REF_CLK 50

/**
 * emu_enable_cores - enable EMU cluster cores
 * @ndev: N5 device
 */
static void emu_enable_cores(struct nitrox_device *ndev)
{
	union emu_se_enable emu_se;
	union emu_ae_enable emu_ae;
	int i;

	/* AE cores 20 per cluster */
	emu_ae.value = 0;
	emu_ae.s.enable = 0xfffff;

	/* SE cores 16 per cluster */
	emu_se.value = 0;
	emu_se.s.enable = 0xffff;

	/* enable per cluster cores */
	for (i = 0; i < NR_CLUSTERS; i++) {
		nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
		nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
	}
}

/**
 * nitrox_config_emu_unit - configure EMU unit
 * @ndev: N5 device
 */
void nitrox_config_emu_unit(struct nitrox_device *ndev)
{
	union emu_wd_int_ena_w1s emu_wd_int;
	union emu_ge_int_ena_w1s emu_ge_int;
	u64 offset;
	int i;

	/* enable cores */
	emu_enable_cores(ndev);

	/* enable general error and watchdog interrupts */
	emu_ge_int.value = 0;
	emu_ge_int.s.se_ge = 0xffff;
	emu_ge_int.s.ae_ge = 0xfffff;
	emu_wd_int.value = 0;
	emu_wd_int.s.se_wd = 1;

	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_WD_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_wd_int.value);
		offset = EMU_GE_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_ge_int.value);
	}
}

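/**
 * reset_pkt_input_ring - reset a packet input ring
 * @ndev: N5 device
 * @ring: ring number
 *
 * Disables the ring, waits for [ENB] to clear, then clears the
 * doorbell and done counts.
 */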
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
	union nps_pkt_in_done_cnts pkt_in_cnts;
	u64 offset;

	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	/* disable the ring */
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
	usleep_range(100, 150);

	/* wait for [ENB] to clear */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	} while (pkt_in_ctl.s.enb);

	/* clear the doorbell counts */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
	pkt_in_dbell.value = 0;
	pkt_in_dbell.s.dbell = 0xffffffff;
	nitrox_write_csr(ndev, offset, pkt_in_dbell.value);

	/* clear done counts */
	offset = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
	usleep_range(50, 100);
}

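/**
 * enable_pkt_input_ring - enable a packet input ring
 * @ndev: N5 device
 * @ring: ring number
 */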
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	u64 offset;

	/* 64-byte instruction size */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.is64b = 1;
	pkt_in_ctl.s.enb = 1;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	} while (!pkt_in_ctl.s.enb);
}

/**
 * nitrox_config_pkt_input_rings - configure Packet Input Rings
 * @ndev: N5 device
 */
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		u64 offset;

		reset_pkt_input_ring(ndev, i);

		/* configure ring base address (16-byte aligned),
		 * ring size and interrupt threshold.
		 */
		offset = NPS_PKT_IN_INSTR_BADDRX(i);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* configure ring size */
		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
		pkt_in_rsize.value = 0;
		pkt_in_rsize.s.rsize = ndev->qlen;
		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);

		/* set high threshold for pkt input ring interrupts */
		offset = NPS_PKT_IN_INT_LEVELSX(i);
		nitrox_write_csr(ndev, offset, 0xffffffff);

		enable_pkt_input_ring(ndev, i);
	}
}

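/**
 * reset_pkt_solicit_port - reset a packet solicit port
 * @ndev: N5 device
 * @port: port number
 *
 * Disables the port, waits for [ENB] to clear, then clears the
 * solicit counters.
 */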
static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	u64 offset;

	/* disable slc port */
	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
	usleep_range(100, 150);

	/* wait for [ENB] to clear */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	} while (pkt_slc_ctl.s.enb);

	/* clear slc counters */
	offset = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
	usleep_range(50, 100);
}

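/**
 * enable_pkt_solicit_port - enable a packet solicit port
 * @ndev: N5 device
 * @port: port number
 */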
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	u64 offset;

	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = 0;
	pkt_slc_ctl.s.enb = 1;

	/*
	 * 8 trailing 0x00 bytes will be added
	 * to the end of the outgoing packet.
	 */
	pkt_slc_ctl.s.z = 1;
	/* enable response header */
	pkt_slc_ctl.s.rh = 1;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	} while (!pkt_slc_ctl.s.enb);
}

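/**
 * config_single_pkt_solicit_port - reset and configure a solicit port
 * @ndev: N5 device
 * @port: port number
 */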
static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
					   int port)
{
	union nps_pkt_slc_int_levels pkt_slc_int;
	u64 offset;

	reset_pkt_solicit_port(ndev, port);

	offset = NPS_PKT_SLC_INT_LEVELSX(port);
	pkt_slc_int.value = 0;
	/* time interrupt threshold */
	pkt_slc_int.s.timet = 0x3fffff;
	nitrox_write_csr(ndev, offset, pkt_slc_int.value);

	enable_pkt_solicit_port(ndev, port);
}

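/**
 * nitrox_config_pkt_solicit_ports - configure Packet Solicit Ports
 * @ndev: N5 device
 */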
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++)
		config_single_pkt_solicit_port(ndev, i);
}

/**
 * enable_nps_interrupts - enable NPS interrupts
 * @ndev: N5 device
 *
 * This includes NPS core, packet in and slc interrupts.
 */
static void enable_nps_interrupts(struct nitrox_device *ndev)
{
	union nps_core_int_ena_w1s core_int;

	/* NPS core interrupts */
	core_int.value = 0;
	core_int.s.host_wr_err = 1;
	core_int.s.host_wr_timeout = 1;
	core_int.s.exec_wr_timeout = 1;
	core_int.s.npco_dma_malform = 1;
	core_int.s.host_nps_wr_err = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);

	/* NPS packet in ring interrupts */
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
	/* NPS packet slc port interrupts */
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0ULL));
}

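/**
 * nitrox_config_nps_unit - configure NPS unit
 * @ndev: N5 device
 *
 * Sets endian control, disables the ILK interface, configures the
 * packet input rings and solicit ports, and enables NPS interrupts.
 */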
void nitrox_config_nps_unit(struct nitrox_device *ndev)
{
	union nps_core_gbl_vfcfg core_gbl_vfcfg;

	/* endian control information */
	nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);

	/* disable ILK interface */
	core_gbl_vfcfg.value = 0;
	core_gbl_vfcfg.s.ilk_disable = 1;
	core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
	/* config input and solicit ports */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* enable interrupts */
	enable_nps_interrupts(ndev);
}

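/**
 * nitrox_config_pom_unit - configure POM unit
 * @ndev: N5 device
 *
 * Enables POM interrupts and a performance counter per usable SE core.
 */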
void nitrox_config_pom_unit(struct nitrox_device *ndev)
{
	union pom_int_ena_w1s pom_int;
	int i;

	/* enable pom interrupts */
	pom_int.value = 0;
	pom_int.s.illegal_dport = 1;
	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);

	/* enable perf counters */
	for (i = 0; i < ndev->hw.se_cores; i++)
		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
}

/**
 * nitrox_config_rand_unit - enable N5 random number unit
 * @ndev: N5 device
 */
void nitrox_config_rand_unit(struct nitrox_device *ndev)
{
	union efl_rnm_ctl_status efl_rnm_ctl;
	u64 offset;

	offset = EFL_RNM_CTL_STATUS;
	efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
	efl_rnm_ctl.s.ent_en = 1;
	efl_rnm_ctl.s.rng_en = 1;
	nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
}

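/**
 * nitrox_config_efl_unit - configure EFL unit
 * @ndev: N5 device
 *
 * Enables EFL core and VF error interrupts for each cluster.
 */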
void nitrox_config_efl_unit(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int_ena_w1s efl_core_int;
		u64 offset;

		/* EFL core interrupts */
		offset = EFL_CORE_INT_ENA_W1SX(i);
		efl_core_int.value = 0;
		efl_core_int.s.len_ovr = 1;
		efl_core_int.s.d_left = 1;
		efl_core_int.s.epci_decode_err = 1;
		nitrox_write_csr(ndev, offset, efl_core_int.value);

		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
	}
}

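/**
 * nitrox_config_bmi_unit - configure BMI unit
 * @ndev: N5 device
 */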
void nitrox_config_bmi_unit(struct nitrox_device *ndev)
{
	union bmi_ctl bmi_ctl;
	union bmi_int_ena_w1s bmi_int_ena;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMI_CTL;
	bmi_ctl.value = nitrox_read_csr(ndev, offset);
	bmi_ctl.s.max_pkt_len = 0xff;
	bmi_ctl.s.nps_free_thrsh = 0xff;
	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
	nitrox_write_csr(ndev, offset, bmi_ctl.value);

	/* enable interrupts */
	offset = BMI_INT_ENA_W1S;
	bmi_int_ena.value = 0;
	bmi_int_ena.s.max_len_err_nps = 1;
	bmi_int_ena.s.pkt_rcv_err_nps = 1;
	bmi_int_ena.s.fpf_undrrn = 1;
	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
}

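/**
 * nitrox_config_bmo_unit - configure BMO unit
 * @ndev: N5 device
 */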
void nitrox_config_bmo_unit(struct nitrox_device *ndev)
{
	union bmo_ctl2 bmo_ctl2;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMO_CTL2;
	bmo_ctl2.value = nitrox_read_csr(ndev, offset);
	bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
	nitrox_write_csr(ndev, offset, bmo_ctl2.value);
}

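/**
 * invalidate_lbc - invalidate the LBC
 * @ndev: N5 device
 *
 * Starts a CAM invalidation and polls LBC_INVAL_STATUS until the
 * hardware reports completion.
 */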
void invalidate_lbc(struct nitrox_device *ndev)
{
	union lbc_inval_ctl lbc_ctl;
	union lbc_inval_status lbc_stat;
	u64 offset;

	/* invalidate LBC */
	offset = LBC_INVAL_CTL;
	lbc_ctl.value = nitrox_read_csr(ndev, offset);
	lbc_ctl.s.cam_inval_start = 1;
	nitrox_write_csr(ndev, offset, lbc_ctl.value);

	offset = LBC_INVAL_STATUS;

	/* wait for the invalidation to complete */
	do {
		lbc_stat.value = nitrox_read_csr(ndev, offset);
	} while (!lbc_stat.s.done);
}

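/**
 * nitrox_config_lbc_unit - configure LBC unit
 * @ndev: N5 device
 *
 * Invalidates the LBC and enables LBC, PLM and ELM interrupts.
 */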
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
{
	union lbc_int_ena_w1s lbc_int_ena;
	u64 offset;

	invalidate_lbc(ndev);

	/* enable interrupts */
	offset = LBC_INT_ENA_W1S;
	lbc_int_ena.value = 0;
	lbc_int_ena.s.dma_rd_err = 1;
	lbc_int_ena.s.over_fetch_err = 1;
	lbc_int_ena.s.cam_inval_abort = 1;
	lbc_int_ena.s.cam_hard_err = 1;
	nitrox_write_csr(ndev, offset, lbc_int_ena.value);

	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));

	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
}

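/**
 * config_nps_core_vfcfg_mode - program the PF/VF mode in NPS_CORE_GBL_VFCFG
 * @ndev: N5 device
 * @mode: PF/VF mode to configure
 */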
void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
{
	union nps_core_gbl_vfcfg vfcfg;

	vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
	vfcfg.s.cfg = mode & 0x7;

	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}

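/**
 * nitrox_get_hwinfo - discover device capabilities
 * @ndev: N5 device
 *
 * Reads the core frequency, derives the usable AE/SE/ZIP core counts
 * from the fuse maps and builds the part name string.
 */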
void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
	union emu_fuse_map emu_fuse;
	union rst_boot rst_boot;
	union fus_dat1 fus_dat1;
	unsigned char name[IFNAMSIZ * 2] = {};
	int i, dead_cores;
	u64 offset;

	/* get core frequency */
	offset = RST_BOOT;
	rst_boot.value = nitrox_read_csr(ndev, offset);
	ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;

	/* count usable AE and SE cores from the per cluster fuse maps */
	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_FUSE_MAPX(i);
		emu_fuse.value = nitrox_read_csr(ndev, offset);
		if (emu_fuse.s.valid) {
			dead_cores = hweight32(emu_fuse.s.ae_fuse);
			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
			dead_cores = hweight16(emu_fuse.s.se_fuse);
			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
		}
	}
	/* find zip hardware availability */
	offset = FUS_DAT1;
	fus_dat1.value = nitrox_read_csr(ndev, offset);
	if (!fus_dat1.nozip) {
		dead_cores = hweight8(fus_dat1.zip_info);
		ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
	}

	/* determine the partname: CNN55<cores>-<freq><pincount>-<rev> */
	if (ndev->hw.ae_cores == AE_MAX_CORES) {
		switch (ndev->hw.se_cores) {
		case SE_MAX_CORES:
			i = snprintf(name, sizeof(name), "CNN5560");
			break;
		case 40:
			i = snprintf(name, sizeof(name), "CNN5560s");
			break;
		}
	} else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
		i = snprintf(name, sizeof(name), "CNN5530");
	} else {
		i = snprintf(name, sizeof(name), "CNN5560i");
	}

	snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
		 ndev->hw.freq, ndev->hw.revision_id);

	/* copy the partname, ensuring NUL termination */
	strscpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}