#include <linux/delay.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"

/**
 * emu_enable_cores - Enable EMU cluster cores.
 * @ndev: N5 device
 */
static void emu_enable_cores(struct nitrox_device *ndev)
{
	union emu_se_enable emu_se;
	union emu_ae_enable emu_ae;
	int i;

	/* AE cores 20 per cluster */
	emu_ae.value = 0;
	emu_ae.s.enable = 0xfffff;

	/* SE cores 16 per cluster */
	emu_se.value = 0;
	emu_se.s.enable = 0xffff;

	/* enable per cluster cores */
	for (i = 0; i < NR_CLUSTERS; i++) {
		nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
		nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
	}
}

/**
 * nitrox_config_emu_unit - configure EMU unit.
 * @ndev: N5 device
 */
void nitrox_config_emu_unit(struct nitrox_device *ndev)
{
	union emu_wd_int_ena_w1s emu_wd_int;
	union emu_ge_int_ena_w1s emu_ge_int;
	u64 offset;
	int i;

	/* enable cores */
	emu_enable_cores(ndev);

	/* enable general error and watchdog interrupts */
	emu_ge_int.value = 0;
	emu_ge_int.s.se_ge = 0xffff;
	emu_ge_int.s.ae_ge = 0xfffff;
	emu_wd_int.value = 0;
	emu_wd_int.s.se_wd = 1;

	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_WD_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_wd_int.value);
		offset = EMU_GE_INT_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, emu_ge_int.value);
	}
}

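/**
 * reset_pkt_input_ring - reset a packet input ring
 * @ndev: N5 device
 * @ring: ring number
 *
 * Disables the ring, waits for [ENB] to clear, then clears the
 * pending doorbell and done counts.
 */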
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
	union nps_pkt_in_done_cnts pkt_in_cnts;
	u64 offset;

	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	/* disable the ring */
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
	usleep_range(100, 150);

	/* wait to clear [ENB] */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	} while (pkt_in_ctl.s.enb);

	/* clear off door bell counts */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
	pkt_in_dbell.value = 0;
	pkt_in_dbell.s.dbell = 0xffffffff;
	nitrox_write_csr(ndev, offset, pkt_in_dbell.value);

	/* clear done counts */
	offset = NPS_PKT_IN_DONE_CNTSX(ring);
	pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
	usleep_range(50, 100);
}

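/**
 * enable_pkt_input_ring - enable a packet input ring
 * @ndev: N5 device
 * @ring: ring number
 *
 * Selects 64-byte instructions, sets [ENB] and waits for the hardware
 * to reflect the enable.
 */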
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
	union nps_pkt_in_instr_ctl pkt_in_ctl;
	u64 offset;

	/* 64-byte instruction size */
	offset = NPS_PKT_IN_INSTR_CTLX(ring);
	pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_in_ctl.s.is64b = 1;
	pkt_in_ctl.s.enb = 1;
	nitrox_write_csr(ndev, offset, pkt_in_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
	} while (!pkt_in_ctl.s.enb);
}

/**
 * nitrox_config_pkt_input_rings - configure Packet Input Rings
 * @ndev: N5 device
 */
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
		union nps_pkt_in_instr_rsize pkt_in_rsize;
		u64 offset;

		reset_pkt_input_ring(ndev, i);

		/* configure ring base address 16-byte aligned,
		 * size and interrupt threshold.
		 */
		offset = NPS_PKT_IN_INSTR_BADDRX(i);
		nitrox_write_csr(ndev, offset, cmdq->dma);

		/* configure ring size */
		offset = NPS_PKT_IN_INSTR_RSIZEX(i);
		pkt_in_rsize.value = 0;
		pkt_in_rsize.s.rsize = ndev->qlen;
		nitrox_write_csr(ndev, offset, pkt_in_rsize.value);

		/* set high threshold for pkt input ring interrupts */
		offset = NPS_PKT_IN_INT_LEVELSX(i);
		nitrox_write_csr(ndev, offset, 0xffffffff);

		enable_pkt_input_ring(ndev, i);
	}
}

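/**
 * reset_pkt_solicit_port - reset a packet solicit port
 * @ndev: N5 device
 * @port: port number
 *
 * Disables the port, waits for [ENB] to clear, then clears the solicit
 * counters.
 */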
static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	union nps_pkt_slc_cnts pkt_slc_cnts;
	u64 offset;

	/* disable slc port */
	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	pkt_slc_ctl.s.enb = 0;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
	usleep_range(100, 150);

	/* wait to clear [ENB] */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	} while (pkt_slc_ctl.s.enb);

	/* clear slc counters */
	offset = NPS_PKT_SLC_CNTSX(port);
	pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
	nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
	usleep_range(50, 100);
}

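/**
 * enable_pkt_solicit_port - enable a packet solicit port
 * @ndev: N5 device
 * @port: port number
 *
 * Enables the port with zero padding and response headers, then waits
 * for [ENB] to be set.
 */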
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
	union nps_pkt_slc_ctl pkt_slc_ctl;
	u64 offset;

	offset = NPS_PKT_SLC_CTLX(port);
	pkt_slc_ctl.value = 0;
	pkt_slc_ctl.s.enb = 1;

	/*
	 * 8 trailing 0x00 bytes will be added
	 * to the end of the outgoing packet.
	 */
	pkt_slc_ctl.s.z = 1;
	/* enable response header */
	pkt_slc_ctl.s.rh = 1;
	nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);

	/* wait for [ENB] to be set */
	do {
		pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
	} while (!pkt_slc_ctl.s.enb);
}

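/**
 * config_single_pkt_solicit_port - configure a single packet solicit port
 * @ndev: N5 device
 * @port: port number
 *
 * Resets the port, programs the time interrupt threshold and re-enables
 * the port.
 */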
static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
					   int port)
{
	union nps_pkt_slc_int_levels pkt_slc_int;
	u64 offset;

	reset_pkt_solicit_port(ndev, port);

	offset = NPS_PKT_SLC_INT_LEVELSX(port);
	pkt_slc_int.value = 0;
	/* time interrupt threshold */
	pkt_slc_int.s.timet = 0x3fffff;
	nitrox_write_csr(ndev, offset, pkt_slc_int.value);

	enable_pkt_solicit_port(ndev, port);
}

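/**
 * nitrox_config_pkt_solicit_ports - configure Packet Solicit Ports
 * @ndev: N5 device
 */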
void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++)
		config_single_pkt_solicit_port(ndev, i);
}

/**
 * enable_nps_interrupts - enable NPS interrupts
 * @ndev: N5 device.
 *
 * This includes NPS core, packet in and slc interrupts.
 */
static void enable_nps_interrupts(struct nitrox_device *ndev)
{
	union nps_core_int_ena_w1s core_int;

	/* NPS core interrupts */
	core_int.value = 0;
	core_int.s.host_wr_err = 1;
	core_int.s.host_wr_timeout = 1;
	core_int.s.exec_wr_timeout = 1;
	core_int.s.npco_dma_malform = 1;
	core_int.s.host_nps_wr_err = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);

	/* NPS packet in ring interrupts */
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
	/* NPS packet slc port interrupts */
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0ULL));
}

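/**
 * nitrox_config_nps_unit - configure NPS unit
 * @ndev: N5 device
 *
 * Sets endian control, disables the ILK interface, configures the packet
 * input rings and solicit ports, and enables NPS interrupts.
 */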
void nitrox_config_nps_unit(struct nitrox_device *ndev)
{
	union nps_core_gbl_vfcfg core_gbl_vfcfg;

	/* endian control information */
	nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);

	/* disable ILK interface */
	core_gbl_vfcfg.value = 0;
	core_gbl_vfcfg.s.ilk_disable = 1;
	core_gbl_vfcfg.s.cfg = PF_MODE;
	nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
	/* config input and solicit ports */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* enable interrupts */
	enable_nps_interrupts(ndev);
}

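/**
 * nitrox_config_pom_unit - configure POM unit
 * @ndev: N5 device
 *
 * Enables POM interrupts and per SE core performance counters.
 */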
void nitrox_config_pom_unit(struct nitrox_device *ndev)
{
	union pom_int_ena_w1s pom_int;
	int i;

	/* enable pom interrupts */
	pom_int.value = 0;
	pom_int.s.illegal_dport = 1;
	nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);

	/* enable perf counters */
	for (i = 0; i < ndev->hw.se_cores; i++)
		nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
}

/**
 * nitrox_config_rand_unit - enable N5 random number unit
 * @ndev: N5 device
 */
void nitrox_config_rand_unit(struct nitrox_device *ndev)
{
	union efl_rnm_ctl_status efl_rnm_ctl;
	u64 offset;

	offset = EFL_RNM_CTL_STATUS;
	efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
	efl_rnm_ctl.s.ent_en = 1;
	efl_rnm_ctl.s.rng_en = 1;
	nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
}

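/**
 * nitrox_config_efl_unit - configure EFL unit
 * @ndev: N5 device
 *
 * Enables EFL core and VF error interrupts for each cluster.
 */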
void nitrox_config_efl_unit(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int_ena_w1s efl_core_int;
		u64 offset;

		/* EFL core interrupts */
		offset = EFL_CORE_INT_ENA_W1SX(i);
		efl_core_int.value = 0;
		efl_core_int.s.len_ovr = 1;
		efl_core_int.s.d_left = 1;
		efl_core_int.s.epci_decode_err = 1;
		nitrox_write_csr(ndev, offset, efl_core_int.value);

		offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
		offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
		nitrox_write_csr(ndev, offset, (~0ULL));
	}
}

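/**
 * nitrox_config_bmi_unit - configure BMI unit
 * @ndev: N5 device
 *
 * Programs the BMI thresholds and enables BMI interrupts.
 */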
void nitrox_config_bmi_unit(struct nitrox_device *ndev)
{
	union bmi_ctl bmi_ctl;
	union bmi_int_ena_w1s bmi_int_ena;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMI_CTL;
	bmi_ctl.value = nitrox_read_csr(ndev, offset);
	bmi_ctl.s.max_pkt_len = 0xff;
	bmi_ctl.s.nps_free_thrsh = 0xff;
	bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
	nitrox_write_csr(ndev, offset, bmi_ctl.value);

	/* enable interrupts */
	offset = BMI_INT_ENA_W1S;
	bmi_int_ena.value = 0;
	bmi_int_ena.s.max_len_err_nps = 1;
	bmi_int_ena.s.pkt_rcv_err_nps = 1;
	bmi_int_ena.s.fpf_undrrn = 1;
	nitrox_write_csr(ndev, offset, bmi_int_ena.value);
}

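/**
 * nitrox_config_bmo_unit - configure BMO unit
 * @ndev: N5 device
 */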
void nitrox_config_bmo_unit(struct nitrox_device *ndev)
{
	union bmo_ctl2 bmo_ctl2;
	u64 offset;

	/* no threshold limits for PCIe */
	offset = BMO_CTL2;
	bmo_ctl2.value = nitrox_read_csr(ndev, offset);
	bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
	nitrox_write_csr(ndev, offset, bmo_ctl2.value);
}

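/**
 * invalidate_lbc - invalidate LBC
 * @ndev: N5 device
 *
 * Starts LBC CAM invalidation and waits for completion.
 */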
void invalidate_lbc(struct nitrox_device *ndev)
{
	union lbc_inval_ctl lbc_ctl;
	union lbc_inval_status lbc_stat;
	u64 offset;

	/* invalidate LBC */
	offset = LBC_INVAL_CTL;
	lbc_ctl.value = nitrox_read_csr(ndev, offset);
	lbc_ctl.s.cam_inval_start = 1;
	nitrox_write_csr(ndev, offset, lbc_ctl.value);

	offset = LBC_INVAL_STATUS;

	do {
		lbc_stat.value = nitrox_read_csr(ndev, offset);
	} while (!lbc_stat.s.done);
}

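/**
 * nitrox_config_lbc_unit - configure LBC unit
 * @ndev: N5 device
 *
 * Invalidates the LBC and enables LBC, PLM and ELM interrupts.
 */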
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
{
	union lbc_int_ena_w1s lbc_int_ena;
	u64 offset;

	invalidate_lbc(ndev);

	/* enable interrupts */
	offset = LBC_INT_ENA_W1S;
	lbc_int_ena.value = 0;
	lbc_int_ena.s.dma_rd_err = 1;
	lbc_int_ena.s.over_fetch_err = 1;
	lbc_int_ena.s.cam_inval_abort = 1;
	lbc_int_ena.s.cam_hard_err = 1;
	nitrox_write_csr(ndev, offset, lbc_int_ena.value);

	offset = LBC_PLM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_PLM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));

	offset = LBC_ELM_VF1_64_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
	offset = LBC_ELM_VF65_128_INT_ENA_W1S;
	nitrox_write_csr(ndev, offset, (~0ULL));
}