#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"

/* each packet ring uses three MSI-X vectors: NPS PKT, AQMQ and ZQM */
#define NR_RING_VECTORS 3
/* MSI-X table slot reserved for the NPS_CORE_INT_ACTIVE interrupt */
#define NPS_CORE_INT_ACTIVE_ENTRY 192

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: pointer to the ring's bottom half data (struct bh_data)
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct bh_data *slc = data;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
	/* New packet on SLC output port */
	if (pkt_slc_cnts.s.slc_int)
		tasklet_hi_schedule(&slc->resp_handler);

	return IRQ_HANDLED;
}

static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	/* Write 1 to clear */
	value = nitrox_read_csr(ndev, NPS_CORE_INT);
	nitrox_write_csr(ndev, NPS_CORE_INT, value);

	dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}

static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* re-enable the solicit ports that reported errors */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* re-enable the input rings that reported errors */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
	}
}

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, POM_INT);
	nitrox_write_csr(ndev, POM_INT, value);
	dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, PEM0_INT);
	nitrox_write_csr(ndev, PEM0_INT, value);
	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

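	/* DMA read error: clear the per-cluster EFL VF error registers (W1C) */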
	if (lbc_int.s.dma_rd_err) {
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

static void clear_efl_err_intr(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int core_int;
		u64 value, offset;

		offset = EFL_CORE_INTX(i);
		core_int.value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, core_int.value);
		dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
				    i, core_int.value);
		if (core_int.s.se_err) {
			offset = EFL_CORE_SE_ERR_INTX(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, BMI_INT);
	nitrox_write_csr(ndev, BMI_INT, value);
	dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

/**
 * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
 * @ndev: NITROX device
 */
static void clear_nps_core_int_active(struct nitrox_device *ndev)
{
	union nps_core_int_active core_int_active;

	core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

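	/* dispatch each active interrupt source to its clear routine */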
	if (core_int_active.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int_active.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int_active.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int_active.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int_active.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int_active.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int_active.s.bmi)
		clear_bmi_err_intr(ndev);

	/* set resend so the ISR is invoked again if more work is pending */
	core_int_active.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
}

static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_device *ndev = data;

	clear_nps_core_int_active(ndev);

	return IRQ_HANDLED;
}

static int nitrox_enable_msix(struct nitrox_device *ndev)
{
	struct msix_entry *entries;
	char **names;
	int i, nr_entries, ret;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * Entry 4: AQMQ ring 1
	 * Entry 5: ZQM ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
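	/*
	 * e.g. with 64 queues: 64 * NR_RING_VECTORS + 1 = 193 table slots,
	 * the last of which maps to hardware entry 192
	 */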
	nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
	entries = kzalloc_node(nr_entries * sizeof(struct msix_entry),
			       GFP_KERNEL, ndev->node);
	if (!entries)
		return -ENOMEM;

	names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}

	/* fill the MSI-X entry indices */
	for (i = 0; i < (nr_entries - 1); i++)
		entries[i].entry = i;

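	/*
	 * the last table slot always requests hardware entry 192
	 * (NPS_CORE_INT_ACTIVE), even when fewer rings are configured
	 */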
	entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		names[i] = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!names[i]) {
			ret = -ENOMEM;
			goto msix_fail;
		}
	}
	ndev->msix.entries = entries;
	ndev->msix.names = names;
	ndev->msix.nr_entries = nr_entries;

	ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
				    ndev->msix.nr_entries);
	if (ret) {
		dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
			ret);
		goto msix_fail;
	}
	return 0;

msix_fail:
	for (i = 0; i < nr_entries; i++)
		kfree(names[i]);

	kfree(entries);
	kfree(names);
	return ret;
}

static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
{
	int i;

	if (!ndev->bh.slc)
		return;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct bh_data *bh = &ndev->bh.slc[i];

		tasklet_disable(&bh->resp_handler);
		tasklet_kill(&bh->resp_handler);
	}
	kfree(ndev->bh.slc);
	ndev->bh.slc = NULL;
}

static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
{
	u32 size;
	int i;

	size = ndev->nr_queues * sizeof(struct bh_data);
	ndev->bh.slc = kzalloc(size, GFP_KERNEL);
	if (!ndev->bh.slc)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct bh_data *bh = &ndev->bh.slc[i];
		u64 offset;

		offset = NPS_PKT_SLC_CNTSX(i);
		/* precompute the completion count CSR address */
		bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
		bh->cmdq = &ndev->pkt_cmdqs[i];

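		/* bottom half that processes completed requests for this ring */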
		tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
			     (unsigned long)bh);
	}

	return 0;
}

static int nitrox_request_irqs(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct msix_entry *msix_ent = ndev->msix.entries;
	int nr_ring_vectors, i = 0, ring, cpu, ret;
	char *name;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
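	/*
	 * only the first vector of each ring triple (the NPS PKT entry) gets
	 * a handler, so step by NR_RING_VECTORS; e.g. entry 6 belongs to
	 * ring 6 / NR_RING_VECTORS = 2
	 */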

	/* request irq for pkt ring/ports only */
	while (i < nr_ring_vectors) {
		name = ndev->msix.names[i];
		ring = (i / NR_RING_VECTORS);
		snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
			 ndev->idx, ring);

		ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
				  name, &ndev->bh.slc[ring]);
		if (ret) {
			dev_err(&pdev->dev, "failed to get irq %d for %s\n",
				msix_ent[i].vector, name);
			return ret;
		}
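		/* spread ring interrupts round-robin across the online CPUs */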
		cpu = ring % num_online_cpus();
		irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));

		set_bit(i, ndev->msix.irqs);
		i += NR_RING_VECTORS;
	}

	/* Request IRQ for NPS_CORE_INT_ACTIVE */
	name = ndev->msix.names[i];
	snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
	ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to get irq %d for %s\n",
			msix_ent[i].vector, name);
		return ret;
	}
	set_bit(i, ndev->msix.irqs);

	return 0;
}

static void nitrox_disable_msix(struct nitrox_device *ndev)
{
	struct msix_entry *msix_ent = ndev->msix.entries;
	char **names = ndev->msix.names;
	int i = 0, ring, nr_ring_vectors;

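	/* all but the last entry are ring vectors; the last is NPS_CORE_INT_ACTIVE */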
	nr_ring_vectors = ndev->msix.nr_entries - 1;

	/* clear pkt ring irqs */
	while (i < nr_ring_vectors) {
		if (test_and_clear_bit(i, ndev->msix.irqs)) {
			ring = (i / NR_RING_VECTORS);
			irq_set_affinity_hint(msix_ent[i].vector, NULL);
			free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
		}
		i += NR_RING_VECTORS;
	}
	/* the core int irq may not have been requested if init failed early */
	if (test_and_clear_bit(i, ndev->msix.irqs)) {
		irq_set_affinity_hint(msix_ent[i].vector, NULL);
		free_irq(msix_ent[i].vector, ndev);
	}

	kfree(ndev->msix.entries);
	for (i = 0; i < ndev->msix.nr_entries; i++)
		kfree(names[i]);

	kfree(names);
	pci_disable_msix(ndev->pdev);
}

/**
 * nitrox_pf_cleanup_isr - cleanup PF MSI-X vectors and IRQs
 * @ndev: NITROX device
 */
void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
{
	nitrox_disable_msix(ndev);
	nitrox_cleanup_pkt_slc_bh(ndev);
}

/**
 * nitrox_pf_init_isr - initialize PF MSI-X vectors and IRQs
 * @ndev: NITROX device
 *
 * Return: 0 on success, a negative value on failure.
 */
int nitrox_pf_init_isr(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_setup_pkt_slc_bh(ndev);
	if (err)
		return err;

	err = nitrox_enable_msix(ndev);
	if (err)
		goto msix_fail;

	err = nitrox_request_irqs(ndev);
	if (err)
		goto irq_fail;

	return 0;

irq_fail:
	nitrox_disable_msix(ndev);
msix_fail:
	nitrox_cleanup_pkt_slc_bh(ndev);
	return err;
}
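
/*
 * Typical usage from the PF probe/remove paths (a sketch; the exact call
 * sites are assumptions, not shown in this file):
 *
 *	err = nitrox_pf_init_isr(ndev);
 *	if (err)
 *		return err;
 *	...
 *	nitrox_pf_cleanup_isr(ndev);
 */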