// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"

/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
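/*
 * The MSI-X entry for packet ring 'r' is
 * PKT_RING_MSIX_BASE + (r * NR_RING_VECTORS); entry NON_RING_MSIX_BASE
 * is reserved for NPS_CORE_INT_ACTIVE.
 */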

/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	union nps_pkt_slc_cnts slc_cnts;
	struct nitrox_cmdq *cmdq = qvec->cmdq;

	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* New packet on SLC output port */
	if (slc_cnts.s.slc_int)
		tasklet_hi_schedule(&qvec->resp_tasklet);

	return IRQ_HANDLED;
}

static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	/* Write 1 to clear */
	value = nitrox_read_csr(ndev, NPS_CORE_INT);
	nitrox_write_csr(ndev, NPS_CORE_INT, value);

	dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT  0x%016llx\n", value);
}

static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT  0x%016llx\n",
			    pkt_int.value);

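	/*
	 * Like NPS_CORE_INT above, the error type and ring error registers
	 * read below are cleared by writing back the value that was read.
	 */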
	if (pkt_int.s.slc_err) {
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the solicit ports */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI  0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE  0x%016lx\n", value);
		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the input ring */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO  0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI  0x%016lx\n", value);
	}
}

static void clear_pom_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, POM_INT);
	nitrox_write_csr(ndev, POM_INT, value);
	dev_err_ratelimited(DEV(ndev), "POM_INT  0x%016llx\n", value);
}

static void clear_pem_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, PEM0_INT);
	nitrox_write_csr(ndev, PEM0_INT, value);
	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT  0x%016llx\n", value);
}

static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT  0x%016llx\n", lbc_int.value);

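	/*
	 * DMA read errors are reported per EFL cluster; clear the VF error
	 * interrupt registers of each cluster.
	 */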
	if (lbc_int.s.dma_rd_err) {
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}
	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}

static void clear_efl_err_intr(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int core_int;
		u64 value, offset;

		offset = EFL_CORE_INTX(i);
		core_int.value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, core_int.value);
		dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT  0x%016llx\n",
				    i, core_int.value);
		if (core_int.s.se_err) {
			offset = EFL_CORE_SE_ERR_INTX(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}
}

static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, BMI_INT);
	nitrox_write_csr(ndev, BMI_INT, value);
	dev_err_ratelimited(DEV(ndev), "BMI_INT  0x%016llx\n", value);
}

static void nps_core_int_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	/* if pf mode do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
	} else {
		/*
		 * if VF(s) enabled communicate the error information
		 * to VF(s)
		 */
	}
}

/**
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *   mailbox communication
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	struct nitrox_device *ndev = qvec->ndev;
	union nps_core_int_active core_int;

	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

	if (core_int.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int.s.bmi)
		clear_bmi_err_intr(ndev);

	/* Set the resend bit so the interrupt is raised again if work is pending */
	core_int.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

	return IRQ_HANDLED;
}

void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	pci_free_irq_vectors(pdev);
}

int nitrox_register_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int nr_vecs, vec, cpu;
	int ret, i;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * Entry 4: AQMQ ring 1
	 * Entry 5: ZQM ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_vecs = pci_msix_vec_count(pdev);

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
		return ret;
	}
	ndev->num_vecs = nr_vecs;

	ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
	if (!ndev->qvec) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

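	/*
	 * Each ring owns NR_RING_VECTORS consecutive MSI-X entries; only the
	 * NPS packet vector (the first entry of each group) is requested
	 * here. Entry NON_RING_MSIX_BASE is handled separately below for
	 * NPS_CORE_INT_ACTIVE.
	 */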
	/* request irqs for packet rings/ports */
	for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
		qvec = &ndev->qvec[i];

		qvec->ring = i / NR_RING_VECTORS;
		if (qvec->ring >= ndev->nr_queues)
			break;

		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
		if (ret) {
			dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
				qvec->ring);
			goto irq_fail;
		}
		cpu = qvec->ring % num_online_cpus();
		irq_set_affinity_hint(vec, get_cpu_mask(cpu));

		tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
			     (unsigned long)qvec);
		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
		qvec->valid = true;
	}

	/* request irqs for non ring vectors */
	i = NON_RING_MSIX_BASE;
	qvec = &ndev->qvec[i];
	/* set ndev before the irq is requested; the ISR dereferences it */
	qvec->ndev = ndev;

	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
	/* get the vector number */
	vec = pci_irq_vector(pdev, i);
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
		goto irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

irq_fail:
	nitrox_unregister_interrupts(ndev);
	return ret;
}