// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

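/*
 * A PF can own up to 128 VFs.  Per-VF mailbox, FLR and ME interrupt state is
 * split across two 64-bit registers: index 0 covers VFs 0-63 and index 1
 * covers VFs 64-127.
 */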
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = (num_vfs > 64) ? 63 : num_vfs - 1;
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

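	/* The position of this work item in the flr_work array is the VF number */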
	vf = flr_work - pf->flr_work;

	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return;

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}
	/* Clear transaction pending register */
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
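	/* Re-enable the VF FLR interrupt that was masked in the IRQ handler */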
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

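/*
 * Hard IRQ handler for VF FLR: hand the actual FLR processing off to the
 * workqueue, then ack the interrupt and keep it masked until the work
 * handler has informed the AF and re-enabled it.
 */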
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

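/*
 * Hard IRQ handler for VF ME (master enable): clear the VF's transaction
 * pending bit and ack the interrupt.
 */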
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

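/*
 * Set up the VF-PF mailbox: on CN10K the mailbox base address comes from
 * RVU_PF_VF_MBOX_ADDR, on OcteonTX2 from RVU_PF_VF_BAR4_ADDR; the mapped
 * region holds one MBOX_SIZE slot per VF.
 */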
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev,
			 "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	return 0;

error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
}

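/*
 * kvf_limits sysfs attribute: limit on the number of CPT LFs handed to
 * kernel crypto VFs; the accepted range is [1, num_online_cpus()].
 */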
static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);

static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

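/*
 * Reset one CPT block: set CPT_AF_BLK_RST via the AF mailbox, then poll the
 * busy bit (bit 63) until the reset completes, giving up after roughly
 * 100-200 ms.
 */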
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1, blkaddr);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg, blkaddr);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int ret = 0;

	if (cptpf->has_cpt1) {
		ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

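/* Bit 11 of RVU_PF_BLOCK_ADDRX_DISC reports whether the block is implemented */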
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret;

	/* Check if the 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);
	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	return otx2_cpt_disable_all_cores(cptpf);
}


static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);

	return cptpf_sriov_disable(pdev);
}

static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

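	/* RVU hardware generates 48-bit DMA addresses */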
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }	/* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);