// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "octeontx2-cpt"
#define OTX2_CPT_DRV_STRING  "Marvell OcteonTX2 CPT Physical Function Driver"

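/*
 * RVU exposes the per-VF mailbox and FLR interrupts through pairs of 64-bit
 * registers: index 0 covers VFs 0-63 and index 1 covers VFs 64-127. The
 * helpers below clear any stale pending bits before touching the
 * set (W1S) / clear (W1C) enable registers.
 */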
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = ((num_vfs - 1) % 64);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 ~0x0ULL);

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
}

static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 ~0x0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
		free_irq(vector, cptpf);
	}
}

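/*
 * Per-VF FLR work handler: notify the AF about the FLR through the AF-PF
 * mailbox (MBOX_MSG_VF_FLR carries the VF's pcifunc), then clear the VF's
 * transaction-pending bit and re-enable its FLR interrupt, which was masked
 * in the hard-IRQ handler below.
 */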
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return;

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}
	/* Clear transaction pending register */
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	/* Re-enable the VF's FLR interrupt */
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

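/*
 * VF FLR hard-IRQ handler: scan both RVU_PF_VFFLR_INTX registers, queue the
 * per-VF FLR work for every pending bit, and ack and mask that bit so the
 * interrupt does not refire until the work handler re-enables it.
 */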
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
}

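/*
 * Request the Mbox0/FLR0 vectors unconditionally and the Mbox1/FLR1 vectors
 * only when more than 64 VFs are enabled, then unmask the interrupts. On
 * failure, unwind exactly the vectors requested so far.
 */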
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for PFVF mbox1 irq\n");
			goto free_flr0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_intrs(cptpf);

	return 0;

free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

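/*
 * Allocate an ordered workqueue and one pre-initialized work item per VF;
 * the ordered queue runs queued FLR works one at a time, so FLR
 * notifications to the AF do not race on the shared AF-PF mailbox.
 */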
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

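/*
 * Set up the PF side of the VF-PF mailbox: the shared mailbox memory is
 * discovered from RVU_PF_VF_BAR4_ADDR (programmed by the AF), remapped
 * write-combining, and one mailbox context plus work item is initialized
 * per VF.
 */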
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

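/*
 * Hook up the single AF-PF mailbox vector and verify the AF is alive by
 * sending a READY message; if the AF does not respond, the probe is
 * deferred so it can be retried once the AF driver is up.
 */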
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev,
			 "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	int err;

	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	return 0;

error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
}

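/*
 * 'kvf_limits' sysfs attribute: bounds the LF count used for kernel crypto
 * VFs. Accepted values are 1 to the number of online CPUs.
 */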
static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

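/*
 * The AF programs a non-zero revision into the RVUM block discovery
 * register once it is up; a zero revision therefore means the AF driver
 * has not initialized yet and the PF probe must be deferred.
 */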
static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

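/*
 * Reset one CPT block through the AF: write 1 to CPT_AF_BLK_RST via the
 * mailbox, then poll until bit 63 of that register clears, giving up with
 * -EBUSY after roughly 10 retries of 10-20 ms each.
 */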
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1, blkaddr);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg, blkaddr);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int ret = 0;

	if (cptpf->has_cpt1) {
		ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

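/*
 * Bit 11 of RVU_PF_BLOCK_ADDRX_DISC is the 'implemented' flag; probe it for
 * BLKADDR_CPT1 to learn whether this silicon has a second CPT block.
 */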
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Check if 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);
	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

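/*
 * sriov_configure(num_vfs > 0) path: bring up the VF-PF mailbox, the FLR
 * workqueue and the VF interrupts, discover engine capabilities and create
 * the default engine groups, and only then enable SR-IOV.
 */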
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);

	return cptpf_sriov_disable(pdev);
}

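/*
 * PF probe: map the BAR registers and the AF-PF mailbox memory (BAR4),
 * allocate the RVU MSI-X vectors, initialize and register the AF-PF
 * mailbox, reset the CPT block(s) and set up engine groups, and finally
 * expose the kvf_limits sysfs group.
 */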
static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	resource_size_t offset, size;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
	if (!cptpf->afpf_mbox_base) {
		dev_err(dev, "Unable to map BAR4\n");
		err = -ENODEV;
		goto clear_drvdata;
	}
	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }	/* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);