// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "octeontx2-cpt"
#define OTX2_CPT_DRV_STRING  "Marvell OcteonTX2 CPT Physical Function Driver"

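/*
 * VF-PF mailbox interrupts are spread across two 64-bit registers:
 * RVU_PF_VFPF_MBOX_INTX(0) carries one bit per VF for VFs 0-63 and
 * RVU_PF_VFPF_MBOX_INTX(1) for VFs 64-127.  Register 0 is therefore
 * armed with a mask covering min(num_vfs, 64) bits and, when more than
 * 64 VFs are requested, register 1 covers the remainder.
 */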
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = ((num_vfs > 64) ? 64 : num_vfs) - 1;
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				GENMASK_ULL(ena_bits, 0));
	}
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

static void cptpf_enable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			~0x0ULL);

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), ~0x0ULL);
}

static void cptpf_disable_vf_flr_intrs(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), ~0x0ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 ~0x0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
		free_irq(vector, cptpf);
	}
}

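/*
 * FLR work handler, one work item per VF.  It notifies the AF of the
 * function level reset by sending a MBOX_MSG_VF_FLR request whose
 * pcifunc FUNC field names the VF (vf + 1, since FUNC 0 denotes the PF
 * itself in the RVU pcifunc convention).  It then clears the VF's
 * transaction-pending bit and re-enables the per-VF FLR interrupt that
 * cptpf_vf_flr_intr() masked when it queued this work.
 */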
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return;

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);

	if (vf >= 64) {
		reg = 1;
		vf = vf - 64;
	}
	/* Clear transaction pending register */
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
	otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
}

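/*
 * Hard-IRQ handler for VF FLR.  Cleaning up requires the AF-PF mailbox,
 * which can sleep, so each pending VF bit is acknowledged, masked and
 * handed off to the FLR workqueue; the work handler above re-enables
 * the bit once the AF has been notified.
 */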
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_intrs(cptpf, num_vfs);
}

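/*
 * Register the VF-PF mailbox and VF FLR interrupt handlers.  The second
 * vector of each pair (Mbox1/FLR1) is needed only when more than 64 VFs
 * are enabled.  The error path unwinds whatever was registered so far,
 * and interrupts are unmasked only after every request_irq() succeeded.
 */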
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for PFVF mbox1 irq\n");
			goto free_flr0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_intrs(cptpf);

	return 0;

free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

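/*
 * Allocate the FLR workqueue and one work item per VF.  An ordered
 * workqueue is used so that at most one FLR notification is in flight
 * to the AF at a time, in the order the interrupts were queued.
 */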
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

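/*
 * Set up the PF side of the VF-PF mailbox.  The AF publishes the base
 * address of the shared mailbox memory through RVU_PF_VF_BAR4_ADDR, and
 * one MBOX_SIZE region per VF is mapped from there.  Each VF state also
 * records intr_idx, the VF's bit position within its 64-bit
 * RVU_PF_VFPF_MBOX_INTX register.
 */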
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);
	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

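/*
 * Register the AF-PF mailbox interrupt and verify that the AF is alive
 * by sending a READY message.  If the AF does not answer, its driver is
 * assumed not to be up yet and the probe is deferred instead of failed.
 */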
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev,
			 "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	int err;

	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	return 0;

error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
}

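/*
 * kvf_limits sysfs attribute: an upper bound on the number of CPT LFs
 * (queues) handed to VFs bound to the kernel crypto driver.  The device
 * structure is zero-allocated, so the limit starts out as 0 (unset) and
 * accepts values in the range [1 - num_online_cpus()].
 */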
static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;

	if (kstrtoint(buf, 0, &lfs_num)) {
		dev_err(dev, "lfs count must be an integer in range [1 - %d]\n",
			num_online_cpus());
		return -EINVAL;
	}
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

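/*
 * Reset the CPT block through the AF.  Writing 1 to CPT_AF_BLK_RST
 * starts the reset; bit 63 of the same register reads back as 1 while
 * the reset is in progress, so it is polled in 10-20 ms steps until it
 * clears, giving up with -EBUSY after roughly a dozen attempts.
 */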
static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

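/*
 * Bring up SR-IOV.  The VF-PF mailbox, FLR workqueue and interrupts
 * must all be in place before pci_enable_sriov() lets the VFs probe,
 * and the engine groups the VFs will request are created beforehand as
 * well.  A module reference is taken while VFs are enabled and dropped
 * again in cptpf_sriov_disable().
 */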
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf->pdev, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);

	return cptpf_sriov_disable(pdev);
}

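/*
 * PF probe: map the PF configuration register BAR and the AF-PF mailbox
 * BAR (BAR4), check that the AF is up, allocate the RVU MSI-X vectors,
 * initialize the AF-PF mailbox and its interrupt, reset the CPT block
 * and read the engine counts, then expose the kvf_limits attribute.
 */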
static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	resource_size_t offset, size;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(dev, offset, size);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENODEV;
		goto clear_drvdata;
	}
	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);