// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

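/*
 * VF-to-PF mailbox interrupts for up to 128 VFs are split across two
 * 64-bit registers: register 0 serves VFs 0-63, register 1 VFs 64-127.
 */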
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = ((num_vfs - 1) % 64);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

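/*
 * Deferred FLR handling: forward the VF FLR to the AF over the AF-PF
 * mailbox and, once the AF acknowledges, clear the VF's transaction
 * pending bit and re-enable its FLR interrupt (masked in the hard IRQ
 * handler below).
 */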
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	mutex_lock(&pf->lock);
	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req) {
		mutex_unlock(&pf->lock);
		return;
	}

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);
	if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* Clear transaction pending register */
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}
	mutex_unlock(&pf->lock);
}

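/*
 * Hard IRQ handler for VF FLR: queue the per-VF FLR work, then clear
 * and mask the interrupt until the work handler has processed the FLR.
 */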
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

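/*
 * Hard IRQ handler for VF ME (master enable) events: clear the VF's
 * transaction pending bit and acknowledge the interrupt.
 */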
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

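/*
 * Allocate an ordered workqueue and one work item per VF so FLRs are
 * handled outside of interrupt context, one request at a time.
 */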
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

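/*
 * Set up the VF-PF mailbox: the mailbox region's base address comes
 * from RVU_PF_VF_MBOX_ADDR on CN10K or RVU_PF_VF_BAR4_ADDR otherwise,
 * and each VF gets its own mailbox work item.
 */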
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev, "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	mutex_init(&cptpf->lock);
	return 0;

error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
}

static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", cptpf->kvf_limits);
}

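/*
 * Limit on the number of CPT LFs made available to the kernel crypto
 * VF; accepted values range from 1 to the number of online CPUs.
 */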
static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

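/*
 * Reset one CPT block through the AF: set CPT_AF_BLK_RST and poll its
 * busy bit (bit 63) in 10-20 ms steps until the reset completes,
 * returning -EBUSY if it does not.
 */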
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1, blkaddr);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg, blkaddr);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int ret = 0;

	if (cptpf->has_cpt1) {
		ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Check if 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);
	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);
	else
		return cptpf_sriov_disable(pdev);
}

static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;

	err = otx2_cpt_register_dl(cptpf);
	if (err)
		goto sysfs_grp_del;

	return 0;

sysfs_grp_del:
	sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	otx2_cpt_unregister_dl(cptpf);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);