// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

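/*
 * Enable VF-PF mailbox interrupts for the first num_vfs VFs. VFs 0-63
 * are served by VFPF_MBOX_INT(0) and VFs 64-127 by VFPF_MBOX_INT(1),
 * so the enable mask is split across two registers.
 */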
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = (num_vfs > 64) ? 63 : num_vfs - 1;
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

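/*
 * Deferred FLR handling: tell the AF via a MBOX_MSG_VF_FLR message that
 * the VF was reset, then clear the VF's transaction pending bit and
 * re-enable its FLR interrupt (masked in cptpf_vf_flr_intr()).
 */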
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;

	mutex_lock(&pf->lock);
	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req) {
		mutex_unlock(&pf->lock);
		return;
	}

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);
	if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* Clear transaction pending register */
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}
	mutex_unlock(&pf->lock);
}

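/*
 * VF FLR interrupt handler: for every VF with a pending FLR, queue the
 * per-VF FLR work item, then clear and mask the interrupt until the
 * work item has notified the AF.
 */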
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

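/*
 * VF ME (master enable) interrupt handler: clear the VF's transaction
 * pending bit and acknowledge the interrupt.
 */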
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

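/*
 * Register the VF-PF mailbox, VF FLR and VF ME interrupt handlers. The
 * second set of vectors (MBOX1/FLR1/ME1) is only required when more
 * than 64 VFs are enabled.
 */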
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev, "IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

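/* Allocate an ordered workqueue and one FLR work item per VF */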
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

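/*
 * Set up the VF-PF mailbox: the mailbox region base address is read
 * from hardware (it is configured by the AF), remapped, and one mbox
 * work item is initialized per VF.
 */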
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq = alloc_workqueue("cpt_vfpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

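/*
 * Register the AF-PF mailbox interrupt and check that the AF is
 * responding to a READY message; if not, disable the interrupt again
 * and defer the probe until the AF driver is up.
 */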
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev, "IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev, "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq = alloc_workqueue("cpt_afpf_mailbox",
					      WQ_UNBOUND | WQ_HIGHPRI |
					      WQ_MEM_RECLAIM, 1);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	mutex_init(&cptpf->lock);
	return 0;

error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
}

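/*
 * kvf_limits: sysfs knob limiting the number of CPT LFs used for the
 * kernel crypto VF. Written via sysfs, e.g. (the BDF below is only an
 * example, use the CPT PF's actual address):
 *
 *   echo 4 > /sys/bus/pci/devices/0002:20:00.0/kvf_limits
 */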
static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

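/*
 * Reset one CPT block via the AF mailbox: write CPT_AF_BLK_RST and
 * poll until the busy bit (bit 63) clears, giving up after ~10 polls
 * of 10-20 ms each.
 */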
static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	int timeout = 10, ret;
	u64 reg = 0;

	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_BLK_RST, 0x1, blkaddr);
	if (ret)
		return ret;

	do {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_BLK_RST, &reg, blkaddr);
		if (ret)
			return ret;

		if (!((reg >> 63) & 0x1))
			break;

		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;
	} while (1);

	return ret;
}

static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
{
	int ret = 0;

	if (cptpf->has_cpt1) {
		ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_device_reset(cptpf, BLKADDR_CPT0);
}

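/* Bit 11 of the RVU block discovery register is the "implemented" bit */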
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

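/*
 * One-time CPT PF initialization: reset the CPT block(s), read the
 * available SE/IE/AE engine counts from CPT_AF_CONSTANTS1 and disable
 * all engine cores until engine groups are configured.
 */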
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* Check if 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);
	/* Reset the CPT PF device */
	ret = cptpf_device_reset(cptpf);
	if (ret)
		return ret;

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

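/*
 * Called with num_vfs > 0 when SR-IOV is enabled from user space, e.g.
 * (the BDF below is only an example, use the CPT PF's actual address):
 *
 *   echo 2 > /sys/bus/pci/devices/0002:20:00.0/sriov_numvfs
 *
 * Engine capabilities are discovered and engine groups (with their
 * microcode) are created here, on first VF enable, not at probe time.
 */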
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);

	return cptpf_sriov_disable(pdev);
}

static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map PF's configuration registers */
	err = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't get PCI resources, error %d\n", err);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	cptpf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			RVU_PF_INT_VEC_CNT);
		goto clear_drvdata;
	}
	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;

	err = otx2_cpt_register_dl(cptpf);
	if (err)
		goto sysfs_grp_del;

	return 0;

sysfs_grp_del:
	sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	otx2_cpt_unregister_dl(cptpf);
	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);