xref: /openbmc/linux/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c (revision c0688ec002a451d04a51d43b849765c5ce6cb36f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7 
8 #include <linux/bitfield.h>
9 #include <linux/pci.h>
10 #include "rvu_struct.h"
11 #include "rvu_reg.h"
12 #include "mbox.h"
13 #include "rvu.h"
14 
15 /* CPT PF device id */
16 #define	PCI_DEVID_OTX2_CPT_PF	0xA0FD
17 #define	PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
18 
19 /* Length of initial context fetch in 128 byte words */
20 #define CPT_CTX_ILEN    1ULL
21 
/* Gather per-engine busy/free status for engines [e_min, e_max) into the
 * rsp->busy_sts_<etype> / rsp->free_sts_<etype> bitmaps (bit i of the map
 * corresponds to engine e_min + i).
 *
 * NOTE: this macro is not hygienic — it relies on 'rvu', 'blkaddr' and a
 * writable u64 'reg' existing at the expansion site, and it clobbers that
 * 'reg'.  See get_eng_sts() for the expected usage.
 */
#define cpt_get_eng_sts(e_min, e_max, rsp, etype)                   \
({                                                                  \
	u64 free_sts = 0, busy_sts = 0;                             \
	typeof(rsp) _rsp = rsp;                                     \
	u32 e, i;                                                   \
								    \
	for (e = (e_min), i = 0; e < (e_max); e++, i++) {           \
		reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); \
		if (reg & 0x1)                                      \
			busy_sts |= 1ULL << i;                      \
								    \
		if (reg & 0x2)                                      \
			free_sts |= 1ULL << i;                      \
	}                                                           \
	(_rsp)->busy_sts_##etype = busy_sts;                        \
	(_rsp)->free_sts_##etype = free_sts;                        \
})
39 
40 static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
41 {
42 	struct rvu_block *block = ptr;
43 	struct rvu *rvu = block->rvu;
44 	int blkaddr = block->addr;
45 	u64 reg, val;
46 	int i, eng;
47 	u8 grp;
48 
49 	reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
50 	dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
51 
52 	i = -1;
53 	while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
54 		switch (vec) {
55 		case 0:
56 			eng = i;
57 			break;
58 		case 1:
59 			eng = i + 64;
60 			break;
61 		case 2:
62 			eng = i + 128;
63 			break;
64 		}
65 		grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
66 		/* Disable and enable the engine which triggers fault */
67 		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
68 		val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
69 		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
70 
71 		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
72 		rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
73 	}
74 	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
75 
76 	return IRQ_HANDLED;
77 }
78 
/* IRQ thunk: engines 0..63 fault vector. */
static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
{
	return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
}
83 
/* IRQ thunk: engines 64..127 fault vector. */
static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
{
	return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
}
88 
/* IRQ thunk: CN10K-only third fault vector (engines 128..). */
static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
{
	return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
}
93 
94 static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
95 {
96 	struct rvu_block *block = ptr;
97 	struct rvu *rvu = block->rvu;
98 	int blkaddr = block->addr;
99 	u64 reg;
100 
101 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
102 	dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
103 
104 	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
105 	return IRQ_HANDLED;
106 }
107 
108 static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
109 {
110 	struct rvu_block *block = ptr;
111 	struct rvu *rvu = block->rvu;
112 	int blkaddr = block->addr;
113 	u64 reg;
114 
115 	reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
116 	dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
117 
118 	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
119 	return IRQ_HANDLED;
120 }
121 
122 static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
123 					 irq_handler_t handler,
124 					 const char *name)
125 {
126 	struct rvu *rvu = block->rvu;
127 	int ret;
128 
129 	ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
130 			  name, block);
131 	if (ret) {
132 		dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
133 		return ret;
134 	}
135 
136 	WARN_ON(rvu->irq_allocated[irq_offs]);
137 	rvu->irq_allocated[irq_offs] = true;
138 	return 0;
139 }
140 
/* CN10K variant: mask every CPT AF interrupt source on @block and free
 * all IRQs previously requested for it.  @off is the block's MSI-X
 * vector offset (from CPT_PRIV_AF_INT_CFG).
 */
static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
{
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	int i;

	/* Disable all CPT AF interrupts */
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
	/* FLT2 uses only the low 16 bits — mirrors the 0xFFFF enable in
	 * cpt_10k_register_interrupts()
	 */
	rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);

	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	/* Free only the vectors that were successfully requested */
	for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[off + i]) {
			free_irq(pci_irq_vector(rvu->pdev, off + i), block);
			rvu->irq_allocated[off + i] = false;
		}
}
161 
/* Tear down CPT AF interrupts for one CPT block.  Dispatches to the
 * CN10K variant on non-OTX2 silicon; silently returns when the block is
 * not implemented or its MSI-X vector offset was never programmed.
 */
static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int i, offs;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return;
	/* MSI-X vector base for this block, low 11 bits of the priv CFG */
	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev,
			 "Failed to get CPT_AF_INT vector offsets\n");
		return;
	}
	block = &hw->block[blkaddr];
	if (!is_rvu_otx2(rvu))
		return cpt_10k_unregister_interrupts(block, offs);

	/* Disable all CPT AF interrupts */
	for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);

	/* Free only the vectors that were successfully requested */
	for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
			rvu->irq_allocated[offs + i] = false;
		}
}
192 
193 void rvu_cpt_unregister_interrupts(struct rvu *rvu)
194 {
195 	cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
196 	cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
197 }
198 
/* CN10K variant: request the three engine-fault vectors plus the RVU and
 * RAS vectors for @block, enabling each interrupt source as its handler
 * is installed.  @off is the block's MSI-X vector offset.  On any
 * failure, everything registered so far (for all CPT blocks) is undone.
 */
static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
	struct rvu *rvu = block->rvu;
	int blkaddr = block->addr;
	irq_handler_t flt_fn;
	int i, ret;

	for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
		sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);

		/* The loop range guarantees 'i' is FLT0/FLT1/FLT2, so
		 * flt_fn is always assigned before use.
		 */
		switch (i) {
		case CPT_10K_AF_INT_VEC_FLT0:
			flt_fn = rvu_cpt_af_flt0_intr_handler;
			break;
		case CPT_10K_AF_INT_VEC_FLT1:
			flt_fn = rvu_cpt_af_flt1_intr_handler;
			break;
		case CPT_10K_AF_INT_VEC_FLT2:
			flt_fn = rvu_cpt_af_flt2_intr_handler;
			break;
		}
		ret = rvu_cpt_do_register_interrupt(block, off + i,
						    flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
		if (ret)
			goto err;
		/* FLT2 carries only 16 valid bits; FLT0/FLT1 use all 64 */
		if (i == CPT_10K_AF_INT_VEC_FLT2)
			rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
		else
			rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
	}

	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
					    rvu_cpt_af_rvu_intr_handler,
					    "CPTAF RVU");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);

	ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
					    rvu_cpt_af_ras_intr_handler,
					    "CPTAF RAS");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);

	return 0;
err:
	rvu_cpt_unregister_interrupts(rvu);
	return ret;
}
249 
/* Register CPT AF interrupts for one CPT block.  Reads the block's
 * MSI-X vector offset, dispatches to the CN10K variant on non-OTX2
 * silicon, otherwise installs the two OTX2 fault vectors plus the RVU
 * and RAS vectors, enabling each source as it goes.  Returns 0 if the
 * block is absent or the vector offset is unprogrammed (nothing to do).
 */
static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	irq_handler_t flt_fn;
	int i, offs, ret = 0;

	if (!is_block_implemented(rvu->hw, blkaddr))
		return 0;

	block = &hw->block[blkaddr];
	/* MSI-X vector base for this block, low 11 bits of the priv CFG */
	offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
	if (!offs) {
		dev_warn(rvu->dev,
			 "Failed to get CPT_AF_INT vector offsets\n");
		return 0;
	}

	if (!is_rvu_otx2(rvu))
		return cpt_10k_register_interrupts(block, offs);

	/* OTX2 has only FLT0/FLT1; the loop range guarantees flt_fn is
	 * always assigned before use.
	 */
	for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
		sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
		switch (i) {
		case CPT_AF_INT_VEC_FLT0:
			flt_fn = rvu_cpt_af_flt0_intr_handler;
			break;
		case CPT_AF_INT_VEC_FLT1:
			flt_fn = rvu_cpt_af_flt1_intr_handler;
			break;
		}
		ret = rvu_cpt_do_register_interrupt(block, offs + i,
						    flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
		if (ret)
			goto err;
		rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
	}

	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
					    rvu_cpt_af_rvu_intr_handler,
					    "CPTAF RVU");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);

	ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
					    rvu_cpt_af_ras_intr_handler,
					    "CPTAF RAS");
	if (ret)
		goto err;
	rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);

	return 0;
err:
	/* Roll back everything registered so far, for all CPT blocks */
	rvu_cpt_unregister_interrupts(rvu);
	return ret;
}
307 
308 int rvu_cpt_register_interrupts(struct rvu *rvu)
309 {
310 	int ret;
311 
312 	ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
313 	if (ret)
314 		return ret;
315 
316 	return cpt_register_interrupts(rvu, BLKADDR_CPT1);
317 }
318 
319 static int get_cpt_pf_num(struct rvu *rvu)
320 {
321 	int i, domain_nr, cpt_pf_num = -1;
322 	struct pci_dev *pdev;
323 
324 	domain_nr = pci_domain_nr(rvu->pdev->bus);
325 	for (i = 0; i < rvu->hw->total_pfs; i++) {
326 		pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
327 		if (!pdev)
328 			continue;
329 
330 		if (pdev->device == PCI_DEVID_OTX2_CPT_PF ||
331 		    pdev->device == PCI_DEVID_OTX2_CPT10K_PF) {
332 			cpt_pf_num = i;
333 			put_device(&pdev->dev);
334 			break;
335 		}
336 		put_device(&pdev->dev);
337 	}
338 	return cpt_pf_num;
339 }
340 
341 static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
342 {
343 	int cpt_pf_num = rvu->cpt_pf_num;
344 
345 	if (rvu_get_pf(pcifunc) != cpt_pf_num)
346 		return false;
347 	if (pcifunc & RVU_PFVF_FUNC_MASK)
348 		return false;
349 
350 	return true;
351 }
352 
353 static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
354 {
355 	int cpt_pf_num = rvu->cpt_pf_num;
356 
357 	if (rvu_get_pf(pcifunc) != cpt_pf_num)
358 		return false;
359 	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
360 		return false;
361 
362 	return true;
363 }
364 
365 static int validate_and_get_cpt_blkaddr(int req_blkaddr)
366 {
367 	int blkaddr;
368 
369 	blkaddr = req_blkaddr ? req_blkaddr : BLKADDR_CPT0;
370 	if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
371 		return -EINVAL;
372 
373 	return blkaddr;
374 }
375 
/* Mbox handler: configure the requester's already-attached CPT LFs.
 * For every LF slot owned by the sender this programs the engine-group
 * mask / priority / (CN10K) context-fetch length in CPT_AF_LFX_CTL, and
 * the NIX/SSO PF_FUNC bindings in CPT_AF_LFX_CTL2.  Validates that the
 * requested NIX/SSO PF_FUNCs actually have LFs of those types mapped.
 */
int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
				  struct cpt_lf_alloc_req_msg *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int cptlf, blkaddr;
	int num_lfs, slot;
	u64 val;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* At least one engine group must be selected */
	if (req->eng_grpmsk == 0x0)
		return CPT_AF_ERR_GRP_INVALID;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return CPT_AF_ERR_LF_INVALID;

	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
	if (req->nix_pf_func) {
		/* If default, use 'this' CPTLF's PFFUNC */
		if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
			req->nix_pf_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
			return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
	}

	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
	if (req->sso_pf_func) {
		/* If default, use 'this' CPTLF's PFFUNC */
		if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
			req->sso_pf_func = pcifunc;
		if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
			return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
	}

	for (slot = 0; slot < num_lfs; slot++) {
		cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (cptlf < 0)
			return CPT_AF_ERR_LF_INVALID;

		/* Set CPT LF group and priority:
		 * eng_grpmsk in bits [63:48], bit 0 = priority.
		 */
		val = (u64)req->eng_grpmsk << 48 | 1;
		if (!is_rvu_otx2(rvu)) {
			/* CN10K: context-fetch length at bits starting 17;
			 * fall back to CPT_CTX_ILEN when not supplied.
			 */
			if (req->ctx_ilen_valid)
				val |= (req->ctx_ilen << 17);
			else
				val |= (CPT_CTX_ILEN << 17);
		}

		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

		/* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
		 * on reset.
		 */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
		val |= ((u64)req->nix_pf_func << 48 |
			(u64)req->sso_pf_func << 32);
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
	}

	return 0;
}
445 
/* Tear down and reset every CPT LF the sender owns on @blkaddr.
 * Returns 0 when the sender owns no LFs on this block (nothing to do);
 * a reset failure is only logged, not propagated, so remaining LFs are
 * still processed.
 */
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
	u16 pcifunc = req->hdr.pcifunc;
	int num_lfs, cptlf, slot, err;
	struct rvu_block *block;

	block = &rvu->hw->block[blkaddr];
	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					block->addr);
	if (!num_lfs)
		return 0;

	for (slot = 0; slot < num_lfs; slot++) {
		cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
		if (cptlf < 0)
			return CPT_AF_ERR_LF_INVALID;

		/* Perform teardown */
		rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);

		/* Reset LF */
		err = rvu_lf_reset(rvu, block, cptlf);
		if (err) {
			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
				block->addr, cptlf);
		}
	}

	return 0;
}
476 
477 int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
478 				 struct msg_rsp *rsp)
479 {
480 	int ret;
481 
482 	ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
483 	if (ret)
484 		return ret;
485 
486 	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
487 		ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
488 
489 	return ret;
490 }
491 
/* Enable/disable inline-IPsec inbound processing on one CPT LF.
 * In CPT_AF_LFX_CTL: bit 16 = outbound enable, bit 9 = inbound enable,
 * bits starting at 8 select the NIX block.  Inbound and outbound must
 * never be enabled together on the same LF (HRM requirement).
 */
static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
					struct cpt_inline_ipsec_cfg_msg *req)
{
	u16 sso_pf_func = req->sso_pf_func;
	u8 nix_sel;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	if (req->enable && (val & BIT_ULL(16))) {
		/* IPSec inline outbound path is already enabled for a given
		 * CPT LF, HRM states that inline inbound & outbound paths
		 * must not be enabled at the same time for a given CPT LF
		 */
		return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
	}
	/* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
	if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
		return CPT_AF_ERR_SSO_PF_FUNC_INVALID;

	/* LFs on CPT1 pair with NIX1, CPT0 with NIX0 */
	nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
	/* Enable CPT LF for IPsec inline inbound operations */
	if (req->enable)
		val |= BIT_ULL(9);
	else
		val &= ~BIT_ULL(9);

	/* NOTE(review): nix_sel is OR-ed in but never cleared first —
	 * presumably fine because the field starts at 0; confirm.
	 */
	val |= (u64)nix_sel << 8;
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

	if (sso_pf_func) {
		/* Set SSO_PF_FUNC */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val |= (u64)sso_pf_func << 32;
		val |= (u64)req->nix_pf_func << 48;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
	}
	if (req->sso_pf_func_ovrd)
		/* Set SSO_PF_FUNC_OVRD for inline IPSec */
		rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);

	/* Configure the X2P Link register with the cpt base channel number and
	 * range of channels it should propagate to X2P
	 */
	if (!is_rvu_otx2(rvu)) {
		val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
		val |= (u64)rvu->hw->cpt_chan_base;

		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
		rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
	}

	return 0;
}
545 
/* Enable/disable inline-IPsec outbound processing on one CPT LF.
 * Mirror of the inbound path: bit 16 of CPT_AF_LFX_CTL is the outbound
 * enable, bit 9 is inbound; both must never be set together (HRM).  If a
 * NIX PF_FUNC is supplied, it is bound in CTL2 and the NIX block select
 * is derived from that PF_FUNC's NIX block.
 */
static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
					 struct cpt_inline_ipsec_cfg_msg *req)
{
	u16 nix_pf_func = req->nix_pf_func;
	int nix_blkaddr;
	u8 nix_sel;
	u64 val;

	val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	if (req->enable && (val & BIT_ULL(9))) {
		/* IPSec inline inbound path is already enabled for a given
		 * CPT LF, HRM states that inline inbound & outbound paths
		 * must not be enabled at the same time for a given CPT LF
		 */
		return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
	}

	/* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
	if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
		return CPT_AF_ERR_NIX_PF_FUNC_INVALID;

	/* Enable CPT LF for IPsec inline outbound operations */
	if (req->enable)
		val |= BIT_ULL(16);
	else
		val &= ~BIT_ULL(16);
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);

	if (nix_pf_func) {
		/* Set NIX_PF_FUNC */
		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
		val |= (u64)nix_pf_func << 48;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);

		/* Select the NIX block the bound PF_FUNC actually uses */
		nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
		nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;

		val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
		val |= (u64)nix_sel << 8;
		rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
	}

	return 0;
}
590 
591 int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
592 					  struct cpt_inline_ipsec_cfg_msg *req,
593 					  struct msg_rsp *rsp)
594 {
595 	u16 pcifunc = req->hdr.pcifunc;
596 	struct rvu_block *block;
597 	int cptlf, blkaddr, ret;
598 	u16 actual_slot;
599 
600 	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
601 					    req->slot, &actual_slot);
602 	if (blkaddr < 0)
603 		return CPT_AF_ERR_LF_INVALID;
604 
605 	block = &rvu->hw->block[blkaddr];
606 
607 	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
608 	if (cptlf < 0)
609 		return CPT_AF_ERR_LF_INVALID;
610 
611 	switch (req->dir) {
612 	case CPT_INLINE_INBOUND:
613 		ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
614 		break;
615 
616 	case CPT_INLINE_OUTBOUND:
617 		ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
618 		break;
619 
620 	default:
621 		return CPT_AF_ERR_PARAM;
622 	}
623 
624 	return ret;
625 }
626 
/* Access-control for the CPT register read/write mbox: decide whether
 * the sender may touch req->reg_offset.  PFs and VFs may access their
 * own LF's CTL/CTL2 registers; additionally a PF (no VF bits) may touch
 * a small whitelist of AF-global and per-engine registers.  All offsets
 * must be 8-byte aligned.
 */
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
	u64 offset = req->reg_offset;
	int blkaddr, num_lfs, lf;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return false;

	/* Registers that can be accessed from PF/VF */
	if ((offset & 0xFF000) ==  CPT_AF_LFX_CTL(0) ||
	    (offset & 0xFF000) ==  CPT_AF_LFX_CTL2(0)) {
		if (offset & 7)
			return false;

		/* Low 12 bits index the LF within the register array */
		lf = (offset & 0xFFF) >> 3;
		block = &rvu->hw->block[blkaddr];
		pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
		num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
		if (lf >= num_lfs)
			/* Slot is not valid for that PF/VF */
			return false;

		/* Translate local LF used by VFs to global CPT LF */
		lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
				req->hdr.pcifunc, lf);
		if (lf < 0)
			return false;

		return true;
	} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
		/* Registers that can be accessed from PF */
		switch (offset) {
		case CPT_AF_DIAG:
		case CPT_AF_CTL:
		case CPT_AF_PF_FUNC:
		case CPT_AF_BLK_RST:
		case CPT_AF_CONSTANTS1:
		case CPT_AF_CTX_FLUSH_TIMER:
			return true;
		}

		/* Per-engine register ranges, matched by 4K page */
		switch (offset & 0xFF000) {
		case CPT_AF_EXEX_STS(0):
		case CPT_AF_EXEX_CTL(0):
		case CPT_AF_EXEX_CTL2(0):
		case CPT_AF_EXEX_UCODE_BASE(0):
			if (offset & 7)
				return false;
			break;
		default:
			return false;
		}
		return true;
	}
	return false;
}
686 
/* Mbox handler: perform a single CPT AF register read or write on behalf
 * of the CPT PF/VF.  The request fields are echoed into the response
 * BEFORE validation so that error replies still identify the request.
 */
int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
					struct cpt_rd_wr_reg_msg *req,
					struct cpt_rd_wr_reg_msg *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	rsp->reg_offset = req->reg_offset;
	rsp->ret_val = req->ret_val;
	rsp->is_write = req->is_write;

	/* Reject offsets outside the sender's allowed register set */
	if (!is_valid_offset(rvu, req))
		return CPT_AF_ERR_ACCESS_DENIED;

	if (req->is_write)
		rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
	else
		rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);

	return 0;
}
716 
717 static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
718 {
719 	if (is_rvu_otx2(rvu))
720 		return;
721 
722 	rsp->ctx_mis_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_MIS_PC);
723 	rsp->ctx_hit_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_HIT_PC);
724 	rsp->ctx_aop_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_AOP_PC);
725 	rsp->ctx_aop_lat_pc = rvu_read64(rvu, blkaddr,
726 					 CPT_AF_CTX_AOP_LATENCY_PC);
727 	rsp->ctx_ifetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_IFETCH_PC);
728 	rsp->ctx_ifetch_lat_pc = rvu_read64(rvu, blkaddr,
729 					    CPT_AF_CTX_IFETCH_LATENCY_PC);
730 	rsp->ctx_ffetch_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
731 	rsp->ctx_ffetch_lat_pc = rvu_read64(rvu, blkaddr,
732 					    CPT_AF_CTX_FFETCH_LATENCY_PC);
733 	rsp->ctx_wback_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
734 	rsp->ctx_wback_lat_pc = rvu_read64(rvu, blkaddr,
735 					   CPT_AF_CTX_FFETCH_LATENCY_PC);
736 	rsp->ctx_psh_pc = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FFETCH_PC);
737 	rsp->ctx_psh_lat_pc = rvu_read64(rvu, blkaddr,
738 					 CPT_AF_CTX_FFETCH_LATENCY_PC);
739 	rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR);
740 	rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID);
741 	rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER);
742 
743 	rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME);
744 	rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
745 	rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
746 	rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
747 	rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
748 	rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0));
749 	rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1));
750 }
751 
/* Read the per-engine busy/free bitmaps for the three engine classes.
 * CPT_AF_CONSTANTS1 encodes the counts as SE[15:0], IE[31:16], AE[47:32];
 * engines are numbered SEs first, then IEs, then AEs.
 * NOTE: the cpt_get_eng_sts() macro expands in place here and relies on
 * (and clobbers) the local 'reg' plus the 'rvu'/'blkaddr' parameters.
 */
static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr)
{
	u16 max_ses, max_ies, max_aes;
	u32 e_min = 0, e_max = 0;
	u64 reg;

	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
	max_ses = reg & 0xffff;
	max_ies = (reg >> 16) & 0xffff;
	max_aes = (reg >> 32) & 0xffff;

	/* Get AE status */
	e_min = max_ses + max_ies;
	e_max = max_ses + max_ies + max_aes;
	cpt_get_eng_sts(e_min, e_max, rsp, ae);
	/* Get SE status */
	e_min = 0;
	e_max = max_ses;
	cpt_get_eng_sts(e_min, e_max, rsp, se);
	/* Get IE status */
	e_min = max_ses;
	e_max = max_ses + max_ies;
	cpt_get_eng_sts(e_min, e_max, rsp, ie);
}
776 
/* Mbox handler: report CPT AF status to the CPT PF/VF — context-cache
 * and RXC state (CN10K only), per-engine busy/free maps, and the
 * instruction/read performance counters.
 */
int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
			     struct cpt_sts_rsp *rsp)
{
	int blkaddr;

	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
	if (blkaddr < 0)
		return blkaddr;

	/* This message is accepted only if sent from CPT PF/VF */
	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
	    !is_cpt_vf(rvu, req->hdr.pcifunc))
		return CPT_AF_ERR_ACCESS_DENIED;

	get_ctx_pc(rvu, rsp, blkaddr);

	/* Get CPT engines status */
	get_eng_sts(rvu, rsp, blkaddr);

	/* Read CPT instruction PC registers */
	rsp->inst_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
	rsp->inst_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
	rsp->rd_req_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
	rsp->rd_lat_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
	rsp->rd_uc_pc = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
	rsp->active_cycles_pc = rvu_read64(rvu, blkaddr,
					   CPT_AF_ACTIVE_CYCLES_PC);
	rsp->exe_err_info = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
	rsp->cptclk_cnt = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
	rsp->diag = rvu_read64(rvu, blkaddr, CPT_AF_DIAG);

	return 0;
}
810 
811 #define RXC_ZOMBIE_THRES  GENMASK_ULL(59, 48)
812 #define RXC_ZOMBIE_LIMIT  GENMASK_ULL(43, 32)
813 #define RXC_ACTIVE_THRES  GENMASK_ULL(27, 16)
814 #define RXC_ACTIVE_LIMIT  GENMASK_ULL(11, 0)
815 #define RXC_ACTIVE_COUNT  GENMASK_ULL(60, 48)
816 #define RXC_ZOMBIE_COUNT  GENMASK_ULL(60, 48)
817 
818 static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
819 			     int blkaddr, struct cpt_rxc_time_cfg_req *save)
820 {
821 	u64 dfrg_reg;
822 
823 	if (save) {
824 		/* Save older config */
825 		dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
826 		save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
827 		save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
828 		save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
829 		save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
830 
831 		save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
832 	}
833 
834 	dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
835 	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
836 	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
837 	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_LIMIT, req->active_limit);
838 
839 	rvu_write64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG, req->step);
840 	rvu_write64(rvu, blkaddr, CPT_AF_RXC_DFRG, dfrg_reg);
841 }
842 
843 int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
844 				      struct cpt_rxc_time_cfg_req *req,
845 				      struct msg_rsp *rsp)
846 {
847 	int blkaddr;
848 
849 	blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
850 	if (blkaddr < 0)
851 		return blkaddr;
852 
853 	/* This message is accepted only if sent from CPT PF/VF */
854 	if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
855 	    !is_cpt_vf(rvu, req->hdr.pcifunc))
856 		return CPT_AF_ERR_ACCESS_DENIED;
857 
858 	cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
859 
860 	return 0;
861 }
862 
/* Mbox handler: flush the sender's CPT context cache. */
int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
868 
/* Mbox handler: reset one of the sender's CPT LFs while preserving its
 * AF-side configuration — CPT_AF_LFX_CTL/CTL2 are saved before the reset
 * and written back afterwards.  A reset failure is only logged; the
 * handler still restores the registers and returns 0.
 */
int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
				  struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int cptlf, blkaddr, ret;
	u16 actual_slot;
	u64 ctl, ctl2;

	blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
					    req->slot, &actual_slot);
	if (blkaddr < 0)
		return CPT_AF_ERR_LF_INVALID;

	block = &rvu->hw->block[blkaddr];

	cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
	if (cptlf < 0)
		return CPT_AF_ERR_LF_INVALID;
	/* Save LF config to restore after the reset wipes it */
	ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
	ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));

	ret = rvu_lf_reset(rvu, block, cptlf);
	if (ret)
		dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
			block->addr, cptlf);

	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
	rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);

	return 0;
}
901 
902 static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
903 {
904 	struct cpt_rxc_time_cfg_req req, prev;
905 	int timeout = 2000;
906 	u64 reg;
907 
908 	if (is_rvu_otx2(rvu))
909 		return;
910 
911 	/* Set time limit to minimum values, so that rxc entries will be
912 	 * flushed out quickly.
913 	 */
914 	req.step = 1;
915 	req.zombie_thres = 1;
916 	req.zombie_limit = 1;
917 	req.active_thres = 1;
918 	req.active_limit = 1;
919 
920 	cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
921 
922 	do {
923 		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
924 		udelay(1);
925 		if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
926 			timeout--;
927 		else
928 			break;
929 	} while (timeout);
930 
931 	if (timeout == 0)
932 		dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
933 
934 	timeout = 2000;
935 	do {
936 		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
937 		udelay(1);
938 		if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
939 			timeout--;
940 		else
941 			break;
942 	} while (timeout);
943 
944 	if (timeout == 0)
945 		dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
946 
947 	/* Restore config */
948 	cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
949 }
950 
951 #define INFLIGHT   GENMASK_ULL(8, 0)
952 #define GRB_CNT    GENMASK_ULL(39, 32)
953 #define GWB_CNT    GENMASK_ULL(47, 40)
954 #define XQ_XOR     GENMASK_ULL(63, 63)
955 #define DQPTR      GENMASK_ULL(19, 0)
956 #define NQPTR      GENMASK_ULL(51, 32)
957 
/* Quiesce one CPT LF's instruction queue through the BAR2 alias window
 * (caller must already have programmed CPT_AF_BAR2_SEL for the owning
 * pcifunc — see rvu_cpt_lf_teardown()).  Sequence: stop enqueues, drain
 * all pending instructions, then wait for execution to go idle.
 */
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
	int timeout = 1000000;
	u64 inprog, inst_ptr;
	u64 qsize, pending;
	int i = 0;

	/* Disable instructions enqueuing */
	rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);

	/* Set INPROG bit 16 — request queue quiesce */
	inprog = rvu_read64(rvu, blkaddr,
			    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
	inprog |= BIT_ULL(16);
	rvu_write64(rvu, blkaddr,
		    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);

	qsize = rvu_read64(rvu, blkaddr,
			   CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
	/* Poll until the not-yet-dequeued count reaches zero.  XQ_XOR
	 * accounts for NQ/DQ pointer wrap; 'qsize * 40' converts the queue
	 * size to instruction slots — TODO confirm factor against the HRM.
	 */
	do {
		inst_ptr = rvu_read64(rvu, blkaddr,
				      CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
		pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
			  FIELD_GET(NQPTR, inst_ptr) -
			  FIELD_GET(DQPTR, inst_ptr);
		udelay(1);
		timeout--;
	} while ((pending != 0) && (timeout != 0));

	if (timeout == 0)
		dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");

	timeout = 1000000;
	/* Wait for CPT queue to become execution-quiescent */
	do {
		inprog = rvu_read64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));

		/* Require 10 consecutive idle samples before trusting it */
		if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
		    (FIELD_GET(GRB_CNT, inprog) == 0)) {
			i++;
		} else {
			i = 0;
			timeout--;
		}
	} while ((timeout != 0) && (i < 10));

	if (timeout == 0)
		dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
	/* Wait for 2 us to flush all queue writes to memory */
	udelay(2);
}
1009 
/* Tear down one CPT LF owned by @pcifunc: drain the RXC unit first when
 * the owner is the CPT PF/VF, then quiesce the LF's instruction queue
 * via the BAR2 alias window.  alias_lock serializes use of the shared
 * CPT_AF_BAR2_SEL register; it is cleared again before returning.
 * Always returns 0.
 */
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
	u64 reg;

	if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
		cpt_rxc_teardown(rvu, blkaddr);

	mutex_lock(&rvu->alias_lock);
	/* Enable BAR2 ALIAS for this pcifunc. */
	reg = BIT_ULL(16) | pcifunc;
	rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);

	cpt_lf_disable_iqueue(rvu, blkaddr, slot);

	rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
	mutex_unlock(&rvu->alias_lock);

	return 0;
}
1029 
/* Byte length of the DMA buffer CPT writes its completion/result word
 * into (polled via the u16 at its start).
 */
#define CPT_RES_LEN    16
/* Engine group for the dummy flush instruction; written to CPT_INST_S
 * word 7 bits [63:61].  Presumably the SE/IE group — TODO confirm.
 */
#define CPT_SE_IE_EGRP 1ULL
1032 
1033 static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
1034 				      int nix_blkaddr)
1035 {
1036 	int cpt_pf_num = rvu->cpt_pf_num;
1037 	struct cpt_inst_lmtst_req *req;
1038 	dma_addr_t res_daddr;
1039 	int timeout = 3000;
1040 	u8 cpt_idx;
1041 	u64 *inst;
1042 	u16 *res;
1043 	int rc;
1044 
1045 	res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
1046 	if (!res)
1047 		return -ENOMEM;
1048 
1049 	res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
1050 				   DMA_BIDIRECTIONAL);
1051 	if (dma_mapping_error(rvu->dev, res_daddr)) {
1052 		dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
1053 		rc = -EFAULT;
1054 		goto res_free;
1055 	}
1056 	*res = 0xFFFF;
1057 
1058 	/* Send mbox message to CPT PF */
1059 	req = (struct cpt_inst_lmtst_req *)
1060 	       otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
1061 				       cpt_pf_num, sizeof(*req),
1062 				       sizeof(struct msg_rsp));
1063 	if (!req) {
1064 		rc = -ENOMEM;
1065 		goto res_daddr_unmap;
1066 	}
1067 	req->hdr.sig = OTX2_MBOX_REQ_SIG;
1068 	req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
1069 
1070 	inst = req->inst;
1071 	/* Prepare CPT_INST_S */
1072 	inst[0] = 0;
1073 	inst[1] = res_daddr;
1074 	/* AF PF FUNC */
1075 	inst[2] = 0;
1076 	/* Set QORD */
1077 	inst[3] = 1;
1078 	inst[4] = 0;
1079 	inst[5] = 0;
1080 	inst[6] = 0;
1081 	/* Set EGRP */
1082 	inst[7] = CPT_SE_IE_EGRP << 61;
1083 
1084 	/* Subtract 1 from the NIX-CPT credit count to preserve
1085 	 * credit counts.
1086 	 */
1087 	cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
1088 	rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
1089 		    BIT_ULL(22) - 1);
1090 
1091 	otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
1092 	rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
1093 	if (rc)
1094 		dev_warn(rvu->dev, "notification to pf %d failed\n",
1095 			 cpt_pf_num);
1096 	/* Wait for CPT instruction to be completed */
1097 	do {
1098 		mdelay(1);
1099 		if (*res == 0xFFFF)
1100 			timeout--;
1101 		else
1102 			break;
1103 	} while (timeout);
1104 
1105 	if (timeout == 0)
1106 		dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
1107 
1108 res_daddr_unmap:
1109 	dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
1110 res_free:
1111 	kfree(res);
1112 
1113 	return 0;
1114 }
1115 
/* CPT_AF_CTX_CAM_DATA fields: owning PF_FUNC and the cached context
 * pointer (CPTR) for that CAM entry.
 */
#define CTX_CAM_PF_FUNC   GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR      GENMASK_ULL(45, 0)
1118 
/* Flush all CPT context-cache (CTX CAM) entries owned by @pcifunc.
 * A dummy CPT instruction is first pushed so that all inline-inbound
 * packets ahead of it are flushed through, the RXC is drained, then
 * every CTX CAM entry whose PF_FUNC matches is evicted through the
 * CPT_LF_CTX_FLUSH alias register.
 *
 * Returns 0 on success (including the no-CPT-LF case, which only
 * warns), -EINVAL if the pcifunc has no NIX block, or an error from
 * cpt_inline_inb_lf_cmd_send().
 */
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
{
	int nix_blkaddr, blkaddr;
	u16 max_ctx_entries, i;
	int slot = 0, num_lfs;
	u64 reg, cam_data;
	int rc;

	nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (nix_blkaddr < 0)
		return -EINVAL;

	/* Context-cache flush is a no-op on OcteonTx2 silicon. */
	if (is_rvu_otx2(rvu))
		return 0;

	/* NIX1 pairs with CPT1; everything else uses CPT0. */
	blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;

	/* Submit CPT_INST_S to track when all packets have been
	 * flushed through for the NIX PF FUNC in inline inbound case.
	 */
	rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
	if (rc)
		return rc;

	/* Wait for rxc entries to be flushed out */
	cpt_rxc_teardown(rvu, blkaddr);

	/* CTX CAM depth, from CPT_AF_CONSTANTS0 bits [59:48]. */
	reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
	max_ctx_entries = (reg >> 48) & 0xFFF;

	mutex_lock(&rvu->rsrc_lock);

	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
					blkaddr);
	if (num_lfs == 0) {
		dev_warn(rvu->dev, "CPT LF is not configured\n");
		goto unlock;
	}

	/* Enable BAR2 ALIAS for this pcifunc.
	 * NOTE(review): BAR2_SEL is programmed here under rsrc_lock, but
	 * under alias_lock in rvu_cpt_lf_teardown() — verify the two
	 * paths cannot race on this register.
	 */
	reg = BIT_ULL(16) | pcifunc;
	rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);

	/* Evict every CAM entry owned by this PF_FUNC that still holds a
	 * context pointer.  NOTE(review): BIT_ULL(46) in the flush value
	 * is presumably the invalidate control bit — confirm with the HRM.
	 */
	for (i = 0; i < max_ctx_entries; i++) {
		cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));

		if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
		    FIELD_GET(CTX_CAM_CPTR, cam_data)) {
			reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
			rvu_write64(rvu, blkaddr,
				    CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
				    reg);
		}
	}
	rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);

unlock:
	mutex_unlock(&rvu->rsrc_lock);

	return 0;
}
1180 
1181 int rvu_cpt_init(struct rvu *rvu)
1182 {
1183 	/* Retrieve CPT PF number */
1184 	rvu->cpt_pf_num = get_cpt_pf_num(rvu);
1185 	return 0;
1186 }
1187