17a37245eSSunil Goutham // SPDX-License-Identifier: GPL-2.0
2c7cd6c5aSSunil Goutham /* Marvell RVU Admin Function driver
37a37245eSSunil Goutham  *
4c7cd6c5aSSunil Goutham  * Copyright (C) 2018 Marvell.
57a37245eSSunil Goutham  *
67a37245eSSunil Goutham  */
7*ea9dd2e5SSuman Ghosh #include <linux/bitfield.h>
87a37245eSSunil Goutham #include <linux/module.h>
97a37245eSSunil Goutham #include <linux/pci.h>
107a37245eSSunil Goutham 
117a37245eSSunil Goutham #include "rvu_struct.h"
127a37245eSSunil Goutham #include "rvu_reg.h"
137a37245eSSunil Goutham #include "rvu.h"
147a37245eSSunil Goutham 
/* Push one instruction onto the NPA admin queue and busy-poll (up to
 * ~1000us) for hardware to post a completion code in the shared result
 * area.
 *
 * Returns 0 on NPA_AQ_COMP_GOOD, -EBUSY on timeout or on any other
 * completion code. For CTX_FAULT/LOCKERR/CTX_POISON completions it also
 * attempts to clear NDC cachelines stuck in a locked-but-invalid state
 * (hardware erratum, see rvu_ndc_fix_locked_cacheline()).
 */
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	/* Only the first entry of the result memory is ever used; the
	 * caller serializes submissions (aq->lock), so it is safe to
	 * reuse it for every instruction.
	 */
	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD) {
		/* TODO: Replace this with some error code */
		if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
		    result->compcode == NPA_AQ_COMP_LOCKERR ||
		    result->compcode == NPA_AQ_COMP_CTX_POISON) {
			/* Erratum workaround: unlock NDC cachelines left in
			 * an illegal locked-but-not-valid state.
			 */
			if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
				dev_err(rvu->dev,
					"%s: Not able to unlock cachelines\n", __func__);
		}

		return -EBUSY;
	}

	return 0;
}
604a3581cdSSunil Goutham 
/* Validate an NPA AQ enqueue request from a PF/VF, build the AQ
 * instruction, submit it synchronously via npa_aq_enqueue_wait() and
 * track aura/pool enable state in the per-pfvf bitmaps.
 *
 * @rvu: AF driver state.
 * @req: mailbox request carrying op, ctype, context and write mask.
 * @rsp: optional response; for READ ops the context read back by HW is
 *       copied into it. May be NULL (e.g. internal callers).
 *
 * Returns 0 on success, an NPA_AF_ERR_* code on validation failure, or
 * -EBUSY if the admin queue did not complete the instruction.
 */
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	/* Requestor must own aura context memory covering aura_id */
	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Hardware uses same aq->res->base for updating result of
	 * previous instruction hence wait here till it is done.
	 */
	spin_lock(&aq->lock);

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			/* Aura's pool_addr is an index here; bound-check it
			 * before translating to an IOVA.
			 */
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		/* These ops carry no context/mask payload */
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			/* New state: written ena where the mask selects it,
			 * otherwise keep the previously tracked state.
			 */
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}
	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}
2094a3581cdSSunil Goutham 
/* Disable every enabled aura or pool HW context of a PF/VF by issuing an
 * AQ WRITE with ena=0 (and, for auras, bp_ena=0) for each context whose
 * bit is set in the corresponding per-pfvf bitmap.
 *
 * Continues over remaining contexts even if one fails; returns 0 on full
 * success, otherwise the last error seen (or NPA_AF_ERR_AQ_ENQUEUE when
 * the PF/VF has no context memory at all).
 */
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		/* Also clear backpressure enable while disabling the aura */
		aq_req.aura.bp_ena = 0;
		aq_req.aura_mask.bp_ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

	/* Only contexts marked enabled in the bitmap need a disable write;
	 * any other ctype leaves cnt at 0 and the loop is skipped.
	 */
	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}
25657856ddeSGeetha sowjanya 
257a0291766SSunil Goutham #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
npa_lf_hwctx_lockdown(struct rvu * rvu,struct npa_aq_enq_req * req)258a0291766SSunil Goutham static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
259a0291766SSunil Goutham {
260a0291766SSunil Goutham 	struct npa_aq_enq_req lock_ctx_req;
261a0291766SSunil Goutham 	int err;
262a0291766SSunil Goutham 
263a0291766SSunil Goutham 	if (req->op != NPA_AQ_INSTOP_INIT)
264a0291766SSunil Goutham 		return 0;
265a0291766SSunil Goutham 
266a0291766SSunil Goutham 	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
267a0291766SSunil Goutham 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
268a0291766SSunil Goutham 	lock_ctx_req.ctype = req->ctype;
269a0291766SSunil Goutham 	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
270a0291766SSunil Goutham 	lock_ctx_req.aura_id = req->aura_id;
271a0291766SSunil Goutham 	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
272a0291766SSunil Goutham 	if (err)
273a0291766SSunil Goutham 		dev_err(rvu->dev,
274a0291766SSunil Goutham 			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
275a0291766SSunil Goutham 			req->hdr.pcifunc,
276a0291766SSunil Goutham 			(req->ctype == NPA_AQ_CTYPE_AURA) ?
277a0291766SSunil Goutham 			"Aura" : "Pool", req->aura_id);
278a0291766SSunil Goutham 	return err;
279a0291766SSunil Goutham }
280a0291766SSunil Goutham 
/* Mailbox handler for NPA AQ enqueue: submit the instruction and, since
 * dynamic NDC caching is disabled in this build, lock down any freshly
 * initialized context afterwards.
 */
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	int rc;

	rc = rvu_npa_aq_enq_inst(rvu, req, rsp);
	if (rc)
		return rc;

	return npa_lf_hwctx_lockdown(rvu, req);
}
292a0291766SSunil Goutham #else
293a0291766SSunil Goutham 
/* Mailbox handler for NPA AQ enqueue: plain submission, no context
 * lockdown (CONFIG_NDC_DIS_DYNAMIC_CACHING is not set).
 */
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
300a0291766SSunil Goutham #endif
3014a3581cdSSunil Goutham 
/* Mailbox handler: disable all aura or pool HW contexts of the requesting
 * PF/VF (thin wrapper around npa_lf_hwctx_disable()).
 */
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}
30857856ddeSGeetha sowjanya 
/* Free a PF/VF's NPA context memory (aura, pool, qints) and the aura/pool
 * enable bitmaps. Every pointer is cleared after freeing so a repeated
 * call (e.g. from an error path and again at LF free) is harmless.
 */
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}
3263fa4c323SSunil Goutham 
/* Mailbox handler: allocate and configure an NPA LF for the requesting
 * PF/VF — resets the LF, allocates aura/pool/qints context memory and the
 * tracking bitmaps, then programs the LF's base/config registers.
 *
 * On any allocation failure all partially allocated context memory is
 * released and -ENOMEM is returned. The stack-page info in @rsp is filled
 * in on both success and failure paths.
 */
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	/* Way mask is a 16-bit field; drop any stray upper bits */
	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	/* NPA_AF_CONST1 encodes the per-context sizes as log2 values */
	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get no of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
		    BIT_ULL(36) | req->way_mask << 20);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	/* Batch-DMA cache-line count exists only on CN10K (non-OcteonTx2) */
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		rsp->cache_lines = (cfg >> 1) & 0x3F;
	}
	return rc;
}
4343fa4c323SSunil Goutham 
/* Mailbox handler: free the requesting PF/VF's NPA LF — resets the LF in
 * hardware and releases all of its context memory and bitmaps.
 */
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, err;
	int blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}
4663fa4c323SSunil Goutham 
/* One-time NPA admin queue setup: program endianness and NDC caching
 * behavior, allocate the AQ instruction/result memory and tell hardware
 * where it lives.
 *
 * Returns 0 on success or the error from rvu_aq_alloc().
 */
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of stack pages */
	cfg |= 0x10ULL;
#endif
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* For CN10K NPA BATCH DMA set 35 cache lines */
	if (!is_rvu_otx2(rvu)) {
		cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL);
		cfg &= ~0x7EULL;
		cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1);
		rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg);
	}
	/* Result structure can be followed by Aura/Pool context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

	/* Point hardware at the freshly allocated instruction queue */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}
5137a37245eSSunil Goutham 
rvu_npa_init(struct rvu * rvu)5147a37245eSSunil Goutham int rvu_npa_init(struct rvu *rvu)
5157a37245eSSunil Goutham {
5167a37245eSSunil Goutham 	struct rvu_hwinfo *hw = rvu->hw;
5178e3bf53cSZheng Yongjun 	int blkaddr;
5187a37245eSSunil Goutham 
5197a37245eSSunil Goutham 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
5207a37245eSSunil Goutham 	if (blkaddr < 0)
5217a37245eSSunil Goutham 		return 0;
5227a37245eSSunil Goutham 
5237a37245eSSunil Goutham 	/* Initialize admin queue */
5248e3bf53cSZheng Yongjun 	return npa_aq_init(rvu, &hw->block[blkaddr]);
5257a37245eSSunil Goutham }
5267a37245eSSunil Goutham 
rvu_npa_freemem(struct rvu * rvu)5277a37245eSSunil Goutham void rvu_npa_freemem(struct rvu *rvu)
5287a37245eSSunil Goutham {
5297a37245eSSunil Goutham 	struct rvu_hwinfo *hw = rvu->hw;
5307a37245eSSunil Goutham 	struct rvu_block *block;
5313fa4c323SSunil Goutham 	int blkaddr;
5327a37245eSSunil Goutham 
5337a37245eSSunil Goutham 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
5347a37245eSSunil Goutham 	if (blkaddr < 0)
5357a37245eSSunil Goutham 		return;
5367a37245eSSunil Goutham 
5377a37245eSSunil Goutham 	block = &hw->block[blkaddr];
5383fa4c323SSunil Goutham 	rvu_aq_free(rvu, block->aq);
5397a37245eSSunil Goutham }
540c554f9c1SGeetha sowjanya 
/* Teardown path for an NPA LF being detached from @pcifunc: disable every
 * pool and aura HW context the PF/VF owns, then free its context memory.
 *
 * Fix: the request struct was previously passed with only hdr.pcifunc and
 * ctype set, leaving the remaining fields as uninitialized stack data;
 * zero the whole struct before use so only intentional values are passed.
 */
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;

	memset(&ctx_req, 0, sizeof(ctx_req));

	/* Disable all pools */
	ctx_req.hdr.pcifunc = pcifunc;
	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	/* Disable all auras */
	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	npa_ctx_free(rvu, pfvf);
}
557*ea9dd2e5SSuman Ghosh 
/* Due to a hardware erratum, in some corner cases, AQ context lock
 * operations can result in a NDC way getting into an illegal state
 * of not valid but locked.
 *
 * This API solves the problem by clearing the lock bit of the NDC block.
 * The operation needs to be done for each line of all the NDC banks.
 *
 * Returns 0 on success, or the rvu_poll_reg() error if the NDC CAM never
 * reports idle.
 */
int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
{
	int bank, max_bank, line, max_line, err;
	u64 reg, ndc_af_const;

	/* Set the ENABLE bit(63) to '0' */
	reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
	rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));

	/* Poll until the BUSY bits(47:32) are set to '0' */
	err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
	if (err) {
		dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
		return err;
	}

	/* Bank/line geometry comes from NDC_AF_CONST */
	ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
	max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
	max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
	for (bank = 0; bank < max_bank; bank++) {
		for (line = 0; line < max_line; line++) {
			/* Check if 'cache line valid bit(63)' is not set
			 * but 'cache line lock bit(60)' is set and on
			 * success, reset the lock bit(60).
			 */
			reg = rvu_read64(rvu, blkaddr,
					 NDC_AF_BANKX_LINEX_METADATA(bank, line));
			if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
				rvu_write64(rvu, blkaddr,
					    NDC_AF_BANKX_LINEX_METADATA(bank, line),
					    reg & ~BIT_ULL(60));
			}
		}
	}

	return 0;
}
602