// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

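/* Submit a single instruction at the current head of the NPA admin queue
 * and busy-wait (roughly 1ms worth of udelay(1) iterations) for hardware
 * to post a completion code into the result memory.
 */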
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get the current head pointer, where this instruction is appended */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* Ensure the instruction and cleared result reach memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			       struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Enqueuing multiple instructions at once is not supported,
	 * so always use the first entry in the result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

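	/* For a WRITE op the context at RES_ADDR + 128 is applied through the
	 * bitmask at RES_ADDR + 256, so only the fields selected by the mask
	 * are updated in the hardware context.
	 */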
	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc)
		return rc;

	spin_lock(&aq->lock);

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
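			/* Take the new 'ena' bit where the write mask selects
			 * it, otherwise keep the state currently tracked in
			 * the bitmap. The pool handling below is identical.
			 */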
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}
	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}

static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

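	/* For every context marked enabled in the bitmap, issue a masked
	 * WRITE that clears only its 'ena' bit.
	 */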
	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}

static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}

int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

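	/* NPA_AF_CONST1 reports the log2 size of each HW context type in
	 * successive 4-bit fields (aura, pool, qint), decoded below.
	 */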
	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get the number of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	return rc;
}

int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, err;
	int blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}

static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* The result structure can be followed by an Aura/Pool context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * the operation type. Alloc sufficient result memory for all
	 * operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

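	/* Program the AQ size and the IOVA of the instruction queue */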
	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_npa_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return 0;

	/* Initialize admin queue */
	err = npa_aq_init(rvu, &hw->block[blkaddr]);
	if (err)
		return err;

	return 0;
}

void rvu_npa_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);
}