// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"

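/* Submit a single instruction to the NPA admin queue and busy-poll the
 * result memory until the hardware reports completion. Gives up with
 * -EBUSY after roughly a millisecond, or on a non-GOOD completion code.
 * The caller serializes AQ access (rvu_npa_aq_enq_inst() holds aq->lock).
 */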
static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct npa_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct npa_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct npa_aq_res_s *)aq->res->base;

	/* Get the current head pointer, where this instruction is appended */
	reg = rvu_read64(rvu, block->addr, NPA_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NPA_AF_AQ_DOOR, 1);
	while (result->compcode == NPA_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NPA_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

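/* Validate and execute a single AQ request from a PF/VF: build the NPA
 * AQ instruction, stage the context (and, for WRITE, the write mask) in
 * the shared result memory, submit it via npa_aq_enqueue_wait() and, for
 * READ operations, copy the returned context into the mailbox response.
 * The per-LF aura/pool bitmaps are updated to mirror which HW contexts
 * are left enabled.
 */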
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
			struct npa_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int blkaddr, npalf, rc = 0;
	struct npa_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
		return NPA_AF_ERR_AQ_ENQUEUE;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NPA AQ not initialized\n", __func__);
		return NPA_AF_ERR_AQ_ENQUEUE;
	}

	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	memset(&inst, 0, sizeof(struct npa_aq_inst_s));
	inst.cindex = req->aura_id;
	inst.lf = npalf;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Enqueuing multiple instructions is currently not supported,
	 * so always use the first entry of the result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NPA_AQ_INSTOP_WRITE:
		/* Copy context and write mask */
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			memcpy(mask, &req->aura_mask,
			       sizeof(struct npa_aura_s));
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else {
			memcpy(mask, &req->pool_mask,
			       sizeof(struct npa_pool_s));
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_INIT:
		if (req->ctype == NPA_AQ_CTYPE_AURA) {
			if (req->aura.pool_addr >= pfvf->pool_ctx->qsize) {
				rc = NPA_AF_ERR_AQ_FULL;
				break;
			}
			/* Set pool's context address */
			req->aura.pool_addr = pfvf->pool_ctx->iova +
			(req->aura.pool_addr * pfvf->pool_ctx->entry_sz);
			memcpy(ctx, &req->aura, sizeof(struct npa_aura_s));
		} else { /* POOL's context */
			memcpy(ctx, &req->pool, sizeof(struct npa_pool_s));
		}
		break;
	case NPA_AQ_INSTOP_NOP:
	case NPA_AQ_INSTOP_READ:
	case NPA_AQ_INSTOP_LOCK:
	case NPA_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NPA_AF_ERR_AQ_FULL;
		break;
	}

	if (rc)
		return rc;

	spin_lock(&aq->lock);

	/* Submit the instruction to AQ */
	rc = npa_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

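	/* An AQ WRITE is a masked update: only fields whose mask bits are
	 * set change, so the software view of the ENA bit is merged the
	 * same way the hardware does it:
	 *
	 *	new_ena = (req.ena & mask.ena) | (cur_ena & ~mask.ena)
	 *
	 * The aura/pool bitmaps below track that result so teardown knows
	 * which contexts are still enabled.
	 */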
	/* Set aura bitmap if aura hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_AURA) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
			__set_bit(req->aura_id, pfvf->aura_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->aura.ena & req->aura_mask.ena) |
				(test_bit(req->aura_id, pfvf->aura_bmap) &
				~req->aura_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->aura_bmap);
			else
				__clear_bit(req->aura_id, pfvf->aura_bmap);
		}
	}

	/* Set pool bitmap if pool hw context is enabled */
	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
			__set_bit(req->aura_id, pfvf->pool_bmap);
		if (req->op == NPA_AQ_INSTOP_WRITE) {
			ena = (req->pool.ena & req->pool_mask.ena) |
				(test_bit(req->aura_id, pfvf->pool_bmap) &
				~req->pool_mask.ena);
			if (ena)
				__set_bit(req->aura_id, pfvf->pool_bmap);
			else
				__clear_bit(req->aura_id, pfvf->pool_bmap);
		}
	}
	spin_unlock(&aq->lock);

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NPA_AQ_INSTOP_READ) {
			if (req->ctype == NPA_AQ_CTYPE_AURA)
				memcpy(&rsp->aura, ctx,
				       sizeof(struct npa_aura_s));
			else
				memcpy(&rsp->pool, ctx,
				       sizeof(struct npa_pool_s));
		}
	}

	return 0;
}

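/* Disable every aura or pool context of a PF/VF that is still marked
 * enabled in its bitmap, using masked AQ WRITEs that clear only the ENA
 * bit. Continues on failure and returns the last error seen.
 */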
static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct npa_aq_enq_req aq_req;
	unsigned long *bmap;
	int id, cnt = 0;
	int err = 0, rc;

	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
		return NPA_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NPA_AQ_CTYPE_POOL) {
		aq_req.pool.ena = 0;
		aq_req.pool_mask.ena = 1;
		cnt = pfvf->pool_ctx->qsize;
		bmap = pfvf->pool_bmap;
	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
		aq_req.aura.ena = 0;
		aq_req.aura_mask.ena = 1;
		cnt = pfvf->aura_ctx->qsize;
		bmap = pfvf->aura_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NPA_AQ_INSTOP_WRITE;

	for (id = 0; id < cnt; id++) {
		if (!test_bit(id, bmap))
			continue;
		aq_req.aura_id = id;
		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
		}
	}

	return err;
}

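/* With CONFIG_NDC_DIS_DYNAMIC_CACHING, every successfully INITed
 * aura/pool context gets an explicit follow-up AQ LOCK instruction,
 * presumably to keep the context resident in the NDC even though
 * dynamic caching is disabled.
 */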
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
	struct npa_aq_enq_req lock_ctx_req;
	int err;

	if (req->op != NPA_AQ_INSTOP_INIT)
		return 0;

	memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
	lock_ctx_req.ctype = req->ctype;
	lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
	lock_ctx_req.aura_id = req->aura_id;
	err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
	if (err)
		dev_err(rvu->dev,
			"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
			req->hdr.pcifunc,
			(req->ctype == NPA_AQ_CTYPE_AURA) ?
			"Aura" : "Pool", req->aura_id);
	return err;
}

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	int err;

	err = rvu_npa_aq_enq_inst(rvu, req, rsp);
	if (!err)
		err = npa_lf_hwctx_lockdown(rvu, req);
	return err;
}
#else

int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
				struct npa_aq_enq_req *req,
				struct npa_aq_enq_rsp *rsp)
{
	return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif

int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return npa_lf_hwctx_disable(rvu, req);
}

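/* Free an LF's aura, pool and queue-interrupt context memory along with
 * the software bitmaps that track enabled auras and pools.
 */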
static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->aura_bmap);
	pfvf->aura_bmap = NULL;

	qmem_free(rvu->dev, pfvf->aura_ctx);
	pfvf->aura_ctx = NULL;

	kfree(pfvf->pool_bmap);
	pfvf->pool_bmap = NULL;

	qmem_free(rvu->dev, pfvf->pool_ctx);
	pfvf->pool_ctx = NULL;

	qmem_free(rvu->dev, pfvf->npa_qints_ctx);
	pfvf->npa_qints_ctx = NULL;
}

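/* Mailbox handler for NPA LF allocation: resets the requesting PF/VF's
 * NPA LF, allocates memory for its aura, pool and queue-interrupt HW
 * contexts, programs the context base addresses and caching/way-mask
 * configuration, and reports the stack page geometry in the response.
 */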
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
				  struct npa_lf_alloc_req *req,
				  struct npa_lf_alloc_rsp *rsp)
{
	int npalf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (req->aura_sz > NPA_AURA_SZ_MAX ||
	    req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
		return NPA_AF_ERR_PARAM;

	if (req->way_mask)
		req->way_mask &= 0xFFFF;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST1);

	/* Alloc memory for aura HW contexts */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->aura_ctx,
			 NPA_AURA_COUNT(req->aura_sz), hwctx_size);
	if (err)
		goto free_mem;

	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->aura_bmap)
		goto free_mem;

	/* Alloc memory for pool HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
				  GFP_KERNEL);
	if (!pfvf->pool_bmap)
		goto free_mem;

	/* Get the number of queue interrupts supported */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	qints = (cfg >> 28) & 0xFFF;

	/* Alloc memory for Qints HW contexts */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->npa_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	cfg = rvu_read64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf));
	/* Clear way partition mask and set aura offset to '0' */
	cfg &= ~(BIT_ULL(34) - 1);
	/* Set aura size & enable caching of contexts */
	cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;

	rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);

	/* Configure aura HW context's base */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_LOC_AURAS_BASE(npalf),
		    (u64)pfvf->aura_ctx->iova);

	/* Enable caching of qints hw context */
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
		    BIT_ULL(36) | req->way_mask << 20);
	rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
		    (u64)pfvf->npa_qints_ctx->iova);

	goto exit;

free_mem:
	npa_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* set stack page info */
	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
	rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF;
	rsp->stack_pg_bytes = cfg & 0xFF;
	rsp->qints = (cfg >> 28) & 0xFFF;
	return rc;
}

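/* Mailbox handler for NPA LF free: resets the PF/VF's NPA LF and
 * releases all HW context memory that was allocated for it at LF
 * allocation time.
 */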
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	int npalf, err;
	int blkaddr;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
	if (!pfvf->npalf || blkaddr < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	npalf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (npalf < 0)
		return NPA_AF_ERR_AF_LF_INVALID;

	/* Reset this NPA LF */
	err = rvu_lf_reset(rvu, block, npalf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NPALF%d\n", npalf);
		return NPA_AF_ERR_LF_RESET;
	}

	npa_ctx_free(rvu, pfvf);

	return 0;
}

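/* One-time admin queue setup for the NPA block: select the AQ
 * endianness, configure NDC caching behaviour, allocate instruction and
 * result queue memory and program the AQ base address and size.
 */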
static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_GEN_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NPA_AF_GEN_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
	cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
	/* Disable caching of stack pages */
	cfg |= 0x10ULL;
#endif
	rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);

	/* The result structure can be followed by an aura/pool context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * the operation type. Allocate sufficient result memory for all
	 * operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct npa_aq_inst_s),
			   ALIGN(sizeof(struct npa_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NPA_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

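/* NPA block initialization: nothing to do if the NPA block is not
 * present, otherwise bring up its admin queue.
 */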
int rvu_npa_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int blkaddr, err;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return 0;

	/* Initialize admin queue */
	err = npa_aq_init(rvu, &hw->block[blkaddr]);
	if (err)
		return err;

	return 0;
}

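/* Free the NPA admin queue memory allocated in npa_aq_init(). */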
void rvu_npa_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);
}

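/* Tear down an NPA LF: disable all of its pool and aura contexts that
 * are still enabled and free the LF's context memory.
 */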
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;

	/* Disable all pools */
	ctx_req.hdr.pcifunc = pcifunc;
	ctx_req.ctype = NPA_AQ_CTYPE_POOL;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	/* Disable all auras */
	ctx_req.ctype = NPA_AQ_CTYPE_AURA;
	npa_lf_hwctx_disable(rvu, &ctx_req);

	npa_ctx_free(rvu, pfvf);
}