// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"

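/* Only the NIX0 block carries a HW context at the moment; any other
 * block address yields NULL.
 */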
static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
		return hw->nix0;

	return NULL;
}

static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	spin_lock(&rvu->rsrc_lock);
	if (txsch->pfvf_map[schq] != pcifunc) {
		spin_unlock(&rvu->rsrc_lock);
		return false;
	}
	spin_unlock(&rvu->rsrc_lock);
	return true;
}

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4 the length field is at offset 2; in IPv6 it's at offset 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e. 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e. 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 0; /* not needed */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in the TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
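	/* BIT(63) in the write below is presumably the global LSO enable
	 * bit, matching the "Enable LSO" intent above.
	 */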
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Configure format fields for TCPv4 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
}

static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
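	/* Assumption from usage elsewhere in this file: BIT(4) is the RSS
	 * enable bit (it is checked in rvu_nix_aq_enq_inst()) and BIT(36)
	 * enables caching, as in the RQS/SQS/CQS configs done at LF alloc.
	 */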
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
	/* Config RSS group offset and sizes */
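	/* Likely encoding: group size (log2 based) in the upper field and
	 * the group's offset into the indirection table in the low bits.
	 */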
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

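/* Copy one instruction to the AQ's current head, ring the doorbell and
 * busy-wait for a completion code. Callers serialize submissions via
 * aq->lock (taken in rvu_nix_aq_enq_inst()).
 */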
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get the current head pointer to append this instruction at */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* Sync into memory before ringing the doorbell */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

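/* Validate a PF/VF's AQ request (LF ownership, queue index bounds and,
 * for SQ contexts, SMQ ownership), build the instruction and submit it
 * to the NIX AQ. For READ ops the context is copied back into the
 * mailbox response.
 */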
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	u64 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds the max number of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check whether the SMQ pointed to by the SQ context belongs
	 * to this PF/VF or not.
	 */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    req->op != NIX_AQ_INSTOP_WRITE) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Enqueuing multiple instructions at once is not supported yet,
	 * so always use the first entry of the result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		/* Fall through */
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		return rc;
	}

	spin_lock(&aq->lock);

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ && !rc) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
		}
	}

	spin_unlock(&aq->lock);
	return rc;
}

int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* If RSS is being enabled, check if the requested config is valid.
	 * The RSS table size should be a power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use the entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

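	/* NIX_AF_CONST3 appears to pack log2 of each HW context size into
	 * 4-bit fields; the shifts below pick out the SQ, RQ, CQ, RSSE,
	 * QINT and CINT entry sizes respectively.
	 */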
	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);
	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);
	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
				 req->rss_sz, req->rss_grps, hwctx_size);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
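	/* Assumed NIX_AF_CONST2 layout, based on the shifts used here:
	 * bits [35:24] hold the per-LF CQ interrupt count and bits [23:12]
	 * the queue interrupt count.
	 */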
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * If the requester has sent 'RVU_DEFAULT_PF_FUNC' then use this
	 * NIX LF's own PCIFUNC.
	 */
	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
		cfg = pcifunc;
	else
		cfg = req->npa_func;

	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
		cfg |= (u64)pcifunc << 16;
	else
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable/disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* Set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	return rc;
}

int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	u64 cfg, reg;
	int err, lvl;

	/* Get the scheduler queue count of each type and allocate a
	 * bitmap for each, for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for the scheduler queue to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u16), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
	}
	return 0;
}

static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
		if (status & (BIT_ULL(16 + idx)))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}

static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* The result structure can be followed by an RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, err;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return 0;
	block = &hw->block[blkaddr];

	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Set num of links of each type */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	hw->cgx = (cfg >> 12) & 0xF;
	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = 1;
	hw->sdp_links = 1;

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	/* Configure segmentation offload formats */
	nix_setup_lso(rvu, blkaddr);

	if (blkaddr == BLKADDR_NIX0) {
		hw->nix0 = devm_kzalloc(rvu->dev,
					sizeof(struct nix_hw), GFP_KERNEL);
		if (!hw->nix0)
			return -ENOMEM;

		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
		if (err)
			return err;
	}
	return 0;
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, lvl;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);

	if (blkaddr == BLKADDR_NIX0) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}
	}
}