// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"

static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
		return hw->nix0;

	return NULL;
}

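/* Check that a Tx scheduler queue at the given level is within range
 * and is currently owned by the requesting PF/VF.
 */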
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
			    int lvl, u16 pcifunc, u16 schq)
{
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return false;

	txsch = &nix_hw->txsch[lvl];
	/* Check out of bounds */
	if (schq >= txsch->schq.max)
		return false;

	spin_lock(&rvu->rsrc_lock);
	if (txsch->pfvf_map[schq] != pcifunc) {
		spin_unlock(&rvu->rsrc_lock);
		return false;
	}
	spin_unlock(&rvu->rsrc_lock);
	return true;
}

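/* Program the outer-L3 fields of an LSO format entry: the IP length
 * field for both IPv4 and IPv6, and additionally the IPv4 ID field.
 */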
static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 0; /* not needed */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

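/* Enable LSO and program the TSOv4/TSOv6 format entries; unused
 * fields of each format are set to NOP.
 */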
static void nix_setup_lso(struct rvu *rvu, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Configure format fields for TCPv4 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
}

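/* Free a NIX LF's queue enable bitmaps and its RQ/SQ/CQ/RSS and
 * interrupt context memory, then clear the stale pointers.
 */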
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	kfree(pfvf->rq_bmap);
	kfree(pfvf->sq_bmap);
	kfree(pfvf->cq_bmap);
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_bmap = NULL;
	pfvf->cq_bmap = NULL;
	pfvf->sq_bmap = NULL;
	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

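/* Allocate the RSS indirection table (rss_sz entries per group) for
 * this NIX LF and program its base address, size and group offsets.
 */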
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
	/* Config RSS group offset and sizes */
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

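/* Copy an instruction into the admin queue at the current head, ring
 * the doorbell and busy-wait (up to ~1ms) for the completion code.
 */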
static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
			       struct nix_aq_inst_s *inst)
{
	struct admin_queue *aq = block->aq;
	struct nix_aq_res_s *result;
	int timeout = 1000;
	u64 reg, head;

	result = (struct nix_aq_res_s *)aq->res->base;

	/* Get current head pointer where to append this instruction */
	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
	head = (reg >> 4) & AQ_PTR_MASK;

	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
	       (void *)inst, aq->inst->entry_sz);
	memset(result, 0, sizeof(*result));
	/* sync into memory */
	wmb();

	/* Ring the doorbell and wait for result */
	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout)
			return -EBUSY;
	}

	if (result->compcode != NIX_AQ_COMP_GOOD)
		/* TODO: Replace this with some error code */
		return -EBUSY;

	return 0;
}

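/* Validate a mailbox AQ request against the sender's NIX LF config,
 * build the AQ instruction, submit it and track the RQ/SQ/CQ enable
 * state in the per-PFVF bitmaps.
 */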
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
			       struct nix_aq_enq_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, blkaddr, rc = 0;
	struct nix_aq_inst_s inst;
	struct rvu_block *block;
	struct admin_queue *aq;
	struct rvu_pfvf *pfvf;
	void *ctx, *mask;
	bool ena;
	u64 cfg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	aq = block->aq;
	if (!aq) {
		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
		return NIX_AF_ERR_AQ_ENQUEUE;
	}

	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	switch (req->ctype) {
	case NIX_AQ_CTYPE_RQ:
		/* Check if index exceeds max no of queues */
		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_SQ:
		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_CQ:
		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	case NIX_AQ_CTYPE_RSS:
		/* Check if RSS is enabled and qidx is within range */
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
		    (req->qidx >= (256UL << (cfg & 0xF))))
			rc = NIX_AF_ERR_AQ_ENQUEUE;
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
	}

	if (rc)
		return rc;

	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
	if (req->ctype == NIX_AQ_CTYPE_SQ &&
	    req->op != NIX_AQ_INSTOP_WRITE) {
		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
				     pcifunc, req->sq.smq))
			return NIX_AF_ERR_AQ_ENQUEUE;
	}

	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
	inst.lf = nixlf;
	inst.cindex = req->qidx;
	inst.ctype = req->ctype;
	inst.op = req->op;
	/* Currently we are not supporting enqueuing multiple instructions,
	 * so always choose first entry in result memory.
	 */
	inst.res_addr = (u64)aq->res->iova;

	/* Clean result + context memory */
	memset(aq->res->base, 0, aq->res->entry_sz);
	/* Context needs to be written at RES_ADDR + 128 */
	ctx = aq->res->base + 128;
	/* Mask needs to be written at RES_ADDR + 256 */
	mask = aq->res->base + 256;

	switch (req->op) {
	case NIX_AQ_INSTOP_WRITE:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(mask, &req->rq_mask,
			       sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(mask, &req->sq_mask,
			       sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(mask, &req->cq_mask,
			       sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(mask, &req->rss_mask,
			       sizeof(struct nix_rsse_s));
		/* Fall through */
	case NIX_AQ_INSTOP_INIT:
		if (req->ctype == NIX_AQ_CTYPE_RQ)
			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_SQ)
			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_CQ)
			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
		else if (req->ctype == NIX_AQ_CTYPE_RSS)
			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
		break;
	case NIX_AQ_INSTOP_NOP:
	case NIX_AQ_INSTOP_READ:
	case NIX_AQ_INSTOP_LOCK:
	case NIX_AQ_INSTOP_UNLOCK:
		break;
	default:
		rc = NIX_AF_ERR_AQ_ENQUEUE;
		return rc;
	}

	spin_lock(&aq->lock);

	/* Submit the instruction to AQ */
	rc = nix_aq_enqueue_wait(rvu, block, &inst);
	if (rc) {
		spin_unlock(&aq->lock);
		return rc;
	}

	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
	if (req->op == NIX_AQ_INSTOP_INIT) {
		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
			__set_bit(req->qidx, pfvf->rq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
			__set_bit(req->qidx, pfvf->sq_bmap);
		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
			__set_bit(req->qidx, pfvf->cq_bmap);
	}

	if (req->op == NIX_AQ_INSTOP_WRITE) {
		if (req->ctype == NIX_AQ_CTYPE_RQ) {
			ena = (req->rq.ena & req->rq_mask.ena) |
				(test_bit(req->qidx, pfvf->rq_bmap) &
				~req->rq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->rq_bmap);
			else
				__clear_bit(req->qidx, pfvf->rq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
				(test_bit(req->qidx, pfvf->sq_bmap) &
				~req->sq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->sq_bmap);
			else
				__clear_bit(req->qidx, pfvf->sq_bmap);
		}
		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
				(test_bit(req->qidx, pfvf->cq_bmap) &
				~req->cq_mask.ena);
			if (ena)
				__set_bit(req->qidx, pfvf->cq_bmap);
			else
				__clear_bit(req->qidx, pfvf->cq_bmap);
		}
	}

	if (rsp) {
		/* Copy read context into mailbox */
		if (req->op == NIX_AQ_INSTOP_READ) {
			if (req->ctype == NIX_AQ_CTYPE_RQ)
				memcpy(&rsp->rq, ctx,
				       sizeof(struct nix_rq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_SQ)
				memcpy(&rsp->sq, ctx,
				       sizeof(struct nix_sq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_CQ)
				memcpy(&rsp->cq, ctx,
				       sizeof(struct nix_cq_ctx_s));
			else if (req->ctype == NIX_AQ_CTYPE_RSS)
				memcpy(&rsp->rss, ctx,
				       sizeof(struct nix_rsse_s));
		}
	}

	spin_unlock(&aq->lock);
	return 0;
}

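/* Disable every enabled context of the requested type (RQ, SQ or CQ)
 * by issuing AQ writes that clear only the 'ena' bit of each context.
 */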
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
	struct nix_aq_enq_req aq_req;
	unsigned long *bmap;
	int qidx, q_cnt = 0;
	int err = 0, rc;

	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
		return NIX_AF_ERR_AQ_ENQUEUE;

	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
	aq_req.hdr.pcifunc = req->hdr.pcifunc;

	if (req->ctype == NIX_AQ_CTYPE_CQ) {
		aq_req.cq.ena = 0;
		aq_req.cq_mask.ena = 1;
		q_cnt = pfvf->cq_ctx->qsize;
		bmap = pfvf->cq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_SQ) {
		aq_req.sq.ena = 0;
		aq_req.sq_mask.ena = 1;
		q_cnt = pfvf->sq_ctx->qsize;
		bmap = pfvf->sq_bmap;
	}
	if (req->ctype == NIX_AQ_CTYPE_RQ) {
		aq_req.rq.ena = 0;
		aq_req.rq_mask.ena = 1;
		q_cnt = pfvf->rq_ctx->qsize;
		bmap = pfvf->rq_bmap;
	}

	aq_req.ctype = req->ctype;
	aq_req.op = NIX_AQ_INSTOP_WRITE;

	for (qidx = 0; qidx < q_cnt; qidx++) {
		if (!test_bit(qidx, bmap))
			continue;
		aq_req.qidx = qidx;
		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
		if (rc) {
			err = rc;
			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
				(req->ctype == NIX_AQ_CTYPE_CQ) ?
				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
				"RQ" : "SQ"), qidx);
		}
	}

	return err;
}

int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
				struct nix_aq_enq_req *req,
				struct nix_aq_enq_rsp *rsp)
{
	return rvu_nix_aq_enq_inst(rvu, req, rsp);
}

int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
				       struct hwctx_disable_req *req,
				       struct msg_rsp *rsp)
{
	return nix_lf_hwctx_disable(rvu, req);
}

int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* If RSS is being enabled, check if requested config is valid.
	 * RSS table size should be power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or
	 * won't be able to use entire table.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->rq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->sq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);
	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
	if (!pfvf->cq_bmap)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);
	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
				 req->rss_sz, req->rss_grps, hwctx_size);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * If the requester has sent 'RVU_DEFAULT_PF_FUNC', use this
	 * NIX LF's own PCIFUNC.
	 */
	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
		cfg = pcifunc;
	else
		cfg = req->npa_func;

	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
		cfg |= (u64)pcifunc << 16;
	else
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable/disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	return rc;
}

int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

/* Disable shaping of pkts by a scheduler queue
 * at a given scheduler level.
 */
static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	u64  cir_reg = 0, pir_reg = 0;
	u64  cfg;

	switch (lvl) {
	case NIX_TXSCH_LVL_TL1:
		cir_reg = NIX_AF_TL1X_CIR(schq);
		pir_reg = 0; /* PIR not available at TL1 */
		break;
	case NIX_TXSCH_LVL_TL2:
		cir_reg = NIX_AF_TL2X_CIR(schq);
		pir_reg = NIX_AF_TL2X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL3:
		cir_reg = NIX_AF_TL3X_CIR(schq);
		pir_reg = NIX_AF_TL3X_PIR(schq);
		break;
	case NIX_TXSCH_LVL_TL4:
		cir_reg = NIX_AF_TL4X_CIR(schq);
		pir_reg = NIX_AF_TL4X_PIR(schq);
		break;
	}

	if (!cir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, cir_reg);
	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));

	if (!pir_reg)
		return;
	cfg = rvu_read64(rvu, blkaddr, pir_reg);
	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
}

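/* Clear a scheduler queue's link mapping: the SDP link for TL4 queues
 * and the CGX/LBK links for TL3 queues.
 */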
static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
				 int lvl, int schq)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int link;

	/* Reset TL4's SDP link config */
	if (lvl == NIX_TXSCH_LVL_TL4)
		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);

	if (lvl != NIX_TXSCH_LVL_TL3)
		return;

	/* Reset TL3's CGX or LBK link config */
	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}

int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
				     struct nix_txsch_alloc_req *req,
				     struct nix_txsch_alloc_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_txsch *txsch;
	int lvl, idx, req_schq;
	struct rvu_pfvf *pfvf;
	struct nix_hw *nix_hw;
	int blkaddr, rc = 0;
	u16 schq;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	spin_lock(&rvu->rsrc_lock);
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		req_schq = req->schq_contig[lvl] + req->schq[lvl];

		/* There are only 28 TL1s */
		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
			goto err;

		/* Check if request is valid */
		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
			goto err;

		/* If contiguous queues are needed, check for availability */
		if (req->schq_contig[lvl] &&
		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
			goto err;

		/* Check if full request can be accommodated */
		if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
			goto err;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		rsp->schq_contig[lvl] = req->schq_contig[lvl];
		rsp->schq[lvl] = req->schq[lvl];

		schq = 0;
		/* Alloc contiguous queues first */
		if (req->schq_contig[lvl]) {
			schq = rvu_alloc_rsrc_contig(&txsch->schq,
						     req->schq_contig[lvl]);

			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
				txsch->pfvf_map[schq] = pcifunc;
				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
				rsp->schq_contig_list[lvl][idx] = schq;
				schq++;
			}
		}

		/* Alloc non-contiguous queues */
		for (idx = 0; idx < req->schq[lvl]; idx++) {
			schq = rvu_alloc_rsrc(&txsch->schq);
			txsch->pfvf_map[schq] = pcifunc;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
			rsp->schq_list[lvl][idx] = schq;
		}
	}
	goto exit;
err:
	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
	spin_unlock(&rvu->rsrc_lock);
	return rc;
}

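/* Free all Tx scheduler queues owned by this PF/VF: detach TL3/TL4
 * queues from their links, flush and xoff the SMQs, return the queues
 * to the free pool and sync the LF's cached NDC-TX state.
 */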
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
	int blkaddr, nixlf, lvl, schq, err;
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Disable TL2/3 queue links before SMQ flush */
	spin_lock(&rvu->rsrc_lock);
	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		if (lvl != NIX_TXSCH_LVL_TL3 && lvl != NIX_TXSCH_LVL_TL4)
			continue;

		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (txsch->pfvf_map[schq] != pcifunc)
				continue;
			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
		}
	}

	/* Flush SMQs */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	for (schq = 0; schq < txsch->schq.max; schq++) {
		if (txsch->pfvf_map[schq] != pcifunc)
			continue;
		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
		/* Do SMQ flush and set enqueue xoff */
		cfg |= BIT_ULL(50) | BIT_ULL(49);
		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);

		/* Wait for flush to complete */
		err = rvu_poll_reg(rvu, blkaddr,
				   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
		if (err) {
			dev_err(rvu->dev,
				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
		}
	}

	/* Now free scheduler queues to free pool */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		for (schq = 0; schq < txsch->schq.max; schq++) {
			if (txsch->pfvf_map[schq] != pcifunc)
				continue;
			rvu_free_rsrc(&txsch->schq, schq);
			txsch->pfvf_map[schq] = 0;
		}
	}
	spin_unlock(&rvu->rsrc_lock);

	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
	if (err)
		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);

	return 0;
}

int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
				    struct nix_txsch_free_req *req,
				    struct msg_rsp *rsp)
{
	return nix_txschq_free(rvu, req->hdr.pcifunc);
}

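/* Read each scheduler level's queue count from the HW constant
 * registers and allocate that level's free bitmap and PF/VF
 * ownership map.
 */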
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	u64 cfg, reg;
	int err, lvl;

	/* Get scheduler queue count of each type and alloc
	 * bitmap for each for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u16), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
	}
	return 0;
}

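/* Trigger X2P bus calibration and check that every CGX device and the
 * LBK block responded before clearing the calibration bit.
 */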
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
		if (status & (BIT_ULL(16 + idx)))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}

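/* Initialize the NIX admin queue: set its endianness, disable NDC
 * bypass and allocate instruction/result memory, then program the
 * AQ base address and size.
 */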
static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
	 * operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, err;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return 0;
	block = &hw->block[blkaddr];

	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Set num of links of each type */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	hw->cgx = (cfg >> 12) & 0xF;
	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = 1;
	hw->sdp_links = 1;

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	/* Configure segmentation offload formats */
	nix_setup_lso(rvu, blkaddr);

	if (blkaddr == BLKADDR_NIX0) {
		hw->nix0 = devm_kzalloc(rvu->dev,
					sizeof(struct nix_hw), GFP_KERNEL);
		if (!hw->nix0)
			return -ENOMEM;

		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
		if (err)
			return err;
	}
	return 0;
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, lvl;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);

	if (blkaddr == BLKADDR_NIX0) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}
	}
}