1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
21 
22 enum mc_tbl_sz {
23 	MC_TBL_SZ_256,
24 	MC_TBL_SZ_512,
25 	MC_TBL_SZ_1K,
26 	MC_TBL_SZ_2K,
27 	MC_TBL_SZ_4K,
28 	MC_TBL_SZ_8K,
29 	MC_TBL_SZ_16K,
30 	MC_TBL_SZ_32K,
31 	MC_TBL_SZ_64K,
32 };
33 
34 enum mc_buf_cnt {
35 	MC_BUF_CNT_8,
36 	MC_BUF_CNT_16,
37 	MC_BUF_CNT_32,
38 	MC_BUF_CNT_64,
39 	MC_BUF_CNT_128,
40 	MC_BUF_CNT_256,
41 	MC_BUF_CNT_512,
42 	MC_BUF_CNT_1024,
43 	MC_BUF_CNT_2048,
44 };
45 
46 /* For now only the MC resources needed for broadcast
47  * pkt replication are considered, i.e. 256 HWVFs + 12 PFs.
48  */
49 #define MC_TBL_SIZE	MC_TBL_SZ_512
50 #define MC_BUF_CNT	MC_BUF_CNT_128
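/* These enum values are HW encodings: the MCE table holds 256 << MC_TBL_SIZE
 * entries and 8 << MC_BUF_CNT replication buffers are allocated, matching the
 * qmem_alloc() calls in nix_setup_mcast() below.
 */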
51 
52 struct mce {
53 	struct hlist_node	node;
54 	u16			idx;
55 	u16			pcifunc;
56 };
57 
58 int rvu_get_nixlf_count(struct rvu *rvu)
59 {
60 	struct rvu_block *block;
61 	int blkaddr;
62 
63 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
64 	if (blkaddr < 0)
65 		return 0;
66 	block = &rvu->hw->block[blkaddr];
67 	return block->lf.max;
68 }
69 
70 static void nix_mce_list_init(struct nix_mce_list *list, int max)
71 {
72 	INIT_HLIST_HEAD(&list->head);
73 	list->count = 0;
74 	list->max = max;
75 }
76 
77 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
78 {
79 	int idx;
80 
81 	if (!mcast)
82 		return 0;
83 
84 	idx = mcast->next_free_mce;
85 	mcast->next_free_mce += count;
86 	return idx;
87 }
88 
89 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
90 {
91 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
92 		return hw->nix0;
93 
94 	return NULL;
95 }
96 
97 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
98 			    int lvl, u16 pcifunc, u16 schq)
99 {
100 	struct nix_txsch *txsch;
101 	struct nix_hw *nix_hw;
102 
103 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
104 	if (!nix_hw)
105 		return false;
106 
107 	txsch = &nix_hw->txsch[lvl];
108 	/* Check out of bounds */
109 	if (schq >= txsch->schq.max)
110 		return false;
111 
112 	mutex_lock(&rvu->rsrc_lock);
113 	if (txsch->pfvf_map[schq] != pcifunc) {
114 		mutex_unlock(&rvu->rsrc_lock);
115 		return false;
116 	}
117 	mutex_unlock(&rvu->rsrc_lock);
118 	return true;
119 }
120 
121 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
122 {
123 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
124 	u8 cgx_id, lmac_id;
125 	int pkind, pf;
126 	int err;
127 
128 	pf = rvu_get_pf(pcifunc);
129 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
130 		return 0;
131 
132 	switch (type) {
133 	case NIX_INTF_TYPE_CGX:
134 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
135 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
136 
137 		pkind = rvu_npc_get_pkind(rvu, pf);
138 		if (pkind < 0) {
139 			dev_err(rvu->dev,
140 				"PF_FUNC 0x%x: Invalid pkind\n", pcifunc);
141 			return -EINVAL;
142 		}
143 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
144 		pfvf->tx_chan_base = pfvf->rx_chan_base;
145 		pfvf->rx_chan_cnt = 1;
146 		pfvf->tx_chan_cnt = 1;
147 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
148 		rvu_npc_set_pkind(rvu, pkind, pfvf);
149 		break;
150 	case NIX_INTF_TYPE_LBK:
151 		break;
152 	}
153 
154 	/* Add a UCAST forwarding rule in MCAM for the MAC address of
155 	 * the RVU PF/VF to which this NIXLF is attached.
156 	 */
157 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
158 				    pfvf->rx_chan_base, pfvf->mac_addr);
159 
160 	/* Add this PF_FUNC to bcast pkt replication list */
161 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
162 	if (err) {
163 		dev_err(rvu->dev,
164 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
165 			pcifunc);
166 		return err;
167 	}
168 
169 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
170 					  nixlf, pfvf->rx_chan_base);
171 	pfvf->maxlen = NIC_HW_MIN_FRS;
172 	pfvf->minlen = NIC_HW_MIN_FRS;
173 
174 	return 0;
175 }
176 
177 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
178 {
179 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
180 	int err;
181 
182 	pfvf->maxlen = 0;
183 	pfvf->minlen = 0;
184 
185 	/* Remove this PF_FUNC from bcast pkt replication list */
186 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
187 	if (err) {
188 		dev_err(rvu->dev,
189 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
190 			pcifunc);
191 	}
192 
193 	/* Free and disable any MCAM entries used by this NIX LF */
194 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
195 }
196 
197 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
198 				 u64 format, bool v4, u64 *fidx)
199 {
200 	struct nix_lso_format field = {0};
201 
202 	/* IP's Length field */
203 	field.layer = NIX_TXLAYER_OL3;
204 	/* IPv4's total length field is at byte offset 2, IPv6's payload length at 4 */
205 	field.offset = v4 ? 2 : 4;
206 	field.sizem1 = 1; /* i.e 2 bytes */
207 	field.alg = NIX_LSOALG_ADD_PAYLEN;
208 	rvu_write64(rvu, blkaddr,
209 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
210 		    *(u64 *)&field);
211 
212 	/* No ID field in IPv6 header */
213 	if (!v4)
214 		return;
215 
216 	/* IP's ID field */
217 	field.layer = NIX_TXLAYER_OL3;
218 	field.offset = 4;
219 	field.sizem1 = 1; /* i.e 2 bytes */
220 	field.alg = NIX_LSOALG_ADD_SEGNUM;
221 	rvu_write64(rvu, blkaddr,
222 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
223 		    *(u64 *)&field);
224 }
225 
226 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
227 				 u64 format, u64 *fidx)
228 {
229 	struct nix_lso_format field = {0};
230 
231 	/* TCP's sequence number field */
232 	field.layer = NIX_TXLAYER_OL4;
233 	field.offset = 4;
234 	field.sizem1 = 3; /* i.e 4 bytes */
235 	field.alg = NIX_LSOALG_ADD_OFFSET;
236 	rvu_write64(rvu, blkaddr,
237 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
238 		    *(u64 *)&field);
239 
240 	/* TCP's flags field */
241 	field.layer = NIX_TXLAYER_OL4;
242 	field.offset = 12;
243 	field.sizem1 = 0; /* not needed */
244 	field.alg = NIX_LSOALG_TCP_FLAGS;
245 	rvu_write64(rvu, blkaddr,
246 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
247 		    *(u64 *)&field);
248 }
249 
250 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
251 {
252 	u64 cfg, idx, fidx = 0;
253 
254 	/* Enable LSO */
255 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
256 	/* For TSO, set first and middle segment flags to
257 	 * mask out PSH, RST & FIN flags in TCP packet
258 	 */
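	/* 0xFFF2 clears FIN, RST and PSH (TCP flag bits 0, 2 and 3) in the
	 * two 16-bit flag-mask fields at bits [31:16] and [47:32] of
	 * NIX_AF_LSO_CFG, while leaving the remaining flags untouched.
	 */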
259 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
260 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
261 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
262 
263 	/* Configure format fields for TCPv4 segmentation offload */
264 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
265 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
266 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
267 
268 	/* Set rest of the fields to NOP */
269 	for (; fidx < 8; fidx++) {
270 		rvu_write64(rvu, blkaddr,
271 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
272 	}
273 
274 	/* Configure format fields for TCPv6 segmentation offload */
275 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
276 	fidx = 0;
277 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
278 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
279 
280 	/* Set rest of the fields to NOP */
281 	for (; fidx < 8; fidx++) {
282 		rvu_write64(rvu, blkaddr,
283 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
284 	}
285 }
286 
287 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
288 {
289 	kfree(pfvf->rq_bmap);
290 	kfree(pfvf->sq_bmap);
291 	kfree(pfvf->cq_bmap);
292 	if (pfvf->rq_ctx)
293 		qmem_free(rvu->dev, pfvf->rq_ctx);
294 	if (pfvf->sq_ctx)
295 		qmem_free(rvu->dev, pfvf->sq_ctx);
296 	if (pfvf->cq_ctx)
297 		qmem_free(rvu->dev, pfvf->cq_ctx);
298 	if (pfvf->rss_ctx)
299 		qmem_free(rvu->dev, pfvf->rss_ctx);
300 	if (pfvf->nix_qints_ctx)
301 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
302 	if (pfvf->cq_ints_ctx)
303 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
304 
305 	pfvf->rq_bmap = NULL;
306 	pfvf->cq_bmap = NULL;
307 	pfvf->sq_bmap = NULL;
308 	pfvf->rq_ctx = NULL;
309 	pfvf->sq_ctx = NULL;
310 	pfvf->cq_ctx = NULL;
311 	pfvf->rss_ctx = NULL;
312 	pfvf->nix_qints_ctx = NULL;
313 	pfvf->cq_ints_ctx = NULL;
314 }
315 
316 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
317 			      struct rvu_pfvf *pfvf, int nixlf,
318 			      int rss_sz, int rss_grps, int hwctx_size)
319 {
320 	int err, grp, num_indices;
321 
322 	/* RSS is not requested for this NIXLF */
323 	if (!rss_sz)
324 		return 0;
325 	num_indices = rss_sz * rss_grps;
326 
327 	/* Alloc NIX RSS HW context memory and config the base */
328 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
329 	if (err)
330 		return err;
331 
332 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
333 		    (u64)pfvf->rss_ctx->iova);
334 
335 	/* Config full RSS table size, enable RSS and caching */
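	/* The low 4 bits (SIZE) appear to encode the table as 256 << SIZE
	 * entries, matching the bounds check in rvu_nix_aq_enq_inst();
	 * BIT(4) enables RSS and BIT(36) enables caching.
	 */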
336 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
337 		    BIT_ULL(36) | BIT_ULL(4) |
338 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
339 	/* Config RSS group offset and sizes */
340 	for (grp = 0; grp < rss_grps; grp++)
341 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
342 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
343 	return 0;
344 }
345 
346 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
347 			       struct nix_aq_inst_s *inst)
348 {
349 	struct admin_queue *aq = block->aq;
350 	struct nix_aq_res_s *result;
351 	int timeout = 1000;
352 	u64 reg, head;
353 
354 	result = (struct nix_aq_res_s *)aq->res->base;
355 
356 	/* Get the current head pointer at which to append this instruction */
357 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
358 	head = (reg >> 4) & AQ_PTR_MASK;
359 
360 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
361 	       (void *)inst, aq->inst->entry_sz);
362 	memset(result, 0, sizeof(*result));
363 	/* sync into memory */
364 	wmb();
365 
366 	/* Ring the doorbell and wait for result */
367 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
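	/* HW posts the completion code into the result entry; with udelay(1)
	 * per iteration this polls for roughly 1ms before giving up.
	 */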
368 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
369 		cpu_relax();
370 		udelay(1);
371 		timeout--;
372 		if (!timeout)
373 			return -EBUSY;
374 	}
375 
376 	if (result->compcode != NIX_AQ_COMP_GOOD)
377 		/* TODO: Replace this with some error code */
378 		return -EBUSY;
379 
380 	return 0;
381 }
382 
383 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
384 			       struct nix_aq_enq_rsp *rsp)
385 {
386 	struct rvu_hwinfo *hw = rvu->hw;
387 	u16 pcifunc = req->hdr.pcifunc;
388 	int nixlf, blkaddr, rc = 0;
389 	struct nix_aq_inst_s inst;
390 	struct rvu_block *block;
391 	struct admin_queue *aq;
392 	struct rvu_pfvf *pfvf;
393 	void *ctx, *mask;
394 	bool ena;
395 	u64 cfg;
396 
397 	pfvf = rvu_get_pfvf(rvu, pcifunc);
398 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
399 	if (!pfvf->nixlf || blkaddr < 0)
400 		return NIX_AF_ERR_AF_LF_INVALID;
401 
402 	block = &hw->block[blkaddr];
403 	aq = block->aq;
404 	if (!aq) {
405 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
406 		return NIX_AF_ERR_AQ_ENQUEUE;
407 	}
408 
409 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
410 	if (nixlf < 0)
411 		return NIX_AF_ERR_AF_LF_INVALID;
412 
413 	switch (req->ctype) {
414 	case NIX_AQ_CTYPE_RQ:
415 		/* Check if index exceeds max no of queues */
416 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
417 			rc = NIX_AF_ERR_AQ_ENQUEUE;
418 		break;
419 	case NIX_AQ_CTYPE_SQ:
420 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
421 			rc = NIX_AF_ERR_AQ_ENQUEUE;
422 		break;
423 	case NIX_AQ_CTYPE_CQ:
424 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
425 			rc = NIX_AF_ERR_AQ_ENQUEUE;
426 		break;
427 	case NIX_AQ_CTYPE_RSS:
428 		/* Check if RSS is enabled and qidx is within range */
429 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
430 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
431 		    (req->qidx >= (256UL << (cfg & 0xF))))
432 			rc = NIX_AF_ERR_AQ_ENQUEUE;
433 		break;
434 	case NIX_AQ_CTYPE_MCE:
435 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
436 		/* Check if index exceeds MCE list length */
437 		if (!hw->nix0->mcast.mce_ctx ||
438 		    (req->qidx >= (256UL << (cfg & 0xF))))
439 			rc = NIX_AF_ERR_AQ_ENQUEUE;
440 
441 		/* Adding MCE entries for PF/VF mailbox requests is not yet
442 		 * supported, so reject them (rsp is only set for those).
443 		 */
444 		if (rsp)
445 			rc = NIX_AF_ERR_AQ_ENQUEUE;
446 		break;
447 	default:
448 		rc = NIX_AF_ERR_AQ_ENQUEUE;
449 	}
450 
451 	if (rc)
452 		return rc;
453 
454 	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
455 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
456 	    req->op != NIX_AQ_INSTOP_WRITE) {
457 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
458 				     pcifunc, req->sq.smq))
459 			return NIX_AF_ERR_AQ_ENQUEUE;
460 	}
461 
462 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
463 	inst.lf = nixlf;
464 	inst.cindex = req->qidx;
465 	inst.ctype = req->ctype;
466 	inst.op = req->op;
467 	/* Currently we are not supporting enqueuing multiple instructions,
468 	 * so always choose first entry in result memory.
469 	 */
470 	inst.res_addr = (u64)aq->res->iova;
471 
472 	/* Clean result + context memory */
473 	memset(aq->res->base, 0, aq->res->entry_sz);
474 	/* Context needs to be written at RES_ADDR + 128 */
475 	ctx = aq->res->base + 128;
476 	/* Mask needs to be written at RES_ADDR + 256 */
477 	mask = aq->res->base + 256;
478 
479 	switch (req->op) {
480 	case NIX_AQ_INSTOP_WRITE:
481 		if (req->ctype == NIX_AQ_CTYPE_RQ)
482 			memcpy(mask, &req->rq_mask,
483 			       sizeof(struct nix_rq_ctx_s));
484 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
485 			memcpy(mask, &req->sq_mask,
486 			       sizeof(struct nix_sq_ctx_s));
487 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
488 			memcpy(mask, &req->cq_mask,
489 			       sizeof(struct nix_cq_ctx_s));
490 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
491 			memcpy(mask, &req->rss_mask,
492 			       sizeof(struct nix_rsse_s));
493 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
494 			memcpy(mask, &req->mce_mask,
495 			       sizeof(struct nix_rx_mce_s));
496 		/* Fall through */
497 	case NIX_AQ_INSTOP_INIT:
498 		if (req->ctype == NIX_AQ_CTYPE_RQ)
499 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
500 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
501 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
502 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
503 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
504 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
505 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
506 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
507 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
508 		break;
509 	case NIX_AQ_INSTOP_NOP:
510 	case NIX_AQ_INSTOP_READ:
511 	case NIX_AQ_INSTOP_LOCK:
512 	case NIX_AQ_INSTOP_UNLOCK:
513 		break;
514 	default:
515 		rc = NIX_AF_ERR_AQ_ENQUEUE;
516 		return rc;
517 	}
518 
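	/* The AQ uses a single shared result entry (res_addr above), so
	 * serialize submissions with the AQ lock.
	 */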
519 	spin_lock(&aq->lock);
520 
521 	/* Submit the instruction to AQ */
522 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
523 	if (rc) {
524 		spin_unlock(&aq->lock);
525 		return rc;
526 	}
527 
528 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
529 	if (req->op == NIX_AQ_INSTOP_INIT) {
530 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
531 			__set_bit(req->qidx, pfvf->rq_bmap);
532 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
533 			__set_bit(req->qidx, pfvf->sq_bmap);
534 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
535 			__set_bit(req->qidx, pfvf->cq_bmap);
536 	}
537 
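	/* For WRITE ops track the resulting enable state: take the requested
	 * ena wherever the mask selects it, else keep the state currently
	 * recorded in the bitmap.
	 */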
538 	if (req->op == NIX_AQ_INSTOP_WRITE) {
539 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
540 			ena = (req->rq.ena & req->rq_mask.ena) |
541 				(test_bit(req->qidx, pfvf->rq_bmap) &
542 				~req->rq_mask.ena);
543 			if (ena)
544 				__set_bit(req->qidx, pfvf->rq_bmap);
545 			else
546 				__clear_bit(req->qidx, pfvf->rq_bmap);
547 		}
548 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
549 			ena = (req->sq.ena & req->sq_mask.ena) |
550 				(test_bit(req->qidx, pfvf->sq_bmap) &
551 				~req->sq_mask.ena);
552 			if (ena)
553 				__set_bit(req->qidx, pfvf->sq_bmap);
554 			else
555 				__clear_bit(req->qidx, pfvf->sq_bmap);
556 		}
557 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
558 			ena = (req->cq.ena & req->cq_mask.ena) |
559 				(test_bit(req->qidx, pfvf->cq_bmap) &
560 				~req->cq_mask.ena);
561 			if (ena)
562 				__set_bit(req->qidx, pfvf->cq_bmap);
563 			else
564 				__clear_bit(req->qidx, pfvf->cq_bmap);
565 		}
566 	}
567 
568 	if (rsp) {
569 		/* Copy read context into mailbox */
570 		if (req->op == NIX_AQ_INSTOP_READ) {
571 			if (req->ctype == NIX_AQ_CTYPE_RQ)
572 				memcpy(&rsp->rq, ctx,
573 				       sizeof(struct nix_rq_ctx_s));
574 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
575 				memcpy(&rsp->sq, ctx,
576 				       sizeof(struct nix_sq_ctx_s));
577 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
578 				memcpy(&rsp->cq, ctx,
579 				       sizeof(struct nix_cq_ctx_s));
580 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
581 				memcpy(&rsp->rss, ctx,
582 				       sizeof(struct nix_rsse_s));
583 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
584 				memcpy(&rsp->mce, ctx,
585 				       sizeof(struct nix_rx_mce_s));
586 		}
587 	}
588 
589 	spin_unlock(&aq->lock);
590 	return 0;
591 }
592 
593 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
594 {
595 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
596 	struct nix_aq_enq_req aq_req;
597 	unsigned long *bmap;
598 	int qidx, q_cnt = 0;
599 	int err = 0, rc;
600 
601 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
602 		return NIX_AF_ERR_AQ_ENQUEUE;
603 
604 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
605 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
606 
607 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
608 		aq_req.cq.ena = 0;
609 		aq_req.cq_mask.ena = 1;
610 		q_cnt = pfvf->cq_ctx->qsize;
611 		bmap = pfvf->cq_bmap;
612 	}
613 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
614 		aq_req.sq.ena = 0;
615 		aq_req.sq_mask.ena = 1;
616 		q_cnt = pfvf->sq_ctx->qsize;
617 		bmap = pfvf->sq_bmap;
618 	}
619 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
620 		aq_req.rq.ena = 0;
621 		aq_req.rq_mask.ena = 1;
622 		q_cnt = pfvf->rq_ctx->qsize;
623 		bmap = pfvf->rq_bmap;
624 	}
625 
626 	aq_req.ctype = req->ctype;
627 	aq_req.op = NIX_AQ_INSTOP_WRITE;
628 
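	/* Only the 'ena' bit is set in the mask, so each WRITE below is meant
	 * to clear just the enable bit of the context and leave the rest of
	 * the queue context untouched.
	 */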
629 	for (qidx = 0; qidx < q_cnt; qidx++) {
630 		if (!test_bit(qidx, bmap))
631 			continue;
632 		aq_req.qidx = qidx;
633 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
634 		if (rc) {
635 			err = rc;
636 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
637 				(req->ctype == NIX_AQ_CTYPE_CQ) ?
638 				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
639 				"RQ" : "SQ"), qidx);
640 		}
641 	}
642 
643 	return err;
644 }
645 
646 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
647 				struct nix_aq_enq_req *req,
648 				struct nix_aq_enq_rsp *rsp)
649 {
650 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
651 }
652 
653 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
654 				       struct hwctx_disable_req *req,
655 				       struct msg_rsp *rsp)
656 {
657 	return nix_lf_hwctx_disable(rvu, req);
658 }
659 
660 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
661 				  struct nix_lf_alloc_req *req,
662 				  struct nix_lf_alloc_rsp *rsp)
663 {
664 	int nixlf, qints, hwctx_size, err, rc = 0;
665 	struct rvu_hwinfo *hw = rvu->hw;
666 	u16 pcifunc = req->hdr.pcifunc;
667 	struct rvu_block *block;
668 	struct rvu_pfvf *pfvf;
669 	u64 cfg, ctx_cfg;
670 	int blkaddr;
671 
672 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
673 		return NIX_AF_ERR_PARAM;
674 
675 	pfvf = rvu_get_pfvf(rvu, pcifunc);
676 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
677 	if (!pfvf->nixlf || blkaddr < 0)
678 		return NIX_AF_ERR_AF_LF_INVALID;
679 
680 	block = &hw->block[blkaddr];
681 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
682 	if (nixlf < 0)
683 		return NIX_AF_ERR_AF_LF_INVALID;
684 
685 	/* If RSS is being enabled, check if requested config is valid.
686 	 * RSS table size should be a power of two, otherwise
687 	 * RSS_GRP::OFFSET + adder might go beyond that group or
688 	 * the entire table can't be used.
689 	 */
690 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
691 			    !is_power_of_2(req->rss_sz)))
692 		return NIX_AF_ERR_RSS_SIZE_INVALID;
693 
694 	if (req->rss_sz &&
695 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
696 		return NIX_AF_ERR_RSS_GRPS_INVALID;
697 
698 	/* Reset this NIX LF */
699 	err = rvu_lf_reset(rvu, block, nixlf);
700 	if (err) {
701 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
702 			block->addr - BLKADDR_NIX0, nixlf);
703 		return NIX_AF_ERR_LF_RESET;
704 	}
705 
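	/* NIX_AF_CONST3 packs log2 of each HW context size into 4-bit fields;
	 * the shifts used below are SQ:0, RQ:4, CQ:8, RSS:12, QINT:20 and
	 * CINT:24.
	 */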
706 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
707 
708 	/* Alloc NIX RQ HW context memory and config the base */
709 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
710 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
711 	if (err)
712 		goto free_mem;
713 
714 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
715 	if (!pfvf->rq_bmap)
716 		goto free_mem;
717 
718 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
719 		    (u64)pfvf->rq_ctx->iova);
720 
721 	/* Set caching and queue count in HW */
722 	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
723 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
724 
725 	/* Alloc NIX SQ HW context memory and config the base */
726 	hwctx_size = 1UL << (ctx_cfg & 0xF);
727 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
728 	if (err)
729 		goto free_mem;
730 
731 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
732 	if (!pfvf->sq_bmap)
733 		goto free_mem;
734 
735 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
736 		    (u64)pfvf->sq_ctx->iova);
737 	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
738 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
739 
740 	/* Alloc NIX CQ HW context memory and config the base */
741 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
742 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
743 	if (err)
744 		goto free_mem;
745 
746 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
747 	if (!pfvf->cq_bmap)
748 		goto free_mem;
749 
750 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
751 		    (u64)pfvf->cq_ctx->iova);
752 	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
753 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
754 
755 	/* Initialize receive side scaling (RSS) */
756 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
757 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
758 				 req->rss_sz, req->rss_grps, hwctx_size);
759 	if (err)
760 		goto free_mem;
761 
762 	/* Alloc memory for CQINT's HW contexts */
763 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
764 	qints = (cfg >> 24) & 0xFFF;
765 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
766 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
767 	if (err)
768 		goto free_mem;
769 
770 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
771 		    (u64)pfvf->cq_ints_ctx->iova);
772 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
773 
774 	/* Alloc memory for QINT's HW contexts */
775 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
776 	qints = (cfg >> 12) & 0xFFF;
777 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
778 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
779 	if (err)
780 		goto free_mem;
781 
782 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
783 		    (u64)pfvf->nix_qints_ctx->iova);
784 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
785 
786 	/* Enable LMTST for this NIX LF */
787 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
788 
789 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
790 	 * If the requester has sent 'RVU_DEFAULT_PF_FUNC', use this NIX LF's
791 	 * own PCIFUNC.
792 	 */
793 	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
794 		cfg = pcifunc;
795 	else
796 		cfg = req->npa_func;
797 
798 	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
799 		cfg |= (u64)pcifunc << 16;
800 	else
801 		cfg |= (u64)req->sso_func << 16;
802 
803 	cfg |= (u64)req->xqe_sz << 33;
804 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
805 
806 	/* Config Rx pkt length, csum checks and apad enable/disable */
807 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
808 
809 	err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
810 	if (err)
811 		goto free_mem;
812 
813 	goto exit;
814 
815 free_mem:
816 	nix_ctx_free(rvu, pfvf);
817 	rc = -ENOMEM;
818 
819 exit:
820 	/* Set macaddr of this PF/VF */
821 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
822 
823 	/* set SQB size info */
824 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
825 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
826 	rsp->rx_chan_base = pfvf->rx_chan_base;
827 	rsp->tx_chan_base = pfvf->tx_chan_base;
828 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
829 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
830 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
831 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
832 	/* Get HW supported stat count */
833 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
834 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
835 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
836 	/* Get count of CQ IRQs and error IRQs supported per LF */
837 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
838 	rsp->qints = ((cfg >> 12) & 0xFFF);
839 	rsp->cints = ((cfg >> 24) & 0xFFF);
840 	return rc;
841 }
842 
843 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
844 				 struct msg_rsp *rsp)
845 {
846 	struct rvu_hwinfo *hw = rvu->hw;
847 	u16 pcifunc = req->hdr.pcifunc;
848 	struct rvu_block *block;
849 	int blkaddr, nixlf, err;
850 	struct rvu_pfvf *pfvf;
851 
852 	pfvf = rvu_get_pfvf(rvu, pcifunc);
853 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
854 	if (!pfvf->nixlf || blkaddr < 0)
855 		return NIX_AF_ERR_AF_LF_INVALID;
856 
857 	block = &hw->block[blkaddr];
858 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
859 	if (nixlf < 0)
860 		return NIX_AF_ERR_AF_LF_INVALID;
861 
862 	nix_interface_deinit(rvu, pcifunc, nixlf);
863 
864 	/* Reset this NIX LF */
865 	err = rvu_lf_reset(rvu, block, nixlf);
866 	if (err) {
867 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
868 			block->addr - BLKADDR_NIX0, nixlf);
869 		return NIX_AF_ERR_LF_RESET;
870 	}
871 
872 	nix_ctx_free(rvu, pfvf);
873 
874 	return 0;
875 }
876 
877 /* Disable shaping of pkts by a scheduler queue
878  * at a given scheduler level.
879  */
880 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
881 				 int lvl, int schq)
882 {
883 	u64  cir_reg = 0, pir_reg = 0;
884 	u64  cfg;
885 
886 	switch (lvl) {
887 	case NIX_TXSCH_LVL_TL1:
888 		cir_reg = NIX_AF_TL1X_CIR(schq);
889 		pir_reg = 0; /* PIR not available at TL1 */
890 		break;
891 	case NIX_TXSCH_LVL_TL2:
892 		cir_reg = NIX_AF_TL2X_CIR(schq);
893 		pir_reg = NIX_AF_TL2X_PIR(schq);
894 		break;
895 	case NIX_TXSCH_LVL_TL3:
896 		cir_reg = NIX_AF_TL3X_CIR(schq);
897 		pir_reg = NIX_AF_TL3X_PIR(schq);
898 		break;
899 	case NIX_TXSCH_LVL_TL4:
900 		cir_reg = NIX_AF_TL4X_CIR(schq);
901 		pir_reg = NIX_AF_TL4X_PIR(schq);
902 		break;
903 	}
904 
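	/* Shaping is disabled by clearing bit 0 (the enable bit) of the
	 * CIR and, where present, PIR registers.
	 */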
905 	if (!cir_reg)
906 		return;
907 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
908 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
909 
910 	if (!pir_reg)
911 		return;
912 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
913 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
914 }
915 
916 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
917 				 int lvl, int schq)
918 {
919 	struct rvu_hwinfo *hw = rvu->hw;
920 	int link;
921 
922 	/* Reset TL4's SDP link config */
923 	if (lvl == NIX_TXSCH_LVL_TL4)
924 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
925 
926 	if (lvl != NIX_TXSCH_LVL_TL2)
927 		return;
928 
929 	/* Reset TL2's CGX or LBK link config */
930 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
931 		rvu_write64(rvu, blkaddr,
932 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
933 }
934 
935 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
936 				     struct nix_txsch_alloc_req *req,
937 				     struct nix_txsch_alloc_rsp *rsp)
938 {
939 	u16 pcifunc = req->hdr.pcifunc;
940 	struct nix_txsch *txsch;
941 	int lvl, idx, req_schq;
942 	struct rvu_pfvf *pfvf;
943 	struct nix_hw *nix_hw;
944 	int blkaddr, rc = 0;
945 	u16 schq;
946 
947 	pfvf = rvu_get_pfvf(rvu, pcifunc);
948 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
949 	if (!pfvf->nixlf || blkaddr < 0)
950 		return NIX_AF_ERR_AF_LF_INVALID;
951 
952 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
953 	if (!nix_hw)
954 		return -EINVAL;
955 
956 	mutex_lock(&rvu->rsrc_lock);
957 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
958 		txsch = &nix_hw->txsch[lvl];
959 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
960 
961 		/* There are only 28 TL1s */
962 		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
963 			goto err;
964 
965 		/* Check if request is valid */
966 		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
967 			goto err;
968 
969 		/* If contiguous queues are needed, check for availability */
970 		if (req->schq_contig[lvl] &&
971 		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
972 			goto err;
973 
974 		/* Check if full request can be accommodated */
975 		if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
976 			goto err;
977 	}
978 
979 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
980 		txsch = &nix_hw->txsch[lvl];
981 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
982 		rsp->schq[lvl] = req->schq[lvl];
983 
984 		schq = 0;
985 		/* Alloc contiguous queues first */
986 		if (req->schq_contig[lvl]) {
987 			schq = rvu_alloc_rsrc_contig(&txsch->schq,
988 						     req->schq_contig[lvl]);
989 
990 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
991 				txsch->pfvf_map[schq] = pcifunc;
992 				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
993 				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
994 				rsp->schq_contig_list[lvl][idx] = schq;
995 				schq++;
996 			}
997 		}
998 
999 		/* Alloc non-contiguous queues */
1000 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1001 			schq = rvu_alloc_rsrc(&txsch->schq);
1002 			txsch->pfvf_map[schq] = pcifunc;
1003 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1004 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1005 			rsp->schq_list[lvl][idx] = schq;
1006 		}
1007 	}
1008 	goto exit;
1009 err:
1010 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1011 exit:
1012 	mutex_unlock(&rvu->rsrc_lock);
1013 	return rc;
1014 }
1015 
1016 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1017 {
1018 	int blkaddr, nixlf, lvl, schq, err;
1019 	struct rvu_hwinfo *hw = rvu->hw;
1020 	struct nix_txsch *txsch;
1021 	struct nix_hw *nix_hw;
1022 	u64 cfg;
1023 
1024 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1025 	if (blkaddr < 0)
1026 		return NIX_AF_ERR_AF_LF_INVALID;
1027 
1028 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1029 	if (!nix_hw)
1030 		return -EINVAL;
1031 
1032 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1033 	if (nixlf < 0)
1034 		return NIX_AF_ERR_AF_LF_INVALID;
1035 
1036 	/* Disable TL2/3 queue links before SMQ flush */
1037 	mutex_lock(&rvu->rsrc_lock);
1038 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1039 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1040 			continue;
1041 
1042 		txsch = &nix_hw->txsch[lvl];
1043 		for (schq = 0; schq < txsch->schq.max; schq++) {
1044 			if (txsch->pfvf_map[schq] != pcifunc)
1045 				continue;
1046 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1047 		}
1048 	}
1049 
1050 	/* Flush SMQs */
1051 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1052 	for (schq = 0; schq < txsch->schq.max; schq++) {
1053 		if (txsch->pfvf_map[schq] != pcifunc)
1054 			continue;
1055 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1056 		/* Do SMQ flush and set enqueue xoff */
1057 		cfg |= BIT_ULL(50) | BIT_ULL(49);
1058 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1059 
1060 		/* Wait for flush to complete */
1061 		err = rvu_poll_reg(rvu, blkaddr,
1062 				   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1063 		if (err) {
1064 			dev_err(rvu->dev,
1065 				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1066 		}
1067 	}
1068 
1069 	/* Now return the scheduler queues to the free pool */
1070 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1071 		txsch = &nix_hw->txsch[lvl];
1072 		for (schq = 0; schq < txsch->schq.max; schq++) {
1073 			if (txsch->pfvf_map[schq] != pcifunc)
1074 				continue;
1075 			rvu_free_rsrc(&txsch->schq, schq);
1076 			txsch->pfvf_map[schq] = 0;
1077 		}
1078 	}
1079 	mutex_unlock(&rvu->rsrc_lock);
1080 
1081 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1082 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1083 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1084 	if (err)
1085 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1086 
1087 	return 0;
1088 }
1089 
1090 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1091 				    struct nix_txsch_free_req *req,
1092 				    struct msg_rsp *rsp)
1093 {
1094 	return nix_txschq_free(rvu, req->hdr.pcifunc);
1095 }
1096 
1097 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1098 				   int lvl, u64 reg, u64 regval)
1099 {
1100 	u64 regbase = reg & 0xFFFF;
1101 	u16 schq, parent;
1102 
1103 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1104 		return false;
1105 
1106 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1107 	/* Check if this schq belongs to this PF/VF or not */
1108 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1109 		return false;
1110 
1111 	parent = (regval >> 16) & 0x1FF;
1112 	/* Validate MDQ's TL4 parent */
1113 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1114 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1115 		return false;
1116 
1117 	/* Validate TL4's TL3 parent */
1118 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1119 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1120 		return false;
1121 
1122 	/* Validate TL3's TL2 parent */
1123 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1124 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1125 		return false;
1126 
1127 	/* Validate TL2's TL1 parent */
1128 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1129 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1130 		return false;
1131 
1132 	return true;
1133 }
1134 
1135 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1136 				    struct nix_txschq_config *req,
1137 				    struct msg_rsp *rsp)
1138 {
1139 	struct rvu_hwinfo *hw = rvu->hw;
1140 	u16 pcifunc = req->hdr.pcifunc;
1141 	u64 reg, regval, schq_regbase;
1142 	struct nix_txsch *txsch;
1143 	struct nix_hw *nix_hw;
1144 	int blkaddr, idx, err;
1145 	int nixlf;
1146 
1147 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1148 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1149 		return NIX_AF_INVAL_TXSCHQ_CFG;
1150 
1151 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1152 	if (blkaddr < 0)
1153 		return NIX_AF_ERR_AF_LF_INVALID;
1154 
1155 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1156 	if (!nix_hw)
1157 		return -EINVAL;
1158 
1159 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1160 	if (nixlf < 0)
1161 		return NIX_AF_ERR_AF_LF_INVALID;
1162 
1163 	txsch = &nix_hw->txsch[req->lvl];
1164 	for (idx = 0; idx < req->num_regs; idx++) {
1165 		reg = req->reg[idx];
1166 		regval = req->regval[idx];
1167 		schq_regbase = reg & 0xFFFF;
1168 
1169 		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1170 					    txsch->lvl, reg, regval))
1171 			return NIX_AF_INVAL_TXSCHQ_CFG;
1172 
1173 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1174 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1175 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1176 					   pcifunc, 0);
1177 			regval &= ~(0x7FULL << 24);
1178 			regval |= ((u64)nixlf << 24);
1179 		}
1180 
1181 		rvu_write64(rvu, blkaddr, reg, regval);
1182 
1183 		/* Check for SMQ flush, if so, poll for its completion */
1184 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1185 		    (regval & BIT_ULL(49))) {
1186 			err = rvu_poll_reg(rvu, blkaddr,
1187 					   reg, BIT_ULL(49), true);
1188 			if (err)
1189 				return NIX_AF_SMQ_FLUSH_FAILED;
1190 		}
1191 	}
1192 	return 0;
1193 }
1194 
1195 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1196 			   struct nix_vtag_config *req)
1197 {
1198 	u64 regval = 0;
1199 
1200 #define NIX_VTAGTYPE_MAX 0x8ull
1201 #define NIX_VTAGSIZE_MASK 0x7ull
1202 #define NIX_VTAGSTRIP_CAP_MASK 0x30ull
1203 
1204 	if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
1205 	    req->vtag_size > VTAGSIZE_T8)
1206 		return -EINVAL;
1207 
1208 	regval = rvu_read64(rvu, blkaddr,
1209 			    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
1210 
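	/* BIT(4) enables VTAG stripping and BIT(5) enables capture for this
	 * vtag type; capture is only configured here together with strip.
	 */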
1211 	if (req->rx.strip_vtag && req->rx.capture_vtag)
1212 		regval |= BIT_ULL(4) | BIT_ULL(5);
1213 	else if (req->rx.strip_vtag)
1214 		regval |= BIT_ULL(4);
1215 	else
1216 		regval &= ~(BIT_ULL(4) | BIT_ULL(5));
1217 
1218 	regval &= ~NIX_VTAGSIZE_MASK;
1219 	regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
1220 
1221 	rvu_write64(rvu, blkaddr,
1222 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1223 	return 0;
1224 }
1225 
1226 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1227 				  struct nix_vtag_config *req,
1228 				  struct msg_rsp *rsp)
1229 {
1230 	struct rvu_hwinfo *hw = rvu->hw;
1231 	u16 pcifunc = req->hdr.pcifunc;
1232 	int blkaddr, nixlf, err;
1233 
1234 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1235 	if (blkaddr < 0)
1236 		return NIX_AF_ERR_AF_LF_INVALID;
1237 
1238 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1239 	if (nixlf < 0)
1240 		return NIX_AF_ERR_AF_LF_INVALID;
1241 
1242 	if (req->cfg_type) {
1243 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1244 		if (err)
1245 			return NIX_AF_ERR_PARAM;
1246 	} else {
1247 		/* TODO: handle tx vtag configuration */
1248 		return 0;
1249 	}
1250 
1251 	return 0;
1252 }
1253 
1254 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1255 			 u16 pcifunc, int next, bool eol)
1256 {
1257 	struct nix_aq_enq_req aq_req;
1258 	int err;
1259 
1260 	aq_req.hdr.pcifunc = pcifunc;
1261 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1262 	aq_req.op = op;
1263 	aq_req.qidx = mce;
1264 
1265 	/* Forward bcast pkts to RQ0, RSS not needed */
1266 	aq_req.mce.op = 0;
1267 	aq_req.mce.index = 0;
1268 	aq_req.mce.eol = eol;
1269 	aq_req.mce.pf_func = pcifunc;
1270 	aq_req.mce.next = next;
1271 
1272 	/* All fields valid */
1273 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1274 
1275 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1276 	if (err) {
1277 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1278 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1279 		return err;
1280 	}
1281 	return 0;
1282 }
1283 
1284 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1285 			       u16 pcifunc, int idx, bool add)
1286 {
1287 	struct mce *mce, *tail = NULL;
1288 	bool delete = false;
1289 
1290 	/* Scan through the current list */
1291 	hlist_for_each_entry(mce, &mce_list->head, node) {
1292 		/* If already exists, then delete */
1293 		if (mce->pcifunc == pcifunc && !add) {
1294 			delete = true;
1295 			break;
1296 		}
1297 		tail = mce;
1298 	}
1299 
1300 	if (delete) {
1301 		hlist_del(&mce->node);
1302 		kfree(mce);
1303 		mce_list->count--;
1304 		return 0;
1305 	}
1306 
1307 	if (!add)
1308 		return 0;
1309 
1310 	/* Add a new one to the list, at the tail */
1311 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1312 	if (!mce)
1313 		return -ENOMEM;
1314 	mce->idx = idx;
1315 	mce->pcifunc = pcifunc;
1316 	if (!tail)
1317 		hlist_add_head(&mce->node, &mce_list->head);
1318 	else
1319 		hlist_add_behind(&mce->node, &tail->node);
1320 	mce_list->count++;
1321 	return 0;
1322 }
1323 
1324 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1325 {
1326 	int err = 0, idx, next_idx, count;
1327 	struct nix_mce_list *mce_list;
1328 	struct mce *mce, *next_mce;
1329 	struct nix_mcast *mcast;
1330 	struct nix_hw *nix_hw;
1331 	struct rvu_pfvf *pfvf;
1332 	int blkaddr;
1333 
1334 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1335 	if (blkaddr < 0)
1336 		return 0;
1337 
1338 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1339 	if (!nix_hw)
1340 		return 0;
1341 
1342 	mcast = &nix_hw->mcast;
1343 
1344 	/* Get this PF/VF func's MCE index */
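	/* Each PF owns a contiguous block of MCEs: entry 0 for the PF itself
	 * followed by one entry per VF (see nix_setup_bcast_tables()).
	 */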
1345 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1346 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1347 
1348 	mce_list = &pfvf->bcast_mce_list;
1349 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1350 		dev_err(rvu->dev,
1351 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1352 			__func__, idx, mce_list->max,
1353 			pcifunc >> RVU_PFVF_PF_SHIFT);
1354 		return -EINVAL;
1355 	}
1356 
1357 	mutex_lock(&mcast->mce_lock);
1358 
1359 	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1360 	if (err)
1361 		goto end;
1362 
1363 	/* Disable MCAM entry in NPC */
1364 
1365 	if (!mce_list->count)
1366 		goto end;
1367 	count = mce_list->count;
1368 
1369 	/* Dump the updated list to HW */
1370 	hlist_for_each_entry(mce, &mce_list->head, node) {
1371 		next_idx = 0;
1372 		count--;
1373 		if (count) {
1374 			next_mce = hlist_entry(mce->node.next,
1375 					       struct mce, node);
1376 			next_idx = next_mce->idx;
1377 		}
1378 		/* EOL should be set in last MCE */
1379 		err = nix_setup_mce(rvu, mce->idx,
1380 				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1381 				    next_idx, count ? false : true);
1382 		if (err)
1383 			goto end;
1384 	}
1385 
1386 end:
1387 	mutex_unlock(&mcast->mce_lock);
1388 	return err;
1389 }
1390 
1391 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1392 {
1393 	struct nix_mcast *mcast = &nix_hw->mcast;
1394 	int err, pf, numvfs, idx;
1395 	struct rvu_pfvf *pfvf;
1396 	u16 pcifunc;
1397 	u64 cfg;
1398 
1399 	/* Skip PF0 (i.e AF) */
1400 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1401 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1402 		/* If PF is not enabled, nothing to do */
1403 		if (!((cfg >> 20) & 0x01))
1404 			continue;
1405 		/* Get numVFs attached to this PF */
1406 		numvfs = (cfg >> 12) & 0xFF;
1407 
1408 		pfvf = &rvu->pf[pf];
1409 		/* Save the start MCE */
1410 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1411 
1412 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1413 
1414 		for (idx = 0; idx < (numvfs + 1); idx++) {
1415 			/* idx-0 is for PF, followed by VFs */
1416 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1417 			pcifunc |= idx;
1418 			/* Add dummy entries now, so that we don't have to check
1419 			 * whether AQ_OP should be INIT/WRITE later on.
1420 			 * They will be updated when a NIXLF is attached to or
1421 			 * detached from these PF/VFs.
1422 			 */
1423 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1424 					    NIX_AQ_INSTOP_INIT,
1425 					    pcifunc, 0, true);
1426 			if (err)
1427 				return err;
1428 		}
1429 	}
1430 	return 0;
1431 }
1432 
1433 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1434 {
1435 	struct nix_mcast *mcast = &nix_hw->mcast;
1436 	struct rvu_hwinfo *hw = rvu->hw;
1437 	int err, size;
1438 
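	/* NIX_AF_CONST3[19:16] holds log2 of the MCE context entry size */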
1439 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1440 	size = (1ULL << size);
1441 
1442 	/* Alloc memory for multicast/mirror replication entries */
1443 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1444 			 (256UL << MC_TBL_SIZE), size);
1445 	if (err)
1446 		return -ENOMEM;
1447 
1448 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1449 		    (u64)mcast->mce_ctx->iova);
1450 
1451 	/* Set max list length equal to max no of VFs per PF + PF itself */
1452 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1453 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1454 
1455 	/* Alloc memory for multicast replication buffers */
1456 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1457 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1458 			 (8UL << MC_BUF_CNT), size);
1459 	if (err)
1460 		return -ENOMEM;
1461 
1462 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1463 		    (u64)mcast->mcast_buf->iova);
1464 
1465 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
1466 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1467 
1468 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1469 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
1470 		    BIT_ULL(20) | MC_BUF_CNT);
1471 
1472 	mutex_init(&mcast->mce_lock);
1473 
1474 	return nix_setup_bcast_tables(rvu, nix_hw);
1475 }
1476 
1477 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1478 {
1479 	struct nix_txsch *txsch;
1480 	u64 cfg, reg;
1481 	int err, lvl;
1482 
1483 	/* Get scheduler queue count of each type and alloc
1484 	 * bitmap for each for alloc/free/attach operations.
1485 	 */
1486 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1487 		txsch = &nix_hw->txsch[lvl];
1488 		txsch->lvl = lvl;
1489 		switch (lvl) {
1490 		case NIX_TXSCH_LVL_SMQ:
1491 			reg = NIX_AF_MDQ_CONST;
1492 			break;
1493 		case NIX_TXSCH_LVL_TL4:
1494 			reg = NIX_AF_TL4_CONST;
1495 			break;
1496 		case NIX_TXSCH_LVL_TL3:
1497 			reg = NIX_AF_TL3_CONST;
1498 			break;
1499 		case NIX_TXSCH_LVL_TL2:
1500 			reg = NIX_AF_TL2_CONST;
1501 			break;
1502 		case NIX_TXSCH_LVL_TL1:
1503 			reg = NIX_AF_TL1_CONST;
1504 			break;
1505 		}
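		/* The low 16 bits of each level's *_CONST register give the
		 * number of scheduler queues available at that level.
		 */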
1506 		cfg = rvu_read64(rvu, blkaddr, reg);
1507 		txsch->schq.max = cfg & 0xFFFF;
1508 		err = rvu_alloc_bitmap(&txsch->schq);
1509 		if (err)
1510 			return err;
1511 
1512 		/* Allocate memory for scheduler queues to
1513 		 * PF/VF pcifunc mapping info.
1514 		 */
1515 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1516 					       sizeof(u16), GFP_KERNEL);
1517 		if (!txsch->pfvf_map)
1518 			return -ENOMEM;
1519 	}
1520 	return 0;
1521 }
1522 
1523 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
1524 				   struct msg_rsp *rsp)
1525 {
1526 	struct rvu_hwinfo *hw = rvu->hw;
1527 	u16 pcifunc = req->hdr.pcifunc;
1528 	int i, nixlf, blkaddr;
1529 	u64 stats;
1530 
1531 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1532 	if (blkaddr < 0)
1533 		return NIX_AF_ERR_AF_LF_INVALID;
1534 
1535 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1536 	if (nixlf < 0)
1537 		return NIX_AF_ERR_AF_LF_INVALID;
1538 
1539 	/* Get stats count supported by HW */
1540 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1541 
1542 	/* Reset tx stats */
1543 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1544 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1545 
1546 	/* Reset rx stats */
1547 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1548 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1549 
1550 	return 0;
1551 }
1552 
1553 /* Returns the ALG index to be set into NPC_RX_ACTION */
1554 static int get_flowkey_alg_idx(u32 flow_cfg)
1555 {
1556 	u32 ip_cfg;
1557 
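	/* Ignore the port/channel bit when picking an algorithm; if no L3/L4
	 * combination below matches, fall back to FLOW_KEY_ALG_PORT.
	 */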
1558 	flow_cfg &= ~FLOW_KEY_TYPE_PORT;
1559 	ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
1560 	if (flow_cfg == ip_cfg)
1561 		return FLOW_KEY_ALG_IP;
1562 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
1563 		return FLOW_KEY_ALG_TCP;
1564 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
1565 		return FLOW_KEY_ALG_UDP;
1566 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
1567 		return FLOW_KEY_ALG_SCTP;
1568 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
1569 		return FLOW_KEY_ALG_TCP_UDP;
1570 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
1571 		return FLOW_KEY_ALG_TCP_SCTP;
1572 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
1573 		return FLOW_KEY_ALG_UDP_SCTP;
1574 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
1575 			      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
1576 		return FLOW_KEY_ALG_TCP_UDP_SCTP;
1577 
1578 	return FLOW_KEY_ALG_PORT;
1579 }
1580 
1581 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
1582 					 struct nix_rss_flowkey_cfg *req,
1583 					 struct msg_rsp *rsp)
1584 {
1585 	struct rvu_hwinfo *hw = rvu->hw;
1586 	u16 pcifunc = req->hdr.pcifunc;
1587 	int alg_idx, nixlf, blkaddr;
1588 
1589 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1590 	if (blkaddr < 0)
1591 		return NIX_AF_ERR_AF_LF_INVALID;
1592 
1593 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1594 	if (nixlf < 0)
1595 		return NIX_AF_ERR_AF_LF_INVALID;
1596 
1597 	alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
1598 
1599 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
1600 				       alg_idx, req->mcam_index);
1601 	return 0;
1602 }
1603 
1604 static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
1605 {
1606 	struct nix_rx_flowkey_alg *field = NULL;
1607 	int idx, key_type;
1608 
1609 	if (!alg)
1610 		return;
1611 
1612 	/* FIELD0: IPv4
1613 	 * FIELD1: IPv6
1614 	 * FIELD2: TCP/UDP/SCTP/ALL
1615 	 * FIELD3: Unused
1616 	 * FIELD4: Unused
1617 	 *
1618 	 * Each of the 32 possible flow key algorithm definitions should
1619 	 * fall into above incremental config (except ALG0). Otherwise a
1620 	 * single NPC MCAM entry is not sufficient for supporting RSS.
1621 	 *
1622 	 * If a different definition or combination is needed then the NPC
1623 	 * MCAM has to be programmed to filter such pkts and its action should
1624 	 * point to this definition to calculate the flowtag or hash.
1625 	 */
1626 	for (idx = 0; idx < 32; idx++) {
1627 		key_type = flow_cfg & BIT_ULL(idx);
1628 		if (!key_type)
1629 			continue;
1630 		switch (key_type) {
1631 		case FLOW_KEY_TYPE_PORT:
1632 			field = &alg[0];
1633 			field->sel_chan = true;
1634 			/* This should be set to 1 when SEL_CHAN is set */
1635 			field->bytesm1 = 1;
1636 			break;
1637 		case FLOW_KEY_TYPE_IPV4:
1638 			field = &alg[0];
1639 			field->lid = NPC_LID_LC;
1640 			field->ltype_match = NPC_LT_LC_IP;
1641 			field->hdr_offset = 12; /* SIP offset */
1642 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
1643 			field->ltype_mask = 0xF; /* Match only IPv4 */
1644 			break;
1645 		case FLOW_KEY_TYPE_IPV6:
1646 			field = &alg[1];
1647 			field->lid = NPC_LID_LC;
1648 			field->ltype_match = NPC_LT_LC_IP6;
1649 			field->hdr_offset = 8; /* SIP offset */
1650 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
1651 			field->ltype_mask = 0xF; /* Match only IPv6 */
1652 			break;
1653 		case FLOW_KEY_TYPE_TCP:
1654 		case FLOW_KEY_TYPE_UDP:
1655 		case FLOW_KEY_TYPE_SCTP:
1656 			field = &alg[2];
1657 			field->lid = NPC_LID_LD;
1658 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
1659 			if (key_type == FLOW_KEY_TYPE_TCP)
1660 				field->ltype_match |= NPC_LT_LD_TCP;
1661 			else if (key_type == FLOW_KEY_TYPE_UDP)
1662 				field->ltype_match |= NPC_LT_LD_UDP;
1663 			else if (key_type == FLOW_KEY_TYPE_SCTP)
1664 				field->ltype_match |= NPC_LT_LD_SCTP;
1665 			field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
1666 			field->ltype_mask = ~field->ltype_match;
1667 			break;
1668 		}
1669 		if (field)
1670 			field->ena = 1;
1671 		field = NULL;
1672 	}
1673 }
1674 
1675 static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
1676 {
1677 #define FIELDS_PER_ALG	5
1678 	u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
1679 	u32 flowkey_cfg, minkey_cfg;
1680 	int alg, fid;
1681 
1682 	memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
1683 
1684 	/* Only incoming channel number */
1685 	flowkey_cfg = FLOW_KEY_TYPE_PORT;
1686 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
1687 
1688 	/* For an incoming pkt, if none of the fields match then the flowkey
1689 	 * will be zero and hence the generated tag will also be zero.
1690 	 * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
1691 	 * be used to queue the packet.
1692 	 */
1693 
1694 	/* IPv4/IPv6 SIP/DIPs */
1695 	flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
1696 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
1697 
1698 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1699 	minkey_cfg = flowkey_cfg;
1700 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
1701 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
1702 
1703 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1704 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
1705 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
1706 
1707 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1708 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
1709 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
1710 
1711 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
1712 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
1713 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
1714 
1715 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1716 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
1717 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
1718 
1719 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1720 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
1721 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
1722 
1723 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1724 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
1725 		      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
1726 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
1727 			   flowkey_cfg);
1728 
1729 	for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
1730 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
1731 			rvu_write64(rvu, blkaddr,
1732 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
1733 				    field[alg][fid]);
1734 	}
1735 }
1736 
1737 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
1738 				      struct nix_set_mac_addr *req,
1739 				      struct msg_rsp *rsp)
1740 {
1741 	struct rvu_hwinfo *hw = rvu->hw;
1742 	u16 pcifunc = req->hdr.pcifunc;
1743 	struct rvu_pfvf *pfvf;
1744 	int blkaddr, nixlf;
1745 
1746 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1747 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1748 	if (!pfvf->nixlf || blkaddr < 0)
1749 		return NIX_AF_ERR_AF_LF_INVALID;
1750 
1751 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1752 	if (nixlf < 0)
1753 		return NIX_AF_ERR_AF_LF_INVALID;
1754 
1755 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1756 
1757 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
1758 				    pfvf->rx_chan_base, req->mac_addr);
1759 	return 0;
1760 }
1761 
1762 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
1763 				     struct msg_rsp *rsp)
1764 {
1765 	bool allmulti = false, disable_promisc = false;
1766 	struct rvu_hwinfo *hw = rvu->hw;
1767 	u16 pcifunc = req->hdr.pcifunc;
1768 	struct rvu_pfvf *pfvf;
1769 	int blkaddr, nixlf;
1770 
1771 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1772 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1773 	if (!pfvf->nixlf || blkaddr < 0)
1774 		return NIX_AF_ERR_AF_LF_INVALID;
1775 
1776 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1777 	if (nixlf < 0)
1778 		return NIX_AF_ERR_AF_LF_INVALID;
1779 
1780 	if (req->mode & NIX_RX_MODE_PROMISC)
1781 		allmulti = false;
1782 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
1783 		allmulti = true;
1784 	else
1785 		disable_promisc = true;
1786 
1787 	if (disable_promisc)
1788 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
1789 	else
1790 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
1791 					      pfvf->rx_chan_base, allmulti);
1792 	return 0;
1793 }
1794 
1795 static void nix_find_link_frs(struct rvu *rvu,
1796 			      struct nix_frs_cfg *req, u16 pcifunc)
1797 {
1798 	int pf = rvu_get_pf(pcifunc);
1799 	struct rvu_pfvf *pfvf;
1800 	int maxlen, minlen;
1801 	int numvfs, hwvf;
1802 	int vf;
1803 
1804 	/* Update with requester's min/max lengths */
1805 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1806 	pfvf->maxlen = req->maxlen;
1807 	if (req->update_minlen)
1808 		pfvf->minlen = req->minlen;
1809 
1810 	maxlen = req->maxlen;
1811 	minlen = req->update_minlen ? req->minlen : 0;
1812 
1813 	/* Get this PF's numVFs and starting hwvf */
1814 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
1815 
1816 	/* Compare each VF's max/minlen against the running max/min */
1817 	for (vf = 0; vf < numvfs; vf++) {
1818 		pfvf = &rvu->hwvf[hwvf + vf];
1819 		if (pfvf->maxlen > maxlen)
1820 			maxlen = pfvf->maxlen;
1821 		if (req->update_minlen &&
1822 		    pfvf->minlen && pfvf->minlen < minlen)
1823 			minlen = pfvf->minlen;
1824 	}
1825 
1826 	/* Compare requested max/minlen with PF's max/minlen */
1827 	pfvf = &rvu->pf[pf];
1828 	if (pfvf->maxlen > maxlen)
1829 		maxlen = pfvf->maxlen;
1830 	if (req->update_minlen &&
1831 	    pfvf->minlen && pfvf->minlen < minlen)
1832 		minlen = pfvf->minlen;
1833 
1834 	/* Update the request with the max/min of the PF and its VFs */
1835 	req->maxlen = maxlen;
1836 	if (req->update_minlen)
1837 		req->minlen = minlen;
1838 }
1839 
1840 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
1841 				    struct msg_rsp *rsp)
1842 {
1843 	struct rvu_hwinfo *hw = rvu->hw;
1844 	u16 pcifunc = req->hdr.pcifunc;
1845 	int pf = rvu_get_pf(pcifunc);
1846 	int blkaddr, schq, link = -1;
1847 	struct nix_txsch *txsch;
1848 	u64 cfg, lmac_fifo_len;
1849 	struct nix_hw *nix_hw;
1850 	u8 cgx = 0, lmac = 0;
1851 
1852 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1853 	if (blkaddr < 0)
1854 		return NIX_AF_ERR_AF_LF_INVALID;
1855 
1856 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1857 	if (!nix_hw)
1858 		return -EINVAL;
1859 
1860 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
1861 		return NIX_AF_ERR_FRS_INVALID;
1862 
1863 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
1864 		return NIX_AF_ERR_FRS_INVALID;
1865 
1866 	/* Check if the requester wants to update SMQs */
1867 	if (!req->update_smq)
1868 		goto rx_frscfg;
1869 
1870 	/* Update min/maxlen in each SMQ attached to this PF/VF */
1871 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1872 	mutex_lock(&rvu->rsrc_lock);
1873 	for (schq = 0; schq < txsch->schq.max; schq++) {
1874 		if (txsch->pfvf_map[schq] != pcifunc)
1875 			continue;
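		/* NIX_AF_SMQ()_CFG holds MINLEN in bits 6:0 and MAXLEN in
		 * bits 23:8 (per the masks below); update only the requested
		 * fields and preserve the rest.
		 */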
1876 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1877 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
1878 		if (req->update_minlen)
1879 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
1880 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1881 	}
1882 	mutex_unlock(&rvu->rsrc_lock);
1883 
1884 rx_frscfg:
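	/* RX links are numbered with the CGX LMAC links first, followed by
	 * the LBK link and then the SDP link (see the link computations
	 * below).
	 */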
1885 	/* Check if config is for SDP link */
1886 	if (req->sdp_link) {
1887 		if (!hw->sdp_links)
1888 			return NIX_AF_ERR_RX_LINK_INVALID;
1889 		link = hw->cgx_links + hw->lbk_links;
1890 		goto linkcfg;
1891 	}
1892 
1893 	/* Check if the request is from a CGX-mapped RVU PF */
1894 	if (is_pf_cgxmapped(rvu, pf)) {
1895 		/* Get CGX and LMAC to which this PF is mapped and find link */
1896 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
1897 		link = (cgx * hw->lmac_per_cgx) + lmac;
1898 	} else if (pf == 0) {
1899 		/* For VFs of PF0, ingress is LBK port, so config LBK link */
1900 		link = hw->cgx_links;
1901 	}
1902 
1903 	if (link < 0)
1904 		return NIX_AF_ERR_RX_LINK_INVALID;
1905 
1906 	nix_find_link_frs(rvu, req, pcifunc);
1907 
1908 linkcfg:
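	/* NIX_AF_RX_LINK()_CFG holds MINLEN in bits 15:0 and MAXLEN in
	 * bits 31:16 (per the masks below).
	 */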
1909 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
1910 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
1911 	if (req->update_minlen)
1912 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
1913 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
1914 
1915 	if (req->sdp_link || pf == 0)
1916 		return 0;
1917 
1918 	/* Update transmit credits for CGX links */
1919 	lmac_fifo_len =
1920 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
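	/* Credits are expressed in 16-byte units and occupy a 20-bit field
	 * starting at bit 12 of NIX_AF_TX_LINK()_NORM/EXPR_CREDIT (per the
	 * mask and shift below).
	 */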
1921 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
1922 	cfg &= ~(0xFFFFFULL << 12);
1923 	cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
1924 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
1925 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
1926 
1927 	return 0;
1928 }
1929 
1930 static void nix_link_config(struct rvu *rvu, int blkaddr)
1931 {
1932 	struct rvu_hwinfo *hw = rvu->hw;
1933 	int cgx, lmac_cnt, slink, link;
1934 	u64 tx_credits;
1935 
1936 	/* Set default min/max packet lengths allowed on NIX Rx links.
1937 	 *
1938 	 * With the HW reset minlen of 60 bytes, HW would treat ARP pkts
1939 	 * as undersized and report them to SW as error pkts, hence
1940 	 * minlen is set to 40 bytes.
1941 	 */
1942 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
1943 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
1944 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
1945 	}
1946 
1947 	if (hw->sdp_links) {
1948 		link = hw->cgx_links + hw->lbk_links;
1949 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
1950 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
1951 	}
1952 
1953 	/* Set credits for Tx links assuming max packet length allowed.
1954 	 * This will be reconfigured based on MTU set for PF/VF.
1955 	 */
1956 	for (cgx = 0; cgx < hw->cgx; cgx++) {
1957 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
1958 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
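		/* For example, if CGX_FIFO_LEN is 64 KB and this CGX has four
		 * LMACs, each LMAC gets 16384 bytes of FIFO; with a 9212 byte
		 * NIC_HW_MAX_FRS that works out to (16384 - 9212) / 16 = 448
		 * credits per link.
		 */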
1959 		/* Enable credits and set credit pkt count to max allowed */
1960 		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
1961 		slink = cgx * hw->lmac_per_cgx;
1962 		for (link = slink; link < (slink + lmac_cnt); link++) {
1963 			rvu_write64(rvu, blkaddr,
1964 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
1965 				    tx_credits);
1966 			rvu_write64(rvu, blkaddr,
1967 				    NIX_AF_TX_LINKX_EXPR_CREDIT(link),
1968 				    tx_credits);
1969 		}
1970 	}
1971 
1972 	/* Set Tx credits for LBK link */
1973 	slink = hw->cgx_links;
1974 	for (link = slink; link < (slink + hw->lbk_links); link++) {
1975 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
1976 		/* Enable credits and set credit pkt count to max allowed */
1977 		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
1978 		rvu_write64(rvu, blkaddr,
1979 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
1980 		rvu_write64(rvu, blkaddr,
1981 			    NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
1982 	}
1983 }
1984 
1985 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
1986 {
1987 	int idx, err;
1988 	u64 status;
1989 
1990 	/* Start X2P bus calibration */
1991 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1992 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
1993 	/* Wait for calibration to complete */
1994 	err = rvu_poll_reg(rvu, blkaddr,
1995 			   NIX_AF_STATUS, BIT_ULL(10), false);
1996 	if (err) {
1997 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
1998 		return err;
1999 	}
2000 
2001 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
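	/* Per the checks below, NIX_AF_STATUS reports per-CGX X2P readiness
	 * at bits 16+, LBK readiness at bit 19 and calibration errors in
	 * bits 9:0.
	 */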
2002 	/* Check if CGX devices are ready */
2003 	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
2004 		if (status & (BIT_ULL(16 + idx)))
2005 			continue;
2006 		dev_err(rvu->dev,
2007 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
2008 		err = -EBUSY;
2009 	}
2010 
2011 	/* Check if LBK is ready */
2012 	if (!(status & BIT_ULL(19))) {
2013 		dev_err(rvu->dev,
2014 			"LBK didn't respond to NIX X2P calibration\n");
2015 		err = -EBUSY;
2016 	}
2017 
2018 	/* Clear 'calibrate_x2p' bit */
2019 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2020 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
2021 	if (err || (status & 0x3FFULL))
2022 		dev_err(rvu->dev,
2023 			"NIX X2P calibration failed, status 0x%llx\n", status);
2024 	if (err)
2025 		return err;
2026 	return 0;
2027 }
2028 
2029 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2030 {
2031 	u64 cfg;
2032 	int err;
2033 
2034 	/* Set admin queue endianness */
2035 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
2036 #ifdef __BIG_ENDIAN
2037 	cfg |= BIT_ULL(1);
2038 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2039 #else
2040 	cfg &= ~BIT_ULL(1);
2041 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2042 #endif
2043 
2044 	/* Do not bypass NDC cache */
2045 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2046 	cfg &= ~0x3FFEULL;
2047 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2048 
2049 	/* Result structure can be followed by RQ/SQ/CQ context at
2050 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
2051 	 * operation type. Alloc sufficient result memory for all operations.
2052 	 */
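	/* Per-instruction result memory layout (worst case):
	 *   +0   : struct nix_aq_res_s
	 *   +128 : RQ/SQ/CQ context
	 *   +256 : context write mask
	 */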
2053 	err = rvu_aq_alloc(rvu, &block->aq,
2054 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2055 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
2056 	if (err)
2057 		return err;
2058 
2059 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2060 	rvu_write64(rvu, block->addr,
2061 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
2062 	return 0;
2063 }
2064 
2065 int rvu_nix_init(struct rvu *rvu)
2066 {
2067 	struct rvu_hwinfo *hw = rvu->hw;
2068 	struct rvu_block *block;
2069 	int blkaddr, err;
2070 	u64 cfg;
2071 
2072 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2073 	if (blkaddr < 0)
2074 		return 0;
2075 	block = &hw->block[blkaddr];
2076 
2077 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
2078 	err = nix_calibrate_x2p(rvu, blkaddr);
2079 	if (err)
2080 		return err;
2081 
2082 	/* Set num of links of each type */
2083 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
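	/* NIX_AF_CONST encodes the CGX count in bits 15:12 and the LMACs
	 * per CGX in bits 11:8 (per the shifts below); LBK and SDP
	 * contribute one link each.
	 */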
2084 	hw->cgx = (cfg >> 12) & 0xF;
2085 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
2086 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
2087 	hw->lbk_links = 1;
2088 	hw->sdp_links = 1;
2089 
2090 	/* Initialize admin queue */
2091 	err = nix_aq_init(rvu, block);
2092 	if (err)
2093 		return err;
2094 
2095 	/* Restore CINT timer delay to HW reset values */
2096 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
2097 
2098 	/* Configure segmentation offload formats */
2099 	nix_setup_lso(rvu, blkaddr);
2100 
2101 	if (blkaddr == BLKADDR_NIX0) {
2102 		hw->nix0 = devm_kzalloc(rvu->dev,
2103 					sizeof(struct nix_hw), GFP_KERNEL);
2104 		if (!hw->nix0)
2105 			return -ENOMEM;
2106 
2107 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
2108 		if (err)
2109 			return err;
2110 
2111 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
2112 		if (err)
2113 			return err;
2114 
2115 		/* Config outer L2, IP, TCP and UDP NPC layer info.
2116 		 * This helps the HW protocol checker identify headers
2117 		 * and validate lengths and checksums.
2118 		 */
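		/* Each NIX_AF_RX_DEF_O* register below is encoded as
		 * (LID << 8) | (LTYPE << 4) | 0xF, identifying the NPC
		 * layer and layer type that carry the respective header.
		 */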
2119 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
2120 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
2121 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
2122 			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
2123 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
2124 			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
2125 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
2126 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
2127 
2128 		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
2129 
2130 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
2131 		nix_link_config(rvu, blkaddr);
2132 	}
2133 	return 0;
2134 }
2135 
2136 void rvu_nix_freemem(struct rvu *rvu)
2137 {
2138 	struct rvu_hwinfo *hw = rvu->hw;
2139 	struct rvu_block *block;
2140 	struct nix_txsch *txsch;
2141 	struct nix_mcast *mcast;
2142 	struct nix_hw *nix_hw;
2143 	int blkaddr, lvl;
2144 
2145 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2146 	if (blkaddr < 0)
2147 		return;
2148 
2149 	block = &hw->block[blkaddr];
2150 	rvu_aq_free(rvu, block->aq);
2151 
2152 	if (blkaddr == BLKADDR_NIX0) {
2153 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
2154 		if (!nix_hw)
2155 			return;
2156 
2157 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2158 			txsch = &nix_hw->txsch[lvl];
2159 			kfree(txsch->schq.bmap);
2160 		}
2161 
2162 		mcast = &nix_hw->mcast;
2163 		qmem_free(rvu->dev, mcast->mce_ctx);
2164 		qmem_free(rvu->dev, mcast->mcast_buf);
2165 		mutex_destroy(&mcast->mce_lock);
2166 	}
2167 }
2168