1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
21 
22 enum mc_tbl_sz {
23 	MC_TBL_SZ_256,
24 	MC_TBL_SZ_512,
25 	MC_TBL_SZ_1K,
26 	MC_TBL_SZ_2K,
27 	MC_TBL_SZ_4K,
28 	MC_TBL_SZ_8K,
29 	MC_TBL_SZ_16K,
30 	MC_TBL_SZ_32K,
31 	MC_TBL_SZ_64K,
32 };
33 
34 enum mc_buf_cnt {
35 	MC_BUF_CNT_8,
36 	MC_BUF_CNT_16,
37 	MC_BUF_CNT_32,
38 	MC_BUF_CNT_64,
39 	MC_BUF_CNT_128,
40 	MC_BUF_CNT_256,
41 	MC_BUF_CNT_512,
42 	MC_BUF_CNT_1024,
43 	MC_BUF_CNT_2048,
44 };
45 
/* For now, only the MC resources needed for broadcast pkt
 * replication are considered, i.e. 256 HWVFs + 12 PFs.
 */
49 #define MC_TBL_SIZE	MC_TBL_SZ_512
50 #define MC_BUF_CNT	MC_BUF_CNT_128
51 
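/* Node of a per-PF broadcast MCE (multicast/mirror entry) list.
 * @idx: index of this entry in the NIX MCE context table.
 * @pcifunc: PF/VF (PF_FUNC) whose RQ receives the replicated packet.
 */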
52 struct mce {
53 	struct hlist_node	node;
54 	u16			idx;
55 	u16			pcifunc;
56 };
57 
58 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
59 {
60 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
61 	int blkaddr;
62 
63 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
64 	if (!pfvf->nixlf || blkaddr < 0)
65 		return false;
66 	return true;
67 }
68 
69 int rvu_get_nixlf_count(struct rvu *rvu)
70 {
71 	struct rvu_block *block;
72 	int blkaddr;
73 
74 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
75 	if (blkaddr < 0)
76 		return 0;
77 	block = &rvu->hw->block[blkaddr];
78 	return block->lf.max;
79 }
80 
81 static void nix_mce_list_init(struct nix_mce_list *list, int max)
82 {
83 	INIT_HLIST_HEAD(&list->head);
84 	list->count = 0;
85 	list->max = max;
86 }
87 
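/* Carve 'count' consecutive entries out of the global MCE context.
 * Entries are handed out sequentially and never returned, which is
 * sufficient here since the broadcast lists are sized once at init time.
 */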
88 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
89 {
90 	int idx;
91 
92 	if (!mcast)
93 		return 0;
94 
95 	idx = mcast->next_free_mce;
96 	mcast->next_free_mce += count;
97 	return idx;
98 }
99 
100 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
101 {
102 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
103 		return hw->nix0;
104 
105 	return NULL;
106 }
107 
108 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
109 {
110 	int err;
111 
	/* Sync all in-flight RX packets to LLC/DRAM */
113 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
114 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
115 	if (err)
116 		dev_err(rvu->dev, "NIX RX software sync failed\n");
117 
	/* As per a HW erratum in 9xxx A0 silicon, HW may clear the
	 * SW_SYNC[ENA] bit too early. Hence wait for 50us more.
	 */
121 	if (is_rvu_9xxx_A0(rvu))
122 		usleep_range(50, 60);
123 }
124 
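/* A scheduler queue reference is valid only if the queue index is within
 * the HW limit for that level and the queue is currently owned by the
 * requesting PF/VF, as recorded in the level's pfvf_map.
 */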
125 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
126 			    int lvl, u16 pcifunc, u16 schq)
127 {
128 	struct nix_txsch *txsch;
129 	struct nix_hw *nix_hw;
130 
131 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
132 	if (!nix_hw)
133 		return false;
134 
135 	txsch = &nix_hw->txsch[lvl];
136 	/* Check out of bounds */
137 	if (schq >= txsch->schq.max)
138 		return false;
139 
140 	mutex_lock(&rvu->rsrc_lock);
141 	if (txsch->pfvf_map[schq] != pcifunc) {
142 		mutex_unlock(&rvu->rsrc_lock);
143 		return false;
144 	}
145 	mutex_unlock(&rvu->rsrc_lock);
146 	return true;
147 }
148 
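/* Per-LF interface bring-up: record the RX/TX channel range for the CGX
 * LMAC or LBK pair backing this NIXLF, install the unicast and broadcast
 * NPC MCAM entries and add the PF_FUNC to the broadcast MCE replication
 * list.
 */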
149 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
150 {
151 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
152 	u8 cgx_id, lmac_id;
153 	int pkind, pf, vf;
154 	int err;
155 
156 	pf = rvu_get_pf(pcifunc);
157 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
158 		return 0;
159 
160 	switch (type) {
161 	case NIX_INTF_TYPE_CGX:
162 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
163 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
164 
165 		pkind = rvu_npc_get_pkind(rvu, pf);
166 		if (pkind < 0) {
167 			dev_err(rvu->dev,
168 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
169 			return -EINVAL;
170 		}
171 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
172 		pfvf->tx_chan_base = pfvf->rx_chan_base;
173 		pfvf->rx_chan_cnt = 1;
174 		pfvf->tx_chan_cnt = 1;
175 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
176 		rvu_npc_set_pkind(rvu, pkind, pfvf);
177 		break;
178 	case NIX_INTF_TYPE_LBK:
179 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
180 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
181 		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
182 						NIX_CHAN_LBK_CHX(0, vf + 1);
183 		pfvf->rx_chan_cnt = 1;
184 		pfvf->tx_chan_cnt = 1;
185 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
186 					      pfvf->rx_chan_base, false);
187 		break;
188 	}
189 
	/* Add a UCAST forwarding rule in MCAM with the MAC address
	 * of the RVU PF/VF this NIXLF is attached to.
	 */
193 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
194 				    pfvf->rx_chan_base, pfvf->mac_addr);
195 
196 	/* Add this PF_FUNC to bcast pkt replication list */
197 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
198 	if (err) {
199 		dev_err(rvu->dev,
200 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
201 			pcifunc);
202 		return err;
203 	}
204 
205 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
206 					  nixlf, pfvf->rx_chan_base);
207 	pfvf->maxlen = NIC_HW_MIN_FRS;
208 	pfvf->minlen = NIC_HW_MIN_FRS;
209 
210 	return 0;
211 }
212 
213 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
214 {
215 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
216 	int err;
217 
218 	pfvf->maxlen = 0;
219 	pfvf->minlen = 0;
220 	pfvf->rxvlan = false;
221 
222 	/* Remove this PF_FUNC from bcast pkt replication list */
223 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
224 	if (err) {
225 		dev_err(rvu->dev,
226 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
227 			pcifunc);
228 	}
229 
230 	/* Free and disable any MCAM entries used by this NIX LF */
231 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
232 }
233 
234 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
235 				 u64 format, bool v4, u64 *fidx)
236 {
237 	struct nix_lso_format field = {0};
238 
239 	/* IP's Length field */
240 	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4 the length field is at byte offset 2; for IPv6 it's at 4 */
242 	field.offset = v4 ? 2 : 4;
243 	field.sizem1 = 1; /* i.e 2 bytes */
244 	field.alg = NIX_LSOALG_ADD_PAYLEN;
245 	rvu_write64(rvu, blkaddr,
246 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
247 		    *(u64 *)&field);
248 
249 	/* No ID field in IPv6 header */
250 	if (!v4)
251 		return;
252 
253 	/* IP's ID field */
254 	field.layer = NIX_TXLAYER_OL3;
255 	field.offset = 4;
256 	field.sizem1 = 1; /* i.e 2 bytes */
257 	field.alg = NIX_LSOALG_ADD_SEGNUM;
258 	rvu_write64(rvu, blkaddr,
259 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
260 		    *(u64 *)&field);
261 }
262 
263 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
264 				 u64 format, u64 *fidx)
265 {
266 	struct nix_lso_format field = {0};
267 
268 	/* TCP's sequence number field */
269 	field.layer = NIX_TXLAYER_OL4;
270 	field.offset = 4;
271 	field.sizem1 = 3; /* i.e 4 bytes */
272 	field.alg = NIX_LSOALG_ADD_OFFSET;
273 	rvu_write64(rvu, blkaddr,
274 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
275 		    *(u64 *)&field);
276 
277 	/* TCP's flags field */
278 	field.layer = NIX_TXLAYER_OL4;
279 	field.offset = 12;
280 	field.sizem1 = 0; /* not needed */
281 	field.alg = NIX_LSOALG_TCP_FLAGS;
282 	rvu_write64(rvu, blkaddr,
283 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
284 		    *(u64 *)&field);
285 }
286 
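/* Build the two fixed LSO formats (TSOv4/TSOv6). Each format is a list of
 * up to eight field descriptors; the L3 helper patches the IP length (and
 * the IPv4 ID) while the L4 helper patches the TCP sequence number and
 * flags on a per-segment basis.
 */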
287 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
288 {
289 	u64 cfg, idx, fidx = 0;
290 
291 	/* Enable LSO */
292 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
293 	/* For TSO, set first and middle segment flags to
294 	 * mask out PSH, RST & FIN flags in TCP packet
295 	 */
296 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
297 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
298 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
299 
300 	/* Configure format fields for TCPv4 segmentation offload */
301 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
302 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
303 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
304 
305 	/* Set rest of the fields to NOP */
306 	for (; fidx < 8; fidx++) {
307 		rvu_write64(rvu, blkaddr,
308 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
309 	}
310 
311 	/* Configure format fields for TCPv6 segmentation offload */
312 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
313 	fidx = 0;
314 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
315 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
316 
317 	/* Set rest of the fields to NOP */
318 	for (; fidx < 8; fidx++) {
319 		rvu_write64(rvu, blkaddr,
320 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
321 	}
322 }
323 
324 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
325 {
326 	kfree(pfvf->rq_bmap);
327 	kfree(pfvf->sq_bmap);
328 	kfree(pfvf->cq_bmap);
329 	if (pfvf->rq_ctx)
330 		qmem_free(rvu->dev, pfvf->rq_ctx);
331 	if (pfvf->sq_ctx)
332 		qmem_free(rvu->dev, pfvf->sq_ctx);
333 	if (pfvf->cq_ctx)
334 		qmem_free(rvu->dev, pfvf->cq_ctx);
335 	if (pfvf->rss_ctx)
336 		qmem_free(rvu->dev, pfvf->rss_ctx);
337 	if (pfvf->nix_qints_ctx)
338 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
339 	if (pfvf->cq_ints_ctx)
340 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
341 
342 	pfvf->rq_bmap = NULL;
343 	pfvf->cq_bmap = NULL;
344 	pfvf->sq_bmap = NULL;
345 	pfvf->rq_ctx = NULL;
346 	pfvf->sq_ctx = NULL;
347 	pfvf->cq_ctx = NULL;
348 	pfvf->rss_ctx = NULL;
349 	pfvf->nix_qints_ctx = NULL;
350 	pfvf->cq_ints_ctx = NULL;
351 }
352 
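/* Allocate the RSS indirection table (rss_sz entries per group, rss_grps
 * groups) and program the per-group offset/size registers. For example,
 * with rss_sz = 256 and rss_grps = 4 the table holds 1024 RSSE entries,
 * and group 'g' starts at offset g * 256 with its size encoded as
 * ilog2(256) - 1 = 7 in NIX_AF_LFX_RSS_GRPX(nixlf, g).
 */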
353 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
354 			      struct rvu_pfvf *pfvf, int nixlf,
355 			      int rss_sz, int rss_grps, int hwctx_size)
356 {
357 	int err, grp, num_indices;
358 
359 	/* RSS is not requested for this NIXLF */
360 	if (!rss_sz)
361 		return 0;
362 	num_indices = rss_sz * rss_grps;
363 
364 	/* Alloc NIX RSS HW context memory and config the base */
365 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
366 	if (err)
367 		return err;
368 
369 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
370 		    (u64)pfvf->rss_ctx->iova);
371 
372 	/* Config full RSS table size, enable RSS and caching */
373 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
374 		    BIT_ULL(36) | BIT_ULL(4) |
375 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
376 	/* Config RSS group offset and sizes */
377 	for (grp = 0; grp < rss_grps; grp++)
378 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
379 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
380 	return 0;
381 }
382 
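/* Push one instruction into the NIX admin queue: copy it into the slot at
 * the current head, clear the result memory, ring the doorbell and then
 * busy-poll (for roughly a millisecond) for the completion code written
 * back by HW.
 */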
383 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
384 			       struct nix_aq_inst_s *inst)
385 {
386 	struct admin_queue *aq = block->aq;
387 	struct nix_aq_res_s *result;
388 	int timeout = 1000;
389 	u64 reg, head;
390 
391 	result = (struct nix_aq_res_s *)aq->res->base;
392 
	/* Get the current head pointer, where this instruction is appended */
394 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
395 	head = (reg >> 4) & AQ_PTR_MASK;
396 
397 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
398 	       (void *)inst, aq->inst->entry_sz);
399 	memset(result, 0, sizeof(*result));
400 	/* sync into memory */
401 	wmb();
402 
403 	/* Ring the doorbell and wait for result */
404 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
405 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
406 		cpu_relax();
407 		udelay(1);
408 		timeout--;
409 		if (!timeout)
410 			return -EBUSY;
411 	}
412 
413 	if (result->compcode != NIX_AQ_COMP_GOOD)
414 		/* TODO: Replace this with some error code */
415 		return -EBUSY;
416 
417 	return 0;
418 }
419 
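/* Validate an AQ request from a PF/VF (queue index in range, SMQ
 * ownership for SQ ops, etc), build the instruction, stage the context
 * and mask at RES_ADDR + 128/256 and submit it, mirroring any RQ/SQ/CQ
 * enable state into the LF's bitmaps.
 */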
420 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
421 			       struct nix_aq_enq_rsp *rsp)
422 {
423 	struct rvu_hwinfo *hw = rvu->hw;
424 	u16 pcifunc = req->hdr.pcifunc;
425 	int nixlf, blkaddr, rc = 0;
426 	struct nix_aq_inst_s inst;
427 	struct rvu_block *block;
428 	struct admin_queue *aq;
429 	struct rvu_pfvf *pfvf;
430 	void *ctx, *mask;
431 	bool ena;
432 	u64 cfg;
433 
434 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
435 	if (blkaddr < 0)
436 		return NIX_AF_ERR_AF_LF_INVALID;
437 
438 	block = &hw->block[blkaddr];
439 	aq = block->aq;
440 	if (!aq) {
441 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
442 		return NIX_AF_ERR_AQ_ENQUEUE;
443 	}
444 
445 	pfvf = rvu_get_pfvf(rvu, pcifunc);
446 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
447 
448 	/* Skip NIXLF check for broadcast MCE entry init */
449 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
450 		if (!pfvf->nixlf || nixlf < 0)
451 			return NIX_AF_ERR_AF_LF_INVALID;
452 	}
453 
454 	switch (req->ctype) {
455 	case NIX_AQ_CTYPE_RQ:
456 		/* Check if index exceeds max no of queues */
457 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
458 			rc = NIX_AF_ERR_AQ_ENQUEUE;
459 		break;
460 	case NIX_AQ_CTYPE_SQ:
461 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
462 			rc = NIX_AF_ERR_AQ_ENQUEUE;
463 		break;
464 	case NIX_AQ_CTYPE_CQ:
465 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
466 			rc = NIX_AF_ERR_AQ_ENQUEUE;
467 		break;
468 	case NIX_AQ_CTYPE_RSS:
469 		/* Check if RSS is enabled and qidx is within range */
470 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
471 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
472 		    (req->qidx >= (256UL << (cfg & 0xF))))
473 			rc = NIX_AF_ERR_AQ_ENQUEUE;
474 		break;
475 	case NIX_AQ_CTYPE_MCE:
476 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
477 		/* Check if index exceeds MCE list length */
478 		if (!hw->nix0->mcast.mce_ctx ||
479 		    (req->qidx >= (256UL << (cfg & 0xF))))
480 			rc = NIX_AF_ERR_AQ_ENQUEUE;
481 
482 		/* Adding multicast lists for requests from PF/VFs is not
483 		 * yet supported, so ignore this.
484 		 */
485 		if (rsp)
486 			rc = NIX_AF_ERR_AQ_ENQUEUE;
487 		break;
488 	default:
489 		rc = NIX_AF_ERR_AQ_ENQUEUE;
490 	}
491 
492 	if (rc)
493 		return rc;
494 
	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF */
496 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
497 	    req->op != NIX_AQ_INSTOP_WRITE) {
498 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
499 				     pcifunc, req->sq.smq))
500 			return NIX_AF_ERR_AQ_ENQUEUE;
501 	}
502 
503 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
504 	inst.lf = nixlf;
505 	inst.cindex = req->qidx;
506 	inst.ctype = req->ctype;
507 	inst.op = req->op;
	/* Enqueuing multiple instructions at a time is currently not
	 * supported, so always choose the first entry in result memory.
	 */
511 	inst.res_addr = (u64)aq->res->iova;
512 
513 	/* Clean result + context memory */
514 	memset(aq->res->base, 0, aq->res->entry_sz);
515 	/* Context needs to be written at RES_ADDR + 128 */
516 	ctx = aq->res->base + 128;
517 	/* Mask needs to be written at RES_ADDR + 256 */
518 	mask = aq->res->base + 256;
519 
520 	switch (req->op) {
521 	case NIX_AQ_INSTOP_WRITE:
522 		if (req->ctype == NIX_AQ_CTYPE_RQ)
523 			memcpy(mask, &req->rq_mask,
524 			       sizeof(struct nix_rq_ctx_s));
525 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
526 			memcpy(mask, &req->sq_mask,
527 			       sizeof(struct nix_sq_ctx_s));
528 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
529 			memcpy(mask, &req->cq_mask,
530 			       sizeof(struct nix_cq_ctx_s));
531 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
532 			memcpy(mask, &req->rss_mask,
533 			       sizeof(struct nix_rsse_s));
534 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
535 			memcpy(mask, &req->mce_mask,
536 			       sizeof(struct nix_rx_mce_s));
537 		/* Fall through */
538 	case NIX_AQ_INSTOP_INIT:
539 		if (req->ctype == NIX_AQ_CTYPE_RQ)
540 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
541 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
542 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
543 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
544 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
545 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
546 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
547 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
548 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
549 		break;
550 	case NIX_AQ_INSTOP_NOP:
551 	case NIX_AQ_INSTOP_READ:
552 	case NIX_AQ_INSTOP_LOCK:
553 	case NIX_AQ_INSTOP_UNLOCK:
554 		break;
555 	default:
556 		rc = NIX_AF_ERR_AQ_ENQUEUE;
557 		return rc;
558 	}
559 
560 	spin_lock(&aq->lock);
561 
562 	/* Submit the instruction to AQ */
563 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
564 	if (rc) {
565 		spin_unlock(&aq->lock);
566 		return rc;
567 	}
568 
569 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
570 	if (req->op == NIX_AQ_INSTOP_INIT) {
571 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
572 			__set_bit(req->qidx, pfvf->rq_bmap);
573 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
574 			__set_bit(req->qidx, pfvf->sq_bmap);
575 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
576 			__set_bit(req->qidx, pfvf->cq_bmap);
577 	}
578 
579 	if (req->op == NIX_AQ_INSTOP_WRITE) {
580 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
581 			ena = (req->rq.ena & req->rq_mask.ena) |
582 				(test_bit(req->qidx, pfvf->rq_bmap) &
583 				~req->rq_mask.ena);
584 			if (ena)
585 				__set_bit(req->qidx, pfvf->rq_bmap);
586 			else
587 				__clear_bit(req->qidx, pfvf->rq_bmap);
588 		}
589 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
591 				(test_bit(req->qidx, pfvf->sq_bmap) &
592 				~req->sq_mask.ena);
593 			if (ena)
594 				__set_bit(req->qidx, pfvf->sq_bmap);
595 			else
596 				__clear_bit(req->qidx, pfvf->sq_bmap);
597 		}
598 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
600 				(test_bit(req->qidx, pfvf->cq_bmap) &
601 				~req->cq_mask.ena);
602 			if (ena)
603 				__set_bit(req->qidx, pfvf->cq_bmap);
604 			else
605 				__clear_bit(req->qidx, pfvf->cq_bmap);
606 		}
607 	}
608 
609 	if (rsp) {
610 		/* Copy read context into mailbox */
611 		if (req->op == NIX_AQ_INSTOP_READ) {
612 			if (req->ctype == NIX_AQ_CTYPE_RQ)
613 				memcpy(&rsp->rq, ctx,
614 				       sizeof(struct nix_rq_ctx_s));
615 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
616 				memcpy(&rsp->sq, ctx,
617 				       sizeof(struct nix_sq_ctx_s));
618 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
619 				memcpy(&rsp->cq, ctx,
620 				       sizeof(struct nix_cq_ctx_s));
621 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
622 				memcpy(&rsp->rss, ctx,
623 				       sizeof(struct nix_rsse_s));
624 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
625 				memcpy(&rsp->mce, ctx,
626 				       sizeof(struct nix_rx_mce_s));
627 		}
628 	}
629 
630 	spin_unlock(&aq->lock);
631 	return 0;
632 }
633 
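/* Walk the RQ/SQ/CQ bitmap of this LF and issue a masked AQ WRITE that
 * clears only the ENA bit of every context that is currently enabled.
 */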
634 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
635 {
636 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
637 	struct nix_aq_enq_req aq_req;
638 	unsigned long *bmap;
639 	int qidx, q_cnt = 0;
640 	int err = 0, rc;
641 
642 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
643 		return NIX_AF_ERR_AQ_ENQUEUE;
644 
645 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
646 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
647 
648 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
649 		aq_req.cq.ena = 0;
650 		aq_req.cq_mask.ena = 1;
651 		q_cnt = pfvf->cq_ctx->qsize;
652 		bmap = pfvf->cq_bmap;
653 	}
654 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
655 		aq_req.sq.ena = 0;
656 		aq_req.sq_mask.ena = 1;
657 		q_cnt = pfvf->sq_ctx->qsize;
658 		bmap = pfvf->sq_bmap;
659 	}
660 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
661 		aq_req.rq.ena = 0;
662 		aq_req.rq_mask.ena = 1;
663 		q_cnt = pfvf->rq_ctx->qsize;
664 		bmap = pfvf->rq_bmap;
665 	}
666 
667 	aq_req.ctype = req->ctype;
668 	aq_req.op = NIX_AQ_INSTOP_WRITE;
669 
670 	for (qidx = 0; qidx < q_cnt; qidx++) {
671 		if (!test_bit(qidx, bmap))
672 			continue;
673 		aq_req.qidx = qidx;
674 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
675 		if (rc) {
676 			err = rc;
677 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
678 				(req->ctype == NIX_AQ_CTYPE_CQ) ?
679 				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
680 				"RQ" : "SQ"), qidx);
681 		}
682 	}
683 
684 	return err;
685 }
686 
687 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
688 				struct nix_aq_enq_req *req,
689 				struct nix_aq_enq_rsp *rsp)
690 {
691 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
692 }
693 
694 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
695 				       struct hwctx_disable_req *req,
696 				       struct msg_rsp *rsp)
697 {
698 	return nix_lf_hwctx_disable(rvu, req);
699 }
700 
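/* NIX LF allocation mbox handler: validates the NPA/SSO/RSS parameters,
 * resets the LF, allocates RQ/SQ/CQ/RSS and interrupt context memory and
 * programs their base addresses, then brings up the CGX or LBK interface.
 */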
701 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
702 				  struct nix_lf_alloc_req *req,
703 				  struct nix_lf_alloc_rsp *rsp)
704 {
705 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
706 	struct rvu_hwinfo *hw = rvu->hw;
707 	u16 pcifunc = req->hdr.pcifunc;
708 	struct rvu_block *block;
709 	struct rvu_pfvf *pfvf;
710 	u64 cfg, ctx_cfg;
711 	int blkaddr;
712 
713 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
714 		return NIX_AF_ERR_PARAM;
715 
716 	pfvf = rvu_get_pfvf(rvu, pcifunc);
717 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
718 	if (!pfvf->nixlf || blkaddr < 0)
719 		return NIX_AF_ERR_AF_LF_INVALID;
720 
721 	block = &hw->block[blkaddr];
722 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
723 	if (nixlf < 0)
724 		return NIX_AF_ERR_AF_LF_INVALID;
725 
726 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
727 	if (req->npa_func) {
728 		/* If default, use 'this' NIXLF's PFFUNC */
729 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
730 			req->npa_func = pcifunc;
731 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
732 			return NIX_AF_INVAL_NPA_PF_FUNC;
733 	}
734 
735 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
736 	if (req->sso_func) {
737 		/* If default, use 'this' NIXLF's PFFUNC */
738 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
739 			req->sso_func = pcifunc;
740 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
741 			return NIX_AF_INVAL_SSO_PF_FUNC;
742 	}
743 
744 	/* If RSS is being enabled, check if requested config is valid.
745 	 * RSS table size should be power of two, otherwise
746 	 * RSS_GRP::OFFSET + adder might go beyond that group or
747 	 * won't be able to use entire table.
748 	 */
749 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
750 			    !is_power_of_2(req->rss_sz)))
751 		return NIX_AF_ERR_RSS_SIZE_INVALID;
752 
753 	if (req->rss_sz &&
754 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
755 		return NIX_AF_ERR_RSS_GRPS_INVALID;
756 
757 	/* Reset this NIX LF */
758 	err = rvu_lf_reset(rvu, block, nixlf);
759 	if (err) {
760 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
761 			block->addr - BLKADDR_NIX0, nixlf);
762 		return NIX_AF_ERR_LF_RESET;
763 	}
764 
765 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
766 
767 	/* Alloc NIX RQ HW context memory and config the base */
768 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
769 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
770 	if (err)
771 		goto free_mem;
772 
773 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
774 	if (!pfvf->rq_bmap)
775 		goto free_mem;
776 
777 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
778 		    (u64)pfvf->rq_ctx->iova);
779 
780 	/* Set caching and queue count in HW */
781 	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
782 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
783 
784 	/* Alloc NIX SQ HW context memory and config the base */
785 	hwctx_size = 1UL << (ctx_cfg & 0xF);
786 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
787 	if (err)
788 		goto free_mem;
789 
790 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
791 	if (!pfvf->sq_bmap)
792 		goto free_mem;
793 
794 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
795 		    (u64)pfvf->sq_ctx->iova);
796 	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
797 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
798 
799 	/* Alloc NIX CQ HW context memory and config the base */
800 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
801 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
802 	if (err)
803 		goto free_mem;
804 
805 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
806 	if (!pfvf->cq_bmap)
807 		goto free_mem;
808 
809 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
810 		    (u64)pfvf->cq_ctx->iova);
811 	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
812 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
813 
814 	/* Initialize receive side scaling (RSS) */
815 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
816 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
817 				 req->rss_sz, req->rss_grps, hwctx_size);
818 	if (err)
819 		goto free_mem;
820 
821 	/* Alloc memory for CQINT's HW contexts */
822 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
823 	qints = (cfg >> 24) & 0xFFF;
824 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
825 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
826 	if (err)
827 		goto free_mem;
828 
829 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
830 		    (u64)pfvf->cq_ints_ctx->iova);
831 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
832 
833 	/* Alloc memory for QINT's HW contexts */
834 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
835 	qints = (cfg >> 12) & 0xFFF;
836 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
837 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
838 	if (err)
839 		goto free_mem;
840 
841 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
842 		    (u64)pfvf->nix_qints_ctx->iova);
843 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
844 
845 	/* Enable LMTST for this NIX LF */
846 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
847 
	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * Start from zero here; 'cfg' still holds the NIX_AF_CONST2 value
	 * read above for the QINT count.
	 */
	cfg = 0;
	if (req->npa_func)
		cfg = req->npa_func;
851 	if (req->sso_func)
852 		cfg |= (u64)req->sso_func << 16;
853 
854 	cfg |= (u64)req->xqe_sz << 33;
855 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
856 
	/* Config Rx pkt length, csum checks and apad enable/disable */
858 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
859 
860 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
861 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
862 	if (err)
863 		goto free_mem;
864 
865 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
866 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
867 
868 	goto exit;
869 
870 free_mem:
871 	nix_ctx_free(rvu, pfvf);
872 	rc = -ENOMEM;
873 
874 exit:
875 	/* Set macaddr of this PF/VF */
876 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
877 
878 	/* set SQB size info */
879 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
880 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
881 	rsp->rx_chan_base = pfvf->rx_chan_base;
882 	rsp->tx_chan_base = pfvf->tx_chan_base;
883 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
884 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
885 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
886 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
887 	/* Get HW supported stat count */
888 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
889 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
890 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
891 	/* Get count of CQ IRQs and error IRQs supported per LF */
892 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
893 	rsp->qints = ((cfg >> 12) & 0xFFF);
894 	rsp->cints = ((cfg >> 24) & 0xFFF);
895 	return rc;
896 }
897 
898 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
899 				 struct msg_rsp *rsp)
900 {
901 	struct rvu_hwinfo *hw = rvu->hw;
902 	u16 pcifunc = req->hdr.pcifunc;
903 	struct rvu_block *block;
904 	int blkaddr, nixlf, err;
905 	struct rvu_pfvf *pfvf;
906 
907 	pfvf = rvu_get_pfvf(rvu, pcifunc);
908 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
909 	if (!pfvf->nixlf || blkaddr < 0)
910 		return NIX_AF_ERR_AF_LF_INVALID;
911 
912 	block = &hw->block[blkaddr];
913 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
914 	if (nixlf < 0)
915 		return NIX_AF_ERR_AF_LF_INVALID;
916 
917 	nix_interface_deinit(rvu, pcifunc, nixlf);
918 
919 	/* Reset this NIX LF */
920 	err = rvu_lf_reset(rvu, block, nixlf);
921 	if (err) {
922 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
923 			block->addr - BLKADDR_NIX0, nixlf);
924 		return NIX_AF_ERR_LF_RESET;
925 	}
926 
927 	nix_ctx_free(rvu, pfvf);
928 
929 	return 0;
930 }
931 
932 /* Disable shaping of pkts by a scheduler queue
933  * at a given scheduler level.
934  */
935 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
936 				 int lvl, int schq)
937 {
938 	u64  cir_reg = 0, pir_reg = 0;
939 	u64  cfg;
940 
941 	switch (lvl) {
942 	case NIX_TXSCH_LVL_TL1:
943 		cir_reg = NIX_AF_TL1X_CIR(schq);
944 		pir_reg = 0; /* PIR not available at TL1 */
945 		break;
946 	case NIX_TXSCH_LVL_TL2:
947 		cir_reg = NIX_AF_TL2X_CIR(schq);
948 		pir_reg = NIX_AF_TL2X_PIR(schq);
949 		break;
950 	case NIX_TXSCH_LVL_TL3:
951 		cir_reg = NIX_AF_TL3X_CIR(schq);
952 		pir_reg = NIX_AF_TL3X_PIR(schq);
953 		break;
954 	case NIX_TXSCH_LVL_TL4:
955 		cir_reg = NIX_AF_TL4X_CIR(schq);
956 		pir_reg = NIX_AF_TL4X_PIR(schq);
957 		break;
958 	}
959 
960 	if (!cir_reg)
961 		return;
962 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
963 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
964 
965 	if (!pir_reg)
966 		return;
967 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
968 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
969 }
970 
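/* Clear a scheduler queue's link config: TL4's SDP link and, for TL2,
 * every CGX/LBK link it could transmit on.
 */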
971 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
972 				 int lvl, int schq)
973 {
974 	struct rvu_hwinfo *hw = rvu->hw;
975 	int link;
976 
977 	/* Reset TL4's SDP link config */
978 	if (lvl == NIX_TXSCH_LVL_TL4)
979 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
980 
981 	if (lvl != NIX_TXSCH_LVL_TL2)
982 		return;
983 
984 	/* Reset TL2's CGX or LBK link config */
985 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
986 		rvu_write64(rvu, blkaddr,
987 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
988 }
989 
990 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
991 				     struct nix_txsch_alloc_req *req,
992 				     struct nix_txsch_alloc_rsp *rsp)
993 {
994 	u16 pcifunc = req->hdr.pcifunc;
995 	struct nix_txsch *txsch;
996 	int lvl, idx, req_schq;
997 	struct rvu_pfvf *pfvf;
998 	struct nix_hw *nix_hw;
999 	int blkaddr, rc = 0;
1000 	u16 schq;
1001 
1002 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1003 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1004 	if (!pfvf->nixlf || blkaddr < 0)
1005 		return NIX_AF_ERR_AF_LF_INVALID;
1006 
1007 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1008 	if (!nix_hw)
1009 		return -EINVAL;
1010 
1011 	mutex_lock(&rvu->rsrc_lock);
1012 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1013 		txsch = &nix_hw->txsch[lvl];
1014 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
1015 
1016 		/* There are only 28 TL1s */
1017 		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
1018 			goto err;
1019 
1020 		/* Check if request is valid */
1021 		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1022 			goto err;
1023 
1024 		/* If contiguous queues are needed, check for availability */
1025 		if (req->schq_contig[lvl] &&
1026 		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1027 			goto err;
1028 
1029 		/* Check if full request can be accommodated */
1030 		if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
1031 			goto err;
1032 	}
1033 
1034 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1035 		txsch = &nix_hw->txsch[lvl];
1036 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1037 		rsp->schq[lvl] = req->schq[lvl];
1038 
1039 		schq = 0;
1040 		/* Alloc contiguous queues first */
1041 		if (req->schq_contig[lvl]) {
1042 			schq = rvu_alloc_rsrc_contig(&txsch->schq,
1043 						     req->schq_contig[lvl]);
1044 
1045 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1046 				txsch->pfvf_map[schq] = pcifunc;
1047 				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1048 				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1049 				rsp->schq_contig_list[lvl][idx] = schq;
1050 				schq++;
1051 			}
1052 		}
1053 
1054 		/* Alloc non-contiguous queues */
1055 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1056 			schq = rvu_alloc_rsrc(&txsch->schq);
1057 			txsch->pfvf_map[schq] = pcifunc;
1058 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1059 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1060 			rsp->schq_list[lvl][idx] = schq;
1061 		}
1062 	}
1063 	goto exit;
1064 err:
1065 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1066 exit:
1067 	mutex_unlock(&rvu->rsrc_lock);
1068 	return rc;
1069 }
1070 
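/* Tear down all TX scheduler queues owned by a PF/VF: detach the TL2/TL4
 * link configs, flush every owned SMQ with enqueue XOFF set, then return
 * the queues to the free pool and sync the NDC-TX cache.
 */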
1071 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1072 {
1073 	int blkaddr, nixlf, lvl, schq, err;
1074 	struct rvu_hwinfo *hw = rvu->hw;
1075 	struct nix_txsch *txsch;
1076 	struct nix_hw *nix_hw;
1077 	u64 cfg;
1078 
1079 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1080 	if (blkaddr < 0)
1081 		return NIX_AF_ERR_AF_LF_INVALID;
1082 
1083 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1084 	if (!nix_hw)
1085 		return -EINVAL;
1086 
1087 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1088 	if (nixlf < 0)
1089 		return NIX_AF_ERR_AF_LF_INVALID;
1090 
	/* Disable TL2/3 queue links before SMQ flush */
1092 	mutex_lock(&rvu->rsrc_lock);
1093 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1094 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1095 			continue;
1096 
1097 		txsch = &nix_hw->txsch[lvl];
1098 		for (schq = 0; schq < txsch->schq.max; schq++) {
1099 			if (txsch->pfvf_map[schq] != pcifunc)
1100 				continue;
1101 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1102 		}
1103 	}
1104 
1105 	/* Flush SMQs */
1106 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1107 	for (schq = 0; schq < txsch->schq.max; schq++) {
1108 		if (txsch->pfvf_map[schq] != pcifunc)
1109 			continue;
1110 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1111 		/* Do SMQ flush and set enqueue xoff */
1112 		cfg |= BIT_ULL(50) | BIT_ULL(49);
1113 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1114 
1115 		/* Wait for flush to complete */
1116 		err = rvu_poll_reg(rvu, blkaddr,
1117 				   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1118 		if (err) {
1119 			dev_err(rvu->dev,
1120 				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1121 		}
1122 	}
1123 
1124 	/* Now free scheduler queues to free pool */
1125 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1126 		txsch = &nix_hw->txsch[lvl];
1127 		for (schq = 0; schq < txsch->schq.max; schq++) {
1128 			if (txsch->pfvf_map[schq] != pcifunc)
1129 				continue;
1130 			rvu_free_rsrc(&txsch->schq, schq);
1131 			txsch->pfvf_map[schq] = 0;
1132 		}
1133 	}
1134 	mutex_unlock(&rvu->rsrc_lock);
1135 
1136 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1137 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1138 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1139 	if (err)
1140 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1141 
1142 	return 0;
1143 }
1144 
1145 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1146 				    struct nix_txsch_free_req *req,
1147 				    struct msg_rsp *rsp)
1148 {
1149 	return nix_txschq_free(rvu, req->hdr.pcifunc);
1150 }
1151 
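/* A TXSCHQ register write is allowed only if the register is valid for
 * the given level, the target queue belongs to the requester and, for
 * *_PARENT registers, the parent queue at the next level is owned by
 * the same PF/VF.
 */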
1152 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1153 				   int lvl, u64 reg, u64 regval)
1154 {
1155 	u64 regbase = reg & 0xFFFF;
1156 	u16 schq, parent;
1157 
1158 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1159 		return false;
1160 
1161 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1162 	/* Check if this schq belongs to this PF/VF or not */
1163 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1164 		return false;
1165 
1166 	parent = (regval >> 16) & 0x1FF;
1167 	/* Validate MDQ's TL4 parent */
1168 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1169 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1170 		return false;
1171 
1172 	/* Validate TL4's TL3 parent */
1173 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1174 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1175 		return false;
1176 
1177 	/* Validate TL3's TL2 parent */
1178 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1179 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1180 		return false;
1181 
1182 	/* Validate TL2's TL1 parent */
1183 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1184 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1185 		return false;
1186 
1187 	return true;
1188 }
1189 
1190 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1191 				    struct nix_txschq_config *req,
1192 				    struct msg_rsp *rsp)
1193 {
1194 	struct rvu_hwinfo *hw = rvu->hw;
1195 	u16 pcifunc = req->hdr.pcifunc;
1196 	u64 reg, regval, schq_regbase;
1197 	struct nix_txsch *txsch;
1198 	struct nix_hw *nix_hw;
1199 	int blkaddr, idx, err;
1200 	int nixlf;
1201 
1202 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1203 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1204 		return NIX_AF_INVAL_TXSCHQ_CFG;
1205 
1206 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1207 	if (blkaddr < 0)
1208 		return NIX_AF_ERR_AF_LF_INVALID;
1209 
1210 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1211 	if (!nix_hw)
1212 		return -EINVAL;
1213 
1214 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1215 	if (nixlf < 0)
1216 		return NIX_AF_ERR_AF_LF_INVALID;
1217 
1218 	txsch = &nix_hw->txsch[req->lvl];
1219 	for (idx = 0; idx < req->num_regs; idx++) {
1220 		reg = req->reg[idx];
1221 		regval = req->regval[idx];
1222 		schq_regbase = reg & 0xFFFF;
1223 
1224 		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1225 					    txsch->lvl, reg, regval))
1226 			return NIX_AF_INVAL_TXSCHQ_CFG;
1227 
1228 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1229 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1230 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1231 					   pcifunc, 0);
1232 			regval &= ~(0x7FULL << 24);
1233 			regval |= ((u64)nixlf << 24);
1234 		}
1235 
1236 		rvu_write64(rvu, blkaddr, reg, regval);
1237 
1238 		/* Check for SMQ flush, if so, poll for its completion */
1239 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1240 		    (regval & BIT_ULL(49))) {
1241 			err = rvu_poll_reg(rvu, blkaddr,
1242 					   reg, BIT_ULL(49), true);
1243 			if (err)
1244 				return NIX_AF_SMQ_FLUSH_FAILED;
1245 		}
1246 	}
1247 	return 0;
1248 }
1249 
1250 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1251 			   struct nix_vtag_config *req)
1252 {
1253 	u64 regval = req->vtag_size;
1254 
1255 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1256 		return -EINVAL;
1257 
1258 	if (req->rx.capture_vtag)
1259 		regval |= BIT_ULL(5);
1260 	if (req->rx.strip_vtag)
1261 		regval |= BIT_ULL(4);
1262 
1263 	rvu_write64(rvu, blkaddr,
1264 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1265 	return 0;
1266 }
1267 
1268 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1269 				  struct nix_vtag_config *req,
1270 				  struct msg_rsp *rsp)
1271 {
1272 	struct rvu_hwinfo *hw = rvu->hw;
1273 	u16 pcifunc = req->hdr.pcifunc;
1274 	int blkaddr, nixlf, err;
1275 
1276 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1277 	if (blkaddr < 0)
1278 		return NIX_AF_ERR_AF_LF_INVALID;
1279 
1280 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1281 	if (nixlf < 0)
1282 		return NIX_AF_ERR_AF_LF_INVALID;
1283 
1284 	if (req->cfg_type) {
1285 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1286 		if (err)
1287 			return NIX_AF_ERR_PARAM;
1288 	} else {
1289 		/* TODO: handle tx vtag configuration */
1290 		return 0;
1291 	}
1292 
1293 	return 0;
1294 }
1295 
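/* Program one MCE via the AQ on behalf of the AF (hdr.pcifunc = 0): the
 * entry forwards to RQ0 of 'pcifunc', links to 'next' and carries the
 * end-of-list flag in 'eol'. All context fields are written since the
 * mask is set to all ones.
 */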
1296 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1297 			 u16 pcifunc, int next, bool eol)
1298 {
1299 	struct nix_aq_enq_req aq_req;
1300 	int err;
1301 
1302 	aq_req.hdr.pcifunc = 0;
1303 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1304 	aq_req.op = op;
1305 	aq_req.qidx = mce;
1306 
1307 	/* Forward bcast pkts to RQ0, RSS not needed */
1308 	aq_req.mce.op = 0;
1309 	aq_req.mce.index = 0;
1310 	aq_req.mce.eol = eol;
1311 	aq_req.mce.pf_func = pcifunc;
1312 	aq_req.mce.next = next;
1313 
1314 	/* All fields valid */
1315 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1316 
1317 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1318 	if (err) {
1319 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1320 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1321 		return err;
1322 	}
1323 	return 0;
1324 }
1325 
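/* Add 'pcifunc' at the tail of a software MCE list, or remove it if
 * 'add' is false. The caller re-syncs the whole list to HW afterwards.
 */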
1326 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1327 			       u16 pcifunc, int idx, bool add)
1328 {
1329 	struct mce *mce, *tail = NULL;
1330 	bool delete = false;
1331 
1332 	/* Scan through the current list */
1333 	hlist_for_each_entry(mce, &mce_list->head, node) {
1334 		/* If already exists, then delete */
1335 		if (mce->pcifunc == pcifunc && !add) {
1336 			delete = true;
1337 			break;
1338 		}
1339 		tail = mce;
1340 	}
1341 
1342 	if (delete) {
1343 		hlist_del(&mce->node);
1344 		kfree(mce);
1345 		mce_list->count--;
1346 		return 0;
1347 	}
1348 
1349 	if (!add)
1350 		return 0;
1351 
1352 	/* Add a new one to the list, at the tail */
1353 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1354 	if (!mce)
1355 		return -ENOMEM;
1356 	mce->idx = idx;
1357 	mce->pcifunc = pcifunc;
1358 	if (!tail)
1359 		hlist_add_head(&mce->node, &mce_list->head);
1360 	else
1361 		hlist_add_behind(&mce->node, &tail->node);
1362 	mce_list->count++;
1363 	return 0;
1364 }
1365 
1366 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1367 {
1368 	int err = 0, idx, next_idx, count;
1369 	struct nix_mce_list *mce_list;
1370 	struct mce *mce, *next_mce;
1371 	struct nix_mcast *mcast;
1372 	struct nix_hw *nix_hw;
1373 	struct rvu_pfvf *pfvf;
1374 	int blkaddr;
1375 
1376 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
1377 	if (is_afvf(pcifunc))
1378 		return 0;
1379 
1380 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1381 	if (blkaddr < 0)
1382 		return 0;
1383 
1384 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1385 	if (!nix_hw)
1386 		return 0;
1387 
1388 	mcast = &nix_hw->mcast;
1389 
1390 	/* Get this PF/VF func's MCE index */
1391 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1392 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1393 
1394 	mce_list = &pfvf->bcast_mce_list;
1395 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1396 		dev_err(rvu->dev,
1397 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1398 			__func__, idx, mce_list->max,
1399 			pcifunc >> RVU_PFVF_PF_SHIFT);
1400 		return -EINVAL;
1401 	}
1402 
1403 	mutex_lock(&mcast->mce_lock);
1404 
1405 	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1406 	if (err)
1407 		goto end;
1408 
1409 	/* Disable MCAM entry in NPC */
1410 
1411 	if (!mce_list->count)
1412 		goto end;
1413 	count = mce_list->count;
1414 
1415 	/* Dump the updated list to HW */
1416 	hlist_for_each_entry(mce, &mce_list->head, node) {
1417 		next_idx = 0;
1418 		count--;
1419 		if (count) {
1420 			next_mce = hlist_entry(mce->node.next,
1421 					       struct mce, node);
1422 			next_idx = next_mce->idx;
1423 		}
1424 		/* EOL should be set in last MCE */
1425 		err = nix_setup_mce(rvu, mce->idx,
1426 				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1427 				    next_idx, count ? false : true);
1428 		if (err)
1429 			goto end;
1430 	}
1431 
1432 end:
1433 	mutex_unlock(&mcast->mce_lock);
1434 	return err;
1435 }
1436 
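/* Reserve a broadcast MCE list for each CGX-mapped PF, sized for the PF
 * and all of its VFs, and pre-populate the HW entries with INIT ops so
 * that later updates can always use WRITE.
 */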
1437 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1438 {
1439 	struct nix_mcast *mcast = &nix_hw->mcast;
1440 	int err, pf, numvfs, idx;
1441 	struct rvu_pfvf *pfvf;
1442 	u16 pcifunc;
1443 	u64 cfg;
1444 
1445 	/* Skip PF0 (i.e AF) */
1446 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1447 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1448 		/* If PF is not enabled, nothing to do */
1449 		if (!((cfg >> 20) & 0x01))
1450 			continue;
1451 		/* Get numVFs attached to this PF */
1452 		numvfs = (cfg >> 12) & 0xFF;
1453 
1454 		pfvf = &rvu->pf[pf];
1455 		/* Save the start MCE */
1456 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1457 
1458 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1459 
1460 		for (idx = 0; idx < (numvfs + 1); idx++) {
1461 			/* idx-0 is for PF, followed by VFs */
1462 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1463 			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * whether AQ_OP should be INIT or WRITE later on.
			 * These will be updated when a NIXLF is attached to or
			 * detached from these PF/VFs.
			 */
1469 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1470 					    NIX_AQ_INSTOP_INIT,
1471 					    pcifunc, 0, true);
1472 			if (err)
1473 				return err;
1474 		}
1475 	}
1476 	return 0;
1477 }
1478 
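/* Allocate the MCE context and replication buffer memory, size the MCE
 * table for MC_TBL_SIZE (512 entries with the current setting) and
 * reserve a pkind for the internal multicast/mirror replay path.
 */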
1479 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1480 {
1481 	struct nix_mcast *mcast = &nix_hw->mcast;
1482 	struct rvu_hwinfo *hw = rvu->hw;
1483 	int err, size;
1484 
1485 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1486 	size = (1ULL << size);
1487 
1488 	/* Alloc memory for multicast/mirror replication entries */
1489 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1490 			 (256UL << MC_TBL_SIZE), size);
1491 	if (err)
1492 		return -ENOMEM;
1493 
1494 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1495 		    (u64)mcast->mce_ctx->iova);
1496 
	/* Set max list length equal to max no of VFs per PF + PF itself */
1498 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1499 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1500 
1501 	/* Alloc memory for multicast replication buffers */
1502 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1503 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1504 			 (8UL << MC_BUF_CNT), size);
1505 	if (err)
1506 		return -ENOMEM;
1507 
1508 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1509 		    (u64)mcast->mcast_buf->iova);
1510 
1511 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
1512 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1513 
1514 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1515 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
1516 		    BIT_ULL(20) | MC_BUF_CNT);
1517 
1518 	mutex_init(&mcast->mce_lock);
1519 
1520 	return nix_setup_bcast_tables(rvu, nix_hw);
1521 }
1522 
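/* Read the per-level scheduler queue counts from the *_CONST registers
 * and set up an allocation bitmap plus a schq-to-pcifunc ownership map
 * for each level.
 */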
1523 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1524 {
1525 	struct nix_txsch *txsch;
1526 	u64 cfg, reg;
1527 	int err, lvl;
1528 
	/* Get the scheduler queue count of each type and alloc a
	 * bitmap for each, for alloc/free/attach operations.
	 */
1532 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1533 		txsch = &nix_hw->txsch[lvl];
1534 		txsch->lvl = lvl;
1535 		switch (lvl) {
1536 		case NIX_TXSCH_LVL_SMQ:
1537 			reg = NIX_AF_MDQ_CONST;
1538 			break;
1539 		case NIX_TXSCH_LVL_TL4:
1540 			reg = NIX_AF_TL4_CONST;
1541 			break;
1542 		case NIX_TXSCH_LVL_TL3:
1543 			reg = NIX_AF_TL3_CONST;
1544 			break;
1545 		case NIX_TXSCH_LVL_TL2:
1546 			reg = NIX_AF_TL2_CONST;
1547 			break;
1548 		case NIX_TXSCH_LVL_TL1:
1549 			reg = NIX_AF_TL1_CONST;
1550 			break;
1551 		}
1552 		cfg = rvu_read64(rvu, blkaddr, reg);
1553 		txsch->schq.max = cfg & 0xFFFF;
1554 		err = rvu_alloc_bitmap(&txsch->schq);
1555 		if (err)
1556 			return err;
1557 
		/* Allocate memory for the scheduler queue to
		 * PF/VF pcifunc mapping info.
		 */
1561 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1562 					       sizeof(u16), GFP_KERNEL);
1563 		if (!txsch->pfvf_map)
1564 			return -ENOMEM;
1565 	}
1566 	return 0;
1567 }
1568 
1569 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
1570 				   struct msg_rsp *rsp)
1571 {
1572 	struct rvu_hwinfo *hw = rvu->hw;
1573 	u16 pcifunc = req->hdr.pcifunc;
1574 	int i, nixlf, blkaddr;
1575 	u64 stats;
1576 
1577 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1578 	if (blkaddr < 0)
1579 		return NIX_AF_ERR_AF_LF_INVALID;
1580 
1581 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1582 	if (nixlf < 0)
1583 		return NIX_AF_ERR_AF_LF_INVALID;
1584 
1585 	/* Get stats count supported by HW */
1586 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1587 
1588 	/* Reset tx stats */
1589 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1590 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1591 
1592 	/* Reset rx stats */
1593 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1594 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1595 
1596 	return 0;
1597 }
1598 
1599 /* Returns the ALG index to be set into NPC_RX_ACTION */
1600 static int get_flowkey_alg_idx(u32 flow_cfg)
1601 {
1602 	u32 ip_cfg;
1603 
1604 	flow_cfg &= ~NIX_FLOW_KEY_TYPE_PORT;
1605 	ip_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
1606 	if (flow_cfg == ip_cfg)
1607 		return NIX_FLOW_KEY_ALG_IP;
1608 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP))
1609 		return NIX_FLOW_KEY_ALG_TCP;
1610 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_UDP))
1611 		return NIX_FLOW_KEY_ALG_UDP;
1612 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_SCTP))
1613 		return NIX_FLOW_KEY_ALG_SCTP;
1614 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP |
1615 			      NIX_FLOW_KEY_TYPE_UDP))
1616 		return NIX_FLOW_KEY_ALG_TCP_UDP;
1617 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP |
1618 			      NIX_FLOW_KEY_TYPE_SCTP))
1619 		return NIX_FLOW_KEY_ALG_TCP_SCTP;
1620 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_UDP |
1621 			      NIX_FLOW_KEY_TYPE_SCTP))
1622 		return NIX_FLOW_KEY_ALG_UDP_SCTP;
1623 	else if (flow_cfg == (ip_cfg | NIX_FLOW_KEY_TYPE_TCP |
1624 			      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP))
1625 		return NIX_FLOW_KEY_ALG_TCP_UDP_SCTP;
1626 
1627 	return NIX_FLOW_KEY_ALG_PORT;
1628 }
1629 
1630 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
1631 					 struct nix_rss_flowkey_cfg *req,
1632 					 struct nix_rss_flowkey_cfg_rsp *rsp)
1633 {
1634 	struct rvu_hwinfo *hw = rvu->hw;
1635 	u16 pcifunc = req->hdr.pcifunc;
1636 	int alg_idx, nixlf, blkaddr;
1637 
1638 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1639 	if (blkaddr < 0)
1640 		return NIX_AF_ERR_AF_LF_INVALID;
1641 
1642 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1643 	if (nixlf < 0)
1644 		return NIX_AF_ERR_AF_LF_INVALID;
1645 
1646 	alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
1647 	rsp->alg_idx = alg_idx;
1648 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
1649 				       alg_idx, req->mcam_index);
1650 	return 0;
1651 }
1652 
1653 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
1654 {
1655 	int idx, nr_field, key_off, field_marker, keyoff_marker;
1656 	int max_key_off, max_bit_pos, group_member;
1657 	struct nix_rx_flowkey_alg *field;
1658 	struct nix_rx_flowkey_alg tmp;
1659 	u32 key_type, valid_key;
1660 
1661 	if (!alg)
1662 		return -EINVAL;
1663 
1664 #define FIELDS_PER_ALG  5
1665 #define MAX_KEY_OFF	40
1666 	/* Clear all fields */
1667 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
1668 
	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into the above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate the flowtag or hash.
	 *
	 * The 'for' loop goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
	 *
	 * keyoff_marker - Enabled when the hash byte length needs to be
	 * accounted in the field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when the protocol is part of a group.
	 */
1685 
1686 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
1687 	nr_field = 0; key_off = 0; field_marker = 1;
1688 	field = &tmp; max_bit_pos = fls(flow_cfg);
1689 	for (idx = 0;
1690 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
1691 	     key_off < MAX_KEY_OFF; idx++) {
1692 		key_type = BIT(idx);
1693 		valid_key = flow_cfg & key_type;
1694 		/* Found a field marker, reset the field values */
1695 		if (field_marker)
1696 			memset(&tmp, 0, sizeof(tmp));
1697 
1698 		switch (key_type) {
1699 		case NIX_FLOW_KEY_TYPE_PORT:
1700 			field->sel_chan = true;
1701 			/* This should be set to 1, when SEL_CHAN is set */
1702 			field->bytesm1 = 1;
1703 			field_marker = true;
1704 			keyoff_marker = true;
1705 			break;
1706 		case NIX_FLOW_KEY_TYPE_IPV4:
1707 			field->lid = NPC_LID_LC;
1708 			field->ltype_match = NPC_LT_LC_IP;
1709 			field->hdr_offset = 12; /* SIP offset */
1710 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
1711 			field->ltype_mask = 0xF; /* Match only IPv4 */
1712 			field_marker = true;
1713 			keyoff_marker = false;
1714 			break;
1715 		case NIX_FLOW_KEY_TYPE_IPV6:
1716 			field->lid = NPC_LID_LC;
1717 			field->ltype_match = NPC_LT_LC_IP6;
1718 			field->hdr_offset = 8; /* SIP offset */
1719 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
1720 			field->ltype_mask = 0xF; /* Match only IPv6 */
1721 			field_marker = true;
1722 			keyoff_marker = true;
1723 			break;
1724 		case NIX_FLOW_KEY_TYPE_TCP:
1725 		case NIX_FLOW_KEY_TYPE_UDP:
1726 		case NIX_FLOW_KEY_TYPE_SCTP:
1727 			field->lid = NPC_LID_LD;
1728 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
1729 			if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
1730 				field->ltype_match |= NPC_LT_LD_TCP;
1731 				group_member = true;
1732 			} else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
1733 				   valid_key) {
1734 				field->ltype_match |= NPC_LT_LD_UDP;
1735 				group_member = true;
1736 			} else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
1737 				   valid_key) {
1738 				field->ltype_match |= NPC_LT_LD_SCTP;
1739 				group_member = true;
1740 			}
1741 			field->ltype_mask = ~field->ltype_match;
1742 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
				/* Handle the case where any of the group items
				 * is enabled but the final one is not
				 */
1746 				if (group_member) {
1747 					valid_key = true;
1748 					group_member = false;
1749 				}
1750 				field_marker = true;
1751 				keyoff_marker = true;
1752 			} else {
1753 				field_marker = false;
1754 				keyoff_marker = false;
1755 			}
1756 			break;
1757 		}
1758 		field->ena = 1;
1759 
1760 		/* Found a valid flow key type */
1761 		if (valid_key) {
1762 			field->key_offset = key_off;
1763 			memcpy(&alg[nr_field], field, sizeof(*field));
1764 			max_key_off = max(max_key_off, field->bytesm1 + 1);
1765 
1766 			/* Found a field marker, get the next field */
1767 			if (field_marker)
1768 				nr_field++;
1769 		}
1770 
1771 		/* Found a keyoff marker, update the new key_off */
1772 		if (keyoff_marker) {
1773 			key_off += max_key_off;
1774 			max_key_off = 0;
1775 		}
1776 	}
1777 	/* Processed all the flow key types */
1778 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
1779 		return 0;
1780 	else
1781 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
1782 }
1783 
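/* Pre-program the fixed set of RSS flow key algorithms (port only, IP
 * 2-tuple, and the TCP/UDP/SCTP 4-tuple combinations) whose indices are
 * returned by get_flowkey_alg_idx().
 */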
1784 static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
1785 {
1786 	u64 field[NIX_FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
1787 	u32 flowkey_cfg, minkey_cfg;
1788 	int alg, fid;
1789 
1790 	memset(&field, 0, sizeof(u64) * NIX_FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
1791 
1792 	/* Only incoming channel number */
1793 	flowkey_cfg = NIX_FLOW_KEY_TYPE_PORT;
1794 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_PORT], flowkey_cfg);
1795 
	/* For an incoming pkt, if none of the fields match then the flowkey
	 * will be zero, hence the generated tag will also be zero.
	 * The RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
	 * then be used to queue the packet.
	 */
1801 
1802 	/* IPv4/IPv6 SIP/DIPs */
1803 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
1804 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_IP], flowkey_cfg);
1805 
1806 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1807 	minkey_cfg = flowkey_cfg;
1808 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
1809 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP], flowkey_cfg);
1810 
1811 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1812 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
1813 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_UDP], flowkey_cfg);
1814 
1815 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1816 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
1817 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_SCTP], flowkey_cfg);
1818 
1819 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
1820 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
1821 			NIX_FLOW_KEY_TYPE_UDP;
1822 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP_UDP],
1823 			   flowkey_cfg);
1824 
1825 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1826 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
1827 			NIX_FLOW_KEY_TYPE_SCTP;
1828 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP_SCTP],
1829 			   flowkey_cfg);
1830 
1831 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1832 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
1833 			NIX_FLOW_KEY_TYPE_SCTP;
1834 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_UDP_SCTP],
1835 			   flowkey_cfg);
1836 
1837 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1838 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
1839 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
1840 	set_flowkey_fields((void *)&field[NIX_FLOW_KEY_ALG_TCP_UDP_SCTP],
1841 			   flowkey_cfg);
1842 
1843 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
1844 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
1845 			rvu_write64(rvu, blkaddr,
1846 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
1847 				    field[alg][fid]);
1848 	}
1849 }
1850 
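/* Mbox handler: set the unicast MAC address of the requesting PF/VF.
 * Stores the new address, reinstalls the NPC MCAM unicast entry on the
 * PF/VF's Rx channel and refreshes its rxvlan entry.
 */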
1851 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
1852 				      struct nix_set_mac_addr *req,
1853 				      struct msg_rsp *rsp)
1854 {
1855 	struct rvu_hwinfo *hw = rvu->hw;
1856 	u16 pcifunc = req->hdr.pcifunc;
1857 	struct rvu_pfvf *pfvf;
1858 	int blkaddr, nixlf;
1859 
1860 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1861 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1862 	if (!pfvf->nixlf || blkaddr < 0)
1863 		return NIX_AF_ERR_AF_LF_INVALID;
1864 
1865 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1866 	if (nixlf < 0)
1867 		return NIX_AF_ERR_AF_LF_INVALID;
1868 
1869 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1870 
1871 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
1872 				    pfvf->rx_chan_base, req->mac_addr);
1873 
1874 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
1875 
1876 	return 0;
1877 }
1878 
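/* Mbox handler: configure the Rx mode of the requesting PF/VF. Promisc
 * and allmulti modes install an NPC promiscuous MCAM entry on the PF/VF's
 * Rx channel; otherwise the promiscuous entry is disabled.
 */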
1879 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
1880 				     struct msg_rsp *rsp)
1881 {
1882 	bool allmulti = false, disable_promisc = false;
1883 	struct rvu_hwinfo *hw = rvu->hw;
1884 	u16 pcifunc = req->hdr.pcifunc;
1885 	struct rvu_pfvf *pfvf;
1886 	int blkaddr, nixlf;
1887 
1888 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1889 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1890 	if (!pfvf->nixlf || blkaddr < 0)
1891 		return NIX_AF_ERR_AF_LF_INVALID;
1892 
1893 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1894 	if (nixlf < 0)
1895 		return NIX_AF_ERR_AF_LF_INVALID;
1896 
1897 	if (req->mode & NIX_RX_MODE_PROMISC)
1898 		allmulti = false;
1899 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
1900 		allmulti = true;
1901 	else
1902 		disable_promisc = true;
1903 
1904 	if (disable_promisc)
1905 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
1906 	else
1907 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
1908 					      pfvf->rx_chan_base, allmulti);
1909 
1910 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
1911 
1912 	return 0;
1913 }
1914 
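/* A NIX Rx link is shared by a PF and its VFs, so the frame size limits
 * programmed on the link must cover all of them. Store the requester's
 * max/min lengths and then widen req->maxlen/req->minlen so they span
 * the PF and every VF sharing the link.
 */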
1915 static void nix_find_link_frs(struct rvu *rvu,
1916 			      struct nix_frs_cfg *req, u16 pcifunc)
1917 {
1918 	int pf = rvu_get_pf(pcifunc);
1919 	struct rvu_pfvf *pfvf;
1920 	int maxlen, minlen;
1921 	int numvfs, hwvf;
1922 	int vf;
1923 
1924 	/* Update with requester's min/max lengths */
1925 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1926 	pfvf->maxlen = req->maxlen;
1927 	if (req->update_minlen)
1928 		pfvf->minlen = req->minlen;
1929 
1930 	maxlen = req->maxlen;
1931 	minlen = req->update_minlen ? req->minlen : 0;
1932 
1933 	/* Get this PF's numVFs and starting hwvf */
1934 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
1935 
1936 	/* For each VF, compare requested max/minlen */
1937 	for (vf = 0; vf < numvfs; vf++) {
1938 		pfvf = &rvu->hwvf[hwvf + vf];
1939 		if (pfvf->maxlen > maxlen)
1940 			maxlen = pfvf->maxlen;
1941 		if (req->update_minlen &&
1942 		    pfvf->minlen && pfvf->minlen < minlen)
1943 			minlen = pfvf->minlen;
1944 	}
1945 
1946 	/* Compare requested max/minlen with PF's max/minlen */
1947 	pfvf = &rvu->pf[pf];
1948 	if (pfvf->maxlen > maxlen)
1949 		maxlen = pfvf->maxlen;
1950 	if (req->update_minlen &&
1951 	    pfvf->minlen && pfvf->minlen < minlen)
1952 		minlen = pfvf->minlen;
1953 
1954 	/* Update the request with the max/min across the PF and its VFs */
1955 	req->maxlen = maxlen;
1956 	if (req->update_minlen)
1957 		req->minlen = minlen;
1958 }
1959 
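/* Mbox handler: update HW max/min frame sizes for the requester.
 * Optionally updates every SMQ owned by the PF/VF, then programs the
 * Rx link (CGX/LBK/SDP) the requester maps to and, for CGX links,
 * recomputes Tx link credits from the per-LMAC FIFO size and new maxlen.
 */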
1960 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
1961 				    struct msg_rsp *rsp)
1962 {
1963 	struct rvu_hwinfo *hw = rvu->hw;
1964 	u16 pcifunc = req->hdr.pcifunc;
1965 	int pf = rvu_get_pf(pcifunc);
1966 	int blkaddr, schq, link = -1;
1967 	struct nix_txsch *txsch;
1968 	u64 cfg, lmac_fifo_len;
1969 	struct nix_hw *nix_hw;
1970 	u8 cgx = 0, lmac = 0;
1971 
1972 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1973 	if (blkaddr < 0)
1974 		return NIX_AF_ERR_AF_LF_INVALID;
1975 
1976 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1977 	if (!nix_hw)
1978 		return -EINVAL;
1979 
1980 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
1981 		return NIX_AF_ERR_FRS_INVALID;
1982 
1983 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
1984 		return NIX_AF_ERR_FRS_INVALID;
1985 
1986 	/* Check if the requester wants to update SMQs */
1987 	if (!req->update_smq)
1988 		goto rx_frscfg;
1989 
1990 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
1991 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1992 	mutex_lock(&rvu->rsrc_lock);
1993 	for (schq = 0; schq < txsch->schq.max; schq++) {
1994 		if (txsch->pfvf_map[schq] != pcifunc)
1995 			continue;
1996 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1997 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
1998 		if (req->update_minlen)
1999 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2000 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2001 	}
2002 	mutex_unlock(&rvu->rsrc_lock);
2003 
2004 rx_frscfg:
2005 	/* Check if config is for SDP link */
2006 	if (req->sdp_link) {
2007 		if (!hw->sdp_links)
2008 			return NIX_AF_ERR_RX_LINK_INVALID;
2009 		link = hw->cgx_links + hw->lbk_links;
2010 		goto linkcfg;
2011 	}
2012 
2013 	/* Check if the request is from CGX mapped RVU PF */
2014 	if (is_pf_cgxmapped(rvu, pf)) {
2015 		/* Get CGX and LMAC to which this PF is mapped and find link */
2016 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2017 		link = (cgx * hw->lmac_per_cgx) + lmac;
2018 	} else if (pf == 0) {
2019 		/* For VFs of PF0, ingress is the LBK port; config LBK link */
2020 		link = hw->cgx_links;
2021 	}
2022 
2023 	if (link < 0)
2024 		return NIX_AF_ERR_RX_LINK_INVALID;
2025 
2026 	nix_find_link_frs(rvu, req, pcifunc);
2027 
2028 linkcfg:
2029 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2030 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2031 	if (req->update_minlen)
2032 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
2033 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2034 
2035 	if (req->sdp_link || pf == 0)
2036 		return 0;
2037 
2038 	/* Update transmit credits for CGX links */
2039 	lmac_fifo_len =
2040 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2041 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2042 	cfg &= ~(0xFFFFFULL << 12);
2043 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2044 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2045 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
2046 
2047 	return 0;
2048 }
2049 
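/* Mbox handler: allocate an NPC MCAM entry for Rx VLAN offload on this
 * PF/VF and enable it via rvu_npc_update_rxvlan(). If anything fails
 * after allocation, the MCAM entry is freed so the rxvlan state stays
 * consistent.
 */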
2050 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2051 				      struct msg_rsp *rsp)
2052 {
2053 	struct npc_mcam_alloc_entry_req alloc_req = { };
2054 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2055 	struct npc_mcam_free_entry_req free_req = { };
2056 	u16 pcifunc = req->hdr.pcifunc;
2057 	int blkaddr, nixlf, err;
2058 	struct rvu_pfvf *pfvf;
2059 
2060 	/* LBK VFs do not have a separate MCAM UCAST entry, hence
2061 	 * skip allocating an rxvlan entry for them
2062 	 */
2063 	if (is_afvf(pcifunc))
2064 		return 0;
2065 
2066 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2067 	if (pfvf->rxvlan)
2068 		return 0;
2069 
2070 	/* alloc new mcam entry */
2071 	alloc_req.hdr.pcifunc = pcifunc;
2072 	alloc_req.count = 1;
2073 
2074 	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2075 						    &alloc_rsp);
2076 	if (err)
2077 		return err;
2078 
2079 	/* update entry to enable rxvlan offload */
2080 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2081 	if (blkaddr < 0) {
2082 		err = NIX_AF_ERR_AF_LF_INVALID;
2083 		goto free_entry;
2084 	}
2085 
2086 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2087 	if (nixlf < 0) {
2088 		err = NIX_AF_ERR_AF_LF_INVALID;
2089 		goto free_entry;
2090 	}
2091 
2092 	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2093 	/* all it means is that rxvlan_index is valid */
2094 	pfvf->rxvlan = true;
2095 
2096 	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2097 	if (err)
2098 		goto free_entry;
2099 
2100 	return 0;
2101 free_entry:
2102 	free_req.hdr.pcifunc = pcifunc;
2103 	free_req.entry = alloc_rsp.entry_list[0];
2104 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2105 	pfvf->rxvlan = false;
2106 	return err;
2107 }
2108 
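/* One-time NIX link setup: program default min/max frame sizes on all
 * CGX/LBK/SDP Rx links and initial Tx credits assuming the maximum
 * allowed frame size. Credits get reconfigured when a PF/VF changes its
 * MTU via the nix_set_hw_frs mailbox.
 */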
2109 static void nix_link_config(struct rvu *rvu, int blkaddr)
2110 {
2111 	struct rvu_hwinfo *hw = rvu->hw;
2112 	int cgx, lmac_cnt, slink, link;
2113 	u64 tx_credits;
2114 
2115 	/* Set default min/max packet lengths allowed on NIX Rx links.
2116 	 *
2117 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP
2118 	 * pkts as undersize and report them to SW as error pkts, hence
2119 	 * minlen is set to 40 bytes here.
2120 	 */
2121 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2122 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2123 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2124 	}
2125 
2126 	if (hw->sdp_links) {
2127 		link = hw->cgx_links + hw->lbk_links;
2128 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2129 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2130 	}
2131 
2132 	/* Set credits for Tx links assuming max packet length allowed.
2133 	 * This will be reconfigured based on MTU set for PF/VF.
2134 	 */
2135 	for (cgx = 0; cgx < hw->cgx; cgx++) {
2136 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2137 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2138 		/* Enable credits and set credit pkt count to max allowed */
2139 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2140 		slink = cgx * hw->lmac_per_cgx;
2141 		for (link = slink; link < (slink + lmac_cnt); link++) {
2142 			rvu_write64(rvu, blkaddr,
2143 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
2144 				    tx_credits);
2145 			rvu_write64(rvu, blkaddr,
2146 				    NIX_AF_TX_LINKX_EXPR_CREDIT(link),
2147 				    tx_credits);
2148 		}
2149 	}
2150 
2151 	/* Set Tx credits for LBK link */
2152 	slink = hw->cgx_links;
2153 	for (link = slink; link < (slink + hw->lbk_links); link++) {
2154 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
2155 		/* Enable credits and set credit pkt count to max allowed */
2156 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2157 		rvu_write64(rvu, blkaddr,
2158 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
2159 		rvu_write64(rvu, blkaddr,
2160 			    NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
2161 	}
2162 }
2163 
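/* Run X2P bus calibration and check that every available CGX device and
 * the LBK interface responded. NIX_AF_STATUS holds the per-interface
 * calibration result; a non-responding interface is logged and -EBUSY
 * is returned so NIX init can bail out.
 */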
2164 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
2165 {
2166 	int idx, err;
2167 	u64 status;
2168 
2169 	/* Start X2P bus calibration */
2170 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2171 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
2172 	/* Wait for calibration to complete */
2173 	err = rvu_poll_reg(rvu, blkaddr,
2174 			   NIX_AF_STATUS, BIT_ULL(10), false);
2175 	if (err) {
2176 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
2177 		return err;
2178 	}
2179 
2180 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
2181 	/* Check if CGX devices are ready */
2182 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
2183 		/* Skip when cgx port is not available */
2184 		if (!rvu_cgx_pdata(idx, rvu) ||
2185 		    (status & (BIT_ULL(16 + idx))))
2186 			continue;
2187 		dev_err(rvu->dev,
2188 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
2189 		err = -EBUSY;
2190 	}
2191 
2192 	/* Check if LBK is ready */
2193 	if (!(status & BIT_ULL(19))) {
2194 		dev_err(rvu->dev,
2195 			"LBK didn't respond to NIX X2P calibration\n");
2196 		err = -EBUSY;
2197 	}
2198 
2199 	/* Clear 'calibrate_x2p' bit */
2200 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2201 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
2202 	if (err || (status & 0x3FFULL))
2203 		dev_err(rvu->dev,
2204 			"NIX X2P calibration failed, status 0x%llx\n", status);
2205 	if (err)
2206 		return err;
2207 	return 0;
2208 }
2209 
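/* Initialize the NIX admin queue (AQ): select the AQ endianness matching
 * the host, keep NDC caching enabled for AQ accesses, allocate the
 * instruction and result queues and program the AQ base and size
 * registers.
 */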
2210 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2211 {
2212 	u64 cfg;
2213 	int err;
2214 
2215 	/* Set admin queue endianness */
2216 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
2217 #ifdef __BIG_ENDIAN
2218 	cfg |= BIT_ULL(8);
2219 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2220 #else
2221 	cfg &= ~BIT_ULL(8);
2222 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2223 #endif
2224 
2225 	/* Do not bypass NDC cache */
2226 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2227 	cfg &= ~0x3FFEULL;
2228 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2229 
2230 	/* The result structure can be followed by an RQ/SQ/CQ context at
2231 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending
2232 	 * on the operation type. Alloc sufficient result memory for all ops.
2233 	 */
2234 	err = rvu_aq_alloc(rvu, &block->aq,
2235 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2236 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
2237 	if (err)
2238 		return err;
2239 
2240 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2241 	rvu_write64(rvu, block->addr,
2242 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
2243 	return 0;
2244 }
2245 
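/* One-time NIX block initialization: apply silicon errata workarounds,
 * calibrate the X2P bus, read link counts from NIX_AF_CONST, bring up
 * the admin queue and LSO formats, and for NIX0 also set up Tx
 * schedulers, multicast tables, default protocol checking, RSS flow key
 * algorithms and link credits/frame sizes.
 */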
2246 int rvu_nix_init(struct rvu *rvu)
2247 {
2248 	struct rvu_hwinfo *hw = rvu->hw;
2249 	struct rvu_block *block;
2250 	int blkaddr, err;
2251 	u64 cfg;
2252 
2253 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2254 	if (blkaddr < 0)
2255 		return 0;
2256 	block = &hw->block[blkaddr];
2257 
2258 	/* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
2259 	 * internal state when conditional clocks are turned off.
2260 	 * Hence enable them.
2261 	 */
2262 	if (is_rvu_9xxx_A0(rvu))
2263 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2264 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
2265 
2266 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
2267 	err = nix_calibrate_x2p(rvu, blkaddr);
2268 	if (err)
2269 		return err;
2270 
2271 	/* Set num of links of each type */
2272 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
2273 	hw->cgx = (cfg >> 12) & 0xF;
2274 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
2275 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
2276 	hw->lbk_links = 1;
2277 	hw->sdp_links = 1;
2278 
2279 	/* Initialize admin queue */
2280 	err = nix_aq_init(rvu, block);
2281 	if (err)
2282 		return err;
2283 
2284 	/* Restore CINT timer delay to HW reset values */
2285 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
2286 
2287 	/* Configure segmentation offload formats */
2288 	nix_setup_lso(rvu, blkaddr);
2289 
2290 	if (blkaddr == BLKADDR_NIX0) {
2291 		hw->nix0 = devm_kzalloc(rvu->dev,
2292 					sizeof(struct nix_hw), GFP_KERNEL);
2293 		if (!hw->nix0)
2294 			return -ENOMEM;
2295 
2296 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
2297 		if (err)
2298 			return err;
2299 
2300 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
2301 		if (err)
2302 			return err;
2303 
2304 		/* Config outer L2, IP, TCP and UDP NPC layer info.
2305 		 * This helps the HW protocol checker identify headers
2306 		 * and validate their lengths and checksums.
2307 		 */
2308 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
2309 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
2310 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
2311 			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
2312 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
2313 			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
2314 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
2315 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
2316 
2317 		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
2318 
2319 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
2320 		nix_link_config(rvu, blkaddr);
2321 	}
2322 	return 0;
2323 }
2324 
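/* Free NIX block memory allocated during rvu_nix_init(): the admin
 * queue, the Tx scheduler bitmaps and the multicast context/buffer
 * queues.
 */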
2325 void rvu_nix_freemem(struct rvu *rvu)
2326 {
2327 	struct rvu_hwinfo *hw = rvu->hw;
2328 	struct rvu_block *block;
2329 	struct nix_txsch *txsch;
2330 	struct nix_mcast *mcast;
2331 	struct nix_hw *nix_hw;
2332 	int blkaddr, lvl;
2333 
2334 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2335 	if (blkaddr < 0)
2336 		return;
2337 
2338 	block = &hw->block[blkaddr];
2339 	rvu_aq_free(rvu, block->aq);
2340 
2341 	if (blkaddr == BLKADDR_NIX0) {
2342 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
2343 		if (!nix_hw)
2344 			return;
2345 
2346 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2347 			txsch = &nix_hw->txsch[lvl];
2348 			kfree(txsch->schq.bmap);
2349 		}
2350 
2351 		mcast = &nix_hw->mcast;
2352 		qmem_free(rvu->dev, mcast->mce_ctx);
2353 		qmem_free(rvu->dev, mcast->mcast_buf);
2354 		mutex_destroy(&mcast->mce_lock);
2355 	}
2356 }
2357 
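/* Resolve the NIX LF number attached to a PF/VF; returns
 * NIX_AF_ERR_AF_LF_INVALID if no NIX LF is attached to it.
 */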
2358 static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
2359 {
2360 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
2361 	struct rvu_hwinfo *hw = rvu->hw;
2362 	int blkaddr;
2363 
2364 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2365 	if (!pfvf->nixlf || blkaddr < 0)
2366 		return NIX_AF_ERR_AF_LF_INVALID;
2367 
2368 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2369 	if (*nixlf < 0)
2370 		return NIX_AF_ERR_AF_LF_INVALID;
2371 
2372 	return 0;
2373 }
2374 
2375 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
2376 				     struct msg_rsp *rsp)
2377 {
2378 	u16 pcifunc = req->hdr.pcifunc;
2379 	int nixlf, err;
2380 
2381 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
2382 	if (err)
2383 		return err;
2384 
2385 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
2386 	return 0;
2387 }
2388 
2389 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
2390 				    struct msg_rsp *rsp)
2391 {
2392 	u16 pcifunc = req->hdr.pcifunc;
2393 	int nixlf, err;
2394 
2395 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
2396 	if (err)
2397 		return err;
2398 
2399 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
2400 	return 0;
2401 }
2402 
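/* Tear down a NIX LF: remove its NPC MCAM entries, sync in-flight Rx
 * packets, free its Tx scheduler queues, disable any enabled RQ/SQ/CQ
 * hardware contexts and release the LF's context memory.
 */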
2403 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
2404 {
2405 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
2406 	struct hwctx_disable_req ctx_req;
2407 	int err;
2408 
2409 	ctx_req.hdr.pcifunc = pcifunc;
2410 
2411 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
2412 	nix_interface_deinit(rvu, pcifunc, nixlf);
2413 	nix_rx_sync(rvu, blkaddr);
2414 	nix_txschq_free(rvu, pcifunc);
2415 
2416 	if (pfvf->sq_ctx) {
2417 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
2418 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
2419 		if (err)
2420 			dev_err(rvu->dev, "SQ ctx disable failed\n");
2421 	}
2422 
2423 	if (pfvf->rq_ctx) {
2424 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
2425 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
2426 		if (err)
2427 			dev_err(rvu->dev, "RQ ctx disable failed\n");
2428 	}
2429 
2430 	if (pfvf->cq_ctx) {
2431 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
2432 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
2433 		if (err)
2434 			dev_err(rvu->dev, "CQ ctx disable failed\n");
2435 	}
2436 
2437 	nix_ctx_free(rvu, pfvf);
2438 }
2439