1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
21 
22 enum mc_tbl_sz {
23 	MC_TBL_SZ_256,
24 	MC_TBL_SZ_512,
25 	MC_TBL_SZ_1K,
26 	MC_TBL_SZ_2K,
27 	MC_TBL_SZ_4K,
28 	MC_TBL_SZ_8K,
29 	MC_TBL_SZ_16K,
30 	MC_TBL_SZ_32K,
31 	MC_TBL_SZ_64K,
32 };
33 
34 enum mc_buf_cnt {
35 	MC_BUF_CNT_8,
36 	MC_BUF_CNT_16,
37 	MC_BUF_CNT_32,
38 	MC_BUF_CNT_64,
39 	MC_BUF_CNT_128,
40 	MC_BUF_CNT_256,
41 	MC_BUF_CNT_512,
42 	MC_BUF_CNT_1024,
43 	MC_BUF_CNT_2048,
44 };
45 
46 enum nix_makr_fmt_indexes {
47 	NIX_MARK_CFG_IP_DSCP_RED,
48 	NIX_MARK_CFG_IP_DSCP_YELLOW,
49 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
50 	NIX_MARK_CFG_IP_ECN_RED,
51 	NIX_MARK_CFG_IP_ECN_YELLOW,
52 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
53 	NIX_MARK_CFG_VLAN_DEI_RED,
54 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
55 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
56 	NIX_MARK_CFG_MAX,
57 };
58 
59 /* For now considering MC resources needed for broadcast
60  * pkt replication only. i.e 256 HWVFs + 12 PFs.
61  */
62 #define MC_TBL_SIZE	MC_TBL_SZ_512
63 #define MC_BUF_CNT	MC_BUF_CNT_128
64 
65 struct mce {
66 	struct hlist_node	node;
67 	u16			pcifunc;
68 };
69 
70 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
71 {
72 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
73 	int blkaddr;
74 
75 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
76 	if (!pfvf->nixlf || blkaddr < 0)
77 		return false;
78 	return true;
79 }
80 
81 int rvu_get_nixlf_count(struct rvu *rvu)
82 {
83 	struct rvu_block *block;
84 	int blkaddr;
85 
86 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
87 	if (blkaddr < 0)
88 		return 0;
89 	block = &rvu->hw->block[blkaddr];
90 	return block->lf.max;
91 }
92 
93 static void nix_mce_list_init(struct nix_mce_list *list, int max)
94 {
95 	INIT_HLIST_HEAD(&list->head);
96 	list->count = 0;
97 	list->max = max;
98 }
99 
100 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
101 {
102 	int idx;
103 
104 	if (!mcast)
105 		return 0;
106 
107 	idx = mcast->next_free_mce;
108 	mcast->next_free_mce += count;
109 	return idx;
110 }
111 
112 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
113 {
114 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
115 		return hw->nix0;
116 
117 	return NULL;
118 }
119 
120 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
121 {
122 	int err;
123 
124 	/*Sync all in flight RX packets to LLC/DRAM */
125 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
126 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
127 	if (err)
128 		dev_err(rvu->dev, "NIX RX software sync failed\n");
129 }
130 
131 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
132 			    int lvl, u16 pcifunc, u16 schq)
133 {
134 	struct rvu_hwinfo *hw = rvu->hw;
135 	struct nix_txsch *txsch;
136 	struct nix_hw *nix_hw;
137 	u16 map_func;
138 
139 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
140 	if (!nix_hw)
141 		return false;
142 
143 	txsch = &nix_hw->txsch[lvl];
144 	/* Check out of bounds */
145 	if (schq >= txsch->schq.max)
146 		return false;
147 
148 	mutex_lock(&rvu->rsrc_lock);
149 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
150 	mutex_unlock(&rvu->rsrc_lock);
151 
152 	/* TLs aggegating traffic are shared across PF and VFs */
153 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
154 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
155 			return false;
156 		else
157 			return true;
158 	}
159 
160 	if (map_func != pcifunc)
161 		return false;
162 
163 	return true;
164 }
165 
166 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
167 {
168 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
169 	u8 cgx_id, lmac_id;
170 	int pkind, pf, vf;
171 	int err;
172 
173 	pf = rvu_get_pf(pcifunc);
174 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
175 		return 0;
176 
177 	switch (type) {
178 	case NIX_INTF_TYPE_CGX:
179 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
180 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
181 
182 		pkind = rvu_npc_get_pkind(rvu, pf);
183 		if (pkind < 0) {
184 			dev_err(rvu->dev,
185 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
186 			return -EINVAL;
187 		}
188 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
189 		pfvf->tx_chan_base = pfvf->rx_chan_base;
190 		pfvf->rx_chan_cnt = 1;
191 		pfvf->tx_chan_cnt = 1;
192 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
193 		rvu_npc_set_pkind(rvu, pkind, pfvf);
194 		break;
195 	case NIX_INTF_TYPE_LBK:
196 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
197 
198 		/* Note that AF's VFs work in pairs and talk over consecutive
199 		 * loopback channels.Therefore if odd number of AF VFs are
200 		 * enabled then the last VF remains with no pair.
201 		 */
202 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
203 		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
204 						NIX_CHAN_LBK_CHX(0, vf + 1);
205 		pfvf->rx_chan_cnt = 1;
206 		pfvf->tx_chan_cnt = 1;
207 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
208 					      pfvf->rx_chan_base, false);
209 		break;
210 	}
211 
212 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
213 	 * RVU PF/VF's MAC address.
214 	 */
215 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
216 				    pfvf->rx_chan_base, pfvf->mac_addr);
217 
218 	/* Add this PF_FUNC to bcast pkt replication list */
219 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
220 	if (err) {
221 		dev_err(rvu->dev,
222 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
223 			pcifunc);
224 		return err;
225 	}
226 
227 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
228 					  nixlf, pfvf->rx_chan_base);
229 	pfvf->maxlen = NIC_HW_MIN_FRS;
230 	pfvf->minlen = NIC_HW_MIN_FRS;
231 
232 	return 0;
233 }
234 
235 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
236 {
237 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
238 	int err;
239 
240 	pfvf->maxlen = 0;
241 	pfvf->minlen = 0;
242 	pfvf->rxvlan = false;
243 
244 	/* Remove this PF_FUNC from bcast pkt replication list */
245 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
246 	if (err) {
247 		dev_err(rvu->dev,
248 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
249 			pcifunc);
250 	}
251 
252 	/* Free and disable any MCAM entries used by this NIX LF */
253 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
254 }
255 
256 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
257 				 u64 format, bool v4, u64 *fidx)
258 {
259 	struct nix_lso_format field = {0};
260 
261 	/* IP's Length field */
262 	field.layer = NIX_TXLAYER_OL3;
263 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
264 	field.offset = v4 ? 2 : 4;
265 	field.sizem1 = 1; /* i.e 2 bytes */
266 	field.alg = NIX_LSOALG_ADD_PAYLEN;
267 	rvu_write64(rvu, blkaddr,
268 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
269 		    *(u64 *)&field);
270 
271 	/* No ID field in IPv6 header */
272 	if (!v4)
273 		return;
274 
275 	/* IP's ID field */
276 	field.layer = NIX_TXLAYER_OL3;
277 	field.offset = 4;
278 	field.sizem1 = 1; /* i.e 2 bytes */
279 	field.alg = NIX_LSOALG_ADD_SEGNUM;
280 	rvu_write64(rvu, blkaddr,
281 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
282 		    *(u64 *)&field);
283 }
284 
285 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
286 				 u64 format, u64 *fidx)
287 {
288 	struct nix_lso_format field = {0};
289 
290 	/* TCP's sequence number field */
291 	field.layer = NIX_TXLAYER_OL4;
292 	field.offset = 4;
293 	field.sizem1 = 3; /* i.e 4 bytes */
294 	field.alg = NIX_LSOALG_ADD_OFFSET;
295 	rvu_write64(rvu, blkaddr,
296 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
297 		    *(u64 *)&field);
298 
299 	/* TCP's flags field */
300 	field.layer = NIX_TXLAYER_OL4;
301 	field.offset = 12;
302 	field.sizem1 = 1; /* 2 bytes */
303 	field.alg = NIX_LSOALG_TCP_FLAGS;
304 	rvu_write64(rvu, blkaddr,
305 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
306 		    *(u64 *)&field);
307 }
308 
309 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
310 {
311 	u64 cfg, idx, fidx = 0;
312 
313 	/* Get max HW supported format indices */
314 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
315 	nix_hw->lso.total = cfg;
316 
317 	/* Enable LSO */
318 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
319 	/* For TSO, set first and middle segment flags to
320 	 * mask out PSH, RST & FIN flags in TCP packet
321 	 */
322 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
323 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
324 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
325 
326 	/* Setup default static LSO formats
327 	 *
328 	 * Configure format fields for TCPv4 segmentation offload
329 	 */
330 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
331 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
332 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
333 
334 	/* Set rest of the fields to NOP */
335 	for (; fidx < 8; fidx++) {
336 		rvu_write64(rvu, blkaddr,
337 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
338 	}
339 	nix_hw->lso.in_use++;
340 
341 	/* Configure format fields for TCPv6 segmentation offload */
342 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
343 	fidx = 0;
344 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
345 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
346 
347 	/* Set rest of the fields to NOP */
348 	for (; fidx < 8; fidx++) {
349 		rvu_write64(rvu, blkaddr,
350 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
351 	}
352 	nix_hw->lso.in_use++;
353 }
354 
355 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
356 {
357 	kfree(pfvf->rq_bmap);
358 	kfree(pfvf->sq_bmap);
359 	kfree(pfvf->cq_bmap);
360 	if (pfvf->rq_ctx)
361 		qmem_free(rvu->dev, pfvf->rq_ctx);
362 	if (pfvf->sq_ctx)
363 		qmem_free(rvu->dev, pfvf->sq_ctx);
364 	if (pfvf->cq_ctx)
365 		qmem_free(rvu->dev, pfvf->cq_ctx);
366 	if (pfvf->rss_ctx)
367 		qmem_free(rvu->dev, pfvf->rss_ctx);
368 	if (pfvf->nix_qints_ctx)
369 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
370 	if (pfvf->cq_ints_ctx)
371 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
372 
373 	pfvf->rq_bmap = NULL;
374 	pfvf->cq_bmap = NULL;
375 	pfvf->sq_bmap = NULL;
376 	pfvf->rq_ctx = NULL;
377 	pfvf->sq_ctx = NULL;
378 	pfvf->cq_ctx = NULL;
379 	pfvf->rss_ctx = NULL;
380 	pfvf->nix_qints_ctx = NULL;
381 	pfvf->cq_ints_ctx = NULL;
382 }
383 
384 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
385 			      struct rvu_pfvf *pfvf, int nixlf,
386 			      int rss_sz, int rss_grps, int hwctx_size,
387 			      u64 way_mask)
388 {
389 	int err, grp, num_indices;
390 
391 	/* RSS is not requested for this NIXLF */
392 	if (!rss_sz)
393 		return 0;
394 	num_indices = rss_sz * rss_grps;
395 
396 	/* Alloc NIX RSS HW context memory and config the base */
397 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
398 	if (err)
399 		return err;
400 
401 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
402 		    (u64)pfvf->rss_ctx->iova);
403 
404 	/* Config full RSS table size, enable RSS and caching */
405 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
406 		    BIT_ULL(36) | BIT_ULL(4) |
407 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
408 		    way_mask << 20);
409 	/* Config RSS group offset and sizes */
410 	for (grp = 0; grp < rss_grps; grp++)
411 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
412 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
413 	return 0;
414 }
415 
416 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
417 			       struct nix_aq_inst_s *inst)
418 {
419 	struct admin_queue *aq = block->aq;
420 	struct nix_aq_res_s *result;
421 	int timeout = 1000;
422 	u64 reg, head;
423 
424 	result = (struct nix_aq_res_s *)aq->res->base;
425 
426 	/* Get current head pointer where to append this instruction */
427 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
428 	head = (reg >> 4) & AQ_PTR_MASK;
429 
430 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
431 	       (void *)inst, aq->inst->entry_sz);
432 	memset(result, 0, sizeof(*result));
433 	/* sync into memory */
434 	wmb();
435 
436 	/* Ring the doorbell and wait for result */
437 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
438 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
439 		cpu_relax();
440 		udelay(1);
441 		timeout--;
442 		if (!timeout)
443 			return -EBUSY;
444 	}
445 
446 	if (result->compcode != NIX_AQ_COMP_GOOD)
447 		/* TODO: Replace this with some error code */
448 		return -EBUSY;
449 
450 	return 0;
451 }
452 
453 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
454 			       struct nix_aq_enq_rsp *rsp)
455 {
456 	struct rvu_hwinfo *hw = rvu->hw;
457 	u16 pcifunc = req->hdr.pcifunc;
458 	int nixlf, blkaddr, rc = 0;
459 	struct nix_aq_inst_s inst;
460 	struct rvu_block *block;
461 	struct admin_queue *aq;
462 	struct rvu_pfvf *pfvf;
463 	void *ctx, *mask;
464 	bool ena;
465 	u64 cfg;
466 
467 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
468 	if (blkaddr < 0)
469 		return NIX_AF_ERR_AF_LF_INVALID;
470 
471 	block = &hw->block[blkaddr];
472 	aq = block->aq;
473 	if (!aq) {
474 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
475 		return NIX_AF_ERR_AQ_ENQUEUE;
476 	}
477 
478 	pfvf = rvu_get_pfvf(rvu, pcifunc);
479 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
480 
481 	/* Skip NIXLF check for broadcast MCE entry init */
482 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
483 		if (!pfvf->nixlf || nixlf < 0)
484 			return NIX_AF_ERR_AF_LF_INVALID;
485 	}
486 
487 	switch (req->ctype) {
488 	case NIX_AQ_CTYPE_RQ:
489 		/* Check if index exceeds max no of queues */
490 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
491 			rc = NIX_AF_ERR_AQ_ENQUEUE;
492 		break;
493 	case NIX_AQ_CTYPE_SQ:
494 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
495 			rc = NIX_AF_ERR_AQ_ENQUEUE;
496 		break;
497 	case NIX_AQ_CTYPE_CQ:
498 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
499 			rc = NIX_AF_ERR_AQ_ENQUEUE;
500 		break;
501 	case NIX_AQ_CTYPE_RSS:
502 		/* Check if RSS is enabled and qidx is within range */
503 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
504 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
505 		    (req->qidx >= (256UL << (cfg & 0xF))))
506 			rc = NIX_AF_ERR_AQ_ENQUEUE;
507 		break;
508 	case NIX_AQ_CTYPE_MCE:
509 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
510 		/* Check if index exceeds MCE list length */
511 		if (!hw->nix0->mcast.mce_ctx ||
512 		    (req->qidx >= (256UL << (cfg & 0xF))))
513 			rc = NIX_AF_ERR_AQ_ENQUEUE;
514 
515 		/* Adding multicast lists for requests from PF/VFs is not
516 		 * yet supported, so ignore this.
517 		 */
518 		if (rsp)
519 			rc = NIX_AF_ERR_AQ_ENQUEUE;
520 		break;
521 	default:
522 		rc = NIX_AF_ERR_AQ_ENQUEUE;
523 	}
524 
525 	if (rc)
526 		return rc;
527 
528 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
529 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
530 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
531 	     (req->op == NIX_AQ_INSTOP_WRITE &&
532 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
533 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
534 				     pcifunc, req->sq.smq))
535 			return NIX_AF_ERR_AQ_ENQUEUE;
536 	}
537 
538 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
539 	inst.lf = nixlf;
540 	inst.cindex = req->qidx;
541 	inst.ctype = req->ctype;
542 	inst.op = req->op;
543 	/* Currently we are not supporting enqueuing multiple instructions,
544 	 * so always choose first entry in result memory.
545 	 */
546 	inst.res_addr = (u64)aq->res->iova;
547 
548 	/* Clean result + context memory */
549 	memset(aq->res->base, 0, aq->res->entry_sz);
550 	/* Context needs to be written at RES_ADDR + 128 */
551 	ctx = aq->res->base + 128;
552 	/* Mask needs to be written at RES_ADDR + 256 */
553 	mask = aq->res->base + 256;
554 
555 	switch (req->op) {
556 	case NIX_AQ_INSTOP_WRITE:
557 		if (req->ctype == NIX_AQ_CTYPE_RQ)
558 			memcpy(mask, &req->rq_mask,
559 			       sizeof(struct nix_rq_ctx_s));
560 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
561 			memcpy(mask, &req->sq_mask,
562 			       sizeof(struct nix_sq_ctx_s));
563 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
564 			memcpy(mask, &req->cq_mask,
565 			       sizeof(struct nix_cq_ctx_s));
566 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
567 			memcpy(mask, &req->rss_mask,
568 			       sizeof(struct nix_rsse_s));
569 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
570 			memcpy(mask, &req->mce_mask,
571 			       sizeof(struct nix_rx_mce_s));
572 		/* Fall through */
573 	case NIX_AQ_INSTOP_INIT:
574 		if (req->ctype == NIX_AQ_CTYPE_RQ)
575 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
576 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
577 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
578 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
579 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
580 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
581 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
582 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
583 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
584 		break;
585 	case NIX_AQ_INSTOP_NOP:
586 	case NIX_AQ_INSTOP_READ:
587 	case NIX_AQ_INSTOP_LOCK:
588 	case NIX_AQ_INSTOP_UNLOCK:
589 		break;
590 	default:
591 		rc = NIX_AF_ERR_AQ_ENQUEUE;
592 		return rc;
593 	}
594 
595 	spin_lock(&aq->lock);
596 
597 	/* Submit the instruction to AQ */
598 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
599 	if (rc) {
600 		spin_unlock(&aq->lock);
601 		return rc;
602 	}
603 
604 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
605 	if (req->op == NIX_AQ_INSTOP_INIT) {
606 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
607 			__set_bit(req->qidx, pfvf->rq_bmap);
608 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
609 			__set_bit(req->qidx, pfvf->sq_bmap);
610 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
611 			__set_bit(req->qidx, pfvf->cq_bmap);
612 	}
613 
614 	if (req->op == NIX_AQ_INSTOP_WRITE) {
615 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
616 			ena = (req->rq.ena & req->rq_mask.ena) |
617 				(test_bit(req->qidx, pfvf->rq_bmap) &
618 				~req->rq_mask.ena);
619 			if (ena)
620 				__set_bit(req->qidx, pfvf->rq_bmap);
621 			else
622 				__clear_bit(req->qidx, pfvf->rq_bmap);
623 		}
624 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
625 			ena = (req->rq.ena & req->sq_mask.ena) |
626 				(test_bit(req->qidx, pfvf->sq_bmap) &
627 				~req->sq_mask.ena);
628 			if (ena)
629 				__set_bit(req->qidx, pfvf->sq_bmap);
630 			else
631 				__clear_bit(req->qidx, pfvf->sq_bmap);
632 		}
633 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
634 			ena = (req->rq.ena & req->cq_mask.ena) |
635 				(test_bit(req->qidx, pfvf->cq_bmap) &
636 				~req->cq_mask.ena);
637 			if (ena)
638 				__set_bit(req->qidx, pfvf->cq_bmap);
639 			else
640 				__clear_bit(req->qidx, pfvf->cq_bmap);
641 		}
642 	}
643 
644 	if (rsp) {
645 		/* Copy read context into mailbox */
646 		if (req->op == NIX_AQ_INSTOP_READ) {
647 			if (req->ctype == NIX_AQ_CTYPE_RQ)
648 				memcpy(&rsp->rq, ctx,
649 				       sizeof(struct nix_rq_ctx_s));
650 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
651 				memcpy(&rsp->sq, ctx,
652 				       sizeof(struct nix_sq_ctx_s));
653 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
654 				memcpy(&rsp->cq, ctx,
655 				       sizeof(struct nix_cq_ctx_s));
656 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
657 				memcpy(&rsp->rss, ctx,
658 				       sizeof(struct nix_rsse_s));
659 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
660 				memcpy(&rsp->mce, ctx,
661 				       sizeof(struct nix_rx_mce_s));
662 		}
663 	}
664 
665 	spin_unlock(&aq->lock);
666 	return 0;
667 }
668 
669 static const char *nix_get_ctx_name(int ctype)
670 {
671 	switch (ctype) {
672 	case NIX_AQ_CTYPE_CQ:
673 		return "CQ";
674 	case NIX_AQ_CTYPE_SQ:
675 		return "SQ";
676 	case NIX_AQ_CTYPE_RQ:
677 		return "RQ";
678 	case NIX_AQ_CTYPE_RSS:
679 		return "RSS";
680 	}
681 	return "";
682 }
683 
684 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
685 {
686 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
687 	struct nix_aq_enq_req aq_req;
688 	unsigned long *bmap;
689 	int qidx, q_cnt = 0;
690 	int err = 0, rc;
691 
692 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
693 		return NIX_AF_ERR_AQ_ENQUEUE;
694 
695 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
696 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
697 
698 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
699 		aq_req.cq.ena = 0;
700 		aq_req.cq_mask.ena = 1;
701 		q_cnt = pfvf->cq_ctx->qsize;
702 		bmap = pfvf->cq_bmap;
703 	}
704 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
705 		aq_req.sq.ena = 0;
706 		aq_req.sq_mask.ena = 1;
707 		q_cnt = pfvf->sq_ctx->qsize;
708 		bmap = pfvf->sq_bmap;
709 	}
710 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
711 		aq_req.rq.ena = 0;
712 		aq_req.rq_mask.ena = 1;
713 		q_cnt = pfvf->rq_ctx->qsize;
714 		bmap = pfvf->rq_bmap;
715 	}
716 
717 	aq_req.ctype = req->ctype;
718 	aq_req.op = NIX_AQ_INSTOP_WRITE;
719 
720 	for (qidx = 0; qidx < q_cnt; qidx++) {
721 		if (!test_bit(qidx, bmap))
722 			continue;
723 		aq_req.qidx = qidx;
724 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
725 		if (rc) {
726 			err = rc;
727 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
728 				nix_get_ctx_name(req->ctype), qidx);
729 		}
730 	}
731 
732 	return err;
733 }
734 
735 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
736 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
737 {
738 	struct nix_aq_enq_req lock_ctx_req;
739 	int err;
740 
741 	if (req->op != NIX_AQ_INSTOP_INIT)
742 		return 0;
743 
744 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
745 	    req->ctype == NIX_AQ_CTYPE_DYNO)
746 		return 0;
747 
748 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
749 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
750 	lock_ctx_req.ctype = req->ctype;
751 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
752 	lock_ctx_req.qidx = req->qidx;
753 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
754 	if (err)
755 		dev_err(rvu->dev,
756 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
757 			req->hdr.pcifunc,
758 			nix_get_ctx_name(req->ctype), req->qidx);
759 	return err;
760 }
761 
762 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
763 				struct nix_aq_enq_req *req,
764 				struct nix_aq_enq_rsp *rsp)
765 {
766 	int err;
767 
768 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
769 	if (!err)
770 		err = nix_lf_hwctx_lockdown(rvu, req);
771 	return err;
772 }
773 #else
774 
775 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
776 				struct nix_aq_enq_req *req,
777 				struct nix_aq_enq_rsp *rsp)
778 {
779 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
780 }
781 #endif
782 
783 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
784 				       struct hwctx_disable_req *req,
785 				       struct msg_rsp *rsp)
786 {
787 	return nix_lf_hwctx_disable(rvu, req);
788 }
789 
790 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
791 				  struct nix_lf_alloc_req *req,
792 				  struct nix_lf_alloc_rsp *rsp)
793 {
794 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
795 	struct rvu_hwinfo *hw = rvu->hw;
796 	u16 pcifunc = req->hdr.pcifunc;
797 	struct rvu_block *block;
798 	struct rvu_pfvf *pfvf;
799 	u64 cfg, ctx_cfg;
800 	int blkaddr;
801 
802 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
803 		return NIX_AF_ERR_PARAM;
804 
805 	if (req->way_mask)
806 		req->way_mask &= 0xFFFF;
807 
808 	pfvf = rvu_get_pfvf(rvu, pcifunc);
809 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
810 	if (!pfvf->nixlf || blkaddr < 0)
811 		return NIX_AF_ERR_AF_LF_INVALID;
812 
813 	block = &hw->block[blkaddr];
814 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
815 	if (nixlf < 0)
816 		return NIX_AF_ERR_AF_LF_INVALID;
817 
818 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
819 	if (req->npa_func) {
820 		/* If default, use 'this' NIXLF's PFFUNC */
821 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
822 			req->npa_func = pcifunc;
823 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
824 			return NIX_AF_INVAL_NPA_PF_FUNC;
825 	}
826 
827 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
828 	if (req->sso_func) {
829 		/* If default, use 'this' NIXLF's PFFUNC */
830 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
831 			req->sso_func = pcifunc;
832 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
833 			return NIX_AF_INVAL_SSO_PF_FUNC;
834 	}
835 
836 	/* If RSS is being enabled, check if requested config is valid.
837 	 * RSS table size should be power of two, otherwise
838 	 * RSS_GRP::OFFSET + adder might go beyond that group or
839 	 * won't be able to use entire table.
840 	 */
841 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
842 			    !is_power_of_2(req->rss_sz)))
843 		return NIX_AF_ERR_RSS_SIZE_INVALID;
844 
845 	if (req->rss_sz &&
846 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
847 		return NIX_AF_ERR_RSS_GRPS_INVALID;
848 
849 	/* Reset this NIX LF */
850 	err = rvu_lf_reset(rvu, block, nixlf);
851 	if (err) {
852 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
853 			block->addr - BLKADDR_NIX0, nixlf);
854 		return NIX_AF_ERR_LF_RESET;
855 	}
856 
857 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
858 
859 	/* Alloc NIX RQ HW context memory and config the base */
860 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
861 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
862 	if (err)
863 		goto free_mem;
864 
865 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
866 	if (!pfvf->rq_bmap)
867 		goto free_mem;
868 
869 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
870 		    (u64)pfvf->rq_ctx->iova);
871 
872 	/* Set caching and queue count in HW */
873 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
874 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
875 
876 	/* Alloc NIX SQ HW context memory and config the base */
877 	hwctx_size = 1UL << (ctx_cfg & 0xF);
878 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
879 	if (err)
880 		goto free_mem;
881 
882 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
883 	if (!pfvf->sq_bmap)
884 		goto free_mem;
885 
886 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
887 		    (u64)pfvf->sq_ctx->iova);
888 
889 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
890 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
891 
892 	/* Alloc NIX CQ HW context memory and config the base */
893 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
894 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
895 	if (err)
896 		goto free_mem;
897 
898 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
899 	if (!pfvf->cq_bmap)
900 		goto free_mem;
901 
902 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
903 		    (u64)pfvf->cq_ctx->iova);
904 
905 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
906 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
907 
908 	/* Initialize receive side scaling (RSS) */
909 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
910 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
911 				 req->rss_grps, hwctx_size, req->way_mask);
912 	if (err)
913 		goto free_mem;
914 
915 	/* Alloc memory for CQINT's HW contexts */
916 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
917 	qints = (cfg >> 24) & 0xFFF;
918 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
919 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
920 	if (err)
921 		goto free_mem;
922 
923 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
924 		    (u64)pfvf->cq_ints_ctx->iova);
925 
926 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
927 		    BIT_ULL(36) | req->way_mask << 20);
928 
929 	/* Alloc memory for QINT's HW contexts */
930 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
931 	qints = (cfg >> 12) & 0xFFF;
932 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
933 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
934 	if (err)
935 		goto free_mem;
936 
937 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
938 		    (u64)pfvf->nix_qints_ctx->iova);
939 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
940 		    BIT_ULL(36) | req->way_mask << 20);
941 
942 	/* Setup VLANX TPID's.
943 	 * Use VLAN1 for 802.1Q
944 	 * and VLAN0 for 802.1AD.
945 	 */
946 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
947 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
948 
949 	/* Enable LMTST for this NIX LF */
950 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
951 
952 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
953 	if (req->npa_func)
954 		cfg = req->npa_func;
955 	if (req->sso_func)
956 		cfg |= (u64)req->sso_func << 16;
957 
958 	cfg |= (u64)req->xqe_sz << 33;
959 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
960 
961 	/* Config Rx pkt length, csum checks and apad  enable / disable */
962 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
963 
964 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
965 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
966 	if (err)
967 		goto free_mem;
968 
969 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
970 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
971 
972 	goto exit;
973 
974 free_mem:
975 	nix_ctx_free(rvu, pfvf);
976 	rc = -ENOMEM;
977 
978 exit:
979 	/* Set macaddr of this PF/VF */
980 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
981 
982 	/* set SQB size info */
983 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
984 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
985 	rsp->rx_chan_base = pfvf->rx_chan_base;
986 	rsp->tx_chan_base = pfvf->tx_chan_base;
987 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
988 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
989 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
990 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
991 	/* Get HW supported stat count */
992 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
993 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
994 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
995 	/* Get count of CQ IRQs and error IRQs supported per LF */
996 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
997 	rsp->qints = ((cfg >> 12) & 0xFFF);
998 	rsp->cints = ((cfg >> 24) & 0xFFF);
999 	return rc;
1000 }
1001 
1002 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
1003 				 struct msg_rsp *rsp)
1004 {
1005 	struct rvu_hwinfo *hw = rvu->hw;
1006 	u16 pcifunc = req->hdr.pcifunc;
1007 	struct rvu_block *block;
1008 	int blkaddr, nixlf, err;
1009 	struct rvu_pfvf *pfvf;
1010 
1011 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1012 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1013 	if (!pfvf->nixlf || blkaddr < 0)
1014 		return NIX_AF_ERR_AF_LF_INVALID;
1015 
1016 	block = &hw->block[blkaddr];
1017 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1018 	if (nixlf < 0)
1019 		return NIX_AF_ERR_AF_LF_INVALID;
1020 
1021 	nix_interface_deinit(rvu, pcifunc, nixlf);
1022 
1023 	/* Reset this NIX LF */
1024 	err = rvu_lf_reset(rvu, block, nixlf);
1025 	if (err) {
1026 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1027 			block->addr - BLKADDR_NIX0, nixlf);
1028 		return NIX_AF_ERR_LF_RESET;
1029 	}
1030 
1031 	nix_ctx_free(rvu, pfvf);
1032 
1033 	return 0;
1034 }
1035 
1036 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1037 					 struct nix_mark_format_cfg  *req,
1038 					 struct nix_mark_format_cfg_rsp *rsp)
1039 {
1040 	u16 pcifunc = req->hdr.pcifunc;
1041 	struct nix_hw *nix_hw;
1042 	struct rvu_pfvf *pfvf;
1043 	int blkaddr, rc;
1044 	u32 cfg;
1045 
1046 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1047 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1048 	if (!pfvf->nixlf || blkaddr < 0)
1049 		return NIX_AF_ERR_AF_LF_INVALID;
1050 
1051 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1052 	if (!nix_hw)
1053 		return -EINVAL;
1054 
1055 	cfg = (((u32)req->offset & 0x7) << 16) |
1056 	      (((u32)req->y_mask & 0xF) << 12) |
1057 	      (((u32)req->y_val & 0xF) << 8) |
1058 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1059 
1060 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1061 	if (rc < 0) {
1062 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1063 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1064 		return NIX_AF_ERR_MARK_CFG_FAIL;
1065 	}
1066 
1067 	rsp->mark_format_idx = rc;
1068 	return 0;
1069 }
1070 
1071 /* Disable shaping of pkts by a scheduler queue
1072  * at a given scheduler level.
1073  */
1074 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1075 				 int lvl, int schq)
1076 {
1077 	u64  cir_reg = 0, pir_reg = 0;
1078 	u64  cfg;
1079 
1080 	switch (lvl) {
1081 	case NIX_TXSCH_LVL_TL1:
1082 		cir_reg = NIX_AF_TL1X_CIR(schq);
1083 		pir_reg = 0; /* PIR not available at TL1 */
1084 		break;
1085 	case NIX_TXSCH_LVL_TL2:
1086 		cir_reg = NIX_AF_TL2X_CIR(schq);
1087 		pir_reg = NIX_AF_TL2X_PIR(schq);
1088 		break;
1089 	case NIX_TXSCH_LVL_TL3:
1090 		cir_reg = NIX_AF_TL3X_CIR(schq);
1091 		pir_reg = NIX_AF_TL3X_PIR(schq);
1092 		break;
1093 	case NIX_TXSCH_LVL_TL4:
1094 		cir_reg = NIX_AF_TL4X_CIR(schq);
1095 		pir_reg = NIX_AF_TL4X_PIR(schq);
1096 		break;
1097 	}
1098 
1099 	if (!cir_reg)
1100 		return;
1101 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1102 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1103 
1104 	if (!pir_reg)
1105 		return;
1106 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1107 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1108 }
1109 
1110 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1111 				 int lvl, int schq)
1112 {
1113 	struct rvu_hwinfo *hw = rvu->hw;
1114 	int link;
1115 
1116 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1117 		return;
1118 
1119 	/* Reset TL4's SDP link config */
1120 	if (lvl == NIX_TXSCH_LVL_TL4)
1121 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1122 
1123 	if (lvl != NIX_TXSCH_LVL_TL2)
1124 		return;
1125 
1126 	/* Reset TL2's CGX or LBK link config */
1127 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1128 		rvu_write64(rvu, blkaddr,
1129 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1130 }
1131 
1132 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1133 {
1134 	struct rvu_hwinfo *hw = rvu->hw;
1135 	int pf = rvu_get_pf(pcifunc);
1136 	u8 cgx_id = 0, lmac_id = 0;
1137 
1138 	if (is_afvf(pcifunc)) {/* LBK links */
1139 		return hw->cgx_links;
1140 	} else if (is_pf_cgxmapped(rvu, pf)) {
1141 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1142 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1143 	}
1144 
1145 	/* SDP link */
1146 	return hw->cgx_links + hw->lbk_links;
1147 }
1148 
1149 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1150 				 int link, int *start, int *end)
1151 {
1152 	struct rvu_hwinfo *hw = rvu->hw;
1153 	int pf = rvu_get_pf(pcifunc);
1154 
1155 	if (is_afvf(pcifunc)) { /* LBK links */
1156 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1157 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1158 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1159 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1160 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1161 	} else { /* SDP link */
1162 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1163 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1164 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1165 	}
1166 }
1167 
1168 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1169 				      struct nix_hw *nix_hw,
1170 				      struct nix_txsch_alloc_req *req)
1171 {
1172 	struct rvu_hwinfo *hw = rvu->hw;
1173 	int schq, req_schq, free_cnt;
1174 	struct nix_txsch *txsch;
1175 	int link, start, end;
1176 
1177 	txsch = &nix_hw->txsch[lvl];
1178 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1179 
1180 	if (!req_schq)
1181 		return 0;
1182 
1183 	link = nix_get_tx_link(rvu, pcifunc);
1184 
1185 	/* For traffic aggregating scheduler level, one queue is enough */
1186 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1187 		if (req_schq != 1)
1188 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1189 		return 0;
1190 	}
1191 
1192 	/* Get free SCHQ count and check if request can be accomodated */
1193 	if (hw->cap.nix_fixed_txschq_mapping) {
1194 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1195 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1196 		if (end <= txsch->schq.max && schq < end &&
1197 		    !test_bit(schq, txsch->schq.bmap))
1198 			free_cnt = 1;
1199 		else
1200 			free_cnt = 0;
1201 	} else {
1202 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1203 	}
1204 
1205 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1206 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1207 
1208 	/* If contiguous queues are needed, check for availability */
1209 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1210 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1211 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1212 
1213 	return 0;
1214 }
1215 
1216 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1217 			    struct nix_txsch_alloc_rsp *rsp,
1218 			    int lvl, int start, int end)
1219 {
1220 	struct rvu_hwinfo *hw = rvu->hw;
1221 	u16 pcifunc = rsp->hdr.pcifunc;
1222 	int idx, schq;
1223 
1224 	/* For traffic aggregating levels, queue alloc is based
1225 	 * on transmit link to which PF_FUNC is mapped to.
1226 	 */
1227 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1228 		/* A single TL queue is allocated */
1229 		if (rsp->schq_contig[lvl]) {
1230 			rsp->schq_contig[lvl] = 1;
1231 			rsp->schq_contig_list[lvl][0] = start;
1232 		}
1233 
1234 		/* Both contig and non-contig reqs doesn't make sense here */
1235 		if (rsp->schq_contig[lvl])
1236 			rsp->schq[lvl] = 0;
1237 
1238 		if (rsp->schq[lvl]) {
1239 			rsp->schq[lvl] = 1;
1240 			rsp->schq_list[lvl][0] = start;
1241 		}
1242 		return;
1243 	}
1244 
1245 	/* Adjust the queue request count if HW supports
1246 	 * only one queue per level configuration.
1247 	 */
1248 	if (hw->cap.nix_fixed_txschq_mapping) {
1249 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1250 		schq = start + idx;
1251 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1252 			rsp->schq_contig[lvl] = 0;
1253 			rsp->schq[lvl] = 0;
1254 			return;
1255 		}
1256 
1257 		if (rsp->schq_contig[lvl]) {
1258 			rsp->schq_contig[lvl] = 1;
1259 			set_bit(schq, txsch->schq.bmap);
1260 			rsp->schq_contig_list[lvl][0] = schq;
1261 			rsp->schq[lvl] = 0;
1262 		} else if (rsp->schq[lvl]) {
1263 			rsp->schq[lvl] = 1;
1264 			set_bit(schq, txsch->schq.bmap);
1265 			rsp->schq_list[lvl][0] = schq;
1266 		}
1267 		return;
1268 	}
1269 
1270 	/* Allocate contiguous queue indices requesty first */
1271 	if (rsp->schq_contig[lvl]) {
1272 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1273 						  txsch->schq.max, start,
1274 						  rsp->schq_contig[lvl], 0);
1275 		if (schq >= end)
1276 			rsp->schq_contig[lvl] = 0;
1277 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1278 			set_bit(schq, txsch->schq.bmap);
1279 			rsp->schq_contig_list[lvl][idx] = schq;
1280 			schq++;
1281 		}
1282 	}
1283 
1284 	/* Allocate non-contiguous queue indices */
1285 	if (rsp->schq[lvl]) {
1286 		idx = 0;
1287 		for (schq = start; schq < end; schq++) {
1288 			if (!test_bit(schq, txsch->schq.bmap)) {
1289 				set_bit(schq, txsch->schq.bmap);
1290 				rsp->schq_list[lvl][idx++] = schq;
1291 			}
1292 			if (idx == rsp->schq[lvl])
1293 				break;
1294 		}
1295 		/* Update how many were allocated */
1296 		rsp->schq[lvl] = idx;
1297 	}
1298 }
1299 
1300 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1301 				     struct nix_txsch_alloc_req *req,
1302 				     struct nix_txsch_alloc_rsp *rsp)
1303 {
1304 	struct rvu_hwinfo *hw = rvu->hw;
1305 	u16 pcifunc = req->hdr.pcifunc;
1306 	int link, blkaddr, rc = 0;
1307 	int lvl, idx, start, end;
1308 	struct nix_txsch *txsch;
1309 	struct rvu_pfvf *pfvf;
1310 	struct nix_hw *nix_hw;
1311 	u32 *pfvf_map;
1312 	u16 schq;
1313 
1314 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1315 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1316 	if (!pfvf->nixlf || blkaddr < 0)
1317 		return NIX_AF_ERR_AF_LF_INVALID;
1318 
1319 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1320 	if (!nix_hw)
1321 		return -EINVAL;
1322 
1323 	mutex_lock(&rvu->rsrc_lock);
1324 
1325 	/* Check if request is valid as per HW capabilities
1326 	 * and can be accomodated.
1327 	 */
1328 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1329 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1330 		if (rc)
1331 			goto err;
1332 	}
1333 
1334 	/* Allocate requested Tx scheduler queues */
1335 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1336 		txsch = &nix_hw->txsch[lvl];
1337 		pfvf_map = txsch->pfvf_map;
1338 
1339 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1340 			continue;
1341 
1342 		rsp->schq[lvl] = req->schq[lvl];
1343 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1344 
1345 		link = nix_get_tx_link(rvu, pcifunc);
1346 
1347 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1348 			start = link;
1349 			end = link;
1350 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1351 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1352 		} else {
1353 			start = 0;
1354 			end = txsch->schq.max;
1355 		}
1356 
1357 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1358 
1359 		/* Reset queue config */
1360 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1361 			schq = rsp->schq_contig_list[lvl][idx];
1362 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1363 			    NIX_TXSCHQ_CFG_DONE))
1364 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1365 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1366 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1367 		}
1368 
1369 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1370 			schq = rsp->schq_list[lvl][idx];
1371 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1372 			    NIX_TXSCHQ_CFG_DONE))
1373 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1374 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1375 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1376 		}
1377 	}
1378 
1379 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1380 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1381 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1382 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1383 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1384 	goto exit;
1385 err:
1386 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1387 exit:
1388 	mutex_unlock(&rvu->rsrc_lock);
1389 	return rc;
1390 }
1391 
1392 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1393 			  int smq, u16 pcifunc, int nixlf)
1394 {
1395 	int pf = rvu_get_pf(pcifunc);
1396 	u8 cgx_id = 0, lmac_id = 0;
1397 	int err, restore_tx_en = 0;
1398 	u64 cfg;
1399 
1400 	/* enable cgx tx if disabled */
1401 	if (is_pf_cgxmapped(rvu, pf)) {
1402 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1403 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1404 						    lmac_id, true);
1405 	}
1406 
1407 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1408 	/* Do SMQ flush and set enqueue xoff */
1409 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1410 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1411 
1412 	/* Disable backpressure from physical link,
1413 	 * otherwise SMQ flush may stall.
1414 	 */
1415 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1416 
1417 	/* Wait for flush to complete */
1418 	err = rvu_poll_reg(rvu, blkaddr,
1419 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1420 	if (err)
1421 		dev_err(rvu->dev,
1422 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1423 
1424 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1425 	/* restore cgx tx state */
1426 	if (restore_tx_en)
1427 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1428 }
1429 
1430 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1431 {
1432 	int blkaddr, nixlf, lvl, schq, err;
1433 	struct rvu_hwinfo *hw = rvu->hw;
1434 	struct nix_txsch *txsch;
1435 	struct nix_hw *nix_hw;
1436 
1437 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1438 	if (blkaddr < 0)
1439 		return NIX_AF_ERR_AF_LF_INVALID;
1440 
1441 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1442 	if (!nix_hw)
1443 		return -EINVAL;
1444 
1445 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1446 	if (nixlf < 0)
1447 		return NIX_AF_ERR_AF_LF_INVALID;
1448 
1449 	/* Disable TL2/3 queue links before SMQ flush*/
1450 	mutex_lock(&rvu->rsrc_lock);
1451 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1452 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1453 			continue;
1454 
1455 		txsch = &nix_hw->txsch[lvl];
1456 		for (schq = 0; schq < txsch->schq.max; schq++) {
1457 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1458 				continue;
1459 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1460 		}
1461 	}
1462 
1463 	/* Flush SMQs */
1464 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1465 	for (schq = 0; schq < txsch->schq.max; schq++) {
1466 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1467 			continue;
1468 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1469 	}
1470 
1471 	/* Now free scheduler queues to free pool */
1472 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1473 		 /* TLs above aggregation level are shared across all PF
1474 		  * and it's VFs, hence skip freeing them.
1475 		  */
1476 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1477 			continue;
1478 
1479 		txsch = &nix_hw->txsch[lvl];
1480 		for (schq = 0; schq < txsch->schq.max; schq++) {
1481 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1482 				continue;
1483 			rvu_free_rsrc(&txsch->schq, schq);
1484 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1485 		}
1486 	}
1487 	mutex_unlock(&rvu->rsrc_lock);
1488 
1489 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1490 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1491 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1492 	if (err)
1493 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1494 
1495 	return 0;
1496 }
1497 
1498 static int nix_txschq_free_one(struct rvu *rvu,
1499 			       struct nix_txsch_free_req *req)
1500 {
1501 	struct rvu_hwinfo *hw = rvu->hw;
1502 	u16 pcifunc = req->hdr.pcifunc;
1503 	int lvl, schq, nixlf, blkaddr;
1504 	struct nix_txsch *txsch;
1505 	struct nix_hw *nix_hw;
1506 	u32 *pfvf_map;
1507 
1508 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1509 	if (blkaddr < 0)
1510 		return NIX_AF_ERR_AF_LF_INVALID;
1511 
1512 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1513 	if (!nix_hw)
1514 		return -EINVAL;
1515 
1516 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1517 	if (nixlf < 0)
1518 		return NIX_AF_ERR_AF_LF_INVALID;
1519 
1520 	lvl = req->schq_lvl;
1521 	schq = req->schq;
1522 	txsch = &nix_hw->txsch[lvl];
1523 
1524 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1525 		return 0;
1526 
1527 	pfvf_map = txsch->pfvf_map;
1528 	mutex_lock(&rvu->rsrc_lock);
1529 
1530 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1531 		mutex_unlock(&rvu->rsrc_lock);
1532 		goto err;
1533 	}
1534 
1535 	/* Flush if it is a SMQ. Onus of disabling
1536 	 * TL2/3 queue links before SMQ flush is on user
1537 	 */
1538 	if (lvl == NIX_TXSCH_LVL_SMQ)
1539 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1540 
1541 	/* Free the resource */
1542 	rvu_free_rsrc(&txsch->schq, schq);
1543 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1544 	mutex_unlock(&rvu->rsrc_lock);
1545 	return 0;
1546 err:
1547 	return NIX_AF_ERR_TLX_INVALID;
1548 }
1549 
1550 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1551 				    struct nix_txsch_free_req *req,
1552 				    struct msg_rsp *rsp)
1553 {
1554 	if (req->flags & TXSCHQ_FREE_ALL)
1555 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1556 	else
1557 		return nix_txschq_free_one(rvu, req);
1558 }
1559 
1560 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1561 				      int lvl, u64 reg, u64 regval)
1562 {
1563 	u64 regbase = reg & 0xFFFF;
1564 	u16 schq, parent;
1565 
1566 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1567 		return false;
1568 
1569 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1570 	/* Check if this schq belongs to this PF/VF or not */
1571 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1572 		return false;
1573 
1574 	parent = (regval >> 16) & 0x1FF;
1575 	/* Validate MDQ's TL4 parent */
1576 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1577 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1578 		return false;
1579 
1580 	/* Validate TL4's TL3 parent */
1581 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1582 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1583 		return false;
1584 
1585 	/* Validate TL3's TL2 parent */
1586 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1587 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1588 		return false;
1589 
1590 	/* Validate TL2's TL1 parent */
1591 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1592 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1593 		return false;
1594 
1595 	return true;
1596 }
1597 
1598 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1599 {
1600 	u64 regbase;
1601 
1602 	if (hw->cap.nix_shaping)
1603 		return true;
1604 
1605 	/* If shaping and coloring is not supported, then
1606 	 * *_CIR and *_PIR registers should not be configured.
1607 	 */
1608 	regbase = reg & 0xFFFF;
1609 
1610 	switch (lvl) {
1611 	case NIX_TXSCH_LVL_TL1:
1612 		if (regbase == NIX_AF_TL1X_CIR(0))
1613 			return false;
1614 		break;
1615 	case NIX_TXSCH_LVL_TL2:
1616 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1617 		    regbase == NIX_AF_TL2X_PIR(0))
1618 			return false;
1619 		break;
1620 	case NIX_TXSCH_LVL_TL3:
1621 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1622 		    regbase == NIX_AF_TL3X_PIR(0))
1623 			return false;
1624 		break;
1625 	case NIX_TXSCH_LVL_TL4:
1626 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1627 		    regbase == NIX_AF_TL4X_PIR(0))
1628 			return false;
1629 		break;
1630 	}
1631 	return true;
1632 }
1633 
1634 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1635 				u16 pcifunc, int blkaddr)
1636 {
1637 	u32 *pfvf_map;
1638 	int schq;
1639 
1640 	schq = nix_get_tx_link(rvu, pcifunc);
1641 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1642 	/* Skip if PF has already done the config */
1643 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1644 		return;
1645 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1646 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1647 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1648 		    TXSCH_TL1_DFLT_RR_QTM);
1649 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1650 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1651 }
1652 
1653 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1654 				    struct nix_txschq_config *req,
1655 				    struct msg_rsp *rsp)
1656 {
1657 	struct rvu_hwinfo *hw = rvu->hw;
1658 	u16 pcifunc = req->hdr.pcifunc;
1659 	u64 reg, regval, schq_regbase;
1660 	struct nix_txsch *txsch;
1661 	struct nix_hw *nix_hw;
1662 	int blkaddr, idx, err;
1663 	int nixlf, schq;
1664 	u32 *pfvf_map;
1665 
1666 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1667 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1668 		return NIX_AF_INVAL_TXSCHQ_CFG;
1669 
1670 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
1671 	if (err)
1672 		return NIX_AF_ERR_AF_LF_INVALID;
1673 
1674 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1675 	if (blkaddr < 0)
1676 		return NIX_AF_ERR_AF_LF_INVALID;
1677 
1678 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1679 	if (!nix_hw)
1680 		return -EINVAL;
1681 
1682 	txsch = &nix_hw->txsch[req->lvl];
1683 	pfvf_map = txsch->pfvf_map;
1684 
1685 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1686 	    pcifunc & RVU_PFVF_FUNC_MASK) {
1687 		mutex_lock(&rvu->rsrc_lock);
1688 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1689 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1690 		mutex_unlock(&rvu->rsrc_lock);
1691 		return 0;
1692 	}
1693 
1694 	for (idx = 0; idx < req->num_regs; idx++) {
1695 		reg = req->reg[idx];
1696 		regval = req->regval[idx];
1697 		schq_regbase = reg & 0xFFFF;
1698 
1699 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1700 					       txsch->lvl, reg, regval))
1701 			return NIX_AF_INVAL_TXSCHQ_CFG;
1702 
1703 		/* Check if shaping and coloring is supported */
1704 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1705 			continue;
1706 
1707 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1708 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1709 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1710 					   pcifunc, 0);
1711 			regval &= ~(0x7FULL << 24);
1712 			regval |= ((u64)nixlf << 24);
1713 		}
1714 
1715 		/* Clear 'BP_ENA' config, if it's not allowed */
1716 		if (!hw->cap.nix_tx_link_bp) {
1717 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1718 			    (schq_regbase & 0xFF00) ==
1719 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1720 				regval &= ~BIT_ULL(13);
1721 		}
1722 
1723 		/* Mark config as done for TL1 by PF */
1724 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1725 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1726 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1727 			mutex_lock(&rvu->rsrc_lock);
1728 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1729 							NIX_TXSCHQ_CFG_DONE);
1730 			mutex_unlock(&rvu->rsrc_lock);
1731 		}
1732 
1733 		/* SMQ flush is special hence split register writes such
1734 		 * that flush first and write rest of the bits later.
1735 		 */
1736 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1737 		    (regval & BIT_ULL(49))) {
1738 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1739 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1740 			regval &= ~BIT_ULL(49);
1741 		}
1742 		rvu_write64(rvu, blkaddr, reg, regval);
1743 	}
1744 
1745 	return 0;
1746 }
1747 
1748 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1749 			   struct nix_vtag_config *req)
1750 {
1751 	u64 regval = req->vtag_size;
1752 
1753 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1754 		return -EINVAL;
1755 
1756 	if (req->rx.capture_vtag)
1757 		regval |= BIT_ULL(5);
1758 	if (req->rx.strip_vtag)
1759 		regval |= BIT_ULL(4);
1760 
1761 	rvu_write64(rvu, blkaddr,
1762 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1763 	return 0;
1764 }
1765 
1766 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1767 				  struct nix_vtag_config *req,
1768 				  struct msg_rsp *rsp)
1769 {
1770 	struct rvu_hwinfo *hw = rvu->hw;
1771 	u16 pcifunc = req->hdr.pcifunc;
1772 	int blkaddr, nixlf, err;
1773 
1774 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1775 	if (blkaddr < 0)
1776 		return NIX_AF_ERR_AF_LF_INVALID;
1777 
1778 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1779 	if (nixlf < 0)
1780 		return NIX_AF_ERR_AF_LF_INVALID;
1781 
1782 	if (req->cfg_type) {
1783 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1784 		if (err)
1785 			return NIX_AF_ERR_PARAM;
1786 	} else {
1787 		/* TODO: handle tx vtag configuration */
1788 		return 0;
1789 	}
1790 
1791 	return 0;
1792 }
1793 
1794 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1795 			 u16 pcifunc, int next, bool eol)
1796 {
1797 	struct nix_aq_enq_req aq_req;
1798 	int err;
1799 
1800 	aq_req.hdr.pcifunc = 0;
1801 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1802 	aq_req.op = op;
1803 	aq_req.qidx = mce;
1804 
1805 	/* Forward bcast pkts to RQ0, RSS not needed */
1806 	aq_req.mce.op = 0;
1807 	aq_req.mce.index = 0;
1808 	aq_req.mce.eol = eol;
1809 	aq_req.mce.pf_func = pcifunc;
1810 	aq_req.mce.next = next;
1811 
1812 	/* All fields valid */
1813 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1814 
1815 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1816 	if (err) {
1817 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1818 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1819 		return err;
1820 	}
1821 	return 0;
1822 }
1823 
1824 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1825 			       u16 pcifunc, bool add)
1826 {
1827 	struct mce *mce, *tail = NULL;
1828 	bool delete = false;
1829 
1830 	/* Scan through the current list */
1831 	hlist_for_each_entry(mce, &mce_list->head, node) {
1832 		/* If already exists, then delete */
1833 		if (mce->pcifunc == pcifunc && !add) {
1834 			delete = true;
1835 			break;
1836 		}
1837 		tail = mce;
1838 	}
1839 
1840 	if (delete) {
1841 		hlist_del(&mce->node);
1842 		kfree(mce);
1843 		mce_list->count--;
1844 		return 0;
1845 	}
1846 
1847 	if (!add)
1848 		return 0;
1849 
1850 	/* Add a new one to the list, at the tail */
1851 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1852 	if (!mce)
1853 		return -ENOMEM;
1854 	mce->pcifunc = pcifunc;
1855 	if (!tail)
1856 		hlist_add_head(&mce->node, &mce_list->head);
1857 	else
1858 		hlist_add_behind(&mce->node, &tail->node);
1859 	mce_list->count++;
1860 	return 0;
1861 }
1862 
1863 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1864 {
1865 	int err = 0, idx, next_idx, last_idx;
1866 	struct nix_mce_list *mce_list;
1867 	struct nix_mcast *mcast;
1868 	struct nix_hw *nix_hw;
1869 	struct rvu_pfvf *pfvf;
1870 	struct mce *mce;
1871 	int blkaddr;
1872 
1873 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
1874 	if (is_afvf(pcifunc))
1875 		return 0;
1876 
1877 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1878 	if (blkaddr < 0)
1879 		return 0;
1880 
1881 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1882 	if (!nix_hw)
1883 		return 0;
1884 
1885 	mcast = &nix_hw->mcast;
1886 
1887 	/* Get this PF/VF func's MCE index */
1888 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1889 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1890 
1891 	mce_list = &pfvf->bcast_mce_list;
1892 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1893 		dev_err(rvu->dev,
1894 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1895 			__func__, idx, mce_list->max,
1896 			pcifunc >> RVU_PFVF_PF_SHIFT);
1897 		return -EINVAL;
1898 	}
1899 
1900 	mutex_lock(&mcast->mce_lock);
1901 
1902 	err = nix_update_mce_list(mce_list, pcifunc, add);
1903 	if (err)
1904 		goto end;
1905 
1906 	/* Disable MCAM entry in NPC */
1907 	if (!mce_list->count) {
1908 		rvu_npc_disable_bcast_entry(rvu, pcifunc);
1909 		goto end;
1910 	}
1911 
1912 	/* Dump the updated list to HW */
1913 	idx = pfvf->bcast_mce_idx;
1914 	last_idx = idx + mce_list->count - 1;
1915 	hlist_for_each_entry(mce, &mce_list->head, node) {
1916 		if (idx > last_idx)
1917 			break;
1918 
1919 		next_idx = idx + 1;
1920 		/* EOL should be set in last MCE */
1921 		err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
1922 				    mce->pcifunc, next_idx,
1923 				    (next_idx > last_idx) ? true : false);
1924 		if (err)
1925 			goto end;
1926 		idx++;
1927 	}
1928 
1929 end:
1930 	mutex_unlock(&mcast->mce_lock);
1931 	return err;
1932 }
1933 
1934 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1935 {
1936 	struct nix_mcast *mcast = &nix_hw->mcast;
1937 	int err, pf, numvfs, idx;
1938 	struct rvu_pfvf *pfvf;
1939 	u16 pcifunc;
1940 	u64 cfg;
1941 
1942 	/* Skip PF0 (i.e AF) */
1943 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1944 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1945 		/* If PF is not enabled, nothing to do */
1946 		if (!((cfg >> 20) & 0x01))
1947 			continue;
1948 		/* Get numVFs attached to this PF */
1949 		numvfs = (cfg >> 12) & 0xFF;
1950 
1951 		pfvf = &rvu->pf[pf];
1952 		/* Save the start MCE */
1953 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1954 
1955 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1956 
1957 		for (idx = 0; idx < (numvfs + 1); idx++) {
1958 			/* idx-0 is for PF, followed by VFs */
1959 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1960 			pcifunc |= idx;
1961 			/* Add dummy entries now, so that we don't have to check
1962 			 * for whether AQ_OP should be INIT/WRITE later on.
1963 			 * Will be updated when a NIXLF is attached/detached to
1964 			 * these PF/VFs.
1965 			 */
1966 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1967 					    NIX_AQ_INSTOP_INIT,
1968 					    pcifunc, 0, true);
1969 			if (err)
1970 				return err;
1971 		}
1972 	}
1973 	return 0;
1974 }
1975 
1976 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1977 {
1978 	struct nix_mcast *mcast = &nix_hw->mcast;
1979 	struct rvu_hwinfo *hw = rvu->hw;
1980 	int err, size;
1981 
1982 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1983 	size = (1ULL << size);
1984 
1985 	/* Alloc memory for multicast/mirror replication entries */
1986 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1987 			 (256UL << MC_TBL_SIZE), size);
1988 	if (err)
1989 		return -ENOMEM;
1990 
1991 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1992 		    (u64)mcast->mce_ctx->iova);
1993 
1994 	/* Set max list length equal to max no of VFs per PF  + PF itself */
1995 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1996 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1997 
1998 	/* Alloc memory for multicast replication buffers */
1999 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2000 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2001 			 (8UL << MC_BUF_CNT), size);
2002 	if (err)
2003 		return -ENOMEM;
2004 
2005 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2006 		    (u64)mcast->mcast_buf->iova);
2007 
2008 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2009 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2010 
2011 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2012 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2013 		    BIT_ULL(20) | MC_BUF_CNT);
2014 
2015 	mutex_init(&mcast->mce_lock);
2016 
2017 	return nix_setup_bcast_tables(rvu, nix_hw);
2018 }
2019 
2020 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2021 {
2022 	struct nix_txsch *txsch;
2023 	int err, lvl, schq;
2024 	u64 cfg, reg;
2025 
2026 	/* Get scheduler queue count of each type and alloc
2027 	 * bitmap for each for alloc/free/attach operations.
2028 	 */
2029 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2030 		txsch = &nix_hw->txsch[lvl];
2031 		txsch->lvl = lvl;
2032 		switch (lvl) {
2033 		case NIX_TXSCH_LVL_SMQ:
2034 			reg = NIX_AF_MDQ_CONST;
2035 			break;
2036 		case NIX_TXSCH_LVL_TL4:
2037 			reg = NIX_AF_TL4_CONST;
2038 			break;
2039 		case NIX_TXSCH_LVL_TL3:
2040 			reg = NIX_AF_TL3_CONST;
2041 			break;
2042 		case NIX_TXSCH_LVL_TL2:
2043 			reg = NIX_AF_TL2_CONST;
2044 			break;
2045 		case NIX_TXSCH_LVL_TL1:
2046 			reg = NIX_AF_TL1_CONST;
2047 			break;
2048 		}
2049 		cfg = rvu_read64(rvu, blkaddr, reg);
2050 		txsch->schq.max = cfg & 0xFFFF;
2051 		err = rvu_alloc_bitmap(&txsch->schq);
2052 		if (err)
2053 			return err;
2054 
2055 		/* Allocate memory for scheduler queues to
2056 		 * PF/VF pcifunc mapping info.
2057 		 */
2058 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2059 					       sizeof(u32), GFP_KERNEL);
2060 		if (!txsch->pfvf_map)
2061 			return -ENOMEM;
2062 		for (schq = 0; schq < txsch->schq.max; schq++)
2063 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2064 	}
2065 	return 0;
2066 }
2067 
2068 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2069 				int blkaddr, u32 cfg)
2070 {
2071 	int fmt_idx;
2072 
2073 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2074 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2075 			return fmt_idx;
2076 	}
2077 	if (fmt_idx >= nix_hw->mark_format.total)
2078 		return -ERANGE;
2079 
2080 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2081 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2082 	nix_hw->mark_format.in_use++;
2083 	return fmt_idx;
2084 }
2085 
2086 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2087 				    int blkaddr)
2088 {
2089 	u64 cfgs[] = {
2090 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2091 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2092 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2093 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2094 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2095 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2096 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2097 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2098 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2099 	};
2100 	int i, rc;
2101 	u64 total;
2102 
2103 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2104 	nix_hw->mark_format.total = (u8)total;
2105 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2106 					       GFP_KERNEL);
2107 	if (!nix_hw->mark_format.cfg)
2108 		return -ENOMEM;
2109 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2110 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2111 		if (rc < 0)
2112 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2113 				i, rc);
2114 	}
2115 
2116 	return 0;
2117 }
2118 
2119 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2120 				   struct msg_rsp *rsp)
2121 {
2122 	struct rvu_hwinfo *hw = rvu->hw;
2123 	u16 pcifunc = req->hdr.pcifunc;
2124 	int i, nixlf, blkaddr;
2125 	u64 stats;
2126 
2127 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2128 	if (blkaddr < 0)
2129 		return NIX_AF_ERR_AF_LF_INVALID;
2130 
2131 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2132 	if (nixlf < 0)
2133 		return NIX_AF_ERR_AF_LF_INVALID;
2134 
2135 	/* Get stats count supported by HW */
2136 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2137 
2138 	/* Reset tx stats */
2139 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2140 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2141 
2142 	/* Reset rx stats */
2143 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2144 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2145 
2146 	return 0;
2147 }
2148 
2149 /* Returns the ALG index to be set into NPC_RX_ACTION */
2150 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2151 {
2152 	int i;
2153 
2154 	/* Scan over exiting algo entries to find a match */
2155 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2156 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2157 			return i;
2158 
2159 	return -ERANGE;
2160 }
2161 
2162 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2163 {
2164 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2165 	int max_key_off, max_bit_pos, group_member;
2166 	struct nix_rx_flowkey_alg *field;
2167 	struct nix_rx_flowkey_alg tmp;
2168 	u32 key_type, valid_key;
2169 
2170 	if (!alg)
2171 		return -EINVAL;
2172 
2173 #define FIELDS_PER_ALG  5
2174 #define MAX_KEY_OFF	40
2175 	/* Clear all fields */
2176 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2177 
2178 	/* Each of the 32 possible flow key algorithm definitions should
2179 	 * fall into above incremental config (except ALG0). Otherwise a
2180 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2181 	 *
2182 	 * If a different definition or combination needed then NPC MCAM
2183 	 * has to be programmed to filter such pkts and it's action should
2184 	 * point to this definition to calculate flowtag or hash.
2185 	 *
2186 	 * The `for loop` goes over _all_ protocol field and the following
2187 	 * variables depicts the state machine forward progress logic.
2188 	 *
2189 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2190 	 * in field->key_offset update.
2191 	 * field_marker - Enabled when a new field needs to be selected.
2192 	 * group_member - Enabled when protocol is part of a group.
2193 	 */
2194 
2195 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2196 	nr_field = 0; key_off = 0; field_marker = 1;
2197 	field = &tmp; max_bit_pos = fls(flow_cfg);
2198 	for (idx = 0;
2199 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2200 	     key_off < MAX_KEY_OFF; idx++) {
2201 		key_type = BIT(idx);
2202 		valid_key = flow_cfg & key_type;
2203 		/* Found a field marker, reset the field values */
2204 		if (field_marker)
2205 			memset(&tmp, 0, sizeof(tmp));
2206 
2207 		field_marker = true;
2208 		keyoff_marker = true;
2209 		switch (key_type) {
2210 		case NIX_FLOW_KEY_TYPE_PORT:
2211 			field->sel_chan = true;
2212 			/* This should be set to 1, when SEL_CHAN is set */
2213 			field->bytesm1 = 1;
2214 			break;
2215 		case NIX_FLOW_KEY_TYPE_IPV4:
2216 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2217 			field->lid = NPC_LID_LC;
2218 			field->ltype_match = NPC_LT_LC_IP;
2219 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2220 				field->lid = NPC_LID_LG;
2221 				field->ltype_match = NPC_LT_LG_TU_IP;
2222 			}
2223 			field->hdr_offset = 12; /* SIP offset */
2224 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2225 			field->ltype_mask = 0xF; /* Match only IPv4 */
2226 			keyoff_marker = false;
2227 			break;
2228 		case NIX_FLOW_KEY_TYPE_IPV6:
2229 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2230 			field->lid = NPC_LID_LC;
2231 			field->ltype_match = NPC_LT_LC_IP6;
2232 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2233 				field->lid = NPC_LID_LG;
2234 				field->ltype_match = NPC_LT_LG_TU_IP6;
2235 			}
2236 			field->hdr_offset = 8; /* SIP offset */
2237 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2238 			field->ltype_mask = 0xF; /* Match only IPv6 */
2239 			break;
2240 		case NIX_FLOW_KEY_TYPE_TCP:
2241 		case NIX_FLOW_KEY_TYPE_UDP:
2242 		case NIX_FLOW_KEY_TYPE_SCTP:
2243 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2244 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2245 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2246 			field->lid = NPC_LID_LD;
2247 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2248 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2249 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2250 				field->lid = NPC_LID_LH;
2251 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2252 
2253 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2254 			 * so no need to change the ltype_match, just change
2255 			 * the lid for inner protocols
2256 			 */
2257 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2258 				     (int)NPC_LT_LH_TU_TCP);
2259 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2260 				     (int)NPC_LT_LH_TU_UDP);
2261 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2262 				     (int)NPC_LT_LH_TU_SCTP);
2263 
2264 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2265 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2266 			    valid_key) {
2267 				field->ltype_match |= NPC_LT_LD_TCP;
2268 				group_member = true;
2269 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2270 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2271 				   valid_key) {
2272 				field->ltype_match |= NPC_LT_LD_UDP;
2273 				group_member = true;
2274 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2275 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2276 				   valid_key) {
2277 				field->ltype_match |= NPC_LT_LD_SCTP;
2278 				group_member = true;
2279 			}
2280 			field->ltype_mask = ~field->ltype_match;
2281 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2282 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2283 				/* Handle the case where any of the group item
2284 				 * is enabled in the group but not the final one
2285 				 */
2286 				if (group_member) {
2287 					valid_key = true;
2288 					group_member = false;
2289 				}
2290 			} else {
2291 				field_marker = false;
2292 				keyoff_marker = false;
2293 			}
2294 			break;
2295 		case NIX_FLOW_KEY_TYPE_NVGRE:
2296 			field->lid = NPC_LID_LD;
2297 			field->hdr_offset = 4; /* VSID offset */
2298 			field->bytesm1 = 2;
2299 			field->ltype_match = NPC_LT_LD_NVGRE;
2300 			field->ltype_mask = 0xF;
2301 			break;
2302 		case NIX_FLOW_KEY_TYPE_VXLAN:
2303 		case NIX_FLOW_KEY_TYPE_GENEVE:
2304 			field->lid = NPC_LID_LE;
2305 			field->bytesm1 = 2;
2306 			field->hdr_offset = 4;
2307 			field->ltype_mask = 0xF;
2308 			field_marker = false;
2309 			keyoff_marker = false;
2310 
2311 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2312 				field->ltype_match |= NPC_LT_LE_VXLAN;
2313 				group_member = true;
2314 			}
2315 
2316 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2317 				field->ltype_match |= NPC_LT_LE_GENEVE;
2318 				group_member = true;
2319 			}
2320 
2321 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2322 				if (group_member) {
2323 					field->ltype_mask = ~field->ltype_match;
2324 					field_marker = true;
2325 					keyoff_marker = true;
2326 					valid_key = true;
2327 					group_member = false;
2328 				}
2329 			}
2330 			break;
2331 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2332 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2333 			field->lid = NPC_LID_LA;
2334 			field->ltype_match = NPC_LT_LA_ETHER;
2335 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2336 				field->lid = NPC_LID_LF;
2337 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2338 			}
2339 			field->hdr_offset = 0;
2340 			field->bytesm1 = 5; /* DMAC 6 Byte */
2341 			field->ltype_mask = 0xF;
2342 			break;
2343 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2344 			field->lid = NPC_LID_LC;
2345 			field->hdr_offset = 40; /* IPV6 hdr */
2346 			field->bytesm1 = 0; /* 1 Byte ext hdr*/
2347 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2348 			field->ltype_mask = 0xF;
2349 			break;
2350 		case NIX_FLOW_KEY_TYPE_GTPU:
2351 			field->lid = NPC_LID_LE;
2352 			field->hdr_offset = 4;
2353 			field->bytesm1 = 3; /* 4 bytes TID*/
2354 			field->ltype_match = NPC_LT_LE_GTPU;
2355 			field->ltype_mask = 0xF;
2356 			break;
2357 		}
2358 		field->ena = 1;
2359 
2360 		/* Found a valid flow key type */
2361 		if (valid_key) {
2362 			field->key_offset = key_off;
2363 			memcpy(&alg[nr_field], field, sizeof(*field));
2364 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2365 
2366 			/* Found a field marker, get the next field */
2367 			if (field_marker)
2368 				nr_field++;
2369 		}
2370 
2371 		/* Found a keyoff marker, update the new key_off */
2372 		if (keyoff_marker) {
2373 			key_off += max_key_off;
2374 			max_key_off = 0;
2375 		}
2376 	}
2377 	/* Processed all the flow key types */
2378 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2379 		return 0;
2380 	else
2381 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
2382 }
2383 
2384 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2385 {
2386 	u64 field[FIELDS_PER_ALG];
2387 	struct nix_hw *hw;
2388 	int fid, rc;
2389 
2390 	hw = get_nix_hw(rvu->hw, blkaddr);
2391 	if (!hw)
2392 		return -EINVAL;
2393 
2394 	/* No room to add new flow hash algoritham */
2395 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2396 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
2397 
2398 	/* Generate algo fields for the given flow_cfg */
2399 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2400 	if (rc)
2401 		return rc;
2402 
2403 	/* Update ALGX_FIELDX register with generated fields */
2404 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2405 		rvu_write64(rvu, blkaddr,
2406 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2407 							   fid), field[fid]);
2408 
2409 	/* Store the flow_cfg for futher lookup */
2410 	rc = hw->flowkey.in_use;
2411 	hw->flowkey.flowkey[rc] = flow_cfg;
2412 	hw->flowkey.in_use++;
2413 
2414 	return rc;
2415 }
2416 
2417 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2418 					 struct nix_rss_flowkey_cfg *req,
2419 					 struct nix_rss_flowkey_cfg_rsp *rsp)
2420 {
2421 	struct rvu_hwinfo *hw = rvu->hw;
2422 	u16 pcifunc = req->hdr.pcifunc;
2423 	int alg_idx, nixlf, blkaddr;
2424 	struct nix_hw *nix_hw;
2425 
2426 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2427 	if (blkaddr < 0)
2428 		return NIX_AF_ERR_AF_LF_INVALID;
2429 
2430 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2431 	if (nixlf < 0)
2432 		return NIX_AF_ERR_AF_LF_INVALID;
2433 
2434 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2435 	if (!nix_hw)
2436 		return -EINVAL;
2437 
2438 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
2439 	/* Failed to get algo index from the exiting list, reserve new  */
2440 	if (alg_idx < 0) {
2441 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2442 						  req->flowkey_cfg);
2443 		if (alg_idx < 0)
2444 			return alg_idx;
2445 	}
2446 	rsp->alg_idx = alg_idx;
2447 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2448 				       alg_idx, req->mcam_index);
2449 	return 0;
2450 }
2451 
2452 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2453 {
2454 	u32 flowkey_cfg, minkey_cfg;
2455 	int alg, fid, rc;
2456 
2457 	/* Disable all flow key algx fieldx */
2458 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2459 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2460 			rvu_write64(rvu, blkaddr,
2461 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2462 				    0);
2463 	}
2464 
2465 	/* IPv4/IPv6 SIP/DIPs */
2466 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2467 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2468 	if (rc < 0)
2469 		return rc;
2470 
2471 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2472 	minkey_cfg = flowkey_cfg;
2473 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2474 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2475 	if (rc < 0)
2476 		return rc;
2477 
2478 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2479 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2480 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2481 	if (rc < 0)
2482 		return rc;
2483 
2484 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2485 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2486 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2487 	if (rc < 0)
2488 		return rc;
2489 
2490 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2491 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2492 			NIX_FLOW_KEY_TYPE_UDP;
2493 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2494 	if (rc < 0)
2495 		return rc;
2496 
2497 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2498 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2499 			NIX_FLOW_KEY_TYPE_SCTP;
2500 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2501 	if (rc < 0)
2502 		return rc;
2503 
2504 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2505 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
2506 			NIX_FLOW_KEY_TYPE_SCTP;
2507 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2508 	if (rc < 0)
2509 		return rc;
2510 
2511 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2512 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2513 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
2514 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2515 	if (rc < 0)
2516 		return rc;
2517 
2518 	return 0;
2519 }
2520 
2521 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
2522 				      struct nix_set_mac_addr *req,
2523 				      struct msg_rsp *rsp)
2524 {
2525 	struct rvu_hwinfo *hw = rvu->hw;
2526 	u16 pcifunc = req->hdr.pcifunc;
2527 	struct rvu_pfvf *pfvf;
2528 	int blkaddr, nixlf;
2529 
2530 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2531 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2532 	if (!pfvf->nixlf || blkaddr < 0)
2533 		return NIX_AF_ERR_AF_LF_INVALID;
2534 
2535 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2536 	if (nixlf < 0)
2537 		return NIX_AF_ERR_AF_LF_INVALID;
2538 
2539 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
2540 
2541 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
2542 				    pfvf->rx_chan_base, req->mac_addr);
2543 
2544 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2545 
2546 	return 0;
2547 }
2548 
2549 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
2550 				     struct msg_rsp *rsp)
2551 {
2552 	bool allmulti = false, disable_promisc = false;
2553 	struct rvu_hwinfo *hw = rvu->hw;
2554 	u16 pcifunc = req->hdr.pcifunc;
2555 	struct rvu_pfvf *pfvf;
2556 	int blkaddr, nixlf;
2557 
2558 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2559 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2560 	if (!pfvf->nixlf || blkaddr < 0)
2561 		return NIX_AF_ERR_AF_LF_INVALID;
2562 
2563 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2564 	if (nixlf < 0)
2565 		return NIX_AF_ERR_AF_LF_INVALID;
2566 
2567 	if (req->mode & NIX_RX_MODE_PROMISC)
2568 		allmulti = false;
2569 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
2570 		allmulti = true;
2571 	else
2572 		disable_promisc = true;
2573 
2574 	if (disable_promisc)
2575 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
2576 	else
2577 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
2578 					      pfvf->rx_chan_base, allmulti);
2579 
2580 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2581 
2582 	return 0;
2583 }
2584 
2585 static void nix_find_link_frs(struct rvu *rvu,
2586 			      struct nix_frs_cfg *req, u16 pcifunc)
2587 {
2588 	int pf = rvu_get_pf(pcifunc);
2589 	struct rvu_pfvf *pfvf;
2590 	int maxlen, minlen;
2591 	int numvfs, hwvf;
2592 	int vf;
2593 
2594 	/* Update with requester's min/max lengths */
2595 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2596 	pfvf->maxlen = req->maxlen;
2597 	if (req->update_minlen)
2598 		pfvf->minlen = req->minlen;
2599 
2600 	maxlen = req->maxlen;
2601 	minlen = req->update_minlen ? req->minlen : 0;
2602 
2603 	/* Get this PF's numVFs and starting hwvf */
2604 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
2605 
2606 	/* For each VF, compare requested max/minlen */
2607 	for (vf = 0; vf < numvfs; vf++) {
2608 		pfvf =  &rvu->hwvf[hwvf + vf];
2609 		if (pfvf->maxlen > maxlen)
2610 			maxlen = pfvf->maxlen;
2611 		if (req->update_minlen &&
2612 		    pfvf->minlen && pfvf->minlen < minlen)
2613 			minlen = pfvf->minlen;
2614 	}
2615 
2616 	/* Compare requested max/minlen with PF's max/minlen */
2617 	pfvf = &rvu->pf[pf];
2618 	if (pfvf->maxlen > maxlen)
2619 		maxlen = pfvf->maxlen;
2620 	if (req->update_minlen &&
2621 	    pfvf->minlen && pfvf->minlen < minlen)
2622 		minlen = pfvf->minlen;
2623 
2624 	/* Update the request with max/min PF's and it's VF's max/min */
2625 	req->maxlen = maxlen;
2626 	if (req->update_minlen)
2627 		req->minlen = minlen;
2628 }
2629 
2630 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
2631 				    struct msg_rsp *rsp)
2632 {
2633 	struct rvu_hwinfo *hw = rvu->hw;
2634 	u16 pcifunc = req->hdr.pcifunc;
2635 	int pf = rvu_get_pf(pcifunc);
2636 	int blkaddr, schq, link = -1;
2637 	struct nix_txsch *txsch;
2638 	u64 cfg, lmac_fifo_len;
2639 	struct nix_hw *nix_hw;
2640 	u8 cgx = 0, lmac = 0;
2641 
2642 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2643 	if (blkaddr < 0)
2644 		return NIX_AF_ERR_AF_LF_INVALID;
2645 
2646 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2647 	if (!nix_hw)
2648 		return -EINVAL;
2649 
2650 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
2651 		return NIX_AF_ERR_FRS_INVALID;
2652 
2653 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
2654 		return NIX_AF_ERR_FRS_INVALID;
2655 
2656 	/* Check if requester wants to update SMQ's */
2657 	if (!req->update_smq)
2658 		goto rx_frscfg;
2659 
2660 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
2661 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2662 	mutex_lock(&rvu->rsrc_lock);
2663 	for (schq = 0; schq < txsch->schq.max; schq++) {
2664 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2665 			continue;
2666 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
2667 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
2668 		if (req->update_minlen)
2669 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
2670 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
2671 	}
2672 	mutex_unlock(&rvu->rsrc_lock);
2673 
2674 rx_frscfg:
2675 	/* Check if config is for SDP link */
2676 	if (req->sdp_link) {
2677 		if (!hw->sdp_links)
2678 			return NIX_AF_ERR_RX_LINK_INVALID;
2679 		link = hw->cgx_links + hw->lbk_links;
2680 		goto linkcfg;
2681 	}
2682 
2683 	/* Check if the request is from CGX mapped RVU PF */
2684 	if (is_pf_cgxmapped(rvu, pf)) {
2685 		/* Get CGX and LMAC to which this PF is mapped and find link */
2686 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
2687 		link = (cgx * hw->lmac_per_cgx) + lmac;
2688 	} else if (pf == 0) {
2689 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
2690 		link = hw->cgx_links;
2691 	}
2692 
2693 	if (link < 0)
2694 		return NIX_AF_ERR_RX_LINK_INVALID;
2695 
2696 	nix_find_link_frs(rvu, req, pcifunc);
2697 
2698 linkcfg:
2699 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
2700 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
2701 	if (req->update_minlen)
2702 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
2703 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
2704 
2705 	if (req->sdp_link || pf == 0)
2706 		return 0;
2707 
2708 	/* Update transmit credits for CGX links */
2709 	lmac_fifo_len =
2710 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2711 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
2712 	cfg &= ~(0xFFFFFULL << 12);
2713 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
2714 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
2715 	return 0;
2716 }
2717 
2718 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
2719 				      struct msg_rsp *rsp)
2720 {
2721 	struct npc_mcam_alloc_entry_req alloc_req = { };
2722 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
2723 	struct npc_mcam_free_entry_req free_req = { };
2724 	u16 pcifunc = req->hdr.pcifunc;
2725 	int blkaddr, nixlf, err;
2726 	struct rvu_pfvf *pfvf;
2727 
2728 	/* LBK VFs do not have separate MCAM UCAST entry hence
2729 	 * skip allocating rxvlan for them
2730 	 */
2731 	if (is_afvf(pcifunc))
2732 		return 0;
2733 
2734 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2735 	if (pfvf->rxvlan)
2736 		return 0;
2737 
2738 	/* alloc new mcam entry */
2739 	alloc_req.hdr.pcifunc = pcifunc;
2740 	alloc_req.count = 1;
2741 
2742 	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2743 						    &alloc_rsp);
2744 	if (err)
2745 		return err;
2746 
2747 	/* update entry to enable rxvlan offload */
2748 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2749 	if (blkaddr < 0) {
2750 		err = NIX_AF_ERR_AF_LF_INVALID;
2751 		goto free_entry;
2752 	}
2753 
2754 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2755 	if (nixlf < 0) {
2756 		err = NIX_AF_ERR_AF_LF_INVALID;
2757 		goto free_entry;
2758 	}
2759 
2760 	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2761 	/* all it means is that rxvlan_index is valid */
2762 	pfvf->rxvlan = true;
2763 
2764 	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2765 	if (err)
2766 		goto free_entry;
2767 
2768 	return 0;
2769 free_entry:
2770 	free_req.hdr.pcifunc = pcifunc;
2771 	free_req.entry = alloc_rsp.entry_list[0];
2772 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2773 	pfvf->rxvlan = false;
2774 	return err;
2775 }
2776 
2777 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
2778 				    struct msg_rsp *rsp)
2779 {
2780 	struct rvu_hwinfo *hw = rvu->hw;
2781 	u16 pcifunc = req->hdr.pcifunc;
2782 	struct rvu_block *block;
2783 	struct rvu_pfvf *pfvf;
2784 	int nixlf, blkaddr;
2785 	u64 cfg;
2786 
2787 	pfvf = rvu_get_pfvf(rvu, pcifunc);
2788 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2789 	if (!pfvf->nixlf || blkaddr < 0)
2790 		return NIX_AF_ERR_AF_LF_INVALID;
2791 
2792 	block = &hw->block[blkaddr];
2793 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
2794 	if (nixlf < 0)
2795 		return NIX_AF_ERR_AF_LF_INVALID;
2796 
2797 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
2798 	/* Set the interface configuration */
2799 	if (req->len_verify & BIT(0))
2800 		cfg |= BIT_ULL(41);
2801 	else
2802 		cfg &= ~BIT_ULL(41);
2803 
2804 	if (req->len_verify & BIT(1))
2805 		cfg |= BIT_ULL(40);
2806 	else
2807 		cfg &= ~BIT_ULL(40);
2808 
2809 	if (req->csum_verify & BIT(0))
2810 		cfg |= BIT_ULL(37);
2811 	else
2812 		cfg &= ~BIT_ULL(37);
2813 
2814 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
2815 
2816 	return 0;
2817 }
2818 
2819 static void nix_link_config(struct rvu *rvu, int blkaddr)
2820 {
2821 	struct rvu_hwinfo *hw = rvu->hw;
2822 	int cgx, lmac_cnt, slink, link;
2823 	u64 tx_credits;
2824 
2825 	/* Set default min/max packet lengths allowed on NIX Rx links.
2826 	 *
2827 	 * With HW reset minlen value of 60byte, HW will treat ARP pkts
2828 	 * as undersize and report them to SW as error pkts, hence
2829 	 * setting it to 40 bytes.
2830 	 */
2831 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2832 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2833 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2834 	}
2835 
2836 	if (hw->sdp_links) {
2837 		link = hw->cgx_links + hw->lbk_links;
2838 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2839 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2840 	}
2841 
2842 	/* Set credits for Tx links assuming max packet length allowed.
2843 	 * This will be reconfigured based on MTU set for PF/VF.
2844 	 */
2845 	for (cgx = 0; cgx < hw->cgx; cgx++) {
2846 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2847 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2848 		/* Enable credits and set credit pkt count to max allowed */
2849 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2850 		slink = cgx * hw->lmac_per_cgx;
2851 		for (link = slink; link < (slink + lmac_cnt); link++) {
2852 			rvu_write64(rvu, blkaddr,
2853 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
2854 				    tx_credits);
2855 		}
2856 	}
2857 
2858 	/* Set Tx credits for LBK link */
2859 	slink = hw->cgx_links;
2860 	for (link = slink; link < (slink + hw->lbk_links); link++) {
2861 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
2862 		/* Enable credits and set credit pkt count to max allowed */
2863 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2864 		rvu_write64(rvu, blkaddr,
2865 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
2866 	}
2867 }
2868 
2869 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
2870 {
2871 	int idx, err;
2872 	u64 status;
2873 
2874 	/* Start X2P bus calibration */
2875 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2876 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
2877 	/* Wait for calibration to complete */
2878 	err = rvu_poll_reg(rvu, blkaddr,
2879 			   NIX_AF_STATUS, BIT_ULL(10), false);
2880 	if (err) {
2881 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
2882 		return err;
2883 	}
2884 
2885 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
2886 	/* Check if CGX devices are ready */
2887 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
2888 		/* Skip when cgx port is not available */
2889 		if (!rvu_cgx_pdata(idx, rvu) ||
2890 		    (status & (BIT_ULL(16 + idx))))
2891 			continue;
2892 		dev_err(rvu->dev,
2893 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
2894 		err = -EBUSY;
2895 	}
2896 
2897 	/* Check if LBK is ready */
2898 	if (!(status & BIT_ULL(19))) {
2899 		dev_err(rvu->dev,
2900 			"LBK didn't respond to NIX X2P calibration\n");
2901 		err = -EBUSY;
2902 	}
2903 
2904 	/* Clear 'calibrate_x2p' bit */
2905 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2906 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
2907 	if (err || (status & 0x3FFULL))
2908 		dev_err(rvu->dev,
2909 			"NIX X2P calibration failed, status 0x%llx\n", status);
2910 	if (err)
2911 		return err;
2912 	return 0;
2913 }
2914 
2915 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2916 {
2917 	u64 cfg;
2918 	int err;
2919 
2920 	/* Set admin queue endianness */
2921 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
2922 #ifdef __BIG_ENDIAN
2923 	cfg |= BIT_ULL(8);
2924 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2925 #else
2926 	cfg &= ~BIT_ULL(8);
2927 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2928 #endif
2929 
2930 	/* Do not bypass NDC cache */
2931 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2932 	cfg &= ~0x3FFEULL;
2933 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
2934 	/* Disable caching of SQB aka SQEs */
2935 	cfg |= 0x04ULL;
2936 #endif
2937 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2938 
2939 	/* Result structure can be followed by RQ/SQ/CQ context at
2940 	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
2941 	 * operation type. Alloc sufficient result memory for all operations.
2942 	 */
2943 	err = rvu_aq_alloc(rvu, &block->aq,
2944 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2945 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
2946 	if (err)
2947 		return err;
2948 
2949 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2950 	rvu_write64(rvu, block->addr,
2951 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
2952 	return 0;
2953 }
2954 
2955 int rvu_nix_init(struct rvu *rvu)
2956 {
2957 	struct rvu_hwinfo *hw = rvu->hw;
2958 	struct rvu_block *block;
2959 	int blkaddr, err;
2960 	u64 cfg;
2961 
2962 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2963 	if (blkaddr < 0)
2964 		return 0;
2965 	block = &hw->block[blkaddr];
2966 
2967 	if (is_rvu_96xx_B0(rvu)) {
2968 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
2969 		 * internal state when conditional clocks are turned off.
2970 		 * Hence enable them.
2971 		 */
2972 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2973 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
2974 
2975 		/* Set chan/link to backpressure TL3 instead of TL2 */
2976 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
2977 
2978 		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
2979 		 * This sticky mode is known to cause SQ stalls when multiple
2980 		 * SQs are mapped to same SMQ and transmitting pkts at a time.
2981 		 */
2982 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
2983 		cfg &= ~BIT_ULL(15);
2984 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
2985 	}
2986 
2987 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
2988 	err = nix_calibrate_x2p(rvu, blkaddr);
2989 	if (err)
2990 		return err;
2991 
2992 	/* Set num of links of each type */
2993 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
2994 	hw->cgx = (cfg >> 12) & 0xF;
2995 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
2996 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
2997 	hw->lbk_links = 1;
2998 	hw->sdp_links = 1;
2999 
3000 	/* Initialize admin queue */
3001 	err = nix_aq_init(rvu, block);
3002 	if (err)
3003 		return err;
3004 
3005 	/* Restore CINT timer delay to HW reset values */
3006 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3007 
3008 	if (blkaddr == BLKADDR_NIX0) {
3009 		hw->nix0 = devm_kzalloc(rvu->dev,
3010 					sizeof(struct nix_hw), GFP_KERNEL);
3011 		if (!hw->nix0)
3012 			return -ENOMEM;
3013 
3014 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
3015 		if (err)
3016 			return err;
3017 
3018 		err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
3019 		if (err)
3020 			return err;
3021 
3022 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
3023 		if (err)
3024 			return err;
3025 
3026 		/* Configure segmentation offload formats */
3027 		nix_setup_lso(rvu, hw->nix0, blkaddr);
3028 
3029 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3030 		 * This helps HW protocol checker to identify headers
3031 		 * and validate length and checksums.
3032 		 */
3033 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3034 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
3035 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3036 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
3037 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3038 			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
3039 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3040 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
3041 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3042 			    (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
3043 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3044 			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
3045 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3046 			    (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
3047 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3048 			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
3049 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3050 			    (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
3051 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3052 			    (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
3053 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3054 			    (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
3055 			    0x0F);
3056 
3057 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3058 		if (err)
3059 			return err;
3060 
3061 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3062 		nix_link_config(rvu, blkaddr);
3063 	}
3064 	return 0;
3065 }
3066 
3067 void rvu_nix_freemem(struct rvu *rvu)
3068 {
3069 	struct rvu_hwinfo *hw = rvu->hw;
3070 	struct rvu_block *block;
3071 	struct nix_txsch *txsch;
3072 	struct nix_mcast *mcast;
3073 	struct nix_hw *nix_hw;
3074 	int blkaddr, lvl;
3075 
3076 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
3077 	if (blkaddr < 0)
3078 		return;
3079 
3080 	block = &hw->block[blkaddr];
3081 	rvu_aq_free(rvu, block->aq);
3082 
3083 	if (blkaddr == BLKADDR_NIX0) {
3084 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3085 		if (!nix_hw)
3086 			return;
3087 
3088 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3089 			txsch = &nix_hw->txsch[lvl];
3090 			kfree(txsch->schq.bmap);
3091 		}
3092 
3093 		mcast = &nix_hw->mcast;
3094 		qmem_free(rvu->dev, mcast->mce_ctx);
3095 		qmem_free(rvu->dev, mcast->mcast_buf);
3096 		mutex_destroy(&mcast->mce_lock);
3097 	}
3098 }
3099 
3100 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
3101 {
3102 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3103 	struct rvu_hwinfo *hw = rvu->hw;
3104 	int blkaddr;
3105 
3106 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3107 	if (!pfvf->nixlf || blkaddr < 0)
3108 		return NIX_AF_ERR_AF_LF_INVALID;
3109 
3110 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3111 	if (*nixlf < 0)
3112 		return NIX_AF_ERR_AF_LF_INVALID;
3113 
3114 	return 0;
3115 }
3116 
3117 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3118 				     struct msg_rsp *rsp)
3119 {
3120 	u16 pcifunc = req->hdr.pcifunc;
3121 	int nixlf, err;
3122 
3123 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
3124 	if (err)
3125 		return err;
3126 
3127 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3128 
3129 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3130 }
3131 
3132 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3133 				    struct msg_rsp *rsp)
3134 {
3135 	u16 pcifunc = req->hdr.pcifunc;
3136 	int nixlf, err;
3137 
3138 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
3139 	if (err)
3140 		return err;
3141 
3142 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
3143 
3144 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3145 }
3146 
3147 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3148 {
3149 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3150 	struct hwctx_disable_req ctx_req;
3151 	int err;
3152 
3153 	ctx_req.hdr.pcifunc = pcifunc;
3154 
3155 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3156 	nix_interface_deinit(rvu, pcifunc, nixlf);
3157 	nix_rx_sync(rvu, blkaddr);
3158 	nix_txschq_free(rvu, pcifunc);
3159 
3160 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3161 
3162 	if (pfvf->sq_ctx) {
3163 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3164 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3165 		if (err)
3166 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3167 	}
3168 
3169 	if (pfvf->rq_ctx) {
3170 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3171 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3172 		if (err)
3173 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3174 	}
3175 
3176 	if (pfvf->cq_ctx) {
3177 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3178 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3179 		if (err)
3180 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3181 	}
3182 
3183 	nix_ctx_free(rvu, pfvf);
3184 }
3185 
3186 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3187 					struct nix_lso_format_cfg *req,
3188 					struct nix_lso_format_cfg_rsp *rsp)
3189 {
3190 	u16 pcifunc = req->hdr.pcifunc;
3191 	struct nix_hw *nix_hw;
3192 	struct rvu_pfvf *pfvf;
3193 	int blkaddr, idx, f;
3194 	u64 reg;
3195 
3196 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3197 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3198 	if (!pfvf->nixlf || blkaddr < 0)
3199 		return NIX_AF_ERR_AF_LF_INVALID;
3200 
3201 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3202 	if (!nix_hw)
3203 		return -EINVAL;
3204 
3205 	/* Find existing matching LSO format, if any */
3206 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3207 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3208 			reg = rvu_read64(rvu, blkaddr,
3209 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3210 			if (req->fields[f] != (reg & req->field_mask))
3211 				break;
3212 		}
3213 
3214 		if (f == NIX_LSO_FIELD_MAX)
3215 			break;
3216 	}
3217 
3218 	if (idx < nix_hw->lso.in_use) {
3219 		/* Match found */
3220 		rsp->lso_format_idx = idx;
3221 		return 0;
3222 	}
3223 
3224 	if (nix_hw->lso.in_use == nix_hw->lso.total)
3225 		return NIX_AF_ERR_LSO_CFG_FAIL;
3226 
3227 	rsp->lso_format_idx = nix_hw->lso.in_use++;
3228 
3229 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3230 		rvu_write64(rvu, blkaddr,
3231 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3232 			    req->fields[f]);
3233 
3234 	return 0;
3235 }
3236