1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 			    int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
25 			       int type, bool add);
26 static int nix_setup_ipolicers(struct rvu *rvu,
27 			       struct nix_hw *nix_hw, int blkaddr);
28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
30 			       struct nix_hw *nix_hw, u16 pcifunc);
31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
33 				     u32 leaf_prof);
34 
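/* Multicast/mirror table size encodings; index 'i' corresponds to a table
 * of (256 << i) entries (cf. the MCE length check in
 * rvu_nix_blk_aq_enq_inst()).
 */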
35 enum mc_tbl_sz {
36 	MC_TBL_SZ_256,
37 	MC_TBL_SZ_512,
38 	MC_TBL_SZ_1K,
39 	MC_TBL_SZ_2K,
40 	MC_TBL_SZ_4K,
41 	MC_TBL_SZ_8K,
42 	MC_TBL_SZ_16K,
43 	MC_TBL_SZ_32K,
44 	MC_TBL_SZ_64K,
45 };
46 
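/* Multicast replication buffer count encodings; index 'i' corresponds to
 * (8 << i) buffers.
 */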
47 enum mc_buf_cnt {
48 	MC_BUF_CNT_8,
49 	MC_BUF_CNT_16,
50 	MC_BUF_CNT_32,
51 	MC_BUF_CNT_64,
52 	MC_BUF_CNT_128,
53 	MC_BUF_CNT_256,
54 	MC_BUF_CNT_512,
55 	MC_BUF_CNT_1024,
56 	MC_BUF_CNT_2048,
57 };
58 
59 enum nix_mark_fmt_indexes {
60 	NIX_MARK_CFG_IP_DSCP_RED,
61 	NIX_MARK_CFG_IP_DSCP_YELLOW,
62 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 	NIX_MARK_CFG_IP_ECN_RED,
64 	NIX_MARK_CFG_IP_ECN_YELLOW,
65 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 	NIX_MARK_CFG_VLAN_DEI_RED,
67 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
69 	NIX_MARK_CFG_MAX,
70 };
71 
72 /* For now, consider only the MC resources needed for broadcast
73  * pkt replication, i.e. 256 HWVFs + 12 PFs.
74  */
75 #define MC_TBL_SIZE	MC_TBL_SZ_512
76 #define MC_BUF_CNT	MC_BUF_CNT_128
77 
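/* Node in a NIX multicast entry (MCE) list; tracks the PF_FUNC that owns
 * the replication entry.
 */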
78 struct mce {
79 	struct hlist_node	node;
80 	u16			pcifunc;
81 };
82 
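/* Walk the NIX blocks present on the silicon: pass blkaddr = 0 to get the
 * first NIX block address and a previous return value to get the next one;
 * returns 0 when there are no more NIX blocks.
 */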
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
84 {
85 	int i = 0;
86 
87 	/* If blkaddr is 0, return the first NIX block address */
88 	if (blkaddr == 0)
89 		return rvu->nix_blkaddr[blkaddr];
90 
91 	while (i + 1 < MAX_NIX_BLKS) {
92 		if (rvu->nix_blkaddr[i] == blkaddr)
93 			return rvu->nix_blkaddr[i + 1];
94 		i++;
95 	}
96 
97 	return 0;
98 }
99 
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
101 {
102 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
103 	int blkaddr;
104 
105 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 	if (!pfvf->nixlf || blkaddr < 0)
107 		return false;
108 	return true;
109 }
110 
111 int rvu_get_nixlf_count(struct rvu *rvu)
112 {
113 	int blkaddr = 0, max = 0;
114 	struct rvu_block *block;
115 
116 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
117 	while (blkaddr) {
118 		block = &rvu->hw->block[blkaddr];
119 		max += block->lf.max;
120 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
121 	}
122 	return max;
123 }
124 
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
126 {
127 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 	struct rvu_hwinfo *hw = rvu->hw;
129 	int blkaddr;
130 
131 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 	if (!pfvf->nixlf || blkaddr < 0)
133 		return NIX_AF_ERR_AF_LF_INVALID;
134 
135 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
136 	if (*nixlf < 0)
137 		return NIX_AF_ERR_AF_LF_INVALID;
138 
139 	if (nix_blkaddr)
140 		*nix_blkaddr = blkaddr;
141 
142 	return 0;
143 }
144 
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 			struct nix_hw **nix_hw, int *blkaddr)
147 {
148 	struct rvu_pfvf *pfvf;
149 
150 	pfvf = rvu_get_pfvf(rvu, pcifunc);
151 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 	if (!pfvf->nixlf || *blkaddr < 0)
153 		return NIX_AF_ERR_AF_LF_INVALID;
154 
155 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
156 	if (!*nix_hw)
157 		return NIX_AF_ERR_INVALID_NIXBLK;
158 	return 0;
159 }
160 
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
162 {
163 	INIT_HLIST_HEAD(&list->head);
164 	list->count = 0;
165 	list->max = max;
166 }
167 
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
169 {
170 	int idx;
171 
172 	if (!mcast)
173 		return 0;
174 
175 	idx = mcast->next_free_mce;
176 	mcast->next_free_mce += count;
177 	return idx;
178 }
179 
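/* Translate a NIX block address into the corresponding nix_hw state held
 * in rvu_hwinfo, or return NULL if the block is not present.
 */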
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
181 {
182 	int nix_blkaddr = 0, i = 0;
183 	struct rvu *rvu = hw->rvu;
184 
185 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 	while (nix_blkaddr) {
187 		if (blkaddr == nix_blkaddr && hw->nix)
188 			return &hw->nix[i];
189 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
190 		i++;
191 	}
192 	return NULL;
193 }
194 
195 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
196 {
197 	int err;
198 
199 	/* Sync all in flight RX packets to LLC/DRAM */
200 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
201 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
202 	if (err)
203 		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
204 
205 	/* SW_SYNC ensures all existing transactions are finished and pkts
206 	 * are written to LLC/DRAM; queues should be torn down only after a
207 	 * successful SW_SYNC. Due to a HW erratum, in some rare scenarios
208 	 * an existing transaction might complete after the SW_SYNC operation.
209 	 * To ensure the operation is fully done, do the SW_SYNC twice.
210 	 */
211 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
212 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
213 	if (err)
214 		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
215 }
216 
217 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
218 			    int lvl, u16 pcifunc, u16 schq)
219 {
220 	struct rvu_hwinfo *hw = rvu->hw;
221 	struct nix_txsch *txsch;
222 	struct nix_hw *nix_hw;
223 	u16 map_func;
224 
225 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
226 	if (!nix_hw)
227 		return false;
228 
229 	txsch = &nix_hw->txsch[lvl];
230 	/* Check out of bounds */
231 	if (schq >= txsch->schq.max)
232 		return false;
233 
234 	mutex_lock(&rvu->rsrc_lock);
235 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
236 	mutex_unlock(&rvu->rsrc_lock);
237 
238 	/* TLs aggregating traffic are shared across the PF and its VFs */
239 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
240 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
241 			return false;
242 		else
243 			return true;
244 	}
245 
246 	if (map_func != pcifunc)
247 		return false;
248 
249 	return true;
250 }
251 
252 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
253 {
254 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
255 	struct mac_ops *mac_ops;
256 	int pkind, pf, vf, lbkid;
257 	u8 cgx_id, lmac_id;
258 	int err;
259 
260 	pf = rvu_get_pf(pcifunc);
261 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
262 		return 0;
263 
264 	switch (type) {
265 	case NIX_INTF_TYPE_CGX:
266 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
267 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
268 
269 		pkind = rvu_npc_get_pkind(rvu, pf);
270 		if (pkind < 0) {
271 			dev_err(rvu->dev,
272 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
273 			return -EINVAL;
274 		}
275 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
276 		pfvf->tx_chan_base = pfvf->rx_chan_base;
277 		pfvf->rx_chan_cnt = 1;
278 		pfvf->tx_chan_cnt = 1;
279 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
280 		rvu_npc_set_pkind(rvu, pkind, pfvf);
281 
282 		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
283 		/* By default we enable pause frames */
284 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
285 			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
286 								    rvu),
287 						      lmac_id, true, true);
288 		break;
289 	case NIX_INTF_TYPE_LBK:
290 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
291 
292 		/* If the NIX1 block is present on the silicon then NIXes are
293 		 * assigned alternately to lbk interfaces. NIX0 should
294 		 * send packets on lbk link 1 channels and NIX1 should send
295 		 * on lbk link 0 channels for the communication between
296 		 * NIX0 and NIX1.
297 		 */
298 		lbkid = 0;
299 		if (rvu->hw->lbk_links > 1)
300 			lbkid = vf & 0x1 ? 0 : 1;
301 
302 		/* Note that AF's VFs work in pairs and talk over consecutive
303 		 * loopback channels. Therefore, if an odd number of AF VFs is
304 		 * enabled, the last VF is left without a pair.
305 		 */
306 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
307 		pfvf->tx_chan_base = vf & 0x1 ?
308 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
309 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
310 		pfvf->rx_chan_cnt = 1;
311 		pfvf->tx_chan_cnt = 1;
312 		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
313 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
314 					      pfvf->rx_chan_base,
315 					      pfvf->rx_chan_cnt);
316 		break;
317 	}
318 
319 	/* Add a UCAST forwarding rule in MCAM matching this NIXLF's
320 	 * attached RVU PF/VF MAC address.
321 	 */
322 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
323 				    pfvf->rx_chan_base, pfvf->mac_addr);
324 
325 	/* Add this PF_FUNC to bcast pkt replication list */
326 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
327 	if (err) {
328 		dev_err(rvu->dev,
329 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
330 			pcifunc);
331 		return err;
332 	}
333 	/* Install MCAM rule matching Ethernet broadcast mac address */
334 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
335 					  nixlf, pfvf->rx_chan_base);
336 
337 	pfvf->maxlen = NIC_HW_MIN_FRS;
338 	pfvf->minlen = NIC_HW_MIN_FRS;
339 
340 	return 0;
341 }
342 
343 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
344 {
345 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
346 	int err;
347 
348 	pfvf->maxlen = 0;
349 	pfvf->minlen = 0;
350 
351 	/* Remove this PF_FUNC from bcast pkt replication list */
352 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
353 	if (err) {
354 		dev_err(rvu->dev,
355 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
356 			pcifunc);
357 	}
358 
359 	/* Free and disable any MCAM entries used by this NIX LF */
360 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
361 
362 	/* Disable DMAC filters used */
363 	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
364 }
365 
366 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
367 				    struct nix_bp_cfg_req *req,
368 				    struct msg_rsp *rsp)
369 {
370 	u16 pcifunc = req->hdr.pcifunc;
371 	struct rvu_pfvf *pfvf;
372 	int blkaddr, pf, type;
373 	u16 chan_base, chan;
374 	u64 cfg;
375 
376 	pf = rvu_get_pf(pcifunc);
377 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
378 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
379 		return 0;
380 
381 	pfvf = rvu_get_pfvf(rvu, pcifunc);
382 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
383 
384 	chan_base = pfvf->rx_chan_base + req->chan_base;
385 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
386 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
387 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
388 			    cfg & ~BIT_ULL(16));
389 	}
390 	return 0;
391 }
392 
393 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
394 			    int type, int chan_id)
395 {
396 	int bpid, blkaddr, lmac_chan_cnt;
397 	struct rvu_hwinfo *hw = rvu->hw;
398 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
399 	struct rvu_pfvf *pfvf;
400 	u8 cgx_id, lmac_id;
401 	u64 cfg;
402 
403 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
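	/* NIX_AF_CONST reports the channel counts: per-LMAC in the low byte
	 * and per-LBK-link in bits [23:16], extracted below.
	 */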
404 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
405 	lmac_chan_cnt = cfg & 0xFF;
406 
407 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
408 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
409 
410 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
411 
412 	/* Backpressure ID range division:
413 	 * CGX channels are mapped to BPIDs (0 - 191)
414 	 * LBK channels are mapped to BPIDs (192 - 255)
415 	 * SDP channels are mapped to BPIDs (256 - 511)
416 	 *
417 	 * LMAC channels and BPIDs are mapped as follows
418 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
419 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
420 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
421 	 */
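	/* Example (assuming 4 LMACs per CGX and 16 channels per LMAC):
	 * cgx(1)_lmac(0)_chan(0) gets bpid = 1 * 4 * 16 + 0 * 16 + 0 = 64,
	 * matching the map above.
	 */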
422 	switch (type) {
423 	case NIX_INTF_TYPE_CGX:
424 		if ((req->chan_base + req->chan_cnt) > 15)
425 			return -EINVAL;
426 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
427 		/* Assign bpid based on cgx, lmac and chan id */
428 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
429 			(lmac_id * lmac_chan_cnt) + req->chan_base;
430 
431 		if (req->bpid_per_chan)
432 			bpid += chan_id;
433 		if (bpid > cgx_bpid_cnt)
434 			return -EINVAL;
435 		break;
436 
437 	case NIX_INTF_TYPE_LBK:
438 		if ((req->chan_base + req->chan_cnt) > 63)
439 			return -EINVAL;
440 		bpid = cgx_bpid_cnt + req->chan_base;
441 		if (req->bpid_per_chan)
442 			bpid += chan_id;
443 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
444 			return -EINVAL;
445 		break;
446 	default:
447 		return -EINVAL;
448 	}
449 	return bpid;
450 }
451 
452 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
453 				   struct nix_bp_cfg_req *req,
454 				   struct nix_bp_cfg_rsp *rsp)
455 {
456 	int blkaddr, pf, type, chan_id = 0;
457 	u16 pcifunc = req->hdr.pcifunc;
458 	struct rvu_pfvf *pfvf;
459 	u16 chan_base, chan;
460 	s16 bpid, bpid_base;
461 	u64 cfg;
462 
463 	pf = rvu_get_pf(pcifunc);
464 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
465 
466 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
467 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
468 		return 0;
469 
470 	pfvf = rvu_get_pfvf(rvu, pcifunc);
471 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
472 
473 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
474 	chan_base = pfvf->rx_chan_base + req->chan_base;
475 	bpid = bpid_base;
476 
477 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
478 		if (bpid < 0) {
479 			dev_warn(rvu->dev, "Failed to enable backpressure\n");
480 			return -EINVAL;
481 		}
482 
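		/* Program the BPID in the low bits of NIX_AF_RX_CHANX_CFG and
		 * set bit 16 to enable backpressure on the channel (cleared
		 * again in rvu_mbox_handler_nix_bp_disable()).
		 */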
483 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
484 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
485 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
486 		chan_id++;
487 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
488 	}
489 
490 	for (chan = 0; chan < req->chan_cnt; chan++) {
491 		/* Map the channel to the BPID assigned to it */
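		/* Channel offset goes in bits [16:10] and the BPID in
		 * bits [9:0] of the response word.
		 */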
492 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
493 					(bpid_base & 0x3FF);
494 		if (req->bpid_per_chan)
495 			bpid_base++;
496 	}
497 	rsp->chan_cnt = req->chan_cnt;
498 
499 	return 0;
500 }
501 
502 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
503 				 u64 format, bool v4, u64 *fidx)
504 {
505 	struct nix_lso_format field = {0};
506 
507 	/* IP's Length field */
508 	field.layer = NIX_TXLAYER_OL3;
509 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
510 	field.offset = v4 ? 2 : 4;
511 	field.sizem1 = 1; /* i.e 2 bytes */
512 	field.alg = NIX_LSOALG_ADD_PAYLEN;
513 	rvu_write64(rvu, blkaddr,
514 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
515 		    *(u64 *)&field);
516 
517 	/* No ID field in IPv6 header */
518 	if (!v4)
519 		return;
520 
521 	/* IP's ID field */
522 	field.layer = NIX_TXLAYER_OL3;
523 	field.offset = 4;
524 	field.sizem1 = 1; /* i.e 2 bytes */
525 	field.alg = NIX_LSOALG_ADD_SEGNUM;
526 	rvu_write64(rvu, blkaddr,
527 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
528 		    *(u64 *)&field);
529 }
530 
531 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
532 				 u64 format, u64 *fidx)
533 {
534 	struct nix_lso_format field = {0};
535 
536 	/* TCP's sequence number field */
537 	field.layer = NIX_TXLAYER_OL4;
538 	field.offset = 4;
539 	field.sizem1 = 3; /* i.e 4 bytes */
540 	field.alg = NIX_LSOALG_ADD_OFFSET;
541 	rvu_write64(rvu, blkaddr,
542 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
543 		    *(u64 *)&field);
544 
545 	/* TCP's flags field */
546 	field.layer = NIX_TXLAYER_OL4;
547 	field.offset = 12;
548 	field.sizem1 = 1; /* 2 bytes */
549 	field.alg = NIX_LSOALG_TCP_FLAGS;
550 	rvu_write64(rvu, blkaddr,
551 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
552 		    *(u64 *)&field);
553 }
554 
555 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
556 {
557 	u64 cfg, idx, fidx = 0;
558 
559 	/* Get max HW supported format indices */
560 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
561 	nix_hw->lso.total = cfg;
562 
563 	/* Enable LSO */
564 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
565 	/* For TSO, set first and middle segment flags to
566 	 * mask out PSH, RST & FIN flags in TCP packet
567 	 */
568 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
569 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
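	/* 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) while
	 * leaving the SYN, ACK and URG flag bits intact.
	 */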
570 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
571 
572 	/* Setup default static LSO formats
573 	 *
574 	 * Configure format fields for TCPv4 segmentation offload
575 	 */
576 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
577 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
578 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
579 
580 	/* Set rest of the fields to NOP */
581 	for (; fidx < 8; fidx++) {
582 		rvu_write64(rvu, blkaddr,
583 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
584 	}
585 	nix_hw->lso.in_use++;
586 
587 	/* Configure format fields for TCPv6 segmentation offload */
588 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
589 	fidx = 0;
590 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
591 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
592 
593 	/* Set rest of the fields to NOP */
594 	for (; fidx < 8; fidx++) {
595 		rvu_write64(rvu, blkaddr,
596 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
597 	}
598 	nix_hw->lso.in_use++;
599 }
600 
601 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
602 {
603 	kfree(pfvf->rq_bmap);
604 	kfree(pfvf->sq_bmap);
605 	kfree(pfvf->cq_bmap);
606 	if (pfvf->rq_ctx)
607 		qmem_free(rvu->dev, pfvf->rq_ctx);
608 	if (pfvf->sq_ctx)
609 		qmem_free(rvu->dev, pfvf->sq_ctx);
610 	if (pfvf->cq_ctx)
611 		qmem_free(rvu->dev, pfvf->cq_ctx);
612 	if (pfvf->rss_ctx)
613 		qmem_free(rvu->dev, pfvf->rss_ctx);
614 	if (pfvf->nix_qints_ctx)
615 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
616 	if (pfvf->cq_ints_ctx)
617 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
618 
619 	pfvf->rq_bmap = NULL;
620 	pfvf->cq_bmap = NULL;
621 	pfvf->sq_bmap = NULL;
622 	pfvf->rq_ctx = NULL;
623 	pfvf->sq_ctx = NULL;
624 	pfvf->cq_ctx = NULL;
625 	pfvf->rss_ctx = NULL;
626 	pfvf->nix_qints_ctx = NULL;
627 	pfvf->cq_ints_ctx = NULL;
628 }
629 
630 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
631 			      struct rvu_pfvf *pfvf, int nixlf,
632 			      int rss_sz, int rss_grps, int hwctx_size,
633 			      u64 way_mask)
634 {
635 	int err, grp, num_indices;
636 
637 	/* RSS is not requested for this NIXLF */
638 	if (!rss_sz)
639 		return 0;
640 	num_indices = rss_sz * rss_grps;
641 
642 	/* Alloc NIX RSS HW context memory and config the base */
643 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
644 	if (err)
645 		return err;
646 
647 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
648 		    (u64)pfvf->rss_ctx->iova);
649 
650 	/* Config full RSS table size, enable RSS and caching */
651 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
652 		    BIT_ULL(36) | BIT_ULL(4) |
653 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
654 		    way_mask << 20);
655 	/* Config RSS group offset and sizes */
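	/* Each group starts at (grp * rss_sz) in the indirection table and
	 * its size field is encoded as log2(rss_sz) - 1.
	 */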
656 	for (grp = 0; grp < rss_grps; grp++)
657 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
658 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
659 	return 0;
660 }
661 
662 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
663 			       struct nix_aq_inst_s *inst)
664 {
665 	struct admin_queue *aq = block->aq;
666 	struct nix_aq_res_s *result;
667 	int timeout = 1000;
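	/* Polled in 1us steps below, so the wait is bounded to roughly 1ms */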
668 	u64 reg, head;
669 
670 	result = (struct nix_aq_res_s *)aq->res->base;
671 
672 	/* Get current head pointer where to append this instruction */
673 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
674 	head = (reg >> 4) & AQ_PTR_MASK;
675 
676 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
677 	       (void *)inst, aq->inst->entry_sz);
678 	memset(result, 0, sizeof(*result));
679 	/* sync into memory */
680 	wmb();
681 
682 	/* Ring the doorbell and wait for result */
683 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
684 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
685 		cpu_relax();
686 		udelay(1);
687 		timeout--;
688 		if (!timeout)
689 			return -EBUSY;
690 	}
691 
692 	if (result->compcode != NIX_AQ_COMP_GOOD)
693 		/* TODO: Replace this with some error code */
694 		return -EBUSY;
695 
696 	return 0;
697 }
698 
699 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
700 				   struct nix_aq_enq_req *req,
701 				   struct nix_aq_enq_rsp *rsp)
702 {
703 	struct rvu_hwinfo *hw = rvu->hw;
704 	u16 pcifunc = req->hdr.pcifunc;
705 	int nixlf, blkaddr, rc = 0;
706 	struct nix_aq_inst_s inst;
707 	struct rvu_block *block;
708 	struct admin_queue *aq;
709 	struct rvu_pfvf *pfvf;
710 	void *ctx, *mask;
711 	bool ena;
712 	u64 cfg;
713 
714 	blkaddr = nix_hw->blkaddr;
715 	block = &hw->block[blkaddr];
716 	aq = block->aq;
717 	if (!aq) {
718 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
719 		return NIX_AF_ERR_AQ_ENQUEUE;
720 	}
721 
722 	pfvf = rvu_get_pfvf(rvu, pcifunc);
723 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
724 
725 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
726 	 * operations done by AF itself.
727 	 */
728 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
729 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
730 		if (!pfvf->nixlf || nixlf < 0)
731 			return NIX_AF_ERR_AF_LF_INVALID;
732 	}
733 
734 	switch (req->ctype) {
735 	case NIX_AQ_CTYPE_RQ:
736 		/* Check if index exceeds max no of queues */
737 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
738 			rc = NIX_AF_ERR_AQ_ENQUEUE;
739 		break;
740 	case NIX_AQ_CTYPE_SQ:
741 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
742 			rc = NIX_AF_ERR_AQ_ENQUEUE;
743 		break;
744 	case NIX_AQ_CTYPE_CQ:
745 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
746 			rc = NIX_AF_ERR_AQ_ENQUEUE;
747 		break;
748 	case NIX_AQ_CTYPE_RSS:
749 		/* Check if RSS is enabled and qidx is within range */
750 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
751 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
752 		    (req->qidx >= (256UL << (cfg & 0xF))))
753 			rc = NIX_AF_ERR_AQ_ENQUEUE;
754 		break;
755 	case NIX_AQ_CTYPE_MCE:
756 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
757 
758 		/* Check if index exceeds MCE list length */
759 		if (!nix_hw->mcast.mce_ctx ||
760 		    (req->qidx >= (256UL << (cfg & 0xF))))
761 			rc = NIX_AF_ERR_AQ_ENQUEUE;
762 
763 		/* Adding multicast lists for requests from PF/VFs is not
764 		 * yet supported, so ignore this.
765 		 */
766 		if (rsp)
767 			rc = NIX_AF_ERR_AQ_ENQUEUE;
768 		break;
769 	case NIX_AQ_CTYPE_BANDPROF:
770 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
771 					nix_hw, pcifunc))
772 			rc = NIX_AF_ERR_INVALID_BANDPROF;
773 		break;
774 	default:
775 		rc = NIX_AF_ERR_AQ_ENQUEUE;
776 	}
777 
778 	if (rc)
779 		return rc;
780 
781 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
782 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
783 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
784 	     (req->op == NIX_AQ_INSTOP_WRITE &&
785 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
786 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
787 				     pcifunc, req->sq.smq))
788 			return NIX_AF_ERR_AQ_ENQUEUE;
789 	}
790 
791 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
792 	inst.lf = nixlf;
793 	inst.cindex = req->qidx;
794 	inst.ctype = req->ctype;
795 	inst.op = req->op;
796 	/* Currently we are not supporting enqueuing multiple instructions,
797 	 * so always choose the first entry in result memory.
798 	 */
799 	inst.res_addr = (u64)aq->res->iova;
800 
801 	/* Hardware uses the same aq->res->base for updating the result of
802 	 * the previous instruction, hence serialize here until it is done.
803 	 */
804 	spin_lock(&aq->lock);
805 
806 	/* Clean result + context memory */
807 	memset(aq->res->base, 0, aq->res->entry_sz);
808 	/* Context needs to be written at RES_ADDR + 128 */
809 	ctx = aq->res->base + 128;
810 	/* Mask needs to be written at RES_ADDR + 256 */
811 	mask = aq->res->base + 256;
812 
813 	switch (req->op) {
814 	case NIX_AQ_INSTOP_WRITE:
815 		if (req->ctype == NIX_AQ_CTYPE_RQ)
816 			memcpy(mask, &req->rq_mask,
817 			       sizeof(struct nix_rq_ctx_s));
818 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
819 			memcpy(mask, &req->sq_mask,
820 			       sizeof(struct nix_sq_ctx_s));
821 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
822 			memcpy(mask, &req->cq_mask,
823 			       sizeof(struct nix_cq_ctx_s));
824 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
825 			memcpy(mask, &req->rss_mask,
826 			       sizeof(struct nix_rsse_s));
827 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
828 			memcpy(mask, &req->mce_mask,
829 			       sizeof(struct nix_rx_mce_s));
830 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
831 			memcpy(mask, &req->prof_mask,
832 			       sizeof(struct nix_bandprof_s));
833 		fallthrough;
834 	case NIX_AQ_INSTOP_INIT:
835 		if (req->ctype == NIX_AQ_CTYPE_RQ)
836 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
837 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
838 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
839 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
840 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
841 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
842 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
843 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
844 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
845 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
846 			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
847 		break;
848 	case NIX_AQ_INSTOP_NOP:
849 	case NIX_AQ_INSTOP_READ:
850 	case NIX_AQ_INSTOP_LOCK:
851 	case NIX_AQ_INSTOP_UNLOCK:
852 		break;
853 	default:
854 		rc = NIX_AF_ERR_AQ_ENQUEUE;
855 		spin_unlock(&aq->lock);
856 		return rc;
857 	}
858 
859 	/* Submit the instruction to AQ */
860 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
861 	if (rc) {
862 		spin_unlock(&aq->lock);
863 		return rc;
864 	}
865 
866 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
867 	if (req->op == NIX_AQ_INSTOP_INIT) {
868 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
869 			__set_bit(req->qidx, pfvf->rq_bmap);
870 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
871 			__set_bit(req->qidx, pfvf->sq_bmap);
872 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
873 			__set_bit(req->qidx, pfvf->cq_bmap);
874 	}
875 
876 	if (req->op == NIX_AQ_INSTOP_WRITE) {
877 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
878 			ena = (req->rq.ena & req->rq_mask.ena) |
879 				(test_bit(req->qidx, pfvf->rq_bmap) &
880 				~req->rq_mask.ena);
881 			if (ena)
882 				__set_bit(req->qidx, pfvf->rq_bmap);
883 			else
884 				__clear_bit(req->qidx, pfvf->rq_bmap);
885 		}
886 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
887 			ena = (req->sq.ena & req->sq_mask.ena) |
888 				(test_bit(req->qidx, pfvf->sq_bmap) &
889 				~req->sq_mask.ena);
890 			if (ena)
891 				__set_bit(req->qidx, pfvf->sq_bmap);
892 			else
893 				__clear_bit(req->qidx, pfvf->sq_bmap);
894 		}
895 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
896 			ena = (req->cq.ena & req->cq_mask.ena) |
897 				(test_bit(req->qidx, pfvf->cq_bmap) &
898 				~req->cq_mask.ena);
899 			if (ena)
900 				__set_bit(req->qidx, pfvf->cq_bmap);
901 			else
902 				__clear_bit(req->qidx, pfvf->cq_bmap);
903 		}
904 	}
905 
906 	if (rsp) {
907 		/* Copy read context into mailbox */
908 		if (req->op == NIX_AQ_INSTOP_READ) {
909 			if (req->ctype == NIX_AQ_CTYPE_RQ)
910 				memcpy(&rsp->rq, ctx,
911 				       sizeof(struct nix_rq_ctx_s));
912 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
913 				memcpy(&rsp->sq, ctx,
914 				       sizeof(struct nix_sq_ctx_s));
915 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
916 				memcpy(&rsp->cq, ctx,
917 				       sizeof(struct nix_cq_ctx_s));
918 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
919 				memcpy(&rsp->rss, ctx,
920 				       sizeof(struct nix_rsse_s));
921 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
922 				memcpy(&rsp->mce, ctx,
923 				       sizeof(struct nix_rx_mce_s));
924 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
925 				memcpy(&rsp->prof, ctx,
926 				       sizeof(struct nix_bandprof_s));
927 		}
928 	}
929 
930 	spin_unlock(&aq->lock);
931 	return 0;
932 }
933 
934 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
935 			       struct nix_aq_enq_rsp *rsp)
936 {
937 	struct nix_hw *nix_hw;
938 	int blkaddr;
939 
940 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
941 	if (blkaddr < 0)
942 		return NIX_AF_ERR_AF_LF_INVALID;
943 
944 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
945 	if (!nix_hw)
946 		return -EINVAL;
947 
948 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
949 }
950 
951 static const char *nix_get_ctx_name(int ctype)
952 {
953 	switch (ctype) {
954 	case NIX_AQ_CTYPE_CQ:
955 		return "CQ";
956 	case NIX_AQ_CTYPE_SQ:
957 		return "SQ";
958 	case NIX_AQ_CTYPE_RQ:
959 		return "RQ";
960 	case NIX_AQ_CTYPE_RSS:
961 		return "RSS";
962 	}
963 	return "";
964 }
965 
966 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
967 {
968 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
969 	struct nix_aq_enq_req aq_req;
970 	unsigned long *bmap;
971 	int qidx, q_cnt = 0;
972 	int err = 0, rc;
973 
974 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
975 		return NIX_AF_ERR_AQ_ENQUEUE;
976 
977 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
978 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
979 
980 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
981 		aq_req.cq.ena = 0;
982 		aq_req.cq_mask.ena = 1;
983 		aq_req.cq.bp_ena = 0;
984 		aq_req.cq_mask.bp_ena = 1;
985 		q_cnt = pfvf->cq_ctx->qsize;
986 		bmap = pfvf->cq_bmap;
987 	}
988 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
989 		aq_req.sq.ena = 0;
990 		aq_req.sq_mask.ena = 1;
991 		q_cnt = pfvf->sq_ctx->qsize;
992 		bmap = pfvf->sq_bmap;
993 	}
994 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
995 		aq_req.rq.ena = 0;
996 		aq_req.rq_mask.ena = 1;
997 		q_cnt = pfvf->rq_ctx->qsize;
998 		bmap = pfvf->rq_bmap;
999 	}
1000 
1001 	aq_req.ctype = req->ctype;
1002 	aq_req.op = NIX_AQ_INSTOP_WRITE;
1003 
1004 	for (qidx = 0; qidx < q_cnt; qidx++) {
1005 		if (!test_bit(qidx, bmap))
1006 			continue;
1007 		aq_req.qidx = qidx;
1008 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1009 		if (rc) {
1010 			err = rc;
1011 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1012 				nix_get_ctx_name(req->ctype), qidx);
1013 		}
1014 	}
1015 
1016 	return err;
1017 }
1018 
1019 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1020 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1021 {
1022 	struct nix_aq_enq_req lock_ctx_req;
1023 	int err;
1024 
1025 	if (req->op != NIX_AQ_INSTOP_INIT)
1026 		return 0;
1027 
1028 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1029 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1030 		return 0;
1031 
1032 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1033 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1034 	lock_ctx_req.ctype = req->ctype;
1035 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1036 	lock_ctx_req.qidx = req->qidx;
1037 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1038 	if (err)
1039 		dev_err(rvu->dev,
1040 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1041 			req->hdr.pcifunc,
1042 			nix_get_ctx_name(req->ctype), req->qidx);
1043 	return err;
1044 }
1045 
1046 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1047 				struct nix_aq_enq_req *req,
1048 				struct nix_aq_enq_rsp *rsp)
1049 {
1050 	int err;
1051 
1052 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1053 	if (!err)
1054 		err = nix_lf_hwctx_lockdown(rvu, req);
1055 	return err;
1056 }
1057 #else
1058 
1059 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1060 				struct nix_aq_enq_req *req,
1061 				struct nix_aq_enq_rsp *rsp)
1062 {
1063 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1064 }
1065 #endif
1066 /* CN10K mbox handler */
1067 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1068 				      struct nix_cn10k_aq_enq_req *req,
1069 				      struct nix_cn10k_aq_enq_rsp *rsp)
1070 {
1071 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1072 				  (struct nix_aq_enq_rsp *)rsp);
1073 }
1074 
1075 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1076 				       struct hwctx_disable_req *req,
1077 				       struct msg_rsp *rsp)
1078 {
1079 	return nix_lf_hwctx_disable(rvu, req);
1080 }
1081 
1082 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1083 				  struct nix_lf_alloc_req *req,
1084 				  struct nix_lf_alloc_rsp *rsp)
1085 {
1086 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1087 	struct rvu_hwinfo *hw = rvu->hw;
1088 	u16 pcifunc = req->hdr.pcifunc;
1089 	struct rvu_block *block;
1090 	struct rvu_pfvf *pfvf;
1091 	u64 cfg, ctx_cfg;
1092 	int blkaddr;
1093 
1094 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1095 		return NIX_AF_ERR_PARAM;
1096 
1097 	if (req->way_mask)
1098 		req->way_mask &= 0xFFFF;
1099 
1100 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1101 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1102 	if (!pfvf->nixlf || blkaddr < 0)
1103 		return NIX_AF_ERR_AF_LF_INVALID;
1104 
1105 	block = &hw->block[blkaddr];
1106 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1107 	if (nixlf < 0)
1108 		return NIX_AF_ERR_AF_LF_INVALID;
1109 
1110 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1111 	if (req->npa_func) {
1112 		/* If default, use 'this' NIXLF's PFFUNC */
1113 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1114 			req->npa_func = pcifunc;
1115 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1116 			return NIX_AF_INVAL_NPA_PF_FUNC;
1117 	}
1118 
1119 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1120 	if (req->sso_func) {
1121 		/* If default, use 'this' NIXLF's PFFUNC */
1122 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1123 			req->sso_func = pcifunc;
1124 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1125 			return NIX_AF_INVAL_SSO_PF_FUNC;
1126 	}
1127 
1128 	/* If RSS is being enabled, check if requested config is valid.
1129 	 * RSS table size should be a power of two; otherwise
1130 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1131 	 * the entire table cannot be used.
1132 	 */
1133 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1134 			    !is_power_of_2(req->rss_sz)))
1135 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1136 
1137 	if (req->rss_sz &&
1138 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1139 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1140 
1141 	/* Reset this NIX LF */
1142 	err = rvu_lf_reset(rvu, block, nixlf);
1143 	if (err) {
1144 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1145 			block->addr - BLKADDR_NIX0, nixlf);
1146 		return NIX_AF_ERR_LF_RESET;
1147 	}
1148 
1149 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
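	/* NIX_AF_CONST3 packs the log2 of each HW context size into 4-bit
	 * fields; they are decoded into byte sizes below.
	 */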
1150 
1151 	/* Alloc NIX RQ HW context memory and config the base */
1152 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1153 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1154 	if (err)
1155 		goto free_mem;
1156 
1157 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1158 	if (!pfvf->rq_bmap)
1159 		goto free_mem;
1160 
1161 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1162 		    (u64)pfvf->rq_ctx->iova);
1163 
1164 	/* Set caching and queue count in HW */
1165 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1166 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1167 
1168 	/* Alloc NIX SQ HW context memory and config the base */
1169 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1170 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1171 	if (err)
1172 		goto free_mem;
1173 
1174 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1175 	if (!pfvf->sq_bmap)
1176 		goto free_mem;
1177 
1178 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1179 		    (u64)pfvf->sq_ctx->iova);
1180 
1181 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1182 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1183 
1184 	/* Alloc NIX CQ HW context memory and config the base */
1185 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1186 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1187 	if (err)
1188 		goto free_mem;
1189 
1190 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1191 	if (!pfvf->cq_bmap)
1192 		goto free_mem;
1193 
1194 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1195 		    (u64)pfvf->cq_ctx->iova);
1196 
1197 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1198 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1199 
1200 	/* Initialize receive side scaling (RSS) */
1201 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1202 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1203 				 req->rss_grps, hwctx_size, req->way_mask);
1204 	if (err)
1205 		goto free_mem;
1206 
1207 	/* Alloc memory for CQINT's HW contexts */
1208 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1209 	qints = (cfg >> 24) & 0xFFF;
1210 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1211 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1212 	if (err)
1213 		goto free_mem;
1214 
1215 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1216 		    (u64)pfvf->cq_ints_ctx->iova);
1217 
1218 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1219 		    BIT_ULL(36) | req->way_mask << 20);
1220 
1221 	/* Alloc memory for QINT's HW contexts */
1222 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1223 	qints = (cfg >> 12) & 0xFFF;
1224 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1225 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1226 	if (err)
1227 		goto free_mem;
1228 
1229 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1230 		    (u64)pfvf->nix_qints_ctx->iova);
1231 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1232 		    BIT_ULL(36) | req->way_mask << 20);
1233 
1234 	/* Setup VLANX TPIDs.
1235 	 * Use VLAN1 for 802.1Q
1236 	 * and VLAN0 for 802.1AD.
1237 	 */
1238 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1239 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1240 
1241 	/* Enable LMTST for this NIX LF */
1242 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1243 
1244 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1245 	if (req->npa_func)
1246 		cfg = req->npa_func;
1247 	if (req->sso_func)
1248 		cfg |= (u64)req->sso_func << 16;
1249 
1250 	cfg |= (u64)req->xqe_sz << 33;
1251 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1252 
1253 	/* Config Rx pkt length, csum checks and apad enable/disable */
1254 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1255 
1256 	/* Configure pkind for TX parse config */
1257 	cfg = NPC_TX_DEF_PKIND;
1258 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1259 
1260 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1261 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1262 	if (err)
1263 		goto free_mem;
1264 
1265 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1266 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1267 
1268 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1269 	rvu_write64(rvu, blkaddr,
1270 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1271 		    VTAGSIZE_T4 | VTAG_STRIP);
1272 
1273 	goto exit;
1274 
1275 free_mem:
1276 	nix_ctx_free(rvu, pfvf);
1277 	rc = -ENOMEM;
1278 
1279 exit:
1280 	/* Set macaddr of this PF/VF */
1281 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1282 
1283 	/* set SQB size info */
1284 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1285 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1286 	rsp->rx_chan_base = pfvf->rx_chan_base;
1287 	rsp->tx_chan_base = pfvf->tx_chan_base;
1288 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1289 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1290 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1291 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1292 	/* Get HW supported stat count */
1293 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1294 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1295 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1296 	/* Get count of CQ IRQs and error IRQs supported per LF */
1297 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1298 	rsp->qints = ((cfg >> 12) & 0xFFF);
1299 	rsp->cints = ((cfg >> 24) & 0xFFF);
1300 	rsp->cgx_links = hw->cgx_links;
1301 	rsp->lbk_links = hw->lbk_links;
1302 	rsp->sdp_links = hw->sdp_links;
1303 
1304 	return rc;
1305 }
1306 
1307 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1308 				 struct msg_rsp *rsp)
1309 {
1310 	struct rvu_hwinfo *hw = rvu->hw;
1311 	u16 pcifunc = req->hdr.pcifunc;
1312 	struct rvu_block *block;
1313 	int blkaddr, nixlf, err;
1314 	struct rvu_pfvf *pfvf;
1315 
1316 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1317 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1318 	if (!pfvf->nixlf || blkaddr < 0)
1319 		return NIX_AF_ERR_AF_LF_INVALID;
1320 
1321 	block = &hw->block[blkaddr];
1322 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1323 	if (nixlf < 0)
1324 		return NIX_AF_ERR_AF_LF_INVALID;
1325 
1326 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1327 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1328 	else
1329 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1330 
1331 	/* Free any tx vtag def entries used by this NIX LF */
1332 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1333 		nix_free_tx_vtag_entries(rvu, pcifunc);
1334 
1335 	nix_interface_deinit(rvu, pcifunc, nixlf);
1336 
1337 	/* Reset this NIX LF */
1338 	err = rvu_lf_reset(rvu, block, nixlf);
1339 	if (err) {
1340 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1341 			block->addr - BLKADDR_NIX0, nixlf);
1342 		return NIX_AF_ERR_LF_RESET;
1343 	}
1344 
1345 	nix_ctx_free(rvu, pfvf);
1346 
1347 	return 0;
1348 }
1349 
1350 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1351 					 struct nix_mark_format_cfg  *req,
1352 					 struct nix_mark_format_cfg_rsp *rsp)
1353 {
1354 	u16 pcifunc = req->hdr.pcifunc;
1355 	struct nix_hw *nix_hw;
1356 	struct rvu_pfvf *pfvf;
1357 	int blkaddr, rc;
1358 	u32 cfg;
1359 
1360 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1361 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1362 	if (!pfvf->nixlf || blkaddr < 0)
1363 		return NIX_AF_ERR_AF_LF_INVALID;
1364 
1365 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1366 	if (!nix_hw)
1367 		return -EINVAL;
1368 
1369 	cfg = (((u32)req->offset & 0x7) << 16) |
1370 	      (((u32)req->y_mask & 0xF) << 12) |
1371 	      (((u32)req->y_val & 0xF) << 8) |
1372 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1373 
1374 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1375 	if (rc < 0) {
1376 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1377 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1378 		return NIX_AF_ERR_MARK_CFG_FAIL;
1379 	}
1380 
1381 	rsp->mark_format_idx = rc;
1382 	return 0;
1383 }
1384 
1385 /* Disable shaping of pkts by a scheduler queue
1386  * at a given scheduler level.
1387  */
1388 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1389 				 int lvl, int schq)
1390 {
1391 	u64  cir_reg = 0, pir_reg = 0;
1392 	u64  cfg;
1393 
1394 	switch (lvl) {
1395 	case NIX_TXSCH_LVL_TL1:
1396 		cir_reg = NIX_AF_TL1X_CIR(schq);
1397 		pir_reg = 0; /* PIR not available at TL1 */
1398 		break;
1399 	case NIX_TXSCH_LVL_TL2:
1400 		cir_reg = NIX_AF_TL2X_CIR(schq);
1401 		pir_reg = NIX_AF_TL2X_PIR(schq);
1402 		break;
1403 	case NIX_TXSCH_LVL_TL3:
1404 		cir_reg = NIX_AF_TL3X_CIR(schq);
1405 		pir_reg = NIX_AF_TL3X_PIR(schq);
1406 		break;
1407 	case NIX_TXSCH_LVL_TL4:
1408 		cir_reg = NIX_AF_TL4X_CIR(schq);
1409 		pir_reg = NIX_AF_TL4X_PIR(schq);
1410 		break;
1411 	}
1412 
1413 	if (!cir_reg)
1414 		return;
1415 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1416 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1417 
1418 	if (!pir_reg)
1419 		return;
1420 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1421 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1422 }
1423 
1424 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1425 				 int lvl, int schq)
1426 {
1427 	struct rvu_hwinfo *hw = rvu->hw;
1428 	int link;
1429 
1430 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1431 		return;
1432 
1433 	/* Reset TL4's SDP link config */
1434 	if (lvl == NIX_TXSCH_LVL_TL4)
1435 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1436 
1437 	if (lvl != NIX_TXSCH_LVL_TL2)
1438 		return;
1439 
1440 	/* Reset TL2's CGX or LBK link config */
1441 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1442 		rvu_write64(rvu, blkaddr,
1443 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1444 }
1445 
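/* NIX transmit links are numbered with CGX LMAC links first, followed by
 * the LBK links and finally the SDP link.
 */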
1446 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1447 {
1448 	struct rvu_hwinfo *hw = rvu->hw;
1449 	int pf = rvu_get_pf(pcifunc);
1450 	u8 cgx_id = 0, lmac_id = 0;
1451 
1452 	if (is_afvf(pcifunc)) {/* LBK links */
1453 		return hw->cgx_links;
1454 	} else if (is_pf_cgxmapped(rvu, pf)) {
1455 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1456 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1457 	}
1458 
1459 	/* SDP link */
1460 	return hw->cgx_links + hw->lbk_links;
1461 }
1462 
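/* Get the fixed TX scheduler queue index range carved out for the given
 * transmit link: CGX LMAC ranges first, then LBK, then SDP.
 */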
1463 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1464 				 int link, int *start, int *end)
1465 {
1466 	struct rvu_hwinfo *hw = rvu->hw;
1467 	int pf = rvu_get_pf(pcifunc);
1468 
1469 	if (is_afvf(pcifunc)) { /* LBK links */
1470 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1471 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1472 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1473 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1474 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1475 	} else { /* SDP link */
1476 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1477 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1478 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1479 	}
1480 }
1481 
1482 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1483 				      struct nix_hw *nix_hw,
1484 				      struct nix_txsch_alloc_req *req)
1485 {
1486 	struct rvu_hwinfo *hw = rvu->hw;
1487 	int schq, req_schq, free_cnt;
1488 	struct nix_txsch *txsch;
1489 	int link, start, end;
1490 
1491 	txsch = &nix_hw->txsch[lvl];
1492 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1493 
1494 	if (!req_schq)
1495 		return 0;
1496 
1497 	link = nix_get_tx_link(rvu, pcifunc);
1498 
1499 	/* For traffic aggregating scheduler level, one queue is enough */
1500 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1501 		if (req_schq != 1)
1502 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1503 		return 0;
1504 	}
1505 
1506 	/* Get free SCHQ count and check if request can be accommodated */
1507 	if (hw->cap.nix_fixed_txschq_mapping) {
1508 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1509 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1510 		if (end <= txsch->schq.max && schq < end &&
1511 		    !test_bit(schq, txsch->schq.bmap))
1512 			free_cnt = 1;
1513 		else
1514 			free_cnt = 0;
1515 	} else {
1516 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1517 	}
1518 
1519 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1520 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1521 
1522 	/* If contiguous queues are needed, check for availability */
1523 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1524 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1525 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1526 
1527 	return 0;
1528 }
1529 
1530 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1531 			    struct nix_txsch_alloc_rsp *rsp,
1532 			    int lvl, int start, int end)
1533 {
1534 	struct rvu_hwinfo *hw = rvu->hw;
1535 	u16 pcifunc = rsp->hdr.pcifunc;
1536 	int idx, schq;
1537 
1538 	/* For traffic aggregating levels, queue alloc is based
1539 	 * on the transmit link to which the PF_FUNC is mapped.
1540 	 */
1541 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1542 		/* A single TL queue is allocated */
1543 		if (rsp->schq_contig[lvl]) {
1544 			rsp->schq_contig[lvl] = 1;
1545 			rsp->schq_contig_list[lvl][0] = start;
1546 		}
1547 
1548 		/* Both contig and non-contig reqs don't make sense here */
1549 		if (rsp->schq_contig[lvl])
1550 			rsp->schq[lvl] = 0;
1551 
1552 		if (rsp->schq[lvl]) {
1553 			rsp->schq[lvl] = 1;
1554 			rsp->schq_list[lvl][0] = start;
1555 		}
1556 		return;
1557 	}
1558 
1559 	/* Adjust the queue request count if HW supports only a single,
1560 	 * fixed queue per PF_FUNC at each level.
1561 	 */
1562 	if (hw->cap.nix_fixed_txschq_mapping) {
1563 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1564 		schq = start + idx;
1565 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1566 			rsp->schq_contig[lvl] = 0;
1567 			rsp->schq[lvl] = 0;
1568 			return;
1569 		}
1570 
1571 		if (rsp->schq_contig[lvl]) {
1572 			rsp->schq_contig[lvl] = 1;
1573 			set_bit(schq, txsch->schq.bmap);
1574 			rsp->schq_contig_list[lvl][0] = schq;
1575 			rsp->schq[lvl] = 0;
1576 		} else if (rsp->schq[lvl]) {
1577 			rsp->schq[lvl] = 1;
1578 			set_bit(schq, txsch->schq.bmap);
1579 			rsp->schq_list[lvl][0] = schq;
1580 		}
1581 		return;
1582 	}
1583 
1584 	/* Allocate the requested contiguous queue indices first */
1585 	if (rsp->schq_contig[lvl]) {
1586 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1587 						  txsch->schq.max, start,
1588 						  rsp->schq_contig[lvl], 0);
1589 		if (schq >= end)
1590 			rsp->schq_contig[lvl] = 0;
1591 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1592 			set_bit(schq, txsch->schq.bmap);
1593 			rsp->schq_contig_list[lvl][idx] = schq;
1594 			schq++;
1595 		}
1596 	}
1597 
1598 	/* Allocate non-contiguous queue indices */
1599 	if (rsp->schq[lvl]) {
1600 		idx = 0;
1601 		for (schq = start; schq < end; schq++) {
1602 			if (!test_bit(schq, txsch->schq.bmap)) {
1603 				set_bit(schq, txsch->schq.bmap);
1604 				rsp->schq_list[lvl][idx++] = schq;
1605 			}
1606 			if (idx == rsp->schq[lvl])
1607 				break;
1608 		}
1609 		/* Update how many were allocated */
1610 		rsp->schq[lvl] = idx;
1611 	}
1612 }
1613 
1614 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1615 				     struct nix_txsch_alloc_req *req,
1616 				     struct nix_txsch_alloc_rsp *rsp)
1617 {
1618 	struct rvu_hwinfo *hw = rvu->hw;
1619 	u16 pcifunc = req->hdr.pcifunc;
1620 	int link, blkaddr, rc = 0;
1621 	int lvl, idx, start, end;
1622 	struct nix_txsch *txsch;
1623 	struct rvu_pfvf *pfvf;
1624 	struct nix_hw *nix_hw;
1625 	u32 *pfvf_map;
1626 	u16 schq;
1627 
1628 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1629 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1630 	if (!pfvf->nixlf || blkaddr < 0)
1631 		return NIX_AF_ERR_AF_LF_INVALID;
1632 
1633 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1634 	if (!nix_hw)
1635 		return -EINVAL;
1636 
1637 	mutex_lock(&rvu->rsrc_lock);
1638 
1639 	/* Check if request is valid as per HW capabilities
1640 	 * and can be accommodated.
1641 	 */
1642 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1643 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1644 		if (rc)
1645 			goto err;
1646 	}
1647 
1648 	/* Allocate requested Tx scheduler queues */
1649 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1650 		txsch = &nix_hw->txsch[lvl];
1651 		pfvf_map = txsch->pfvf_map;
1652 
1653 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1654 			continue;
1655 
1656 		rsp->schq[lvl] = req->schq[lvl];
1657 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1658 
1659 		link = nix_get_tx_link(rvu, pcifunc);
1660 
1661 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1662 			start = link;
1663 			end = link;
1664 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1665 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1666 		} else {
1667 			start = 0;
1668 			end = txsch->schq.max;
1669 		}
1670 
1671 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1672 
1673 		/* Reset queue config */
1674 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1675 			schq = rsp->schq_contig_list[lvl][idx];
1676 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1677 			    NIX_TXSCHQ_CFG_DONE))
1678 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1679 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1680 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1681 		}
1682 
1683 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1684 			schq = rsp->schq_list[lvl][idx];
1685 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1686 			    NIX_TXSCHQ_CFG_DONE))
1687 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1688 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1689 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1690 		}
1691 	}
1692 
1693 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1694 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1695 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1696 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1697 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1698 	goto exit;
1699 err:
1700 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1701 exit:
1702 	mutex_unlock(&rvu->rsrc_lock);
1703 	return rc;
1704 }
1705 
1706 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1707 			  int smq, u16 pcifunc, int nixlf)
1708 {
1709 	int pf = rvu_get_pf(pcifunc);
1710 	u8 cgx_id = 0, lmac_id = 0;
1711 	int err, restore_tx_en = 0;
1712 	u64 cfg;
1713 
1714 	/* enable cgx tx if disabled */
1715 	if (is_pf_cgxmapped(rvu, pf)) {
1716 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1717 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1718 						    lmac_id, true);
1719 	}
1720 
1721 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1722 	/* Do SMQ flush and set enqueue xoff */
1723 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1724 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
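	/* BIT(49) is polled below until hardware clears it to signal flush
	 * completion; setting both bits also keeps new enqueues off the SMQ
	 * while it drains.
	 */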
1725 
1726 	/* Disable backpressure from physical link,
1727 	 * otherwise SMQ flush may stall.
1728 	 */
1729 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1730 
1731 	/* Wait for flush to complete */
1732 	err = rvu_poll_reg(rvu, blkaddr,
1733 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1734 	if (err)
1735 		dev_err(rvu->dev,
1736 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1737 
1738 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1739 	/* restore cgx tx state */
1740 	if (restore_tx_en)
1741 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1742 }
1743 
1744 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1745 {
1746 	int blkaddr, nixlf, lvl, schq, err;
1747 	struct rvu_hwinfo *hw = rvu->hw;
1748 	struct nix_txsch *txsch;
1749 	struct nix_hw *nix_hw;
1750 
1751 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1752 	if (blkaddr < 0)
1753 		return NIX_AF_ERR_AF_LF_INVALID;
1754 
1755 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1756 	if (!nix_hw)
1757 		return -EINVAL;
1758 
1759 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1760 	if (nixlf < 0)
1761 		return NIX_AF_ERR_AF_LF_INVALID;
1762 
1763 	/* Disable TL2/3 queue links before SMQ flush */
1764 	mutex_lock(&rvu->rsrc_lock);
1765 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1766 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1767 			continue;
1768 
1769 		txsch = &nix_hw->txsch[lvl];
1770 		for (schq = 0; schq < txsch->schq.max; schq++) {
1771 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1772 				continue;
1773 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1774 		}
1775 	}
1776 
1777 	/* Flush SMQs */
1778 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1779 	for (schq = 0; schq < txsch->schq.max; schq++) {
1780 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1781 			continue;
1782 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1783 	}
1784 
1785 	/* Now free scheduler queues to free pool */
1786 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		/* TLs at or above the aggregation level are shared across a PF
		 * and its VFs, hence skip freeing them.
		 */
1790 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1791 			continue;
1792 
1793 		txsch = &nix_hw->txsch[lvl];
1794 		for (schq = 0; schq < txsch->schq.max; schq++) {
1795 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1796 				continue;
1797 			rvu_free_rsrc(&txsch->schq, schq);
1798 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1799 		}
1800 	}
1801 	mutex_unlock(&rvu->rsrc_lock);
1802 
1803 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1804 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1805 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1806 	if (err)
1807 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1808 
1809 	return 0;
1810 }
1811 
1812 static int nix_txschq_free_one(struct rvu *rvu,
1813 			       struct nix_txsch_free_req *req)
1814 {
1815 	struct rvu_hwinfo *hw = rvu->hw;
1816 	u16 pcifunc = req->hdr.pcifunc;
1817 	int lvl, schq, nixlf, blkaddr;
1818 	struct nix_txsch *txsch;
1819 	struct nix_hw *nix_hw;
1820 	u32 *pfvf_map;
1821 
1822 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1823 	if (blkaddr < 0)
1824 		return NIX_AF_ERR_AF_LF_INVALID;
1825 
1826 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1827 	if (!nix_hw)
1828 		return -EINVAL;
1829 
1830 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1831 	if (nixlf < 0)
1832 		return NIX_AF_ERR_AF_LF_INVALID;
1833 
1834 	lvl = req->schq_lvl;
1835 	schq = req->schq;
1836 	txsch = &nix_hw->txsch[lvl];
1837 
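	/* Queues at or above the aggregation level are shared and not freed
	 * individually; out-of-range indices are ignored as well.
	 */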
1838 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1839 		return 0;
1840 
1841 	pfvf_map = txsch->pfvf_map;
1842 	mutex_lock(&rvu->rsrc_lock);
1843 
1844 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1845 		mutex_unlock(&rvu->rsrc_lock);
1846 		goto err;
1847 	}
1848 
	/* Flush if it is an SMQ. The onus of disabling
	 * TL2/3 queue links before the SMQ flush is on the user.
	 */
1852 	if (lvl == NIX_TXSCH_LVL_SMQ)
1853 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1854 
1855 	/* Free the resource */
1856 	rvu_free_rsrc(&txsch->schq, schq);
1857 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1858 	mutex_unlock(&rvu->rsrc_lock);
1859 	return 0;
1860 err:
1861 	return NIX_AF_ERR_TLX_INVALID;
1862 }
1863 
1864 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1865 				    struct nix_txsch_free_req *req,
1866 				    struct msg_rsp *rsp)
1867 {
1868 	if (req->flags & TXSCHQ_FREE_ALL)
1869 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1870 	else
1871 		return nix_txschq_free_one(rvu, req);
1872 }
1873 
1874 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1875 				      int lvl, u64 reg, u64 regval)
1876 {
1877 	u64 regbase = reg & 0xFFFF;
1878 	u16 schq, parent;
1879 
1880 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1881 		return false;
1882 
1883 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1884 	/* Check if this schq belongs to this PF/VF or not */
1885 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1886 		return false;
1887 
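	/* The parent queue index is carried in bits 24:16 of the *_PARENT
	 * register value.
	 */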
1888 	parent = (regval >> 16) & 0x1FF;
1889 	/* Validate MDQ's TL4 parent */
1890 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1891 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1892 		return false;
1893 
1894 	/* Validate TL4's TL3 parent */
1895 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1896 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1897 		return false;
1898 
1899 	/* Validate TL3's TL2 parent */
1900 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1901 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1902 		return false;
1903 
1904 	/* Validate TL2's TL1 parent */
1905 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1906 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1907 		return false;
1908 
1909 	return true;
1910 }
1911 
1912 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1913 {
1914 	u64 regbase;
1915 
1916 	if (hw->cap.nix_shaping)
1917 		return true;
1918 
	/* If shaping and coloring are not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
1922 	regbase = reg & 0xFFFF;
1923 
1924 	switch (lvl) {
1925 	case NIX_TXSCH_LVL_TL1:
1926 		if (regbase == NIX_AF_TL1X_CIR(0))
1927 			return false;
1928 		break;
1929 	case NIX_TXSCH_LVL_TL2:
1930 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1931 		    regbase == NIX_AF_TL2X_PIR(0))
1932 			return false;
1933 		break;
1934 	case NIX_TXSCH_LVL_TL3:
1935 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1936 		    regbase == NIX_AF_TL3X_PIR(0))
1937 			return false;
1938 		break;
1939 	case NIX_TXSCH_LVL_TL4:
1940 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1941 		    regbase == NIX_AF_TL4X_PIR(0))
1942 			return false;
1943 		break;
1944 	}
1945 	return true;
1946 }
1947 
1948 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1949 				u16 pcifunc, int blkaddr)
1950 {
1951 	u32 *pfvf_map;
1952 	int schq;
1953 
1954 	schq = nix_get_tx_link(rvu, pcifunc);
1955 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1956 	/* Skip if PF has already done the config */
1957 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1958 		return;
1959 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1960 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1961 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1962 		    TXSCH_TL1_DFLT_RR_QTM);
1963 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1964 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1965 }
1966 
1967 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
1968 			       u16 pcifunc, struct nix_txsch *txsch)
1969 {
1970 	struct rvu_hwinfo *hw = rvu->hw;
1971 	int lbk_link_start, lbk_links;
1972 	u8 pf = rvu_get_pf(pcifunc);
1973 	int schq;
1974 
1975 	if (!is_pf_cgxmapped(rvu, pf))
1976 		return;
1977 
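	/* LBK links are numbered right after the CGX links */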
1978 	lbk_link_start = hw->cgx_links;
1979 
1980 	for (schq = 0; schq < txsch->schq.max; schq++) {
1981 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1982 			continue;
		/* Enable all LBK links with channel 63 by default so that
		 * packets can be sent to LBK with an NPC TX MCAM rule
		 */
1986 		lbk_links = hw->lbk_links;
1987 		while (lbk_links--)
1988 			rvu_write64(rvu, blkaddr,
1989 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
1990 							      lbk_link_start +
1991 							      lbk_links),
1992 				    BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
1993 	}
1994 }
1995 
1996 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1997 				    struct nix_txschq_config *req,
1998 				    struct msg_rsp *rsp)
1999 {
2000 	struct rvu_hwinfo *hw = rvu->hw;
2001 	u16 pcifunc = req->hdr.pcifunc;
2002 	u64 reg, regval, schq_regbase;
2003 	struct nix_txsch *txsch;
2004 	struct nix_hw *nix_hw;
2005 	int blkaddr, idx, err;
2006 	int nixlf, schq;
2007 	u32 *pfvf_map;
2008 
2009 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2010 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
2011 		return NIX_AF_INVAL_TXSCHQ_CFG;
2012 
2013 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2014 	if (err)
2015 		return err;
2016 
2017 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2018 	if (!nix_hw)
2019 		return -EINVAL;
2020 
2021 	txsch = &nix_hw->txsch[req->lvl];
2022 	pfvf_map = txsch->pfvf_map;
2023 
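	/* Scheduler queues at or above the aggregation level are shared and
	 * are not directly configurable by a VF; for a VF request just make
	 * sure the default TL1 config is in place and return.
	 */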
2024 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2025 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2026 		mutex_lock(&rvu->rsrc_lock);
2027 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2028 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2029 		mutex_unlock(&rvu->rsrc_lock);
2030 		return 0;
2031 	}
2032 
2033 	for (idx = 0; idx < req->num_regs; idx++) {
2034 		reg = req->reg[idx];
2035 		regval = req->regval[idx];
2036 		schq_regbase = reg & 0xFFFF;
2037 
2038 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2039 					       txsch->lvl, reg, regval))
2040 			return NIX_AF_INVAL_TXSCHQ_CFG;
2041 
2042 		/* Check if shaping and coloring is supported */
2043 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2044 			continue;
2045 
2046 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2047 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2048 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2049 					   pcifunc, 0);
2050 			regval &= ~(0x7FULL << 24);
2051 			regval |= ((u64)nixlf << 24);
2052 		}
2053 
2054 		/* Clear 'BP_ENA' config, if it's not allowed */
2055 		if (!hw->cap.nix_tx_link_bp) {
2056 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2057 			    (schq_regbase & 0xFF00) ==
2058 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2059 				regval &= ~BIT_ULL(13);
2060 		}
2061 
2062 		/* Mark config as done for TL1 by PF */
2063 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2064 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2065 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2066 			mutex_lock(&rvu->rsrc_lock);
2067 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2068 							NIX_TXSCHQ_CFG_DONE);
2069 			mutex_unlock(&rvu->rsrc_lock);
2070 		}
2071 
		/* SMQ flush is special, hence split the register write such
		 * that the flush is done first and the rest of the bits are
		 * written later.
		 */
2075 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2076 		    (regval & BIT_ULL(49))) {
2077 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2078 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2079 			regval &= ~BIT_ULL(49);
2080 		}
2081 		rvu_write64(rvu, blkaddr, reg, regval);
2082 	}
2083 
2084 	rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
2085 			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
2086 
2087 	return 0;
2088 }
2089 
2090 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2091 			   struct nix_vtag_config *req)
2092 {
2093 	u64 regval = req->vtag_size;
2094 
2095 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2096 	    req->vtag_size > VTAGSIZE_T8)
2097 		return -EINVAL;
2098 
	/* RX VTAG Type 7 is reserved for VF VLAN */
2100 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2101 		return NIX_AF_ERR_RX_VTAG_INUSE;
2102 
2103 	if (req->rx.capture_vtag)
2104 		regval |= BIT_ULL(5);
2105 	if (req->rx.strip_vtag)
2106 		regval |= BIT_ULL(4);
2107 
2108 	rvu_write64(rvu, blkaddr,
2109 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2110 	return 0;
2111 }
2112 
2113 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2114 			    u16 pcifunc, int index)
2115 {
2116 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2117 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2118 
2119 	if (vlan->entry2pfvf_map[index] != pcifunc)
2120 		return NIX_AF_ERR_PARAM;
2121 
2122 	rvu_write64(rvu, blkaddr,
2123 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2124 	rvu_write64(rvu, blkaddr,
2125 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2126 
2127 	vlan->entry2pfvf_map[index] = 0;
2128 	rvu_free_rsrc(&vlan->rsrc, index);
2129 
2130 	return 0;
2131 }
2132 
2133 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2134 {
2135 	struct nix_txvlan *vlan;
2136 	struct nix_hw *nix_hw;
2137 	int index, blkaddr;
2138 
2139 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2140 	if (blkaddr < 0)
2141 		return;
2142 
2143 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2144 	vlan = &nix_hw->txvlan;
2145 
2146 	mutex_lock(&vlan->rsrc_lock);
2147 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2148 	for (index = 0; index < vlan->rsrc.max; index++) {
2149 		if (vlan->entry2pfvf_map[index] == pcifunc)
2150 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2151 	}
2152 	mutex_unlock(&vlan->rsrc_lock);
2153 }
2154 
2155 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2156 			     u64 vtag, u8 size)
2157 {
2158 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2159 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2160 	u64 regval;
2161 	int index;
2162 
2163 	mutex_lock(&vlan->rsrc_lock);
2164 
2165 	index = rvu_alloc_rsrc(&vlan->rsrc);
2166 	if (index < 0) {
2167 		mutex_unlock(&vlan->rsrc_lock);
2168 		return index;
2169 	}
2170 
2171 	mutex_unlock(&vlan->rsrc_lock);
2172 
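	/* A 4-byte vtag (size == 0) is placed in the upper 32 bits of the
	 * VTAG_DEF data register; an 8-byte vtag uses the whole register.
	 */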
2173 	regval = size ? vtag : vtag << 32;
2174 
2175 	rvu_write64(rvu, blkaddr,
2176 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2177 	rvu_write64(rvu, blkaddr,
2178 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2179 
2180 	return index;
2181 }
2182 
2183 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2184 			     struct nix_vtag_config *req)
2185 {
2186 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2187 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2188 	u16 pcifunc = req->hdr.pcifunc;
2189 	int idx0 = req->tx.vtag0_idx;
2190 	int idx1 = req->tx.vtag1_idx;
2191 	int err = 0;
2192 
2193 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2194 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2195 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2196 			return NIX_AF_ERR_PARAM;
2197 
2198 	mutex_lock(&vlan->rsrc_lock);
2199 
2200 	if (req->tx.free_vtag0) {
2201 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2202 		if (err)
2203 			goto exit;
2204 	}
2205 
2206 	if (req->tx.free_vtag1)
2207 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2208 
2209 exit:
2210 	mutex_unlock(&vlan->rsrc_lock);
2211 	return err;
2212 }
2213 
2214 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2215 			   struct nix_vtag_config *req,
2216 			   struct nix_vtag_config_rsp *rsp)
2217 {
2218 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2219 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2220 	u16 pcifunc = req->hdr.pcifunc;
2221 
2222 	if (req->tx.cfg_vtag0) {
2223 		rsp->vtag0_idx =
2224 			nix_tx_vtag_alloc(rvu, blkaddr,
2225 					  req->tx.vtag0, req->vtag_size);
2226 
2227 		if (rsp->vtag0_idx < 0)
2228 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2229 
2230 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2231 	}
2232 
2233 	if (req->tx.cfg_vtag1) {
2234 		rsp->vtag1_idx =
2235 			nix_tx_vtag_alloc(rvu, blkaddr,
2236 					  req->tx.vtag1, req->vtag_size);
2237 
2238 		if (rsp->vtag1_idx < 0)
2239 			goto err_free;
2240 
2241 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2242 	}
2243 
2244 	return 0;
2245 
2246 err_free:
2247 	if (req->tx.cfg_vtag0)
2248 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2249 
2250 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2251 }
2252 
2253 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2254 				  struct nix_vtag_config *req,
2255 				  struct nix_vtag_config_rsp *rsp)
2256 {
2257 	u16 pcifunc = req->hdr.pcifunc;
2258 	int blkaddr, nixlf, err;
2259 
2260 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2261 	if (err)
2262 		return err;
2263 
2264 	if (req->cfg_type) {
2265 		/* rx vtag configuration */
2266 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2267 		if (err)
2268 			return NIX_AF_ERR_PARAM;
2269 	} else {
2270 		/* tx vtag configuration */
2271 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2272 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2273 			return NIX_AF_ERR_PARAM;
2274 
2275 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2276 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2277 
2278 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2279 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2280 	}
2281 
2282 	return 0;
2283 }
2284 
2285 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2286 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2287 {
2288 	struct nix_aq_enq_req aq_req;
2289 	int err;
2290 
2291 	aq_req.hdr.pcifunc = 0;
2292 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2293 	aq_req.op = op;
2294 	aq_req.qidx = mce;
2295 
2296 	/* Use RSS with RSS index 0 */
2297 	aq_req.mce.op = 1;
2298 	aq_req.mce.index = 0;
2299 	aq_req.mce.eol = eol;
2300 	aq_req.mce.pf_func = pcifunc;
2301 	aq_req.mce.next = next;
2302 
2303 	/* All fields valid */
2304 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2305 
2306 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2307 	if (err) {
2308 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2309 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2310 		return err;
2311 	}
2312 	return 0;
2313 }
2314 
2315 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2316 				     u16 pcifunc, bool add)
2317 {
2318 	struct mce *mce, *tail = NULL;
2319 	bool delete = false;
2320 
2321 	/* Scan through the current list */
2322 	hlist_for_each_entry(mce, &mce_list->head, node) {
2323 		/* If already exists, then delete */
2324 		if (mce->pcifunc == pcifunc && !add) {
2325 			delete = true;
2326 			break;
2327 		} else if (mce->pcifunc == pcifunc && add) {
2328 			/* entry already exists */
2329 			return 0;
2330 		}
2331 		tail = mce;
2332 	}
2333 
2334 	if (delete) {
2335 		hlist_del(&mce->node);
2336 		kfree(mce);
2337 		mce_list->count--;
2338 		return 0;
2339 	}
2340 
2341 	if (!add)
2342 		return 0;
2343 
2344 	/* Add a new one to the list, at the tail */
2345 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2346 	if (!mce)
2347 		return -ENOMEM;
2348 	mce->pcifunc = pcifunc;
2349 	if (!tail)
2350 		hlist_add_head(&mce->node, &mce_list->head);
2351 	else
2352 		hlist_add_behind(&mce->node, &tail->node);
2353 	mce_list->count++;
2354 	return 0;
2355 }
2356 
2357 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2358 			struct nix_mce_list *mce_list,
2359 			int mce_idx, int mcam_index, bool add)
2360 {
2361 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2362 	struct npc_mcam *mcam = &rvu->hw->mcam;
2363 	struct nix_mcast *mcast;
2364 	struct nix_hw *nix_hw;
2365 	struct mce *mce;
2366 
2367 	if (!mce_list)
2368 		return -EINVAL;
2369 
2370 	/* Get this PF/VF func's MCE index */
2371 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2372 
2373 	if (idx > (mce_idx + mce_list->max)) {
2374 		dev_err(rvu->dev,
2375 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2376 			__func__, idx, mce_list->max,
2377 			pcifunc >> RVU_PFVF_PF_SHIFT);
2378 		return -EINVAL;
2379 	}
2380 
2381 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2382 	if (err)
2383 		return err;
2384 
2385 	mcast = &nix_hw->mcast;
2386 	mutex_lock(&mcast->mce_lock);
2387 
2388 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2389 	if (err)
2390 		goto end;
2391 
2392 	/* Disable MCAM entry in NPC */
2393 	if (!mce_list->count) {
2394 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2395 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2396 		goto end;
2397 	}
2398 
2399 	/* Dump the updated list to HW */
2400 	idx = mce_idx;
2401 	last_idx = idx + mce_list->count - 1;
2402 	hlist_for_each_entry(mce, &mce_list->head, node) {
2403 		if (idx > last_idx)
2404 			break;
2405 
2406 		next_idx = idx + 1;
2407 		/* EOL should be set in last MCE */
2408 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2409 					mce->pcifunc, next_idx,
					next_idx > last_idx);
2411 		if (err)
2412 			goto end;
2413 		idx++;
2414 	}
2415 
2416 end:
2417 	mutex_unlock(&mcast->mce_lock);
2418 	return err;
2419 }
2420 
2421 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2422 		      struct nix_mce_list **mce_list, int *mce_idx)
2423 {
2424 	struct rvu_hwinfo *hw = rvu->hw;
2425 	struct rvu_pfvf *pfvf;
2426 
2427 	if (!hw->cap.nix_rx_multicast ||
2428 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2429 		*mce_list = NULL;
2430 		*mce_idx = 0;
2431 		return;
2432 	}
2433 
2434 	/* Get this PF/VF func's MCE index */
2435 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2436 
2437 	if (type == NIXLF_BCAST_ENTRY) {
2438 		*mce_list = &pfvf->bcast_mce_list;
2439 		*mce_idx = pfvf->bcast_mce_idx;
2440 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
2441 		*mce_list = &pfvf->mcast_mce_list;
2442 		*mce_idx = pfvf->mcast_mce_idx;
2443 	} else if (type == NIXLF_PROMISC_ENTRY) {
2444 		*mce_list = &pfvf->promisc_mce_list;
2445 		*mce_idx = pfvf->promisc_mce_idx;
2446 	}  else {
2447 		*mce_list = NULL;
2448 		*mce_idx = 0;
2449 	}
2450 }
2451 
2452 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2453 			       int type, bool add)
2454 {
2455 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2456 	struct npc_mcam *mcam = &rvu->hw->mcam;
2457 	struct rvu_hwinfo *hw = rvu->hw;
2458 	struct nix_mce_list *mce_list;
2459 
2460 	/* skip multicast pkt replication for AF's VFs */
2461 	if (is_afvf(pcifunc))
2462 		return 0;
2463 
2464 	if (!hw->cap.nix_rx_multicast)
2465 		return 0;
2466 
2467 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2468 	if (blkaddr < 0)
2469 		return -EINVAL;
2470 
2471 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2472 	if (nixlf < 0)
2473 		return -EINVAL;
2474 
2475 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2476 
2477 	mcam_index = npc_get_nixlf_mcam_index(mcam,
2478 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
2479 					      nixlf, type);
2480 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
2481 				  mce_idx, mcam_index, add);
2482 	return err;
2483 }
2484 
2485 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2486 {
2487 	struct nix_mcast *mcast = &nix_hw->mcast;
2488 	int err, pf, numvfs, idx;
2489 	struct rvu_pfvf *pfvf;
2490 	u16 pcifunc;
2491 	u64 cfg;
2492 
	/* Skip PF0 (i.e. the AF) */
2494 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2495 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2496 		/* If PF is not enabled, nothing to do */
2497 		if (!((cfg >> 20) & 0x01))
2498 			continue;
2499 		/* Get numVFs attached to this PF */
2500 		numvfs = (cfg >> 12) & 0xFF;
2501 
2502 		pfvf = &rvu->pf[pf];
2503 
		/* Is this NIX0/1 block mapped to this PF? */
2505 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2506 			continue;
2507 
2508 		/* save start idx of broadcast mce list */
2509 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2510 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2511 
2512 		/* save start idx of multicast mce list */
2513 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2514 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2515 
2516 		/* save the start idx of promisc mce list */
2517 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2518 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2519 
2520 		for (idx = 0; idx < (numvfs + 1); idx++) {
2521 			/* idx-0 is for PF, followed by VFs */
2522 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2523 			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to check
			 * whether AQ_OP should be INIT/WRITE later on.
			 * These will be updated when a NIXLF is attached to or
			 * detached from these PF/VFs.
			 */
2529 			err = nix_blk_setup_mce(rvu, nix_hw,
2530 						pfvf->bcast_mce_idx + idx,
2531 						NIX_AQ_INSTOP_INIT,
2532 						pcifunc, 0, true);
2533 			if (err)
2534 				return err;
2535 
2536 			/* add dummy entries to multicast mce list */
2537 			err = nix_blk_setup_mce(rvu, nix_hw,
2538 						pfvf->mcast_mce_idx + idx,
2539 						NIX_AQ_INSTOP_INIT,
2540 						pcifunc, 0, true);
2541 			if (err)
2542 				return err;
2543 
2544 			/* add dummy entries to promisc mce list */
2545 			err = nix_blk_setup_mce(rvu, nix_hw,
2546 						pfvf->promisc_mce_idx + idx,
2547 						NIX_AQ_INSTOP_INIT,
2548 						pcifunc, 0, true);
2549 			if (err)
2550 				return err;
2551 		}
2552 	}
2553 	return 0;
2554 }
2555 
2556 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2557 {
2558 	struct nix_mcast *mcast = &nix_hw->mcast;
2559 	struct rvu_hwinfo *hw = rvu->hw;
2560 	int err, size;
2561 
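	/* NIX_AF_CONST3[19:16] holds log2 of the MCE context size */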
2562 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2563 	size = (1ULL << size);
2564 
2565 	/* Alloc memory for multicast/mirror replication entries */
2566 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2567 			 (256UL << MC_TBL_SIZE), size);
2568 	if (err)
2569 		return -ENOMEM;
2570 
2571 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2572 		    (u64)mcast->mce_ctx->iova);
2573 
	/* Set max list length to max number of VFs per PF + the PF itself */
2575 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2576 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2577 
2578 	/* Alloc memory for multicast replication buffers */
2579 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2580 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2581 			 (8UL << MC_BUF_CNT), size);
2582 	if (err)
2583 		return -ENOMEM;
2584 
2585 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2586 		    (u64)mcast->mcast_buf->iova);
2587 
2588 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2589 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2590 
2591 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2592 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2593 		    BIT_ULL(20) | MC_BUF_CNT);
2594 
2595 	mutex_init(&mcast->mce_lock);
2596 
2597 	return nix_setup_mce_tables(rvu, nix_hw);
2598 }
2599 
2600 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2601 {
2602 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2603 	int err;
2604 
	/* Allocate resource bitmap for tx vtag def registers */
2606 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2607 	err = rvu_alloc_bitmap(&vlan->rsrc);
2608 	if (err)
2609 		return -ENOMEM;
2610 
	/* Alloc memory for saving entry to RVU PF_FUNC allocation mapping */
2612 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2613 					    sizeof(u16), GFP_KERNEL);
2614 	if (!vlan->entry2pfvf_map)
2615 		goto free_mem;
2616 
2617 	mutex_init(&vlan->rsrc_lock);
2618 	return 0;
2619 
2620 free_mem:
2621 	kfree(vlan->rsrc.bmap);
2622 	return -ENOMEM;
2623 }
2624 
2625 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2626 {
2627 	struct nix_txsch *txsch;
2628 	int err, lvl, schq;
2629 	u64 cfg, reg;
2630 
2631 	/* Get scheduler queue count of each type and alloc
2632 	 * bitmap for each for alloc/free/attach operations.
2633 	 */
2634 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2635 		txsch = &nix_hw->txsch[lvl];
2636 		txsch->lvl = lvl;
2637 		switch (lvl) {
2638 		case NIX_TXSCH_LVL_SMQ:
2639 			reg = NIX_AF_MDQ_CONST;
2640 			break;
2641 		case NIX_TXSCH_LVL_TL4:
2642 			reg = NIX_AF_TL4_CONST;
2643 			break;
2644 		case NIX_TXSCH_LVL_TL3:
2645 			reg = NIX_AF_TL3_CONST;
2646 			break;
2647 		case NIX_TXSCH_LVL_TL2:
2648 			reg = NIX_AF_TL2_CONST;
2649 			break;
2650 		case NIX_TXSCH_LVL_TL1:
2651 			reg = NIX_AF_TL1_CONST;
2652 			break;
2653 		}
2654 		cfg = rvu_read64(rvu, blkaddr, reg);
2655 		txsch->schq.max = cfg & 0xFFFF;
2656 		err = rvu_alloc_bitmap(&txsch->schq);
2657 		if (err)
2658 			return err;
2659 
2660 		/* Allocate memory for scheduler queues to
2661 		 * PF/VF pcifunc mapping info.
2662 		 */
2663 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2664 					       sizeof(u32), GFP_KERNEL);
2665 		if (!txsch->pfvf_map)
2666 			return -ENOMEM;
2667 		for (schq = 0; schq < txsch->schq.max; schq++)
2668 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2669 	}
2670 	return 0;
2671 }
2672 
2673 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2674 				int blkaddr, u32 cfg)
2675 {
2676 	int fmt_idx;
2677 
2678 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2679 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2680 			return fmt_idx;
2681 	}
2682 	if (fmt_idx >= nix_hw->mark_format.total)
2683 		return -ERANGE;
2684 
2685 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2686 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2687 	nix_hw->mark_format.in_use++;
2688 	return fmt_idx;
2689 }
2690 
2691 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2692 				    int blkaddr)
2693 {
2694 	u64 cfgs[] = {
2695 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2696 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2697 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2698 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2699 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2700 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2701 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2702 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2703 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2704 	};
2705 	int i, rc;
2706 	u64 total;
2707 
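	/* NIX_AF_PSE_CONST[15:8] gives the number of supported mark formats */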
2708 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2709 	nix_hw->mark_format.total = (u8)total;
2710 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2711 					       GFP_KERNEL);
2712 	if (!nix_hw->mark_format.cfg)
2713 		return -ENOMEM;
2714 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2715 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2716 		if (rc < 0)
2717 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2718 				i, rc);
2719 	}
2720 
2721 	return 0;
2722 }
2723 
2724 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
2725 {
2726 	/* CN10K supports LBK FIFO size 72 KB */
2727 	if (rvu->hw->lbk_bufsize == 0x12000)
2728 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
2729 	else
2730 		*max_mtu = NIC_HW_MAX_FRS;
2731 }
2732 
2733 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2734 {
2735 	/* RPM supports FIFO len 128 KB */
2736 	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2737 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2738 	else
2739 		*max_mtu = NIC_HW_MAX_FRS;
2740 }
2741 
2742 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2743 				     struct nix_hw_info *rsp)
2744 {
2745 	u16 pcifunc = req->hdr.pcifunc;
2746 	int blkaddr;
2747 
2748 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2749 	if (blkaddr < 0)
2750 		return NIX_AF_ERR_AF_LF_INVALID;
2751 
2752 	if (is_afvf(pcifunc))
2753 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2754 	else
2755 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2756 
2757 	rsp->min_mtu = NIC_HW_MIN_FRS;
2758 	return 0;
2759 }
2760 
2761 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2762 				   struct msg_rsp *rsp)
2763 {
2764 	u16 pcifunc = req->hdr.pcifunc;
2765 	int i, nixlf, blkaddr, err;
2766 	u64 stats;
2767 
2768 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2769 	if (err)
2770 		return err;
2771 
2772 	/* Get stats count supported by HW */
2773 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2774 
2775 	/* Reset tx stats */
2776 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2777 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2778 
2779 	/* Reset rx stats */
2780 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2781 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2782 
2783 	return 0;
2784 }
2785 
2786 /* Returns the ALG index to be set into NPC_RX_ACTION */
2787 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2788 {
2789 	int i;
2790 
	/* Scan over existing algo entries to find a match */
2792 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2793 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2794 			return i;
2795 
2796 	return -ERANGE;
2797 }
2798 
2799 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2800 {
2801 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2802 	int max_key_off, max_bit_pos, group_member;
2803 	struct nix_rx_flowkey_alg *field;
2804 	struct nix_rx_flowkey_alg tmp;
2805 	u32 key_type, valid_key;
2806 	int l4_key_offset = 0;
2807 
2808 	if (!alg)
2809 		return -EINVAL;
2810 
2811 #define FIELDS_PER_ALG  5
2812 #define MAX_KEY_OFF	40
2813 	/* Clear all fields */
2814 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2815 
	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into the above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action
	 * should point to this definition to calculate flowtag or hash.
	 *
	 * The for loop goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
	 *
	 * keyoff_marker - Enabled when the hash byte length needs to be
	 * accounted in the field->key_offset update.
	 * field_marker - Enabled when a new field needs to be selected.
	 * group_member - Enabled when the protocol is part of a group.
	 */
2832 
2833 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2834 	nr_field = 0; key_off = 0; field_marker = 1;
2835 	field = &tmp; max_bit_pos = fls(flow_cfg);
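	/* Each field is built in the scratch copy 'tmp' first and committed
	 * to alg[] only when the corresponding key type is set in flow_cfg.
	 */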
2836 	for (idx = 0;
2837 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2838 	     key_off < MAX_KEY_OFF; idx++) {
2839 		key_type = BIT(idx);
2840 		valid_key = flow_cfg & key_type;
2841 		/* Found a field marker, reset the field values */
2842 		if (field_marker)
2843 			memset(&tmp, 0, sizeof(tmp));
2844 
2845 		field_marker = true;
2846 		keyoff_marker = true;
2847 		switch (key_type) {
2848 		case NIX_FLOW_KEY_TYPE_PORT:
2849 			field->sel_chan = true;
2850 			/* This should be set to 1, when SEL_CHAN is set */
2851 			field->bytesm1 = 1;
2852 			break;
2853 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2854 			field->lid = NPC_LID_LC;
2855 			field->hdr_offset = 9; /* offset */
2856 			field->bytesm1 = 0; /* 1 byte */
2857 			field->ltype_match = NPC_LT_LC_IP;
2858 			field->ltype_mask = 0xF;
2859 			break;
2860 		case NIX_FLOW_KEY_TYPE_IPV4:
2861 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2862 			field->lid = NPC_LID_LC;
2863 			field->ltype_match = NPC_LT_LC_IP;
2864 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2865 				field->lid = NPC_LID_LG;
2866 				field->ltype_match = NPC_LT_LG_TU_IP;
2867 			}
2868 			field->hdr_offset = 12; /* SIP offset */
2869 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2870 			field->ltype_mask = 0xF; /* Match only IPv4 */
2871 			keyoff_marker = false;
2872 			break;
2873 		case NIX_FLOW_KEY_TYPE_IPV6:
2874 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2875 			field->lid = NPC_LID_LC;
2876 			field->ltype_match = NPC_LT_LC_IP6;
2877 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2878 				field->lid = NPC_LID_LG;
2879 				field->ltype_match = NPC_LT_LG_TU_IP6;
2880 			}
2881 			field->hdr_offset = 8; /* SIP offset */
2882 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2883 			field->ltype_mask = 0xF; /* Match only IPv6 */
2884 			break;
2885 		case NIX_FLOW_KEY_TYPE_TCP:
2886 		case NIX_FLOW_KEY_TYPE_UDP:
2887 		case NIX_FLOW_KEY_TYPE_SCTP:
2888 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2889 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2890 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2891 			field->lid = NPC_LID_LD;
2892 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2893 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2894 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2895 				field->lid = NPC_LID_LH;
2896 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2897 
2898 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2899 			 * so no need to change the ltype_match, just change
2900 			 * the lid for inner protocols
2901 			 */
2902 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2903 				     (int)NPC_LT_LH_TU_TCP);
2904 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2905 				     (int)NPC_LT_LH_TU_UDP);
2906 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2907 				     (int)NPC_LT_LH_TU_SCTP);
2908 
2909 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2910 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2911 			    valid_key) {
2912 				field->ltype_match |= NPC_LT_LD_TCP;
2913 				group_member = true;
2914 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2915 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2916 				   valid_key) {
2917 				field->ltype_match |= NPC_LT_LD_UDP;
2918 				group_member = true;
2919 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2920 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2921 				   valid_key) {
2922 				field->ltype_match |= NPC_LT_LD_SCTP;
2923 				group_member = true;
2924 			}
2925 			field->ltype_mask = ~field->ltype_match;
2926 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2927 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2928 				/* Handle the case where any of the group item
2929 				 * is enabled in the group but not the final one
2930 				 */
2931 				if (group_member) {
2932 					valid_key = true;
2933 					group_member = false;
2934 				}
2935 			} else {
2936 				field_marker = false;
2937 				keyoff_marker = false;
2938 			}
2939 
			/* TCP/UDP/SCTP and ESP/AH fall at the same offset so
			 * remember the TCP key offset within the 40-byte hash
			 * key.
			 */
2943 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2944 				l4_key_offset = key_off;
2945 			break;
2946 		case NIX_FLOW_KEY_TYPE_NVGRE:
2947 			field->lid = NPC_LID_LD;
2948 			field->hdr_offset = 4; /* VSID offset */
2949 			field->bytesm1 = 2;
2950 			field->ltype_match = NPC_LT_LD_NVGRE;
2951 			field->ltype_mask = 0xF;
2952 			break;
2953 		case NIX_FLOW_KEY_TYPE_VXLAN:
2954 		case NIX_FLOW_KEY_TYPE_GENEVE:
2955 			field->lid = NPC_LID_LE;
2956 			field->bytesm1 = 2;
2957 			field->hdr_offset = 4;
2958 			field->ltype_mask = 0xF;
2959 			field_marker = false;
2960 			keyoff_marker = false;
2961 
2962 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2963 				field->ltype_match |= NPC_LT_LE_VXLAN;
2964 				group_member = true;
2965 			}
2966 
2967 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2968 				field->ltype_match |= NPC_LT_LE_GENEVE;
2969 				group_member = true;
2970 			}
2971 
2972 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2973 				if (group_member) {
2974 					field->ltype_mask = ~field->ltype_match;
2975 					field_marker = true;
2976 					keyoff_marker = true;
2977 					valid_key = true;
2978 					group_member = false;
2979 				}
2980 			}
2981 			break;
2982 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2983 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2984 			field->lid = NPC_LID_LA;
2985 			field->ltype_match = NPC_LT_LA_ETHER;
2986 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2987 				field->lid = NPC_LID_LF;
2988 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2989 			}
2990 			field->hdr_offset = 0;
2991 			field->bytesm1 = 5; /* DMAC 6 Byte */
2992 			field->ltype_mask = 0xF;
2993 			break;
2994 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2995 			field->lid = NPC_LID_LC;
2996 			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 Byte ext hdr */
2998 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2999 			field->ltype_mask = 0xF;
3000 			break;
3001 		case NIX_FLOW_KEY_TYPE_GTPU:
3002 			field->lid = NPC_LID_LE;
3003 			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
3005 			field->ltype_match = NPC_LT_LE_GTPU;
3006 			field->ltype_mask = 0xF;
3007 			break;
3008 		case NIX_FLOW_KEY_TYPE_VLAN:
3009 			field->lid = NPC_LID_LB;
3010 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3011 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3012 			field->ltype_match = NPC_LT_LB_CTAG;
3013 			field->ltype_mask = 0xF;
3014 			field->fn_mask = 1; /* Mask out the first nibble */
3015 			break;
3016 		case NIX_FLOW_KEY_TYPE_AH:
3017 		case NIX_FLOW_KEY_TYPE_ESP:
3018 			field->hdr_offset = 0;
3019 			field->bytesm1 = 7; /* SPI + sequence number */
3020 			field->ltype_mask = 0xF;
3021 			field->lid = NPC_LID_LE;
3022 			field->ltype_match = NPC_LT_LE_ESP;
3023 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3024 				field->lid = NPC_LID_LD;
3025 				field->ltype_match = NPC_LT_LD_AH;
3026 				field->hdr_offset = 4;
3027 				keyoff_marker = false;
3028 			}
3029 			break;
3030 		}
3031 		field->ena = 1;
3032 
3033 		/* Found a valid flow key type */
3034 		if (valid_key) {
3035 			/* Use the key offset of TCP/UDP/SCTP fields
3036 			 * for ESP/AH fields.
3037 			 */
3038 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3039 			    key_type == NIX_FLOW_KEY_TYPE_AH)
3040 				key_off = l4_key_offset;
3041 			field->key_offset = key_off;
3042 			memcpy(&alg[nr_field], field, sizeof(*field));
3043 			max_key_off = max(max_key_off, field->bytesm1 + 1);
3044 
3045 			/* Found a field marker, get the next field */
3046 			if (field_marker)
3047 				nr_field++;
3048 		}
3049 
3050 		/* Found a keyoff marker, update the new key_off */
3051 		if (keyoff_marker) {
3052 			key_off += max_key_off;
3053 			max_key_off = 0;
3054 		}
3055 	}
3056 	/* Processed all the flow key types */
3057 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3058 		return 0;
3059 	else
3060 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
3061 }
3062 
3063 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3064 {
3065 	u64 field[FIELDS_PER_ALG];
3066 	struct nix_hw *hw;
3067 	int fid, rc;
3068 
3069 	hw = get_nix_hw(rvu->hw, blkaddr);
3070 	if (!hw)
3071 		return -EINVAL;
3072 
	/* No room to add a new flow hash algorithm */
3074 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3075 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
3076 
3077 	/* Generate algo fields for the given flow_cfg */
3078 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3079 	if (rc)
3080 		return rc;
3081 
3082 	/* Update ALGX_FIELDX register with generated fields */
3083 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3084 		rvu_write64(rvu, blkaddr,
3085 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3086 							   fid), field[fid]);
3087 
	/* Store the flow_cfg for further lookup */
3089 	rc = hw->flowkey.in_use;
3090 	hw->flowkey.flowkey[rc] = flow_cfg;
3091 	hw->flowkey.in_use++;
3092 
3093 	return rc;
3094 }
3095 
3096 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3097 					 struct nix_rss_flowkey_cfg *req,
3098 					 struct nix_rss_flowkey_cfg_rsp *rsp)
3099 {
3100 	u16 pcifunc = req->hdr.pcifunc;
3101 	int alg_idx, nixlf, blkaddr;
3102 	struct nix_hw *nix_hw;
3103 	int err;
3104 
3105 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3106 	if (err)
3107 		return err;
3108 
3109 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3110 	if (!nix_hw)
3111 		return -EINVAL;
3112 
3113 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve a new one */
3115 	if (alg_idx < 0) {
3116 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3117 						  req->flowkey_cfg);
3118 		if (alg_idx < 0)
3119 			return alg_idx;
3120 	}
3121 	rsp->alg_idx = alg_idx;
3122 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3123 				       alg_idx, req->mcam_index);
3124 	return 0;
3125 }
3126 
3127 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3128 {
3129 	u32 flowkey_cfg, minkey_cfg;
3130 	int alg, fid, rc;
3131 
	/* Disable all flow key ALGX_FIELDX registers */
3133 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3134 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3135 			rvu_write64(rvu, blkaddr,
3136 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3137 				    0);
3138 	}
3139 
3140 	/* IPv4/IPv6 SIP/DIPs */
3141 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3142 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3143 	if (rc < 0)
3144 		return rc;
3145 
3146 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3147 	minkey_cfg = flowkey_cfg;
3148 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3149 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3150 	if (rc < 0)
3151 		return rc;
3152 
3153 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3154 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3155 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3156 	if (rc < 0)
3157 		return rc;
3158 
3159 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3160 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3161 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3162 	if (rc < 0)
3163 		return rc;
3164 
3165 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3166 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3167 			NIX_FLOW_KEY_TYPE_UDP;
3168 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3169 	if (rc < 0)
3170 		return rc;
3171 
3172 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3173 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3174 			NIX_FLOW_KEY_TYPE_SCTP;
3175 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3176 	if (rc < 0)
3177 		return rc;
3178 
3179 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3180 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3181 			NIX_FLOW_KEY_TYPE_SCTP;
3182 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3183 	if (rc < 0)
3184 		return rc;
3185 
3186 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3187 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3188 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3189 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3190 	if (rc < 0)
3191 		return rc;
3192 
3193 	return 0;
3194 }
3195 
3196 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3197 				      struct nix_set_mac_addr *req,
3198 				      struct msg_rsp *rsp)
3199 {
3200 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3201 	u16 pcifunc = req->hdr.pcifunc;
3202 	int blkaddr, nixlf, err;
3203 	struct rvu_pfvf *pfvf;
3204 
3205 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3206 	if (err)
3207 		return err;
3208 
3209 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3210 
	/* An untrusted VF can't overwrite admin (PF) changes */
3212 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3213 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3214 		dev_warn(rvu->dev,
3215 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3216 		return -EPERM;
3217 	}
3218 
3219 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3220 
3221 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3222 				    pfvf->rx_chan_base, req->mac_addr);
3223 
3224 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3225 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
3226 
3227 	rvu_switch_update_rules(rvu, pcifunc);
3228 
3229 	return 0;
3230 }
3231 
3232 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3233 				      struct msg_req *req,
3234 				      struct nix_get_mac_addr_rsp *rsp)
3235 {
3236 	u16 pcifunc = req->hdr.pcifunc;
3237 	struct rvu_pfvf *pfvf;
3238 
3239 	if (!is_nixlf_attached(rvu, pcifunc))
3240 		return NIX_AF_ERR_AF_LF_INVALID;
3241 
3242 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3243 
3244 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3245 
3246 	return 0;
3247 }
3248 
3249 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3250 				     struct msg_rsp *rsp)
3251 {
3252 	bool allmulti, promisc, nix_rx_multicast;
3253 	u16 pcifunc = req->hdr.pcifunc;
3254 	struct rvu_pfvf *pfvf;
3255 	int nixlf, err;
3256 
3257 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3258 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3259 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3260 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3261 
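	/* Use MCE list based replication only when the HW supports it and
	 * the requester opted in via NIX_RX_MODE_USE_MCE.
	 */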
3262 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3263 
3264 	if (is_vf(pcifunc) && !nix_rx_multicast &&
3265 	    (promisc || allmulti)) {
3266 		dev_warn_ratelimited(rvu->dev,
3267 				     "VF promisc/multicast not supported\n");
3268 		return 0;
3269 	}
3270 
3271 	/* untrusted VF can't configure promisc/allmulti */
3272 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3273 	    (promisc || allmulti))
3274 		return 0;
3275 
3276 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3277 	if (err)
3278 		return err;
3279 
3280 	if (nix_rx_multicast) {
3281 		/* add/del this PF_FUNC to/from mcast pkt replication list */
3282 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3283 					  allmulti);
3284 		if (err) {
3285 			dev_err(rvu->dev,
3286 				"Failed to update pcifunc 0x%x to multicast list\n",
3287 				pcifunc);
3288 			return err;
3289 		}
3290 
3291 		/* add/del this PF_FUNC to/from promisc pkt replication list */
3292 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3293 					  promisc);
3294 		if (err) {
3295 			dev_err(rvu->dev,
3296 				"Failed to update pcifunc 0x%x to promisc list\n",
3297 				pcifunc);
3298 			return err;
3299 		}
3300 	}
3301 
3302 	/* install/uninstall allmulti entry */
3303 	if (allmulti) {
3304 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3305 					       pfvf->rx_chan_base);
3306 	} else {
3307 		if (!nix_rx_multicast)
3308 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3309 	}
3310 
3311 	/* install/uninstall promisc entry */
3312 	if (promisc) {
3313 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3314 					      pfvf->rx_chan_base,
3315 					      pfvf->rx_chan_cnt);
3316 	} else {
3317 		if (!nix_rx_multicast)
3318 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3319 	}
3320 
3321 	return 0;
3322 }
3323 
3324 static void nix_find_link_frs(struct rvu *rvu,
3325 			      struct nix_frs_cfg *req, u16 pcifunc)
3326 {
3327 	int pf = rvu_get_pf(pcifunc);
3328 	struct rvu_pfvf *pfvf;
3329 	int maxlen, minlen;
3330 	int numvfs, hwvf;
3331 	int vf;
3332 
3333 	/* Update with requester's min/max lengths */
3334 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3335 	pfvf->maxlen = req->maxlen;
3336 	if (req->update_minlen)
3337 		pfvf->minlen = req->minlen;
3338 
3339 	maxlen = req->maxlen;
3340 	minlen = req->update_minlen ? req->minlen : 0;
3341 
3342 	/* Get this PF's numVFs and starting hwvf */
3343 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3344 
3345 	/* For each VF, compare requested max/minlen */
3346 	for (vf = 0; vf < numvfs; vf++) {
3347 		pfvf =  &rvu->hwvf[hwvf + vf];
3348 		if (pfvf->maxlen > maxlen)
3349 			maxlen = pfvf->maxlen;
3350 		if (req->update_minlen &&
3351 		    pfvf->minlen && pfvf->minlen < minlen)
3352 			minlen = pfvf->minlen;
3353 	}
3354 
3355 	/* Compare requested max/minlen with PF's max/minlen */
3356 	pfvf = &rvu->pf[pf];
3357 	if (pfvf->maxlen > maxlen)
3358 		maxlen = pfvf->maxlen;
3359 	if (req->update_minlen &&
3360 	    pfvf->minlen && pfvf->minlen < minlen)
3361 		minlen = pfvf->minlen;
3362 
	/* Update the request with the max/min across the PF and its VFs */
3364 	req->maxlen = maxlen;
3365 	if (req->update_minlen)
3366 		req->minlen = minlen;
3367 }
3368 
3369 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3370 				    struct msg_rsp *rsp)
3371 {
3372 	struct rvu_hwinfo *hw = rvu->hw;
3373 	u16 pcifunc = req->hdr.pcifunc;
3374 	int pf = rvu_get_pf(pcifunc);
3375 	int blkaddr, schq, link = -1;
3376 	struct nix_txsch *txsch;
3377 	u64 cfg, lmac_fifo_len;
3378 	struct nix_hw *nix_hw;
3379 	u8 cgx = 0, lmac = 0;
3380 	u16 max_mtu;
3381 
3382 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3383 	if (blkaddr < 0)
3384 		return NIX_AF_ERR_AF_LF_INVALID;
3385 
3386 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3387 	if (!nix_hw)
3388 		return -EINVAL;
3389 
3390 	if (is_afvf(pcifunc))
3391 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3392 	else
3393 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3394 
3395 	if (!req->sdp_link && req->maxlen > max_mtu)
3396 		return NIX_AF_ERR_FRS_INVALID;
3397 
3398 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3399 		return NIX_AF_ERR_FRS_INVALID;
3400 
	/* Check if requester wants to update SMQs */
3402 	if (!req->update_smq)
3403 		goto rx_frscfg;
3404 
	/* Update min/maxlen in each of the SMQs attached to this PF/VF */
3406 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3407 	mutex_lock(&rvu->rsrc_lock);
3408 	for (schq = 0; schq < txsch->schq.max; schq++) {
3409 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3410 			continue;
3411 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3412 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3413 		if (req->update_minlen)
3414 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3415 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3416 	}
3417 	mutex_unlock(&rvu->rsrc_lock);
3418 
3419 rx_frscfg:
3420 	/* Check if config is for SDP link */
3421 	if (req->sdp_link) {
3422 		if (!hw->sdp_links)
3423 			return NIX_AF_ERR_RX_LINK_INVALID;
3424 		link = hw->cgx_links + hw->lbk_links;
3425 		goto linkcfg;
3426 	}
3427 
3428 	/* Check if the request is from CGX mapped RVU PF */
3429 	if (is_pf_cgxmapped(rvu, pf)) {
3430 		/* Get CGX and LMAC to which this PF is mapped and find link */
3431 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3432 		link = (cgx * hw->lmac_per_cgx) + lmac;
3433 	} else if (pf == 0) {
		/* For VFs of PF0, ingress is the LBK port, so config the LBK link */
3435 		link = hw->cgx_links;
3436 	}
3437 
3438 	if (link < 0)
3439 		return NIX_AF_ERR_RX_LINK_INVALID;
3440 
3441 	nix_find_link_frs(rvu, req, pcifunc);
3442 
3443 linkcfg:
3444 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3445 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3446 	if (req->update_minlen)
3447 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
3448 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3449 
3450 	if (req->sdp_link || pf == 0)
3451 		return 0;
3452 
3453 	/* Update transmit credits for CGX links */
3454 	lmac_fifo_len =
3455 		rvu_cgx_get_fifolen(rvu) /
3456 		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
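	/* Tx credits are programmed in units of 16 bytes of available LMAC
	 * FIFO space, at bits 31:12 of NIX_AF_TX_LINKX_NORM_CREDIT.
	 */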
3457 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3458 	cfg &= ~(0xFFFFFULL << 12);
3459 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
3460 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3461 	return 0;
3462 }
3463 
3464 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3465 				    struct msg_rsp *rsp)
3466 {
3467 	int nixlf, blkaddr, err;
3468 	u64 cfg;
3469 
3470 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3471 	if (err)
3472 		return err;
3473 
3474 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3475 	/* Set the interface configuration */
3476 	if (req->len_verify & BIT(0))
3477 		cfg |= BIT_ULL(41);
3478 	else
3479 		cfg &= ~BIT_ULL(41);
3480 
3481 	if (req->len_verify & BIT(1))
3482 		cfg |= BIT_ULL(40);
3483 	else
3484 		cfg &= ~BIT_ULL(40);
3485 
3486 	if (req->csum_verify & BIT(0))
3487 		cfg |= BIT_ULL(37);
3488 	else
3489 		cfg &= ~BIT_ULL(37);
3490 
3491 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3492 
3493 	return 0;
3494 }
3495 
3496 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3497 {
	/* CN10K supports a 72KB FIFO size and a max packet size of 64KB */
3499 	if (rvu->hw->lbk_bufsize == 0x12000)
3500 		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3501 
3502 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3503 }
3504 
3505 static void nix_link_config(struct rvu *rvu, int blkaddr)
3506 {
3507 	struct rvu_hwinfo *hw = rvu->hw;
3508 	int cgx, lmac_cnt, slink, link;
3509 	u16 lbk_max_frs, lmac_max_frs;
3510 	u64 tx_credits;
3511 
3512 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3513 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3514 
3515 	/* Set default min/max packet lengths allowed on NIX Rx links.
3516 	 *
	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3518 	 * as undersize and report them to SW as error pkts, hence
3519 	 * setting it to 40 bytes.
3520 	 */
3521 	for (link = 0; link < hw->cgx_links; link++) {
3522 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3523 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3524 	}
3525 
	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
3527 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3528 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3529 	}
3530 	if (hw->sdp_links) {
3531 		link = hw->cgx_links + hw->lbk_links;
3532 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3533 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3534 	}
3535 
3536 	/* Set credits for Tx links assuming max packet length allowed.
3537 	 * This will be reconfigured based on MTU set for PF/VF.
3538 	 */
3539 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3540 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3541 		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3542 			       lmac_max_frs) / 16;
3543 		/* Enable credits and set credit pkt count to max allowed */
3544 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3545 		slink = cgx * hw->lmac_per_cgx;
3546 		for (link = slink; link < (slink + lmac_cnt); link++) {
3547 			rvu_write64(rvu, blkaddr,
3548 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3549 				    tx_credits);
3550 		}
3551 	}
3552 
3553 	/* Set Tx credits for LBK link */
3554 	slink = hw->cgx_links;
3555 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3556 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3557 		/* Enable credits and set credit pkt count to max allowed */
3558 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3559 		rvu_write64(rvu, blkaddr,
3560 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3561 	}
3562 }
3563 
3564 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3565 {
3566 	int idx, err;
3567 	u64 status;
3568 
3569 	/* Start X2P bus calibration */
3570 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3571 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3572 	/* Wait for calibration to complete */
3573 	err = rvu_poll_reg(rvu, blkaddr,
3574 			   NIX_AF_STATUS, BIT_ULL(10), false);
3575 	if (err) {
3576 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3577 		return err;
3578 	}
3579 
3580 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3581 	/* Check if CGX devices are ready */
3582 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3583 		/* Skip when cgx port is not available */
3584 		if (!rvu_cgx_pdata(idx, rvu) ||
3585 		    (status & (BIT_ULL(16 + idx))))
3586 			continue;
3587 		dev_err(rvu->dev,
3588 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3589 		err = -EBUSY;
3590 	}
3591 
3592 	/* Check if LBK is ready */
3593 	if (!(status & BIT_ULL(19))) {
3594 		dev_err(rvu->dev,
3595 			"LBK didn't respond to NIX X2P calibration\n");
3596 		err = -EBUSY;
3597 	}
3598 
3599 	/* Clear 'calibrate_x2p' bit */
3600 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3601 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
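	/* Any of the low 10 NIX_AF_STATUS bits being set also indicates a
	 * calibration failure.
	 */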
3602 	if (err || (status & 0x3FFULL))
3603 		dev_err(rvu->dev,
3604 			"NIX X2P calibration failed, status 0x%llx\n", status);
3605 	if (err)
3606 		return err;
3607 	return 0;
3608 }
3609 
3610 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3611 {
3612 	u64 cfg;
3613 	int err;
3614 
3615 	/* Set admin queue endianness */
3616 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3617 #ifdef __BIG_ENDIAN
3618 	cfg |= BIT_ULL(8);
3619 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3620 #else
3621 	cfg &= ~BIT_ULL(8);
3622 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3623 #endif
3624 
3625 	/* Do not bypass NDC cache */
3626 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
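	/* Clearing bits [13:1] keeps NDC caching of contexts enabled; the
	 * ifdef below optionally disables caching of SQBs only.
	 */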
3627 	cfg &= ~0x3FFEULL;
3628 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3629 	/* Disable caching of SQB aka SQEs */
3630 	cfg |= 0x04ULL;
3631 #endif
3632 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3633 
	/* The result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * the operation type. Alloc sufficient result memory for all operations.
3637 	 */
3638 	err = rvu_aq_alloc(rvu, &block->aq,
3639 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3640 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3641 	if (err)
3642 		return err;
3643 
3644 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3645 	rvu_write64(rvu, block->addr,
3646 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3647 	return 0;
3648 }
3649 
3650 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3651 {
3652 	const struct npc_lt_def_cfg *ltdefs;
3653 	struct rvu_hwinfo *hw = rvu->hw;
3654 	int blkaddr = nix_hw->blkaddr;
3655 	struct rvu_block *block;
3656 	int err;
3657 	u64 cfg;
3658 
3659 	block = &hw->block[blkaddr];
3660 
3661 	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3663 		 * internal state when conditional clocks are turned off.
3664 		 * Hence enable them.
3665 		 */
3666 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3667 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3668 
3669 		/* Set chan/link to backpressure TL3 instead of TL2 */
3670 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3671 
		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs are mapped to the same SMQ and transmit pkts at the same time.
3675 		 */
3676 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3677 		cfg &= ~BIT_ULL(15);
3678 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3679 	}
3680 
3681 	ltdefs = rvu->kpu.lt_def;
3682 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3683 	err = nix_calibrate_x2p(rvu, blkaddr);
3684 	if (err)
3685 		return err;
3686 
3687 	/* Initialize admin queue */
3688 	err = nix_aq_init(rvu, block);
3689 	if (err)
3690 		return err;
3691 
3692 	/* Restore CINT timer delay to HW reset values */
3693 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3694 
3695 	if (is_block_implemented(hw, blkaddr)) {
3696 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3697 		if (err)
3698 			return err;
3699 
3700 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3701 		if (err)
3702 			return err;
3703 
3704 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3705 		if (err)
3706 			return err;
3707 
3708 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3709 		if (err)
3710 			return err;
3711 
3712 		err = nix_setup_txvlan(rvu, nix_hw);
3713 		if (err)
3714 			return err;
3715 
3716 		/* Configure segmentation offload formats */
3717 		nix_setup_lso(rvu, nix_hw, blkaddr);
3718 
		/* Configure Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer
		 * info. This helps the HW protocol checker identify headers
		 * and validate lengths and checksums.
		 */
3723 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3724 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3725 			    ltdefs->rx_ol2.ltype_mask);
3726 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3727 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3728 			    ltdefs->rx_oip4.ltype_mask);
3729 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3730 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3731 			    ltdefs->rx_iip4.ltype_mask);
3732 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3733 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3734 			    ltdefs->rx_oip6.ltype_mask);
3735 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3736 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3737 			    ltdefs->rx_iip6.ltype_mask);
3738 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3739 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3740 			    ltdefs->rx_otcp.ltype_mask);
3741 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3742 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3743 			    ltdefs->rx_itcp.ltype_mask);
3744 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3745 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3746 			    ltdefs->rx_oudp.ltype_mask);
3747 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3748 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3749 			    ltdefs->rx_iudp.ltype_mask);
3750 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3751 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3752 			    ltdefs->rx_osctp.ltype_mask);
3753 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3754 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3755 			    ltdefs->rx_isctp.ltype_mask);
3756 
3757 		if (!is_rvu_otx2(rvu)) {
3758 			/* Enable APAD calculation for other protocols
3759 			 * matching APAD0 and APAD1 lt def registers.
3760 			 */
3761 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3762 				    (ltdefs->rx_apad0.valid << 11) |
3763 				    (ltdefs->rx_apad0.lid << 8) |
3764 				    (ltdefs->rx_apad0.ltype_match << 4) |
3765 				    ltdefs->rx_apad0.ltype_mask);
3766 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3767 				    (ltdefs->rx_apad1.valid << 11) |
3768 				    (ltdefs->rx_apad1.lid << 8) |
3769 				    (ltdefs->rx_apad1.ltype_match << 4) |
3770 				    ltdefs->rx_apad1.ltype_mask);
3771 
			/* The receive ethertype definition register defines
			 * layer information in NPC_RESULT_S to identify the
			 * Ethertype location in the L2 header. Used for
			 * Ethertype overwriting in the inline IPsec flow.
			 */
3777 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3778 				    (ltdefs->rx_et[0].offset << 12) |
3779 				    (ltdefs->rx_et[0].valid << 11) |
3780 				    (ltdefs->rx_et[0].lid << 8) |
3781 				    (ltdefs->rx_et[0].ltype_match << 4) |
3782 				    ltdefs->rx_et[0].ltype_mask);
3783 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3784 				    (ltdefs->rx_et[1].offset << 12) |
3785 				    (ltdefs->rx_et[1].valid << 11) |
3786 				    (ltdefs->rx_et[1].lid << 8) |
3787 				    (ltdefs->rx_et[1].ltype_match << 4) |
3788 				    ltdefs->rx_et[1].ltype_mask);
3789 		}
3790 
3791 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3792 		if (err)
3793 			return err;
3794 
3795 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3796 		nix_link_config(rvu, blkaddr);
3797 
3798 		/* Enable Channel backpressure */
3799 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3800 	}
3801 	return 0;
3802 }
3803 
3804 int rvu_nix_init(struct rvu *rvu)
3805 {
3806 	struct rvu_hwinfo *hw = rvu->hw;
3807 	struct nix_hw *nix_hw;
3808 	int blkaddr = 0, err;
3809 	int i = 0;
3810 
3811 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3812 			       GFP_KERNEL);
3813 	if (!hw->nix)
3814 		return -ENOMEM;
3815 
3816 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3817 	while (blkaddr) {
3818 		nix_hw = &hw->nix[i];
3819 		nix_hw->rvu = rvu;
3820 		nix_hw->blkaddr = blkaddr;
3821 		err = rvu_nix_block_init(rvu, nix_hw);
3822 		if (err)
3823 			return err;
3824 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3825 		i++;
3826 	}
3827 
3828 	return 0;
3829 }
3830 
3831 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3832 				  struct rvu_block *block)
3833 {
3834 	struct nix_txsch *txsch;
3835 	struct nix_mcast *mcast;
3836 	struct nix_txvlan *vlan;
3837 	struct nix_hw *nix_hw;
3838 	int lvl;
3839 
3840 	rvu_aq_free(rvu, block->aq);
3841 
3842 	if (is_block_implemented(rvu->hw, blkaddr)) {
3843 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3844 		if (!nix_hw)
3845 			return;
3846 
3847 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3848 			txsch = &nix_hw->txsch[lvl];
3849 			kfree(txsch->schq.bmap);
3850 		}
3851 
3852 		nix_ipolicer_freemem(nix_hw);
3853 
3854 		vlan = &nix_hw->txvlan;
3855 		kfree(vlan->rsrc.bmap);
3856 		mutex_destroy(&vlan->rsrc_lock);
3857 
3858 		mcast = &nix_hw->mcast;
3859 		qmem_free(rvu->dev, mcast->mce_ctx);
3860 		qmem_free(rvu->dev, mcast->mcast_buf);
3861 		mutex_destroy(&mcast->mce_lock);
3862 	}
3863 }
3864 
3865 void rvu_nix_freemem(struct rvu *rvu)
3866 {
3867 	struct rvu_hwinfo *hw = rvu->hw;
3868 	struct rvu_block *block;
3869 	int blkaddr = 0;
3870 
3871 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3872 	while (blkaddr) {
3873 		block = &hw->block[blkaddr];
3874 		rvu_nix_block_freemem(rvu, blkaddr, block);
3875 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3876 	}
3877 }
3878 
3879 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3880 				     struct msg_rsp *rsp)
3881 {
3882 	u16 pcifunc = req->hdr.pcifunc;
3883 	struct rvu_pfvf *pfvf;
3884 	int nixlf, err;
3885 
3886 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3887 	if (err)
3888 		return err;
3889 
3890 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3891 
3892 	npc_mcam_enable_flows(rvu, pcifunc);
3893 
3894 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3895 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3896 
3897 	rvu_switch_update_rules(rvu, pcifunc);
3898 
3899 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3900 }
3901 
3902 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3903 				    struct msg_rsp *rsp)
3904 {
3905 	u16 pcifunc = req->hdr.pcifunc;
3906 	struct rvu_pfvf *pfvf;
3907 	int nixlf, err;
3908 
3909 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3910 	if (err)
3911 		return err;
3912 
3913 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3914 
3915 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3916 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3917 
3918 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3919 }
3920 
3921 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3922 {
3923 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3924 	struct hwctx_disable_req ctx_req;
3925 	int err;
3926 
3927 	ctx_req.hdr.pcifunc = pcifunc;
3928 
3929 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3930 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3931 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3932 	nix_interface_deinit(rvu, pcifunc, nixlf);
3933 	nix_rx_sync(rvu, blkaddr);
3934 	nix_txschq_free(rvu, pcifunc);
3935 
3936 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3937 
3938 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3939 
3940 	if (pfvf->sq_ctx) {
3941 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3942 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3943 		if (err)
3944 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3945 	}
3946 
3947 	if (pfvf->rq_ctx) {
3948 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3949 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3950 		if (err)
3951 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3952 	}
3953 
3954 	if (pfvf->cq_ctx) {
3955 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3956 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3957 		if (err)
3958 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3959 	}
3960 
3961 	nix_ctx_free(rvu, pfvf);
3962 
3963 	nix_free_all_bandprof(rvu, pcifunc);
3964 }
3965 
3966 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
3967 
3968 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3969 {
3970 	struct rvu_hwinfo *hw = rvu->hw;
3971 	struct rvu_block *block;
3972 	int blkaddr, pf;
3973 	int nixlf;
3974 	u64 cfg;
3975 
3976 	pf = rvu_get_pf(pcifunc);
3977 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3978 		return 0;
3979 
3980 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3981 	if (blkaddr < 0)
3982 		return NIX_AF_ERR_AF_LF_INVALID;
3983 
3984 	block = &hw->block[blkaddr];
3985 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3986 	if (nixlf < 0)
3987 		return NIX_AF_ERR_AF_LF_INVALID;
3988 
3989 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3990 
3991 	if (enable)
3992 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3993 	else
3994 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3995 
3996 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3997 
3998 	return 0;
3999 }
4000 
4001 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4002 					  struct msg_rsp *rsp)
4003 {
4004 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4005 }
4006 
4007 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4008 					   struct msg_rsp *rsp)
4009 {
4010 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4011 }
4012 
4013 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4014 					struct nix_lso_format_cfg *req,
4015 					struct nix_lso_format_cfg_rsp *rsp)
4016 {
4017 	u16 pcifunc = req->hdr.pcifunc;
4018 	struct nix_hw *nix_hw;
4019 	struct rvu_pfvf *pfvf;
4020 	int blkaddr, idx, f;
4021 	u64 reg;
4022 
4023 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4024 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4025 	if (!pfvf->nixlf || blkaddr < 0)
4026 		return NIX_AF_ERR_AF_LF_INVALID;
4027 
4028 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4029 	if (!nix_hw)
4030 		return -EINVAL;
4031 
4032 	/* Find existing matching LSO format, if any */
4033 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4034 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4035 			reg = rvu_read64(rvu, blkaddr,
4036 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4037 			if (req->fields[f] != (reg & req->field_mask))
4038 				break;
4039 		}
4040 
4041 		if (f == NIX_LSO_FIELD_MAX)
4042 			break;
4043 	}
4044 
4045 	if (idx < nix_hw->lso.in_use) {
4046 		/* Match found */
4047 		rsp->lso_format_idx = idx;
4048 		return 0;
4049 	}
4050 
4051 	if (nix_hw->lso.in_use == nix_hw->lso.total)
4052 		return NIX_AF_ERR_LSO_CFG_FAIL;
4053 
4054 	rsp->lso_format_idx = nix_hw->lso.in_use++;
4055 
4056 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4057 		rvu_write64(rvu, blkaddr,
4058 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4059 			    req->fields[f]);
4060 
4061 	return 0;
4062 }
4063 
4064 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4065 {
4066 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4067 
	/* Overwrite the VF MAC address with default_mac */
4069 	if (from_vf)
4070 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4071 }
4072 
/* NIX ingress policer (bandwidth profile) APIs */
4074 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4075 {
4076 	struct npc_lt_def_cfg defs, *ltdefs;
4077 
4078 	ltdefs = &defs;
4079 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4080 
	/* Extract the PCP and DEI fields from the outer VLAN at byte offset
	 * 2 from the start of LB_PTR (i.e. the TAG).
	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in the profile.
4085 	 */
4086 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4087 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4088 		    (ltdefs->ovlan.ltype_match << 4) |
4089 		    ltdefs->ovlan.ltype_mask);
4090 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4091 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4092 		    (ltdefs->ivlan.ltype_match << 4) |
4093 		    ltdefs->ivlan.ltype_mask);
4094 
4095 	/* DSCP field in outer and tunneled IPv4 packets */
4096 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4097 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4098 		    (ltdefs->rx_oip4.ltype_match << 4) |
4099 		    ltdefs->rx_oip4.ltype_mask);
4100 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4101 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4102 		    (ltdefs->rx_iip4.ltype_match << 4) |
4103 		    ltdefs->rx_iip4.ltype_mask);
4104 
4105 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4106 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4107 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4108 		    (ltdefs->rx_oip6.ltype_match << 4) |
4109 		    ltdefs->rx_oip6.ltype_mask);
4110 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4111 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4112 		    (ltdefs->rx_iip6.ltype_match << 4) |
4113 		    ltdefs->rx_iip6.ltype_mask);
4114 }
4115 
4116 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4117 				    int layer, int prof_idx)
4118 {
4119 	struct nix_cn10k_aq_enq_req aq_req;
4120 	int rc;
4121 
4122 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4123 
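	/* The queue index encodes the layer in bits [15:14] and the profile
	 * index within that layer in bits [13:0].
	 */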
4124 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4125 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4126 	aq_req.op = NIX_AQ_INSTOP_INIT;
4127 
4128 	/* Context is all zeros, submit to AQ */
4129 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4130 				     (struct nix_aq_enq_req *)&aq_req, NULL);
4131 	if (rc)
4132 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4133 			layer, prof_idx);
4134 	return rc;
4135 }
4136 
4137 static int nix_setup_ipolicers(struct rvu *rvu,
4138 			       struct nix_hw *nix_hw, int blkaddr)
4139 {
4140 	struct rvu_hwinfo *hw = rvu->hw;
4141 	struct nix_ipolicer *ipolicer;
4142 	int err, layer, prof_idx;
4143 	u64 cfg;
4144 
4145 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4146 	if (!(cfg & BIT_ULL(61))) {
4147 		hw->cap.ipolicer = false;
4148 		return 0;
4149 	}
4150 
4151 	hw->cap.ipolicer = true;
4152 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4153 					sizeof(*ipolicer), GFP_KERNEL);
4154 	if (!nix_hw->ipolicer)
4155 		return -ENOMEM;
4156 
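	/* NIX_AF_PL_CONST reports the number of bandwidth profiles per
	 * layer: bits [15:0] leaf, [31:16] mid and [47:32] top.
	 */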
4157 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4158 
4159 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4160 		ipolicer = &nix_hw->ipolicer[layer];
4161 		switch (layer) {
4162 		case BAND_PROF_LEAF_LAYER:
4163 			ipolicer->band_prof.max = cfg & 0XFFFF;
4164 			break;
4165 		case BAND_PROF_MID_LAYER:
4166 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4167 			break;
4168 		case BAND_PROF_TOP_LAYER:
4169 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4170 			break;
4171 		}
4172 
4173 		if (!ipolicer->band_prof.max)
4174 			continue;
4175 
4176 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
4177 		if (err)
4178 			return err;
4179 
4180 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4181 						  ipolicer->band_prof.max,
4182 						  sizeof(u16), GFP_KERNEL);
4183 		if (!ipolicer->pfvf_map)
4184 			return -ENOMEM;
4185 
4186 		ipolicer->match_id = devm_kcalloc(rvu->dev,
4187 						  ipolicer->band_prof.max,
4188 						  sizeof(u16), GFP_KERNEL);
4189 		if (!ipolicer->match_id)
4190 			return -ENOMEM;
4191 
4192 		for (prof_idx = 0;
4193 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
4194 			/* Set AF as current owner for INIT ops to succeed */
4195 			ipolicer->pfvf_map[prof_idx] = 0x00;
4196 
			/* The profile context has no enable bit, so there is
			 * no context disable either. INIT the contexts here
			 * so that a PF/VF later only needs to do a WRITE op
			 * to set up policer rates and config.
4201 			 */
4202 			err = nix_init_policer_context(rvu, nix_hw,
4203 						       layer, prof_idx);
4204 			if (err)
4205 				return err;
4206 		}
4207 
		/* Allocate memory for maintaining ref_counts of MID level
		 * profiles; this is needed when aggregating leaf layer
		 * profiles.
4211 		 */
4212 		if (layer != BAND_PROF_MID_LAYER)
4213 			continue;
4214 
		ipolicer->ref_count = devm_kcalloc(rvu->dev,
						   ipolicer->band_prof.max,
						   sizeof(u16), GFP_KERNEL);
		if (!ipolicer->ref_count)
			return -ENOMEM;
4218 	}
4219 
	/* Set policer time unit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
4221 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4222 
4223 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4224 
4225 	return 0;
4226 }
4227 
4228 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4229 {
4230 	struct nix_ipolicer *ipolicer;
4231 	int layer;
4232 
4233 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4234 		ipolicer = &nix_hw->ipolicer[layer];
4235 
4236 		if (!ipolicer->band_prof.max)
4237 			continue;
4238 
4239 		kfree(ipolicer->band_prof.bmap);
4240 	}
4241 }
4242 
4243 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4244 			       struct nix_hw *nix_hw, u16 pcifunc)
4245 {
4246 	struct nix_ipolicer *ipolicer;
4247 	int layer, hi_layer, prof_idx;
4248 
4249 	/* Bits [15:14] in profile index represent layer */
4250 	layer = (req->qidx >> 14) & 0x03;
4251 	prof_idx = req->qidx & 0x3FFF;
4252 
4253 	ipolicer = &nix_hw->ipolicer[layer];
4254 	if (prof_idx >= ipolicer->band_prof.max)
4255 		return -EINVAL;
4256 
	/* Check whether the profile is allocated to the requesting PCIFUNC,
	 * with the exception of AF; AF is allowed to read and update contexts.
4259 	 */
4260 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4261 		return -EINVAL;
4262 
	/* If this profile is linked to a higher layer profile, then check
	 * whether that profile is also allocated to the requesting PCIFUNC.
4266 	 */
4267 	if (!req->prof.hl_en)
4268 		return 0;
4269 
4270 	/* Leaf layer profile can link only to mid layer and
4271 	 * mid layer to top layer.
4272 	 */
4273 	if (layer == BAND_PROF_LEAF_LAYER)
4274 		hi_layer = BAND_PROF_MID_LAYER;
4275 	else if (layer == BAND_PROF_MID_LAYER)
4276 		hi_layer = BAND_PROF_TOP_LAYER;
4277 	else
4278 		return -EINVAL;
4279 
4280 	ipolicer = &nix_hw->ipolicer[hi_layer];
4281 	prof_idx = req->prof.band_prof_id;
4282 	if (prof_idx >= ipolicer->band_prof.max ||
4283 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
4284 		return -EINVAL;
4285 
4286 	return 0;
4287 }
4288 
4289 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4290 					struct nix_bandprof_alloc_req *req,
4291 					struct nix_bandprof_alloc_rsp *rsp)
4292 {
4293 	int blkaddr, layer, prof, idx, err;
4294 	u16 pcifunc = req->hdr.pcifunc;
4295 	struct nix_ipolicer *ipolicer;
4296 	struct nix_hw *nix_hw;
4297 
4298 	if (!rvu->hw->cap.ipolicer)
4299 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4300 
4301 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4302 	if (err)
4303 		return err;
4304 
4305 	mutex_lock(&rvu->rsrc_lock);
4306 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4307 		if (layer == BAND_PROF_INVAL_LAYER)
4308 			continue;
4309 		if (!req->prof_count[layer])
4310 			continue;
4311 
4312 		ipolicer = &nix_hw->ipolicer[layer];
4313 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
4314 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4315 			if (idx == MAX_BANDPROF_PER_PFFUNC)
4316 				break;
4317 
4318 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4319 			if (prof < 0)
4320 				break;
4321 			rsp->prof_count[layer]++;
4322 			rsp->prof_idx[layer][idx] = prof;
4323 			ipolicer->pfvf_map[prof] = pcifunc;
4324 		}
4325 	}
4326 	mutex_unlock(&rvu->rsrc_lock);
4327 	return 0;
4328 }
4329 
4330 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4331 {
4332 	int blkaddr, layer, prof_idx, err;
4333 	struct nix_ipolicer *ipolicer;
4334 	struct nix_hw *nix_hw;
4335 
4336 	if (!rvu->hw->cap.ipolicer)
4337 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4338 
4339 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4340 	if (err)
4341 		return err;
4342 
4343 	mutex_lock(&rvu->rsrc_lock);
4344 	/* Free all the profiles allocated to the PCIFUNC */
4345 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4346 		if (layer == BAND_PROF_INVAL_LAYER)
4347 			continue;
4348 		ipolicer = &nix_hw->ipolicer[layer];
4349 
4350 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4351 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4352 				continue;
4353 
4354 			/* Clear ratelimit aggregation, if any */
4355 			if (layer == BAND_PROF_LEAF_LAYER &&
4356 			    ipolicer->match_id[prof_idx])
4357 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4358 
4359 			ipolicer->pfvf_map[prof_idx] = 0x00;
4360 			ipolicer->match_id[prof_idx] = 0;
4361 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4362 		}
4363 	}
4364 	mutex_unlock(&rvu->rsrc_lock);
4365 	return 0;
4366 }
4367 
4368 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4369 				       struct nix_bandprof_free_req *req,
4370 				       struct msg_rsp *rsp)
4371 {
4372 	int blkaddr, layer, prof_idx, idx, err;
4373 	u16 pcifunc = req->hdr.pcifunc;
4374 	struct nix_ipolicer *ipolicer;
4375 	struct nix_hw *nix_hw;
4376 
4377 	if (req->free_all)
4378 		return nix_free_all_bandprof(rvu, pcifunc);
4379 
4380 	if (!rvu->hw->cap.ipolicer)
4381 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4382 
4383 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4384 	if (err)
4385 		return err;
4386 
4387 	mutex_lock(&rvu->rsrc_lock);
4388 	/* Free the requested profile indices */
4389 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4390 		if (layer == BAND_PROF_INVAL_LAYER)
4391 			continue;
4392 		if (!req->prof_count[layer])
4393 			continue;
4394 
4395 		ipolicer = &nix_hw->ipolicer[layer];
		for (idx = 0; idx < req->prof_count[layer]; idx++) {
			/* prof_idx[] holds at most MAX_BANDPROF_PER_PFFUNC
			 * entries per layer, so don't read past that even if
			 * prof_count claims more.
			 */
			if (idx == MAX_BANDPROF_PER_PFFUNC)
				break;
			prof_idx = req->prof_idx[layer][idx];
4398 			if (prof_idx >= ipolicer->band_prof.max ||
4399 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
4400 				continue;
4401 
4402 			/* Clear ratelimit aggregation, if any */
4403 			if (layer == BAND_PROF_LEAF_LAYER &&
4404 			    ipolicer->match_id[prof_idx])
4405 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4406 
4407 			ipolicer->pfvf_map[prof_idx] = 0x00;
4408 			ipolicer->match_id[prof_idx] = 0;
4409 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4412 		}
4413 	}
4414 	mutex_unlock(&rvu->rsrc_lock);
4415 	return 0;
4416 }
4417 
4418 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4419 			struct nix_cn10k_aq_enq_req *aq_req,
4420 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
4421 			u16 pcifunc, u8 ctype, u32 qidx)
4422 {
4423 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4424 	aq_req->hdr.pcifunc = pcifunc;
4425 	aq_req->ctype = ctype;
4426 	aq_req->op = NIX_AQ_INSTOP_READ;
4427 	aq_req->qidx = qidx;
4428 
4429 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4430 				       (struct nix_aq_enq_req *)aq_req,
4431 				       (struct nix_aq_enq_rsp *)aq_rsp);
4432 }
4433 
4434 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4435 					  struct nix_hw *nix_hw,
4436 					  struct nix_cn10k_aq_enq_req *aq_req,
4437 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
4438 					  u32 leaf_prof, u16 mid_prof)
4439 {
4440 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4441 	aq_req->hdr.pcifunc = 0x00;
4442 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4443 	aq_req->op = NIX_AQ_INSTOP_WRITE;
4444 	aq_req->qidx = leaf_prof;
4445 
4446 	aq_req->prof.band_prof_id = mid_prof;
4447 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4448 	aq_req->prof.hl_en = 1;
4449 	aq_req->prof_mask.hl_en = 1;
4450 
4451 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4452 				       (struct nix_aq_enq_req *)aq_req,
4453 				       (struct nix_aq_enq_rsp *)aq_rsp);
4454 }
4455 
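/* Rate limit flows that are steered to different RQs but marked with the
 * same match_id, in an aggregate fashion, by linking their leaf bandwidth
 * profiles to a common mid layer profile. If the peer leaf profile already
 * has a mid layer profile attached, reuse it; otherwise allocate a new mid
 * layer profile, initialize it from this leaf profile's context and link
 * both leaf profiles to it.
 */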
4456 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
4457 				 u16 rq_idx, u16 match_id)
4458 {
4459 	int leaf_prof, mid_prof, leaf_match;
4460 	struct nix_cn10k_aq_enq_req aq_req;
4461 	struct nix_cn10k_aq_enq_rsp aq_rsp;
4462 	struct nix_ipolicer *ipolicer;
4463 	struct nix_hw *nix_hw;
4464 	int blkaddr, idx, rc;
4465 
4466 	if (!rvu->hw->cap.ipolicer)
4467 		return 0;
4468 
4469 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4470 	if (rc)
4471 		return rc;
4472 
4473 	/* Fetch the RQ's context to see if policing is enabled */
4474 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
4475 				 NIX_AQ_CTYPE_RQ, rq_idx);
4476 	if (rc) {
4477 		dev_err(rvu->dev,
4478 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
4479 			__func__, rq_idx, pcifunc);
4480 		return rc;
4481 	}
4482 
4483 	if (!aq_rsp.rq.policer_ena)
4484 		return 0;
4485 
4486 	/* Get the bandwidth profile ID mapped to this RQ */
4487 	leaf_prof = aq_rsp.rq.band_prof_id;
4488 
4489 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
4490 	ipolicer->match_id[leaf_prof] = match_id;
4491 
	/* Check if any other leaf profile is marked with the same match_id */
4493 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
4494 		if (idx == leaf_prof)
4495 			continue;
4496 		if (ipolicer->match_id[idx] != match_id)
4497 			continue;
4498 
4499 		leaf_match = idx;
4500 		break;
4501 	}
4502 
4503 	if (idx == ipolicer->band_prof.max)
4504 		return 0;
4505 
4506 	/* Fetch the matching profile's context to check if it's already
4507 	 * mapped to a mid level profile.
4508 	 */
4509 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4510 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
4511 	if (rc) {
4512 		dev_err(rvu->dev,
4513 			"%s: Failed to fetch context of leaf profile %d\n",
4514 			__func__, leaf_match);
4515 		return rc;
4516 	}
4517 
4518 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
4519 	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map leaf_prof to it
		 * as well, so that flows that are being steered to different
		 * RQs but marked with the same match_id are rate limited
		 * in an aggregate fashion.
		 */
4525 		mid_prof = aq_rsp.prof.band_prof_id;
4526 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4527 						    &aq_req, &aq_rsp,
4528 						    leaf_prof, mid_prof);
4529 		if (rc) {
4530 			dev_err(rvu->dev,
4531 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4532 				__func__, leaf_prof, mid_prof);
4533 			goto exit;
4534 		}
4535 
4536 		mutex_lock(&rvu->rsrc_lock);
4537 		ipolicer->ref_count[mid_prof]++;
4538 		mutex_unlock(&rvu->rsrc_lock);
4539 		goto exit;
4540 	}
4541 
4542 	/* Allocate a mid layer profile and
4543 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
4544 	 */
4545 	mutex_lock(&rvu->rsrc_lock);
4546 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4547 	if (mid_prof < 0) {
4548 		dev_err(rvu->dev,
4549 			"%s: Unable to allocate mid layer profile\n", __func__);
4550 		mutex_unlock(&rvu->rsrc_lock);
4551 		goto exit;
4552 	}
4553 	mutex_unlock(&rvu->rsrc_lock);
4554 	ipolicer->pfvf_map[mid_prof] = 0x00;
4555 	ipolicer->ref_count[mid_prof] = 0;
4556 
4557 	/* Initialize mid layer profile same as 'leaf_prof' */
4558 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4559 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
4560 	if (rc) {
4561 		dev_err(rvu->dev,
4562 			"%s: Failed to fetch context of leaf profile %d\n",
4563 			__func__, leaf_prof);
4564 		goto exit;
4565 	}
4566 
4567 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4568 	aq_req.hdr.pcifunc = 0x00;
4569 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
4570 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4571 	aq_req.op = NIX_AQ_INSTOP_WRITE;
4572 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
4573 	/* Clear higher layer enable bit in the mid profile, just in case */
4574 	aq_req.prof.hl_en = 0;
4575 	aq_req.prof_mask.hl_en = 1;
4576 
4577 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4578 				     (struct nix_aq_enq_req *)&aq_req, NULL);
4579 	if (rc) {
4580 		dev_err(rvu->dev,
4581 			"%s: Failed to INIT context of mid layer profile %d\n",
4582 			__func__, mid_prof);
4583 		goto exit;
4584 	}
4585 
4586 	/* Map both leaf profiles to this mid layer profile */
4587 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4588 					    &aq_req, &aq_rsp,
4589 					    leaf_prof, mid_prof);
4590 	if (rc) {
4591 		dev_err(rvu->dev,
4592 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4593 			__func__, leaf_prof, mid_prof);
4594 		goto exit;
4595 	}
4596 
4597 	mutex_lock(&rvu->rsrc_lock);
4598 	ipolicer->ref_count[mid_prof]++;
4599 	mutex_unlock(&rvu->rsrc_lock);
4600 
4601 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4602 					    &aq_req, &aq_rsp,
4603 					    leaf_match, mid_prof);
4604 	if (rc) {
4605 		dev_err(rvu->dev,
4606 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4607 			__func__, leaf_match, mid_prof);
4608 		ipolicer->ref_count[mid_prof]--;
4609 		goto exit;
4610 	}
4611 
4612 	mutex_lock(&rvu->rsrc_lock);
4613 	ipolicer->ref_count[mid_prof]++;
4614 	mutex_unlock(&rvu->rsrc_lock);
4615 
4616 exit:
4617 	return rc;
4618 }
4619 
/* Called with rsrc_lock held; it is temporarily dropped and re-acquired
 * around the AQ context read below.
 */
4621 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
4622 				     u32 leaf_prof)
4623 {
4624 	struct nix_cn10k_aq_enq_req aq_req;
4625 	struct nix_cn10k_aq_enq_rsp aq_rsp;
4626 	struct nix_ipolicer *ipolicer;
4627 	u16 mid_prof;
4628 	int rc;
4629 
4630 	mutex_unlock(&rvu->rsrc_lock);
4631 
4632 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4633 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
4634 
4635 	mutex_lock(&rvu->rsrc_lock);
4636 	if (rc) {
4637 		dev_err(rvu->dev,
4638 			"%s: Failed to fetch context of leaf profile %d\n",
4639 			__func__, leaf_prof);
4640 		return;
4641 	}
4642 
4643 	if (!aq_rsp.prof.hl_en)
4644 		return;
4645 
4646 	mid_prof = aq_rsp.prof.band_prof_id;
4647 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
4648 	ipolicer->ref_count[mid_prof]--;
4649 	/* If ref_count is zero, free mid layer profile */
4650 	if (!ipolicer->ref_count[mid_prof]) {
4651 		ipolicer->pfvf_map[mid_prof] = 0x00;
4652 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
4653 	}
4654 }
4655