1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 
11 #include "rvu_struct.h"
12 #include "rvu_reg.h"
13 #include "rvu.h"
14 #include "npc.h"
15 #include "mcs.h"
16 #include "cgx.h"
17 #include "lmac_common.h"
18 #include "rvu_npc_hash.h"
19 
20 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
21 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
22 			    int type, int chan_id);
23 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
24 			       int type, bool add);
25 static int nix_setup_ipolicers(struct rvu *rvu,
26 			       struct nix_hw *nix_hw, int blkaddr);
27 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
28 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
29 			       struct nix_hw *nix_hw, u16 pcifunc);
30 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
31 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
32 				     u32 leaf_prof);
33 static const char *nix_get_ctx_name(int ctype);
34 
35 enum mc_tbl_sz {
36 	MC_TBL_SZ_256,
37 	MC_TBL_SZ_512,
38 	MC_TBL_SZ_1K,
39 	MC_TBL_SZ_2K,
40 	MC_TBL_SZ_4K,
41 	MC_TBL_SZ_8K,
42 	MC_TBL_SZ_16K,
43 	MC_TBL_SZ_32K,
44 	MC_TBL_SZ_64K,
45 };
46 
47 enum mc_buf_cnt {
48 	MC_BUF_CNT_8,
49 	MC_BUF_CNT_16,
50 	MC_BUF_CNT_32,
51 	MC_BUF_CNT_64,
52 	MC_BUF_CNT_128,
53 	MC_BUF_CNT_256,
54 	MC_BUF_CNT_512,
55 	MC_BUF_CNT_1024,
56 	MC_BUF_CNT_2048,
57 };
58 
enum nix_mark_fmt_indexes {
60 	NIX_MARK_CFG_IP_DSCP_RED,
61 	NIX_MARK_CFG_IP_DSCP_YELLOW,
62 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 	NIX_MARK_CFG_IP_ECN_RED,
64 	NIX_MARK_CFG_IP_ECN_YELLOW,
65 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 	NIX_MARK_CFG_VLAN_DEI_RED,
67 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
69 	NIX_MARK_CFG_MAX,
70 };
71 
/* For now, consider only the MC resources needed for broadcast
 * pkt replication, i.e. 256 HWVFs + 12 PFs.
 */
75 #define MC_TBL_SIZE	MC_TBL_SZ_512
76 #define MC_BUF_CNT	MC_BUF_CNT_128
77 
78 struct mce {
79 	struct hlist_node	node;
80 	u16			pcifunc;
81 };
82 
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
84 {
85 	int i = 0;
86 
	/* If blkaddr is 0, return the first nix block address */
88 	if (blkaddr == 0)
89 		return rvu->nix_blkaddr[blkaddr];
90 
91 	while (i + 1 < MAX_NIX_BLKS) {
92 		if (rvu->nix_blkaddr[i] == blkaddr)
93 			return rvu->nix_blkaddr[i + 1];
94 		i++;
95 	}
96 
97 	return 0;
98 }
99 
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
101 {
102 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
103 	int blkaddr;
104 
105 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 	if (!pfvf->nixlf || blkaddr < 0)
107 		return false;
108 	return true;
109 }
110 
111 int rvu_get_nixlf_count(struct rvu *rvu)
112 {
113 	int blkaddr = 0, max = 0;
114 	struct rvu_block *block;
115 
116 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
117 	while (blkaddr) {
118 		block = &rvu->hw->block[blkaddr];
119 		max += block->lf.max;
120 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
121 	}
122 	return max;
123 }
124 
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
126 {
127 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 	struct rvu_hwinfo *hw = rvu->hw;
129 	int blkaddr;
130 
131 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 	if (!pfvf->nixlf || blkaddr < 0)
133 		return NIX_AF_ERR_AF_LF_INVALID;
134 
135 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
136 	if (*nixlf < 0)
137 		return NIX_AF_ERR_AF_LF_INVALID;
138 
139 	if (nix_blkaddr)
140 		*nix_blkaddr = blkaddr;
141 
142 	return 0;
143 }
144 
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 			struct nix_hw **nix_hw, int *blkaddr)
147 {
148 	struct rvu_pfvf *pfvf;
149 
150 	pfvf = rvu_get_pfvf(rvu, pcifunc);
151 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 	if (!pfvf->nixlf || *blkaddr < 0)
153 		return NIX_AF_ERR_AF_LF_INVALID;
154 
155 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
156 	if (!*nix_hw)
157 		return NIX_AF_ERR_INVALID_NIXBLK;
158 	return 0;
159 }
160 
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
162 {
163 	INIT_HLIST_HEAD(&list->head);
164 	list->count = 0;
165 	list->max = max;
166 }
167 
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
169 {
170 	int idx;
171 
172 	if (!mcast)
173 		return 0;
174 
175 	idx = mcast->next_free_mce;
176 	mcast->next_free_mce += count;
177 	return idx;
178 }
179 
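/* Return the nix_hw state for a given NIX block address by walking the
 * probed NIX blocks (NIX0, NIX1, ...) in order.
 */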
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
181 {
182 	int nix_blkaddr = 0, i = 0;
183 	struct rvu *rvu = hw->rvu;
184 
185 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 	while (nix_blkaddr) {
187 		if (blkaddr == nix_blkaddr && hw->nix)
188 			return &hw->nix[i];
189 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
190 		i++;
191 	}
192 	return NULL;
193 }
194 
195 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
196 {
197 	if (hw->cap.nix_multiple_dwrr_mtu)
198 		return NIX_AF_DWRR_MTUX(smq_link_type);
199 
200 	if (smq_link_type == SMQ_LINK_TYPE_SDP)
201 		return NIX_AF_DWRR_SDP_MTU;
202 
	/* Here it's the same reg for RPM and LBK */
204 	return NIX_AF_DWRR_RPM_MTU;
205 }
206 
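/* Decode the 5-bit HW DWRR MTU encoding into bytes; apart from the two
 * reserved values below, an encoded value of n means 2^n bytes
 * (e.g. 10 -> 1024 bytes).
 */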
207 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
208 {
209 	dwrr_mtu &= 0x1FULL;
210 
	/* The MTU used for DWRR calculation is a power of 2, up to 64K bytes.
	 * A value of 4 is reserved for an MTU of 9728 bytes.
	 * A value of 5 is reserved for an MTU of 10240 bytes.
	 */
215 	switch (dwrr_mtu) {
216 	case 4:
217 		return 9728;
218 	case 5:
219 		return 10240;
220 	default:
221 		return BIT_ULL(dwrr_mtu);
222 	}
223 
224 	return 0;
225 }
226 
227 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
228 {
	/* The MTU used for DWRR calculation is a power of 2, up to 64K bytes.
	 * A value of 4 is reserved for an MTU of 9728 bytes.
	 * A value of 5 is reserved for an MTU of 10240 bytes.
	 */
233 	if (bytes > BIT_ULL(16))
234 		return 0;
235 
236 	switch (bytes) {
237 	case 9728:
238 		return 4;
239 	case 10240:
240 		return 5;
241 	default:
242 		return ilog2(bytes);
243 	}
244 
245 	return 0;
246 }
247 
248 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
249 {
250 	int err;
251 
252 	/* Sync all in flight RX packets to LLC/DRAM */
253 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
254 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
255 	if (err)
256 		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
257 
	/* SW_SYNC ensures all in-flight transactions are finished and pkts
	 * are written to LLC/DRAM; queues should be torn down only after a
	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
	 * an existing transaction might complete after the SW_SYNC operation.
	 * To ensure the operation is fully done, do the SW_SYNC twice.
	 */
264 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
265 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
266 	if (err)
267 		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
268 }
269 
270 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
271 			    int lvl, u16 pcifunc, u16 schq)
272 {
273 	struct rvu_hwinfo *hw = rvu->hw;
274 	struct nix_txsch *txsch;
275 	struct nix_hw *nix_hw;
276 	u16 map_func;
277 
278 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
279 	if (!nix_hw)
280 		return false;
281 
282 	txsch = &nix_hw->txsch[lvl];
283 	/* Check out of bounds */
284 	if (schq >= txsch->schq.max)
285 		return false;
286 
287 	mutex_lock(&rvu->rsrc_lock);
288 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
289 	mutex_unlock(&rvu->rsrc_lock);
290 
	/* TLs aggregating traffic are shared across PF and VFs */
292 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
293 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
294 			return false;
295 		else
296 			return true;
297 	}
298 
299 	if (map_func != pcifunc)
300 		return false;
301 
302 	return true;
303 }
304 
305 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
306 			      struct nix_lf_alloc_rsp *rsp, bool loop)
307 {
308 	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
309 	u16 req_chan_base, req_chan_end, req_chan_cnt;
310 	struct rvu_hwinfo *hw = rvu->hw;
311 	struct sdp_node_info *sdp_info;
312 	int pkind, pf, vf, lbkid, vfid;
313 	u8 cgx_id, lmac_id;
314 	bool from_vf;
315 	int err;
316 
317 	pf = rvu_get_pf(pcifunc);
318 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
319 	    type != NIX_INTF_TYPE_SDP)
320 		return 0;
321 
322 	switch (type) {
323 	case NIX_INTF_TYPE_CGX:
324 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
325 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
326 
327 		pkind = rvu_npc_get_pkind(rvu, pf);
328 		if (pkind < 0) {
329 			dev_err(rvu->dev,
330 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
331 			return -EINVAL;
332 		}
333 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
334 		pfvf->tx_chan_base = pfvf->rx_chan_base;
335 		pfvf->rx_chan_cnt = 1;
336 		pfvf->tx_chan_cnt = 1;
337 		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
338 
339 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
340 		rvu_npc_set_pkind(rvu, pkind, pfvf);
341 
342 		break;
343 	case NIX_INTF_TYPE_LBK:
344 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
345 
		/* If the NIX1 block is present on the silicon then NIXes are
		 * assigned alternately to lbk interfaces. NIX0 should
		 * send packets on lbk link 1 channels and NIX1 should send
		 * on lbk link 0 channels for the communication between
		 * NIX0 and NIX1.
		 */
352 		lbkid = 0;
353 		if (rvu->hw->lbk_links > 1)
354 			lbkid = vf & 0x1 ? 0 : 1;
355 
356 		/* By default NIX0 is configured to send packet on lbk link 1
357 		 * (which corresponds to LBK1), same packet will receive on
358 		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
359 		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
360 		 * link 1.
		 * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0
		 * transmits and receives on lbk link 0, which corresponds
		 * to the LBK1 block, back to back connectivity between NIX
		 * and LBK can be achieved (which is similar to 96xx).
365 		 *
366 		 *			RX		TX
367 		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
368 		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
369 		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
370 		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
371 		 */
372 		if (loop)
373 			lbkid = !lbkid;
374 
		/* Note that AF's VFs work in pairs and talk over consecutive
		 * loopback channels. Therefore, if an odd number of AF VFs is
		 * enabled, the last VF remains without a pair.
		 */
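		/* For example, VF0 transmits on VF1's RX channel and VF1 on
		 * VF0's RX channel, forming one loopback pair.
		 */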
379 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
380 		pfvf->tx_chan_base = vf & 0x1 ?
381 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
382 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
383 		pfvf->rx_chan_cnt = 1;
384 		pfvf->tx_chan_cnt = 1;
385 		rsp->tx_link = hw->cgx_links + lbkid;
386 		pfvf->lbkid = lbkid;
387 		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
388 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
389 					      pfvf->rx_chan_base,
390 					      pfvf->rx_chan_cnt);
391 
392 		break;
393 	case NIX_INTF_TYPE_SDP:
394 		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
395 		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
396 		sdp_info = parent_pf->sdp_info;
397 		if (!sdp_info) {
398 			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
399 			return -EINVAL;
400 		}
401 		if (from_vf) {
402 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
403 				sdp_info->num_pf_rings;
404 			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
405 			for (vfid = 0; vfid < vf; vfid++)
406 				req_chan_base += sdp_info->vf_rings[vfid];
407 			req_chan_cnt = sdp_info->vf_rings[vf];
408 			req_chan_end = req_chan_base + req_chan_cnt - 1;
409 			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
410 			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
411 				dev_err(rvu->dev,
412 					"PF_Func 0x%x: Invalid channel base and count\n",
413 					pcifunc);
414 				return -EINVAL;
415 			}
416 		} else {
417 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
418 			req_chan_cnt = sdp_info->num_pf_rings;
419 		}
420 
421 		pfvf->rx_chan_base = req_chan_base;
422 		pfvf->rx_chan_cnt = req_chan_cnt;
423 		pfvf->tx_chan_base = pfvf->rx_chan_base;
424 		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
425 
426 		rsp->tx_link = hw->cgx_links + hw->lbk_links;
427 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
428 					      pfvf->rx_chan_base,
429 					      pfvf->rx_chan_cnt);
430 		break;
431 	}
432 
433 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
434 	 * RVU PF/VF's MAC address.
435 	 */
436 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
437 				    pfvf->rx_chan_base, pfvf->mac_addr);
438 
439 	/* Add this PF_FUNC to bcast pkt replication list */
440 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
441 	if (err) {
442 		dev_err(rvu->dev,
443 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
444 			pcifunc);
445 		return err;
446 	}
447 	/* Install MCAM rule matching Ethernet broadcast mac address */
448 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
449 					  nixlf, pfvf->rx_chan_base);
450 
451 	pfvf->maxlen = NIC_HW_MIN_FRS;
452 	pfvf->minlen = NIC_HW_MIN_FRS;
453 
454 	return 0;
455 }
456 
457 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
458 {
459 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
460 	int err;
461 
462 	pfvf->maxlen = 0;
463 	pfvf->minlen = 0;
464 
465 	/* Remove this PF_FUNC from bcast pkt replication list */
466 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
467 	if (err) {
468 		dev_err(rvu->dev,
469 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
470 			pcifunc);
471 	}
472 
473 	/* Free and disable any MCAM entries used by this NIX LF */
474 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
475 
476 	/* Disable DMAC filters used */
477 	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
478 }
479 
480 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
481 				    struct nix_bp_cfg_req *req,
482 				    struct msg_rsp *rsp)
483 {
484 	u16 pcifunc = req->hdr.pcifunc;
485 	struct rvu_pfvf *pfvf;
486 	int blkaddr, pf, type;
487 	u16 chan_base, chan;
488 	u64 cfg;
489 
490 	pf = rvu_get_pf(pcifunc);
491 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
492 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
493 		return 0;
494 
495 	pfvf = rvu_get_pfvf(rvu, pcifunc);
496 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
497 
498 	chan_base = pfvf->rx_chan_base + req->chan_base;
499 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
500 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
501 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
502 			    cfg & ~BIT_ULL(16));
503 	}
504 	return 0;
505 }
506 
507 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
508 			    int type, int chan_id)
509 {
510 	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
511 	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
512 	struct rvu_hwinfo *hw = rvu->hw;
513 	struct rvu_pfvf *pfvf;
514 	u8 cgx_id, lmac_id;
515 	u64 cfg;
516 
517 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
518 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
519 	lmac_chan_cnt = cfg & 0xFF;
520 
521 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
522 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
523 
524 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
525 	sdp_chan_cnt = cfg & 0xFFF;
526 	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
527 
528 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
529 
	/* Backpressure IDs range division
	 * CGX channels are mapped to (0 - 191) BPIDs
	 * LBK channels are mapped to (192 - 255) BPIDs
	 * SDP channels are mapped to (256 - 511) BPIDs
	 *
	 * LMAC channels and BPIDs are mapped as follows:
	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
	 */
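	/* Example (assuming 16 channels per LMAC and 4 LMACs per CGX):
	 * cgx(1)_lmac(2)_chan(3) maps to bpid = 1 * 4 * 16 + 2 * 16 + 3 = 99,
	 * matching the CGX computation below.
	 */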
540 	switch (type) {
541 	case NIX_INTF_TYPE_CGX:
542 		if ((req->chan_base + req->chan_cnt) > 16)
543 			return -EINVAL;
544 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
545 		/* Assign bpid based on cgx, lmac and chan id */
546 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
547 			(lmac_id * lmac_chan_cnt) + req->chan_base;
548 
549 		if (req->bpid_per_chan)
550 			bpid += chan_id;
551 		if (bpid > cgx_bpid_cnt)
552 			return -EINVAL;
553 		break;
554 
555 	case NIX_INTF_TYPE_LBK:
556 		if ((req->chan_base + req->chan_cnt) > 63)
557 			return -EINVAL;
558 		bpid = cgx_bpid_cnt + req->chan_base;
559 		if (req->bpid_per_chan)
560 			bpid += chan_id;
561 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
562 			return -EINVAL;
563 		break;
564 	case NIX_INTF_TYPE_SDP:
565 		if ((req->chan_base + req->chan_cnt) > 255)
566 			return -EINVAL;
567 
568 		bpid = sdp_bpid_cnt + req->chan_base;
569 		if (req->bpid_per_chan)
570 			bpid += chan_id;
571 
572 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
573 			return -EINVAL;
574 		break;
575 	default:
576 		return -EINVAL;
577 	}
578 	return bpid;
579 }
580 
581 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
582 				   struct nix_bp_cfg_req *req,
583 				   struct nix_bp_cfg_rsp *rsp)
584 {
585 	int blkaddr, pf, type, chan_id = 0;
586 	u16 pcifunc = req->hdr.pcifunc;
587 	struct rvu_pfvf *pfvf;
588 	u16 chan_base, chan;
589 	s16 bpid, bpid_base;
590 	u64 cfg;
591 
592 	pf = rvu_get_pf(pcifunc);
593 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
594 	if (is_sdp_pfvf(pcifunc))
595 		type = NIX_INTF_TYPE_SDP;
596 
597 	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
598 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
599 	    type != NIX_INTF_TYPE_SDP)
600 		return 0;
601 
602 	pfvf = rvu_get_pfvf(rvu, pcifunc);
603 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
604 
605 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
606 	chan_base = pfvf->rx_chan_base + req->chan_base;
607 	bpid = bpid_base;
608 
609 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
610 		if (bpid < 0) {
			dev_warn(rvu->dev, "Failed to enable backpressure\n");
612 			return -EINVAL;
613 		}
614 
615 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
616 		cfg &= ~GENMASK_ULL(8, 0);
617 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
618 			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
619 		chan_id++;
620 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
621 	}
622 
623 	for (chan = 0; chan < req->chan_cnt; chan++) {
		/* Map channel to the bpid assigned to it */
625 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
626 					(bpid_base & 0x3FF);
627 		if (req->bpid_per_chan)
628 			bpid_base++;
629 	}
630 	rsp->chan_cnt = req->chan_cnt;
631 
632 	return 0;
633 }
634 
635 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
636 				 u64 format, bool v4, u64 *fidx)
637 {
638 	struct nix_lso_format field = {0};
639 
640 	/* IP's Length field */
641 	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4, the length field is at byte offset 2; for IPv6 it's at 4 */
643 	field.offset = v4 ? 2 : 4;
644 	field.sizem1 = 1; /* i.e 2 bytes */
645 	field.alg = NIX_LSOALG_ADD_PAYLEN;
646 	rvu_write64(rvu, blkaddr,
647 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
648 		    *(u64 *)&field);
649 
650 	/* No ID field in IPv6 header */
651 	if (!v4)
652 		return;
653 
654 	/* IP's ID field */
655 	field.layer = NIX_TXLAYER_OL3;
656 	field.offset = 4;
657 	field.sizem1 = 1; /* i.e 2 bytes */
658 	field.alg = NIX_LSOALG_ADD_SEGNUM;
659 	rvu_write64(rvu, blkaddr,
660 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
661 		    *(u64 *)&field);
662 }
663 
664 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
665 				 u64 format, u64 *fidx)
666 {
667 	struct nix_lso_format field = {0};
668 
669 	/* TCP's sequence number field */
670 	field.layer = NIX_TXLAYER_OL4;
671 	field.offset = 4;
672 	field.sizem1 = 3; /* i.e 4 bytes */
673 	field.alg = NIX_LSOALG_ADD_OFFSET;
674 	rvu_write64(rvu, blkaddr,
675 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
676 		    *(u64 *)&field);
677 
678 	/* TCP's flags field */
679 	field.layer = NIX_TXLAYER_OL4;
680 	field.offset = 12;
681 	field.sizem1 = 1; /* 2 bytes */
682 	field.alg = NIX_LSOALG_TCP_FLAGS;
683 	rvu_write64(rvu, blkaddr,
684 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
685 		    *(u64 *)&field);
686 }
687 
688 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
689 {
690 	u64 cfg, idx, fidx = 0;
691 
692 	/* Get max HW supported format indices */
693 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
694 	nix_hw->lso.total = cfg;
695 
696 	/* Enable LSO */
697 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
698 	/* For TSO, set first and middle segment flags to
699 	 * mask out PSH, RST & FIN flags in TCP packet
700 	 */
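	/* 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) in the
	 * per-segment flag masks while keeping the remaining TCP flag bits.
	 */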
701 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
702 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
703 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
704 
705 	/* Setup default static LSO formats
706 	 *
707 	 * Configure format fields for TCPv4 segmentation offload
708 	 */
709 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
710 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
711 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
712 
713 	/* Set rest of the fields to NOP */
714 	for (; fidx < 8; fidx++) {
715 		rvu_write64(rvu, blkaddr,
716 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
717 	}
718 	nix_hw->lso.in_use++;
719 
720 	/* Configure format fields for TCPv6 segmentation offload */
721 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
722 	fidx = 0;
723 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
724 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
725 
726 	/* Set rest of the fields to NOP */
727 	for (; fidx < 8; fidx++) {
728 		rvu_write64(rvu, blkaddr,
729 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
730 	}
731 	nix_hw->lso.in_use++;
732 }
733 
734 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
735 {
736 	kfree(pfvf->rq_bmap);
737 	kfree(pfvf->sq_bmap);
738 	kfree(pfvf->cq_bmap);
739 	if (pfvf->rq_ctx)
740 		qmem_free(rvu->dev, pfvf->rq_ctx);
741 	if (pfvf->sq_ctx)
742 		qmem_free(rvu->dev, pfvf->sq_ctx);
743 	if (pfvf->cq_ctx)
744 		qmem_free(rvu->dev, pfvf->cq_ctx);
745 	if (pfvf->rss_ctx)
746 		qmem_free(rvu->dev, pfvf->rss_ctx);
747 	if (pfvf->nix_qints_ctx)
748 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
749 	if (pfvf->cq_ints_ctx)
750 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
751 
752 	pfvf->rq_bmap = NULL;
753 	pfvf->cq_bmap = NULL;
754 	pfvf->sq_bmap = NULL;
755 	pfvf->rq_ctx = NULL;
756 	pfvf->sq_ctx = NULL;
757 	pfvf->cq_ctx = NULL;
758 	pfvf->rss_ctx = NULL;
759 	pfvf->nix_qints_ctx = NULL;
760 	pfvf->cq_ints_ctx = NULL;
761 }
762 
763 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
764 			      struct rvu_pfvf *pfvf, int nixlf,
765 			      int rss_sz, int rss_grps, int hwctx_size,
766 			      u64 way_mask, bool tag_lsb_as_adder)
767 {
768 	int err, grp, num_indices;
769 	u64 val;
770 
771 	/* RSS is not requested for this NIXLF */
772 	if (!rss_sz)
773 		return 0;
774 	num_indices = rss_sz * rss_grps;
775 
776 	/* Alloc NIX RSS HW context memory and config the base */
777 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
778 	if (err)
779 		return err;
780 
781 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
782 		    (u64)pfvf->rss_ctx->iova);
783 
784 	/* Config full RSS table size, enable RSS and caching */
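	/* Bit 36 enables caching, bit 4 enables RSS, way_mask starts at
	 * bit 20 and the low bits hold ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE).
	 */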
785 	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
786 			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
787 
788 	if (tag_lsb_as_adder)
789 		val |= BIT_ULL(5);
790 
791 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
792 	/* Config RSS group offset and sizes */
793 	for (grp = 0; grp < rss_grps; grp++)
794 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
795 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
796 	return 0;
797 }
798 
799 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
800 			       struct nix_aq_inst_s *inst)
801 {
802 	struct admin_queue *aq = block->aq;
803 	struct nix_aq_res_s *result;
804 	int timeout = 1000;
805 	u64 reg, head;
806 	int ret;
807 
808 	result = (struct nix_aq_res_s *)aq->res->base;
809 
810 	/* Get current head pointer where to append this instruction */
811 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
812 	head = (reg >> 4) & AQ_PTR_MASK;
813 
814 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
815 	       (void *)inst, aq->inst->entry_sz);
816 	memset(result, 0, sizeof(*result));
817 	/* sync into memory */
818 	wmb();
819 
820 	/* Ring the doorbell and wait for result */
821 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
822 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
823 		cpu_relax();
824 		udelay(1);
825 		timeout--;
826 		if (!timeout)
827 			return -EBUSY;
828 	}
829 
830 	if (result->compcode != NIX_AQ_COMP_GOOD) {
831 		/* TODO: Replace this with some error code */
832 		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
833 		    result->compcode == NIX_AQ_COMP_LOCKERR ||
834 		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
835 			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
836 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
837 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
838 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
839 			if (ret)
840 				dev_err(rvu->dev,
841 					"%s: Not able to unlock cachelines\n", __func__);
842 		}
843 
844 		return -EBUSY;
845 	}
846 
847 	return 0;
848 }
849 
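/* The SQ context layout differs between the OTX2 and CN10K mailbox
 * request formats, so fetch the SMQ field and its mask from the format
 * matching the silicon.
 */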
850 static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
851 			       u16 *smq, u16 *smq_mask)
852 {
853 	struct nix_cn10k_aq_enq_req *aq_req;
854 
855 	if (!is_rvu_otx2(rvu)) {
856 		aq_req = (struct nix_cn10k_aq_enq_req *)req;
857 		*smq = aq_req->sq.smq;
858 		*smq_mask = aq_req->sq_mask.smq;
859 	} else {
860 		*smq = req->sq.smq;
861 		*smq_mask = req->sq_mask.smq;
862 	}
863 }
864 
865 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
866 				   struct nix_aq_enq_req *req,
867 				   struct nix_aq_enq_rsp *rsp)
868 {
869 	struct rvu_hwinfo *hw = rvu->hw;
870 	u16 pcifunc = req->hdr.pcifunc;
871 	int nixlf, blkaddr, rc = 0;
872 	struct nix_aq_inst_s inst;
873 	struct rvu_block *block;
874 	struct admin_queue *aq;
875 	struct rvu_pfvf *pfvf;
876 	u16 smq, smq_mask;
877 	void *ctx, *mask;
878 	bool ena;
879 	u64 cfg;
880 
881 	blkaddr = nix_hw->blkaddr;
882 	block = &hw->block[blkaddr];
883 	aq = block->aq;
884 	if (!aq) {
885 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
886 		return NIX_AF_ERR_AQ_ENQUEUE;
887 	}
888 
889 	pfvf = rvu_get_pfvf(rvu, pcifunc);
890 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
891 
892 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
893 	 * operations done by AF itself.
894 	 */
895 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
896 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
897 		if (!pfvf->nixlf || nixlf < 0)
898 			return NIX_AF_ERR_AF_LF_INVALID;
899 	}
900 
901 	switch (req->ctype) {
902 	case NIX_AQ_CTYPE_RQ:
903 		/* Check if index exceeds max no of queues */
904 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
905 			rc = NIX_AF_ERR_AQ_ENQUEUE;
906 		break;
907 	case NIX_AQ_CTYPE_SQ:
908 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
909 			rc = NIX_AF_ERR_AQ_ENQUEUE;
910 		break;
911 	case NIX_AQ_CTYPE_CQ:
912 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
913 			rc = NIX_AF_ERR_AQ_ENQUEUE;
914 		break;
915 	case NIX_AQ_CTYPE_RSS:
916 		/* Check if RSS is enabled and qidx is within range */
917 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
918 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
919 		    (req->qidx >= (256UL << (cfg & 0xF))))
920 			rc = NIX_AF_ERR_AQ_ENQUEUE;
921 		break;
922 	case NIX_AQ_CTYPE_MCE:
923 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
924 
925 		/* Check if index exceeds MCE list length */
926 		if (!nix_hw->mcast.mce_ctx ||
927 		    (req->qidx >= (256UL << (cfg & 0xF))))
928 			rc = NIX_AF_ERR_AQ_ENQUEUE;
929 
		/* Adding multicast lists for requests from PF/VFs is not
		 * yet supported, so reject such requests.
		 */
933 		if (rsp)
934 			rc = NIX_AF_ERR_AQ_ENQUEUE;
935 		break;
936 	case NIX_AQ_CTYPE_BANDPROF:
937 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
938 					nix_hw, pcifunc))
939 			rc = NIX_AF_ERR_INVALID_BANDPROF;
940 		break;
941 	default:
942 		rc = NIX_AF_ERR_AQ_ENQUEUE;
943 	}
944 
945 	if (rc)
946 		return rc;
947 
948 	nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
949 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
950 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
951 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
952 	     (req->op == NIX_AQ_INSTOP_WRITE &&
953 	      req->sq_mask.ena && req->sq.ena && smq_mask))) {
954 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
955 				     pcifunc, smq))
956 			return NIX_AF_ERR_AQ_ENQUEUE;
957 	}
958 
959 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
960 	inst.lf = nixlf;
961 	inst.cindex = req->qidx;
962 	inst.ctype = req->ctype;
963 	inst.op = req->op;
964 	/* Currently we are not supporting enqueuing multiple instructions,
965 	 * so always choose first entry in result memory.
966 	 */
967 	inst.res_addr = (u64)aq->res->iova;
968 
	/* Hardware uses the same aq->res->base for updating the result of
	 * the previous instruction, hence wait here till it is done.
	 */
972 	spin_lock(&aq->lock);
973 
974 	/* Clean result + context memory */
975 	memset(aq->res->base, 0, aq->res->entry_sz);
976 	/* Context needs to be written at RES_ADDR + 128 */
977 	ctx = aq->res->base + 128;
978 	/* Mask needs to be written at RES_ADDR + 256 */
979 	mask = aq->res->base + 256;
980 
981 	switch (req->op) {
982 	case NIX_AQ_INSTOP_WRITE:
983 		if (req->ctype == NIX_AQ_CTYPE_RQ)
984 			memcpy(mask, &req->rq_mask,
985 			       sizeof(struct nix_rq_ctx_s));
986 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
987 			memcpy(mask, &req->sq_mask,
988 			       sizeof(struct nix_sq_ctx_s));
989 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
990 			memcpy(mask, &req->cq_mask,
991 			       sizeof(struct nix_cq_ctx_s));
992 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
993 			memcpy(mask, &req->rss_mask,
994 			       sizeof(struct nix_rsse_s));
995 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
996 			memcpy(mask, &req->mce_mask,
997 			       sizeof(struct nix_rx_mce_s));
998 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
999 			memcpy(mask, &req->prof_mask,
1000 			       sizeof(struct nix_bandprof_s));
1001 		fallthrough;
1002 	case NIX_AQ_INSTOP_INIT:
1003 		if (req->ctype == NIX_AQ_CTYPE_RQ)
1004 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
1005 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
1006 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
1007 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
1008 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
1009 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
1010 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
1011 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
1012 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
1013 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1014 			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
1015 		break;
1016 	case NIX_AQ_INSTOP_NOP:
1017 	case NIX_AQ_INSTOP_READ:
1018 	case NIX_AQ_INSTOP_LOCK:
1019 	case NIX_AQ_INSTOP_UNLOCK:
1020 		break;
1021 	default:
1022 		rc = NIX_AF_ERR_AQ_ENQUEUE;
1023 		spin_unlock(&aq->lock);
1024 		return rc;
1025 	}
1026 
1027 	/* Submit the instruction to AQ */
1028 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
1029 	if (rc) {
1030 		spin_unlock(&aq->lock);
1031 		return rc;
1032 	}
1033 
1034 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1035 	if (req->op == NIX_AQ_INSTOP_INIT) {
1036 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1037 			__set_bit(req->qidx, pfvf->rq_bmap);
1038 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1039 			__set_bit(req->qidx, pfvf->sq_bmap);
1040 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1041 			__set_bit(req->qidx, pfvf->cq_bmap);
1042 	}
1043 
1044 	if (req->op == NIX_AQ_INSTOP_WRITE) {
1045 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
1046 			ena = (req->rq.ena & req->rq_mask.ena) |
1047 				(test_bit(req->qidx, pfvf->rq_bmap) &
1048 				~req->rq_mask.ena);
1049 			if (ena)
1050 				__set_bit(req->qidx, pfvf->rq_bmap);
1051 			else
1052 				__clear_bit(req->qidx, pfvf->rq_bmap);
1053 		}
1054 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
			ena = (req->sq.ena & req->sq_mask.ena) |
1056 				(test_bit(req->qidx, pfvf->sq_bmap) &
1057 				~req->sq_mask.ena);
1058 			if (ena)
1059 				__set_bit(req->qidx, pfvf->sq_bmap);
1060 			else
1061 				__clear_bit(req->qidx, pfvf->sq_bmap);
1062 		}
1063 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
			ena = (req->cq.ena & req->cq_mask.ena) |
1065 				(test_bit(req->qidx, pfvf->cq_bmap) &
1066 				~req->cq_mask.ena);
1067 			if (ena)
1068 				__set_bit(req->qidx, pfvf->cq_bmap);
1069 			else
1070 				__clear_bit(req->qidx, pfvf->cq_bmap);
1071 		}
1072 	}
1073 
1074 	if (rsp) {
1075 		/* Copy read context into mailbox */
1076 		if (req->op == NIX_AQ_INSTOP_READ) {
1077 			if (req->ctype == NIX_AQ_CTYPE_RQ)
1078 				memcpy(&rsp->rq, ctx,
1079 				       sizeof(struct nix_rq_ctx_s));
1080 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
1081 				memcpy(&rsp->sq, ctx,
1082 				       sizeof(struct nix_sq_ctx_s));
1083 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
1084 				memcpy(&rsp->cq, ctx,
1085 				       sizeof(struct nix_cq_ctx_s));
1086 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
1087 				memcpy(&rsp->rss, ctx,
1088 				       sizeof(struct nix_rsse_s));
1089 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
1090 				memcpy(&rsp->mce, ctx,
1091 				       sizeof(struct nix_rx_mce_s));
1092 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1093 				memcpy(&rsp->prof, ctx,
1094 				       sizeof(struct nix_bandprof_s));
1095 		}
1096 	}
1097 
1098 	spin_unlock(&aq->lock);
1099 	return 0;
1100 }
1101 
1102 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1103 				 struct nix_aq_enq_req *req, u8 ctype)
1104 {
1105 	struct nix_cn10k_aq_enq_req aq_req;
1106 	struct nix_cn10k_aq_enq_rsp aq_rsp;
1107 	int rc, word;
1108 
1109 	if (req->ctype != NIX_AQ_CTYPE_CQ)
1110 		return 0;
1111 
1112 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1113 				 req->hdr.pcifunc, ctype, req->qidx);
1114 	if (rc) {
1115 		dev_err(rvu->dev,
1116 			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1117 			__func__, nix_get_ctx_name(ctype), req->qidx,
1118 			req->hdr.pcifunc);
1119 		return rc;
1120 	}
1121 
1122 	/* Make copy of original context & mask which are required
1123 	 * for resubmission
1124 	 */
1125 	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
1126 	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
1127 
1128 	/* exclude fields which HW can update */
1129 	aq_req.cq_mask.cq_err       = 0;
1130 	aq_req.cq_mask.wrptr        = 0;
1131 	aq_req.cq_mask.tail         = 0;
1132 	aq_req.cq_mask.head	    = 0;
1133 	aq_req.cq_mask.avg_level    = 0;
1134 	aq_req.cq_mask.update_time  = 0;
1135 	aq_req.cq_mask.substream    = 0;
1136 
1137 	/* Context mask (cq_mask) holds mask value of fields which
1138 	 * are changed in AQ WRITE operation.
1139 	 * for example cq.drop = 0xa;
1140 	 *	       cq_mask.drop = 0xff;
1141 	 * Below logic performs '&' between cq and cq_mask so that non
1142 	 * updated fields are masked out for request and response
1143 	 * comparison
1144 	 */
1145 	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
1146 	     word++) {
1147 		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1148 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1149 		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1150 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1151 	}
1152 
1153 	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
1154 		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1155 
1156 	return 0;
1157 }
1158 
1159 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1160 			       struct nix_aq_enq_rsp *rsp)
1161 {
1162 	struct nix_hw *nix_hw;
1163 	int err, retries = 5;
1164 	int blkaddr;
1165 
1166 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1167 	if (blkaddr < 0)
1168 		return NIX_AF_ERR_AF_LF_INVALID;
1169 
1170 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
1171 	if (!nix_hw)
1172 		return NIX_AF_ERR_INVALID_NIXBLK;
1173 
1174 retry:
1175 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1176 
	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'.
	 * As a workaround, perform a CQ context read after each AQ write. If the
	 * read shows the AQ write was not applied, perform the AQ write again.
	 */
1181 	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1182 		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1183 		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1184 			if (retries--)
1185 				goto retry;
1186 			else
1187 				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1188 		}
1189 	}
1190 
1191 	return err;
1192 }
1193 
1194 static const char *nix_get_ctx_name(int ctype)
1195 {
1196 	switch (ctype) {
1197 	case NIX_AQ_CTYPE_CQ:
1198 		return "CQ";
1199 	case NIX_AQ_CTYPE_SQ:
1200 		return "SQ";
1201 	case NIX_AQ_CTYPE_RQ:
1202 		return "RQ";
1203 	case NIX_AQ_CTYPE_RSS:
1204 		return "RSS";
1205 	}
1206 	return "";
1207 }
1208 
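/* Disable all enabled RQ/SQ/CQ contexts of an LF by issuing an AQ WRITE
 * with ena cleared for every queue set in the corresponding bitmap.
 */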
1209 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1210 {
1211 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1212 	struct nix_aq_enq_req aq_req;
1213 	unsigned long *bmap;
1214 	int qidx, q_cnt = 0;
1215 	int err = 0, rc;
1216 
1217 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1218 		return NIX_AF_ERR_AQ_ENQUEUE;
1219 
1220 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1221 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
1222 
1223 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
1224 		aq_req.cq.ena = 0;
1225 		aq_req.cq_mask.ena = 1;
1226 		aq_req.cq.bp_ena = 0;
1227 		aq_req.cq_mask.bp_ena = 1;
1228 		q_cnt = pfvf->cq_ctx->qsize;
1229 		bmap = pfvf->cq_bmap;
1230 	}
1231 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
1232 		aq_req.sq.ena = 0;
1233 		aq_req.sq_mask.ena = 1;
1234 		q_cnt = pfvf->sq_ctx->qsize;
1235 		bmap = pfvf->sq_bmap;
1236 	}
1237 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
1238 		aq_req.rq.ena = 0;
1239 		aq_req.rq_mask.ena = 1;
1240 		q_cnt = pfvf->rq_ctx->qsize;
1241 		bmap = pfvf->rq_bmap;
1242 	}
1243 
1244 	aq_req.ctype = req->ctype;
1245 	aq_req.op = NIX_AQ_INSTOP_WRITE;
1246 
1247 	for (qidx = 0; qidx < q_cnt; qidx++) {
1248 		if (!test_bit(qidx, bmap))
1249 			continue;
1250 		aq_req.qidx = qidx;
1251 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1252 		if (rc) {
1253 			err = rc;
1254 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1255 				nix_get_ctx_name(req->ctype), qidx);
1256 		}
1257 	}
1258 
1259 	return err;
1260 }
1261 
1262 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1263 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1264 {
1265 	struct nix_aq_enq_req lock_ctx_req;
1266 	int err;
1267 
1268 	if (req->op != NIX_AQ_INSTOP_INIT)
1269 		return 0;
1270 
1271 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1272 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1273 		return 0;
1274 
1275 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1276 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1277 	lock_ctx_req.ctype = req->ctype;
1278 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1279 	lock_ctx_req.qidx = req->qidx;
1280 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1281 	if (err)
1282 		dev_err(rvu->dev,
1283 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1284 			req->hdr.pcifunc,
1285 			nix_get_ctx_name(req->ctype), req->qidx);
1286 	return err;
1287 }
1288 
1289 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1290 				struct nix_aq_enq_req *req,
1291 				struct nix_aq_enq_rsp *rsp)
1292 {
1293 	int err;
1294 
1295 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1296 	if (!err)
1297 		err = nix_lf_hwctx_lockdown(rvu, req);
1298 	return err;
1299 }
1300 #else
1301 
1302 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1303 				struct nix_aq_enq_req *req,
1304 				struct nix_aq_enq_rsp *rsp)
1305 {
1306 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1307 }
1308 #endif
1309 /* CN10K mbox handler */
1310 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1311 				      struct nix_cn10k_aq_enq_req *req,
1312 				      struct nix_cn10k_aq_enq_rsp *rsp)
1313 {
1314 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1315 				  (struct nix_aq_enq_rsp *)rsp);
1316 }
1317 
1318 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1319 				       struct hwctx_disable_req *req,
1320 				       struct msg_rsp *rsp)
1321 {
1322 	return nix_lf_hwctx_disable(rvu, req);
1323 }
1324 
1325 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1326 				  struct nix_lf_alloc_req *req,
1327 				  struct nix_lf_alloc_rsp *rsp)
1328 {
1329 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1330 	struct rvu_hwinfo *hw = rvu->hw;
1331 	u16 pcifunc = req->hdr.pcifunc;
1332 	struct rvu_block *block;
1333 	struct rvu_pfvf *pfvf;
1334 	u64 cfg, ctx_cfg;
1335 	int blkaddr;
1336 
1337 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1338 		return NIX_AF_ERR_PARAM;
1339 
1340 	if (req->way_mask)
1341 		req->way_mask &= 0xFFFF;
1342 
1343 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1344 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1345 	if (!pfvf->nixlf || blkaddr < 0)
1346 		return NIX_AF_ERR_AF_LF_INVALID;
1347 
1348 	block = &hw->block[blkaddr];
1349 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1350 	if (nixlf < 0)
1351 		return NIX_AF_ERR_AF_LF_INVALID;
1352 
1353 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1354 	if (req->npa_func) {
1355 		/* If default, use 'this' NIXLF's PFFUNC */
1356 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1357 			req->npa_func = pcifunc;
1358 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1359 			return NIX_AF_INVAL_NPA_PF_FUNC;
1360 	}
1361 
1362 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1363 	if (req->sso_func) {
1364 		/* If default, use 'this' NIXLF's PFFUNC */
1365 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1366 			req->sso_func = pcifunc;
1367 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1368 			return NIX_AF_INVAL_SSO_PF_FUNC;
1369 	}
1370 
	/* If RSS is being enabled, check if the requested config is valid.
	 * RSS table size should be a power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or the
	 * entire table might not be usable.
	 */
1376 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1377 			    !is_power_of_2(req->rss_sz)))
1378 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1379 
1380 	if (req->rss_sz &&
1381 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1382 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1383 
1384 	/* Reset this NIX LF */
1385 	err = rvu_lf_reset(rvu, block, nixlf);
1386 	if (err) {
1387 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1388 			block->addr - BLKADDR_NIX0, nixlf);
1389 		return NIX_AF_ERR_LF_RESET;
1390 	}
1391 
1392 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1393 
1394 	/* Alloc NIX RQ HW context memory and config the base */
1395 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1396 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1397 	if (err)
1398 		goto free_mem;
1399 
1400 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1401 	if (!pfvf->rq_bmap)
1402 		goto free_mem;
1403 
1404 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1405 		    (u64)pfvf->rq_ctx->iova);
1406 
1407 	/* Set caching and queue count in HW */
1408 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1409 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1410 
1411 	/* Alloc NIX SQ HW context memory and config the base */
1412 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1413 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1414 	if (err)
1415 		goto free_mem;
1416 
1417 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1418 	if (!pfvf->sq_bmap)
1419 		goto free_mem;
1420 
1421 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1422 		    (u64)pfvf->sq_ctx->iova);
1423 
1424 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1425 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1426 
1427 	/* Alloc NIX CQ HW context memory and config the base */
1428 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1429 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1430 	if (err)
1431 		goto free_mem;
1432 
1433 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1434 	if (!pfvf->cq_bmap)
1435 		goto free_mem;
1436 
1437 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1438 		    (u64)pfvf->cq_ctx->iova);
1439 
1440 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1441 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1442 
1443 	/* Initialize receive side scaling (RSS) */
1444 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1445 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1446 				 req->rss_grps, hwctx_size, req->way_mask,
1447 				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1448 	if (err)
1449 		goto free_mem;
1450 
1451 	/* Alloc memory for CQINT's HW contexts */
1452 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1453 	qints = (cfg >> 24) & 0xFFF;
1454 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1455 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1456 	if (err)
1457 		goto free_mem;
1458 
1459 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1460 		    (u64)pfvf->cq_ints_ctx->iova);
1461 
1462 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1463 		    BIT_ULL(36) | req->way_mask << 20);
1464 
1465 	/* Alloc memory for QINT's HW contexts */
1466 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1467 	qints = (cfg >> 12) & 0xFFF;
1468 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1469 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1470 	if (err)
1471 		goto free_mem;
1472 
1473 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1474 		    (u64)pfvf->nix_qints_ctx->iova);
1475 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1476 		    BIT_ULL(36) | req->way_mask << 20);
1477 
	/* Setup VLANX TPIDs.
1479 	 * Use VLAN1 for 802.1Q
1480 	 * and VLAN0 for 802.1AD.
1481 	 */
1482 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1483 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1484 
1485 	/* Enable LMTST for this NIX LF */
1486 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1487 
	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
	cfg = req->npa_func;
1491 	if (req->sso_func)
1492 		cfg |= (u64)req->sso_func << 16;
1493 
1494 	cfg |= (u64)req->xqe_sz << 33;
1495 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1496 
	/* Config Rx pkt length, csum checks and apad enable/disable */
1498 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1499 
1500 	/* Configure pkind for TX parse config */
1501 	cfg = NPC_TX_DEF_PKIND;
1502 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1503 
1504 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1505 	if (is_sdp_pfvf(pcifunc))
1506 		intf = NIX_INTF_TYPE_SDP;
1507 
1508 	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1509 				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1510 	if (err)
1511 		goto free_mem;
1512 
1513 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1514 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1515 
1516 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1517 	rvu_write64(rvu, blkaddr,
1518 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1519 		    VTAGSIZE_T4 | VTAG_STRIP);
1520 
1521 	goto exit;
1522 
1523 free_mem:
1524 	nix_ctx_free(rvu, pfvf);
1525 	rc = -ENOMEM;
1526 
1527 exit:
1528 	/* Set macaddr of this PF/VF */
1529 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1530 
1531 	/* set SQB size info */
1532 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1533 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1534 	rsp->rx_chan_base = pfvf->rx_chan_base;
1535 	rsp->tx_chan_base = pfvf->tx_chan_base;
1536 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1537 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1538 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1539 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1540 	/* Get HW supported stat count */
1541 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1542 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1543 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1544 	/* Get count of CQ IRQs and error IRQs supported per LF */
1545 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1546 	rsp->qints = ((cfg >> 12) & 0xFFF);
1547 	rsp->cints = ((cfg >> 24) & 0xFFF);
1548 	rsp->cgx_links = hw->cgx_links;
1549 	rsp->lbk_links = hw->lbk_links;
1550 	rsp->sdp_links = hw->sdp_links;
1551 
1552 	return rc;
1553 }
1554 
1555 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1556 				 struct msg_rsp *rsp)
1557 {
1558 	struct rvu_hwinfo *hw = rvu->hw;
1559 	u16 pcifunc = req->hdr.pcifunc;
1560 	struct rvu_block *block;
1561 	int blkaddr, nixlf, err;
1562 	struct rvu_pfvf *pfvf;
1563 
1564 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1565 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1566 	if (!pfvf->nixlf || blkaddr < 0)
1567 		return NIX_AF_ERR_AF_LF_INVALID;
1568 
1569 	block = &hw->block[blkaddr];
1570 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1571 	if (nixlf < 0)
1572 		return NIX_AF_ERR_AF_LF_INVALID;
1573 
1574 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1575 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1576 	else
1577 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1578 
1579 	/* Free any tx vtag def entries used by this NIX LF */
1580 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1581 		nix_free_tx_vtag_entries(rvu, pcifunc);
1582 
1583 	nix_interface_deinit(rvu, pcifunc, nixlf);
1584 
1585 	/* Reset this NIX LF */
1586 	err = rvu_lf_reset(rvu, block, nixlf);
1587 	if (err) {
1588 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1589 			block->addr - BLKADDR_NIX0, nixlf);
1590 		return NIX_AF_ERR_LF_RESET;
1591 	}
1592 
1593 	nix_ctx_free(rvu, pfvf);
1594 
1595 	return 0;
1596 }
1597 
1598 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1599 					 struct nix_mark_format_cfg  *req,
1600 					 struct nix_mark_format_cfg_rsp *rsp)
1601 {
1602 	u16 pcifunc = req->hdr.pcifunc;
1603 	struct nix_hw *nix_hw;
1604 	struct rvu_pfvf *pfvf;
1605 	int blkaddr, rc;
1606 	u32 cfg;
1607 
1608 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1609 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1610 	if (!pfvf->nixlf || blkaddr < 0)
1611 		return NIX_AF_ERR_AF_LF_INVALID;
1612 
1613 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1614 	if (!nix_hw)
1615 		return NIX_AF_ERR_INVALID_NIXBLK;
1616 
1617 	cfg = (((u32)req->offset & 0x7) << 16) |
1618 	      (((u32)req->y_mask & 0xF) << 12) |
1619 	      (((u32)req->y_val & 0xF) << 8) |
1620 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1621 
1622 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1623 	if (rc < 0) {
1624 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1625 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1626 		return NIX_AF_ERR_MARK_CFG_FAIL;
1627 	}
1628 
1629 	rsp->mark_format_idx = rc;
1630 	return 0;
1631 }
1632 
/* Handle shaper (CIR/PIR) update specially for a few silicon revisions:
 * toggling the rate enable bit requires SW_XOFF to be asserted around
 * the register write.
 */
1634 static bool
1635 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1636 			    int lvl, u64 reg, u64 regval)
1637 {
1638 	u64 regbase, oldval, sw_xoff = 0;
1639 	u64 dbgval, md_debug0 = 0;
1640 	unsigned long poll_tmo;
1641 	bool rate_reg = 0;
1642 	u32 schq;
1643 
1644 	regbase = reg & 0xFFFF;
1645 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1646 
1647 	/* Check for rate register */
1648 	switch (lvl) {
1649 	case NIX_TXSCH_LVL_TL1:
1650 		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1651 		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1652 
1653 		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1654 		break;
1655 	case NIX_TXSCH_LVL_TL2:
1656 		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1657 		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1658 
1659 		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1660 			    regbase == NIX_AF_TL2X_PIR(0));
1661 		break;
1662 	case NIX_TXSCH_LVL_TL3:
1663 		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1664 		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1665 
1666 		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1667 			    regbase == NIX_AF_TL3X_PIR(0));
1668 		break;
1669 	case NIX_TXSCH_LVL_TL4:
1670 		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1671 		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1672 
1673 		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1674 			    regbase == NIX_AF_TL4X_PIR(0));
1675 		break;
1676 	case NIX_TXSCH_LVL_MDQ:
1677 		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1678 		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1679 			    regbase == NIX_AF_MDQX_PIR(0));
1680 		break;
1681 	}
1682 
1683 	if (!rate_reg)
1684 		return false;
1685 
1686 	/* Nothing special to do when state is not toggled */
1687 	oldval = rvu_read64(rvu, blkaddr, reg);
1688 	if ((oldval & 0x1) == (regval & 0x1)) {
1689 		rvu_write64(rvu, blkaddr, reg, regval);
1690 		return true;
1691 	}
1692 
1693 	/* PIR/CIR disable */
1694 	if (!(regval & 0x1)) {
1695 		rvu_write64(rvu, blkaddr, sw_xoff, 1);
1696 		rvu_write64(rvu, blkaddr, reg, 0);
1697 		udelay(4);
1698 		rvu_write64(rvu, blkaddr, sw_xoff, 0);
1699 		return true;
1700 	}
1701 
1702 	/* PIR/CIR enable */
1703 	rvu_write64(rvu, blkaddr, sw_xoff, 1);
1704 	if (md_debug0) {
1705 		poll_tmo = jiffies + usecs_to_jiffies(10000);
1706 		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1707 		do {
1708 			if (time_after(jiffies, poll_tmo)) {
1709 				dev_err(rvu->dev,
1710 					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1711 					nixlf, schq, lvl);
1712 				goto exit;
1713 			}
1714 			usleep_range(1, 5);
1715 			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1716 		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1717 	}
1718 	rvu_write64(rvu, blkaddr, reg, regval);
1719 exit:
1720 	rvu_write64(rvu, blkaddr, sw_xoff, 0);
1721 	return true;
1722 }
1723 
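/* Clear a scheduler queue's PARENT and SCHEDULE CSRs so a freed queue
 * does not retain stale topology or scheduling configuration.
 */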
1724 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1725 				  int lvl, int schq)
1726 {
1727 	u64 tlx_parent = 0, tlx_schedule = 0;
1728 
1729 	switch (lvl) {
1730 	case NIX_TXSCH_LVL_TL2:
1731 		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
1732 		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1733 		break;
1734 	case NIX_TXSCH_LVL_TL3:
1735 		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
1736 		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1737 		break;
1738 	case NIX_TXSCH_LVL_TL4:
1739 		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
1740 		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1741 		break;
1742 	case NIX_TXSCH_LVL_MDQ:
1743 		/* no need to reset SMQ_CFG as HW clears this CSR
1744 		 * on SMQ flush
1745 		 */
1746 		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
1747 		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1748 		break;
1749 	default:
1750 		return;
1751 	}
1752 
1753 	if (tlx_parent)
1754 		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1755 
1756 	if (tlx_schedule)
1757 		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1758 }
1759 
1760 /* Disable shaping of pkts by a scheduler queue
1761  * at a given scheduler level.
1762  */
1763 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1764 				 int nixlf, int lvl, int schq)
1765 {
1766 	struct rvu_hwinfo *hw = rvu->hw;
1767 	u64  cir_reg = 0, pir_reg = 0;
1768 	u64  cfg;
1769 
1770 	switch (lvl) {
1771 	case NIX_TXSCH_LVL_TL1:
1772 		cir_reg = NIX_AF_TL1X_CIR(schq);
1773 		pir_reg = 0; /* PIR not available at TL1 */
1774 		break;
1775 	case NIX_TXSCH_LVL_TL2:
1776 		cir_reg = NIX_AF_TL2X_CIR(schq);
1777 		pir_reg = NIX_AF_TL2X_PIR(schq);
1778 		break;
1779 	case NIX_TXSCH_LVL_TL3:
1780 		cir_reg = NIX_AF_TL3X_CIR(schq);
1781 		pir_reg = NIX_AF_TL3X_PIR(schq);
1782 		break;
1783 	case NIX_TXSCH_LVL_TL4:
1784 		cir_reg = NIX_AF_TL4X_CIR(schq);
1785 		pir_reg = NIX_AF_TL4X_PIR(schq);
1786 		break;
1787 	case NIX_TXSCH_LVL_MDQ:
1788 		cir_reg = NIX_AF_MDQX_CIR(schq);
1789 		pir_reg = NIX_AF_MDQX_PIR(schq);
1790 		break;
1791 	}
1792 
1793 	/* Shaper state toggle needs wait/poll */
1794 	if (hw->cap.nix_shaper_toggle_wait) {
1795 		if (cir_reg)
1796 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1797 						    lvl, cir_reg, 0);
1798 		if (pir_reg)
1799 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1800 						    lvl, pir_reg, 0);
1801 		return;
1802 	}
1803 
1804 	if (!cir_reg)
1805 		return;
1806 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1807 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1808 
1809 	if (!pir_reg)
1810 		return;
1811 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1812 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1813 }
1814 
1815 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1816 				 int lvl, int schq)
1817 {
1818 	struct rvu_hwinfo *hw = rvu->hw;
1819 	int link_level;
1820 	int link;
1821 
1822 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1823 		return;
1824 
1825 	/* Reset TL4's SDP link config */
1826 	if (lvl == NIX_TXSCH_LVL_TL4)
1827 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1828 
1829 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1830 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1831 	if (lvl != link_level)
1832 		return;
1833 
1834 	/* Reset TL2's CGX or LBK link config */
1835 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1836 		rvu_write64(rvu, blkaddr,
1837 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1838 }
1839 
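/* Clear the SW_XOFF CSR of a scheduler queue so transmission is no
 * longer software-blocked at the given level.
 */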
1840 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1841 			      int lvl, int schq)
1842 {
1843 	struct rvu_hwinfo *hw = rvu->hw;
1844 	u64 reg;
1845 
1846 	/* Skip this if shaping is not supported */
1847 	if (!hw->cap.nix_shaping)
1848 		return;
1849 
1850 	/* Clear level specific SW_XOFF */
1851 	switch (lvl) {
1852 	case NIX_TXSCH_LVL_TL1:
1853 		reg = NIX_AF_TL1X_SW_XOFF(schq);
1854 		break;
1855 	case NIX_TXSCH_LVL_TL2:
1856 		reg = NIX_AF_TL2X_SW_XOFF(schq);
1857 		break;
1858 	case NIX_TXSCH_LVL_TL3:
1859 		reg = NIX_AF_TL3X_SW_XOFF(schq);
1860 		break;
1861 	case NIX_TXSCH_LVL_TL4:
1862 		reg = NIX_AF_TL4X_SW_XOFF(schq);
1863 		break;
1864 	case NIX_TXSCH_LVL_MDQ:
1865 		reg = NIX_AF_MDQX_SW_XOFF(schq);
1866 		break;
1867 	default:
1868 		return;
1869 	}
1870 
1871 	rvu_write64(rvu, blkaddr, reg, 0x0);
1872 }
1873 
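/* Return the transmit link index for this PF/VF: CGX LMAC links come
 * first, followed by LBK links and finally the SDP link.
 */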
1874 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1875 {
1876 	struct rvu_hwinfo *hw = rvu->hw;
1877 	int pf = rvu_get_pf(pcifunc);
1878 	u8 cgx_id = 0, lmac_id = 0;
1879 
1880 	if (is_afvf(pcifunc)) {/* LBK links */
1881 		return hw->cgx_links;
1882 	} else if (is_pf_cgxmapped(rvu, pf)) {
1883 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1884 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1885 	}
1886 
1887 	/* SDP link */
1888 	return hw->cgx_links + hw->lbk_links;
1889 }
1890 
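/* Compute the fixed scheduler queue index range [start, end) reserved
 * for this PF/VF's transmit link; used when HW mandates a fixed
 * TXSCHQ-to-link mapping.
 */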
1891 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1892 				 int link, int *start, int *end)
1893 {
1894 	struct rvu_hwinfo *hw = rvu->hw;
1895 	int pf = rvu_get_pf(pcifunc);
1896 
1897 	if (is_afvf(pcifunc)) { /* LBK links */
1898 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1899 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1900 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1901 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1902 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1903 	} else { /* SDP link */
1904 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1905 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1906 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1907 	}
1908 }
1909 
1910 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1911 				      struct nix_hw *nix_hw,
1912 				      struct nix_txsch_alloc_req *req)
1913 {
1914 	struct rvu_hwinfo *hw = rvu->hw;
1915 	int schq, req_schq, free_cnt;
1916 	struct nix_txsch *txsch;
1917 	int link, start, end;
1918 
1919 	txsch = &nix_hw->txsch[lvl];
1920 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1921 
1922 	if (!req_schq)
1923 		return 0;
1924 
1925 	link = nix_get_tx_link(rvu, pcifunc);
1926 
1927 	/* For traffic aggregating scheduler level, one queue is enough */
1928 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1929 		if (req_schq != 1)
1930 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1931 		return 0;
1932 	}
1933 
1934 	/* Get free SCHQ count and check if the request can be accommodated */
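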
1935 	if (hw->cap.nix_fixed_txschq_mapping) {
1936 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1937 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1938 		if (end <= txsch->schq.max && schq < end &&
1939 		    !test_bit(schq, txsch->schq.bmap))
1940 			free_cnt = 1;
1941 		else
1942 			free_cnt = 0;
1943 	} else {
1944 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1945 	}
1946 
1947 	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
1948 	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
1949 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1950 
1951 	/* If contiguous queues are needed, check for availability */
1952 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1953 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1954 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1955 
1956 	return 0;
1957 }
1958 
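/* Allocate the requested contiguous and non-contiguous scheduler queues
 * from the [start, end) range and record them in the mbox response.
 */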
1959 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1960 			    struct nix_txsch_alloc_rsp *rsp,
1961 			    int lvl, int start, int end)
1962 {
1963 	struct rvu_hwinfo *hw = rvu->hw;
1964 	u16 pcifunc = rsp->hdr.pcifunc;
1965 	int idx, schq;
1966 
1967 	/* For traffic aggregating levels, queue allocation is based
1968 	 * on the transmit link to which the PF_FUNC is mapped.
1969 	 */
1970 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1971 		/* A single TL queue is allocated */
1972 		if (rsp->schq_contig[lvl]) {
1973 			rsp->schq_contig[lvl] = 1;
1974 			rsp->schq_contig_list[lvl][0] = start;
1975 		}
1976 
1977 		/* Both contig and non-contig reqs don't make sense here */
1978 		if (rsp->schq_contig[lvl])
1979 			rsp->schq[lvl] = 0;
1980 
1981 		if (rsp->schq[lvl]) {
1982 			rsp->schq[lvl] = 1;
1983 			rsp->schq_list[lvl][0] = start;
1984 		}
1985 		return;
1986 	}
1987 
1988 	/* Adjust the queue request count if HW supports
1989 	 * only one queue per level configuration.
1990 	 */
1991 	if (hw->cap.nix_fixed_txschq_mapping) {
1992 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1993 		schq = start + idx;
1994 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1995 			rsp->schq_contig[lvl] = 0;
1996 			rsp->schq[lvl] = 0;
1997 			return;
1998 		}
1999 
2000 		if (rsp->schq_contig[lvl]) {
2001 			rsp->schq_contig[lvl] = 1;
2002 			set_bit(schq, txsch->schq.bmap);
2003 			rsp->schq_contig_list[lvl][0] = schq;
2004 			rsp->schq[lvl] = 0;
2005 		} else if (rsp->schq[lvl]) {
2006 			rsp->schq[lvl] = 1;
2007 			set_bit(schq, txsch->schq.bmap);
2008 			rsp->schq_list[lvl][0] = schq;
2009 		}
2010 		return;
2011 	}
2012 
2013 	/* Allocate the contiguous queue indices requested first */
2014 	if (rsp->schq_contig[lvl]) {
2015 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
2016 						  txsch->schq.max, start,
2017 						  rsp->schq_contig[lvl], 0);
2018 		if (schq >= end)
2019 			rsp->schq_contig[lvl] = 0;
2020 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2021 			set_bit(schq, txsch->schq.bmap);
2022 			rsp->schq_contig_list[lvl][idx] = schq;
2023 			schq++;
2024 		}
2025 	}
2026 
2027 	/* Allocate non-contiguous queue indices */
2028 	if (rsp->schq[lvl]) {
2029 		idx = 0;
2030 		for (schq = start; schq < end; schq++) {
2031 			if (!test_bit(schq, txsch->schq.bmap)) {
2032 				set_bit(schq, txsch->schq.bmap);
2033 				rsp->schq_list[lvl][idx++] = schq;
2034 			}
2035 			if (idx == rsp->schq[lvl])
2036 				break;
2037 		}
2038 		/* Update how many were allocated */
2039 		rsp->schq[lvl] = idx;
2040 	}
2041 }
2042 
2043 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2044 				     struct nix_txsch_alloc_req *req,
2045 				     struct nix_txsch_alloc_rsp *rsp)
2046 {
2047 	struct rvu_hwinfo *hw = rvu->hw;
2048 	u16 pcifunc = req->hdr.pcifunc;
2049 	int link, blkaddr, rc = 0;
2050 	int lvl, idx, start, end;
2051 	struct nix_txsch *txsch;
2052 	struct nix_hw *nix_hw;
2053 	u32 *pfvf_map;
2054 	int nixlf;
2055 	u16 schq;
2056 
2057 	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2058 	if (rc)
2059 		return rc;
2060 
2061 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2062 	if (!nix_hw)
2063 		return NIX_AF_ERR_INVALID_NIXBLK;
2064 
2065 	mutex_lock(&rvu->rsrc_lock);
2066 
2067 	/* Check if request is valid as per HW capabilities
2068 	 * and can be accommodated.
2069 	 */
2070 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2071 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2072 		if (rc)
2073 			goto err;
2074 	}
2075 
2076 	/* Allocate requested Tx scheduler queues */
2077 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2078 		txsch = &nix_hw->txsch[lvl];
2079 		pfvf_map = txsch->pfvf_map;
2080 
2081 		if (!req->schq[lvl] && !req->schq_contig[lvl])
2082 			continue;
2083 
2084 		rsp->schq[lvl] = req->schq[lvl];
2085 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
2086 
2087 		link = nix_get_tx_link(rvu, pcifunc);
2088 
2089 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2090 			start = link;
2091 			end = link;
2092 		} else if (hw->cap.nix_fixed_txschq_mapping) {
2093 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2094 		} else {
2095 			start = 0;
2096 			end = txsch->schq.max;
2097 		}
2098 
2099 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2100 
2101 		/* Reset queue config */
2102 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2103 			schq = rsp->schq_contig_list[lvl][idx];
2104 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2105 			    NIX_TXSCHQ_CFG_DONE))
2106 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2107 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2108 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2109 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2110 		}
2111 
2112 		for (idx = 0; idx < req->schq[lvl]; idx++) {
2113 			schq = rsp->schq_list[lvl][idx];
2114 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2115 			    NIX_TXSCHQ_CFG_DONE))
2116 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2117 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2118 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2119 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2120 		}
2121 	}
2122 
2123 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2124 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2125 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2126 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2127 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2128 	goto exit;
2129 err:
2130 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2131 exit:
2132 	mutex_unlock(&rvu->rsrc_lock);
2133 	return rc;
2134 }
2135 
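/* Walk up the scheduler tree from the SMQ to TL1, caching each level's
 * CIR/PIR register offsets and values along with the TL1/TL2 queue
 * indices, for use while performing the SMQ flush.
 */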
2136 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2137 				   struct nix_smq_flush_ctx *smq_flush_ctx)
2138 {
2139 	struct nix_smq_tree_ctx *smq_tree_ctx;
2140 	u64 parent_off, regval;
2141 	u16 schq;
2142 	int lvl;
2143 
2144 	smq_flush_ctx->smq = smq;
2145 
2146 	schq = smq;
2147 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2148 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2149 		if (lvl == NIX_TXSCH_LVL_TL1) {
2150 			smq_flush_ctx->tl1_schq = schq;
2151 			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2152 			smq_tree_ctx->pir_off = 0;
2153 			smq_tree_ctx->pir_val = 0;
2154 			parent_off = 0;
2155 		} else if (lvl == NIX_TXSCH_LVL_TL2) {
2156 			smq_flush_ctx->tl2_schq = schq;
2157 			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2158 			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2159 			parent_off = NIX_AF_TL2X_PARENT(schq);
2160 		} else if (lvl == NIX_TXSCH_LVL_TL3) {
2161 			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2162 			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2163 			parent_off = NIX_AF_TL3X_PARENT(schq);
2164 		} else if (lvl == NIX_TXSCH_LVL_TL4) {
2165 			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2166 			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2167 			parent_off = NIX_AF_TL4X_PARENT(schq);
2168 		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
2169 			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2170 			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2171 			parent_off = NIX_AF_MDQX_PARENT(schq);
2172 		}
2173 		/* save cir/pir register values */
2174 		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2175 		if (smq_tree_ctx->pir_off)
2176 			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2177 
2178 		/* get parent txsch node */
2179 		if (parent_off) {
2180 			regval = rvu_read64(rvu, blkaddr, parent_off);
2181 			schq = (regval >> 16) & 0x1FF;
2182 		}
2183 	}
2184 }
2185 
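/* Assert or deassert SW_XOFF on all other in-use TL2 queues belonging
 * to the same PF, so their traffic doesn't interfere with the SMQ flush.
 */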
2186 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2187 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2188 {
2189 	struct nix_txsch *txsch;
2190 	struct nix_hw *nix_hw;
2191 	u64 regoff;
2192 	int tl2;
2193 
2194 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2195 	if (!nix_hw)
2196 		return;
2197 
2198 	/* loop through all TL2s with matching PF_FUNC */
2199 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2200 	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2201 		/* skip the smq(flush) TL2 */
2202 		if (tl2 == smq_flush_ctx->tl2_schq)
2203 			continue;
2204 		/* skip unused TL2s */
2205 		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2206 			continue;
2207 		/* skip if PF_FUNC doesn't match */
2208 		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2209 		    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq]) &
2210 		     ~RVU_PFVF_FUNC_MASK))
2211 			continue;
2212 		/* enable/disable XOFF */
2213 		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2214 		if (enable)
2215 			rvu_write64(rvu, blkaddr, regoff, 0x1);
2216 		else
2217 			rvu_write64(rvu, blkaddr, regoff, 0x0);
2218 	}
2219 }
2220 
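/* Disable (enable == false) or restore (enable == true) CIR/PIR shaping
 * along the SMQ's scheduler tree using the values cached in the flush
 * context.
 */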
2221 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2222 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2223 {
2224 	u64 cir_off, pir_off, cir_val, pir_val;
2225 	struct nix_smq_tree_ctx *smq_tree_ctx;
2226 	int lvl;
2227 
2228 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2229 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2230 		cir_off = smq_tree_ctx->cir_off;
2231 		cir_val = smq_tree_ctx->cir_val;
2232 		pir_off = smq_tree_ctx->pir_off;
2233 		pir_val = smq_tree_ctx->pir_val;
2234 
2235 		if (enable) {
2236 			rvu_write64(rvu, blkaddr, cir_off, cir_val);
2237 			if (lvl != NIX_TXSCH_LVL_TL1)
2238 				rvu_write64(rvu, blkaddr, pir_off, pir_val);
2239 		} else {
2240 			rvu_write64(rvu, blkaddr, cir_off, 0x0);
2241 			if (lvl != NIX_TXSCH_LVL_TL1)
2242 				rvu_write64(rvu, blkaddr, pir_off, 0x0);
2243 		}
2244 	}
2245 }
2246 
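/* Flush a send meta queue (SMQ): XOFF the PF's other TL2 queues and
 * disable shaping on the SMQ's tree, trigger the flush with enqueue
 * XOFF set, then restore the previous state.
 */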
2247 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2248 			 int smq, u16 pcifunc, int nixlf)
2249 {
2250 	struct nix_smq_flush_ctx *smq_flush_ctx;
2251 	int pf = rvu_get_pf(pcifunc);
2252 	u8 cgx_id = 0, lmac_id = 0;
2253 	int err, restore_tx_en = 0;
2254 	u64 cfg;
2255 
2256 	if (!is_rvu_otx2(rvu)) {
2257 		/* Skip SMQ flush if pkt count is zero */
2258 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2259 		if (!cfg)
2260 			return 0;
2261 	}
2262 
2263 	/* enable cgx tx if disabled */
2264 	if (is_pf_cgxmapped(rvu, pf)) {
2265 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2266 		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2267 						   lmac_id, true);
2268 	}
2269 
2270 	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2271 	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2272 	if (!smq_flush_ctx)
2273 		return -ENOMEM;
2274 	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2275 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2276 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2277 
2278 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2279 	/* Do SMQ flush and set enqueue xoff */
2280 	cfg |= BIT_ULL(50) | BIT_ULL(49);
2281 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2282 
2283 	/* Disable backpressure from physical link,
2284 	 * otherwise SMQ flush may stall.
2285 	 */
2286 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
2287 
2288 	/* Wait for flush to complete */
2289 	err = rvu_poll_reg(rvu, blkaddr,
2290 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2291 	if (err)
2292 		dev_info(rvu->dev,
2293 			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2294 			 nixlf, smq);
2295 
2296 	/* clear XOFF on TL2s */
2297 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2298 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2299 	kfree(smq_flush_ctx);
2300 
2301 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
2302 	/* restore cgx tx state */
2303 	if (restore_tx_en)
2304 		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2305 	return err;
2306 }
2307 
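/* Free all transmit scheduler queues owned by a PF_FUNC: disable link
 * config and SW_XOFF, flush the SMQs and return the queues to the free
 * pool.
 */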
2308 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2309 {
2310 	int blkaddr, nixlf, lvl, schq, err;
2311 	struct rvu_hwinfo *hw = rvu->hw;
2312 	struct nix_txsch *txsch;
2313 	struct nix_hw *nix_hw;
2314 	u16 map_func;
2315 
2316 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2317 	if (blkaddr < 0)
2318 		return NIX_AF_ERR_AF_LF_INVALID;
2319 
2320 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2321 	if (!nix_hw)
2322 		return NIX_AF_ERR_INVALID_NIXBLK;
2323 
2324 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2325 	if (nixlf < 0)
2326 		return NIX_AF_ERR_AF_LF_INVALID;
2327 
2328 	/* Disable TL2/3 queue links and all XOFFs before SMQ flush */
2329 	mutex_lock(&rvu->rsrc_lock);
2330 	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2331 		txsch = &nix_hw->txsch[lvl];
2332 
2333 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2334 			continue;
2335 
2336 		for (schq = 0; schq < txsch->schq.max; schq++) {
2337 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2338 				continue;
2339 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2340 			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2341 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2342 		}
2343 	}
2344 	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2345 			  nix_get_tx_link(rvu, pcifunc));
2346 
2347 	/* On PF cleanup, clear cfg done flag as
2348 	 * PF would have changed default config.
2349 	 */
2350 	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2351 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2352 		schq = nix_get_tx_link(rvu, pcifunc);
2353 		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
2354 		 * VF might be using this TL1 queue
2355 		 */
2356 		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2357 		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2358 	}
2359 
2360 	/* Flush SMQs */
2361 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2362 	for (schq = 0; schq < txsch->schq.max; schq++) {
2363 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2364 			continue;
2365 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2366 	}
2367 
2368 	/* Now free scheduler queues to free pool */
2369 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2370 		 /* TLs above the aggregation level are shared across a PF
2371 		  * and its VFs, hence skip freeing them.
2372 		  */
2373 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2374 			continue;
2375 
2376 		txsch = &nix_hw->txsch[lvl];
2377 		for (schq = 0; schq < txsch->schq.max; schq++) {
2378 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2379 				continue;
2380 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2381 			rvu_free_rsrc(&txsch->schq, schq);
2382 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2383 		}
2384 	}
2385 	mutex_unlock(&rvu->rsrc_lock);
2386 
2387 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2388 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2389 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2390 	if (err)
2391 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2392 
2393 	return 0;
2394 }
2395 
2396 static int nix_txschq_free_one(struct rvu *rvu,
2397 			       struct nix_txsch_free_req *req)
2398 {
2399 	struct rvu_hwinfo *hw = rvu->hw;
2400 	u16 pcifunc = req->hdr.pcifunc;
2401 	int lvl, schq, nixlf, blkaddr;
2402 	struct nix_txsch *txsch;
2403 	struct nix_hw *nix_hw;
2404 	u32 *pfvf_map;
2405 	int rc;
2406 
2407 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2408 	if (blkaddr < 0)
2409 		return NIX_AF_ERR_AF_LF_INVALID;
2410 
2411 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2412 	if (!nix_hw)
2413 		return NIX_AF_ERR_INVALID_NIXBLK;
2414 
2415 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2416 	if (nixlf < 0)
2417 		return NIX_AF_ERR_AF_LF_INVALID;
2418 
2419 	lvl = req->schq_lvl;
2420 	schq = req->schq;
2421 	txsch = &nix_hw->txsch[lvl];
2422 
2423 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2424 		return 0;
2425 
2426 	pfvf_map = txsch->pfvf_map;
2427 	mutex_lock(&rvu->rsrc_lock);
2428 
2429 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2430 		rc = NIX_AF_ERR_TLX_INVALID;
2431 		goto err;
2432 	}
2433 
2434 	/* Clear SW_XOFF of this resource only.
2435 	 * For SMQ level, all XOFFs along the path
2436 	 * need to be cleared by the user.
2437 	 */
2438 	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2439 
2440 	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2441 	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2442 
2443 	/* Flush if it is an SMQ. The onus of disabling
2444 	 * TL2/3 queue links before the SMQ flush is on the user.
2445 	 */
2446 	if (lvl == NIX_TXSCH_LVL_SMQ &&
2447 	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2448 		rc = NIX_AF_SMQ_FLUSH_FAILED;
2449 		goto err;
2450 	}
2451 
2452 	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2453 
2454 	/* Free the resource */
2455 	rvu_free_rsrc(&txsch->schq, schq);
2456 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2457 	mutex_unlock(&rvu->rsrc_lock);
2458 	return 0;
2459 err:
2460 	mutex_unlock(&rvu->rsrc_lock);
2461 	return rc;
2462 }
2463 
2464 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2465 				    struct nix_txsch_free_req *req,
2466 				    struct msg_rsp *rsp)
2467 {
2468 	if (req->flags & TXSCHQ_FREE_ALL)
2469 		return nix_txschq_free(rvu, req->hdr.pcifunc);
2470 	else
2471 		return nix_txschq_free_one(rvu, req);
2472 }
2473 
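/* Validate that the scheduler queue being configured and any parent
 * queue encoded in the register value belong to the requesting PF/VF.
 */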
2474 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2475 				      int lvl, u64 reg, u64 regval)
2476 {
2477 	u64 regbase = reg & 0xFFFF;
2478 	u16 schq, parent;
2479 
2480 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2481 		return false;
2482 
2483 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2484 	/* Check if this schq belongs to this PF/VF or not */
2485 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2486 		return false;
2487 
2488 	parent = (regval >> 16) & 0x1FF;
2489 	/* Validate MDQ's TL4 parent */
2490 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
2491 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2492 		return false;
2493 
2494 	/* Validate TL4's TL3 parent */
2495 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
2496 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2497 		return false;
2498 
2499 	/* Validate TL3's TL2 parent */
2500 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
2501 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2502 		return false;
2503 
2504 	/* Validate TL2's TL1 parent */
2505 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
2506 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2507 		return false;
2508 
2509 	return true;
2510 }
2511 
2512 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2513 {
2514 	u64 regbase;
2515 
2516 	if (hw->cap.nix_shaping)
2517 		return true;
2518 
2519 	/* If shaping and coloring are not supported, then the
2520 	 * *_CIR and *_PIR registers should not be configured.
2521 	 */
2522 	regbase = reg & 0xFFFF;
2523 
2524 	switch (lvl) {
2525 	case NIX_TXSCH_LVL_TL1:
2526 		if (regbase == NIX_AF_TL1X_CIR(0))
2527 			return false;
2528 		break;
2529 	case NIX_TXSCH_LVL_TL2:
2530 		if (regbase == NIX_AF_TL2X_CIR(0) ||
2531 		    regbase == NIX_AF_TL2X_PIR(0))
2532 			return false;
2533 		break;
2534 	case NIX_TXSCH_LVL_TL3:
2535 		if (regbase == NIX_AF_TL3X_CIR(0) ||
2536 		    regbase == NIX_AF_TL3X_PIR(0))
2537 			return false;
2538 		break;
2539 	case NIX_TXSCH_LVL_TL4:
2540 		if (regbase == NIX_AF_TL4X_CIR(0) ||
2541 		    regbase == NIX_AF_TL4X_PIR(0))
2542 			return false;
2543 		break;
2544 	case NIX_TXSCH_LVL_MDQ:
2545 		if (regbase == NIX_AF_MDQX_CIR(0) ||
2546 		    regbase == NIX_AF_MDQX_PIR(0))
2547 			return false;
2548 		break;
2549 	}
2550 	return true;
2551 }
2552 
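/* Program default TL1 topology, RR schedule weight and CIR for this
 * PF_FUNC's transmit link, unless the PF has already done the config.
 */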
2553 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2554 				u16 pcifunc, int blkaddr)
2555 {
2556 	u32 *pfvf_map;
2557 	int schq;
2558 
2559 	schq = nix_get_tx_link(rvu, pcifunc);
2560 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2561 	/* Skip if PF has already done the config */
2562 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2563 		return;
2564 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2565 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
2566 
2567 	/* On OcteonTx2 the config was in bytes; on newer silicons
2568 	 * it's changed to a weight.
2569 	 */
2570 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
2571 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2572 			    TXSCH_TL1_DFLT_RR_QTM);
2573 	else
2574 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2575 			    CN10K_MAX_DWRR_WEIGHT);
2576 
2577 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2578 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2579 }
2580 
2581 /* Register offset - [15:0]
2582  * Scheduler Queue number - [25:16]
2583  */
2584 #define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
2585 
2586 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2587 			       int blkaddr, struct nix_txschq_config *req,
2588 			       struct nix_txschq_config *rsp)
2589 {
2590 	u16 pcifunc = req->hdr.pcifunc;
2591 	int idx, schq;
2592 	u64 reg;
2593 
2594 	for (idx = 0; idx < req->num_regs; idx++) {
2595 		reg = req->reg[idx];
2596 		reg &= NIX_TX_SCHQ_MASK;
2597 		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2598 		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2599 		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2600 			return NIX_AF_INVAL_TXSCHQ_CFG;
2601 		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2602 	}
2603 	rsp->lvl = req->lvl;
2604 	rsp->num_regs = req->num_regs;
2605 	return 0;
2606 }
2607 
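/* Enable or disable transmission to the LBK links (channel 63) for all
 * TL2 queues owned by a CGX mapped PF_FUNC.
 */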
2608 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2609 			struct nix_txsch *txsch, bool enable)
2610 {
2611 	struct rvu_hwinfo *hw = rvu->hw;
2612 	int lbk_link_start, lbk_links;
2613 	u8 pf = rvu_get_pf(pcifunc);
2614 	int schq;
2615 	u64 cfg;
2616 
2617 	if (!is_pf_cgxmapped(rvu, pf))
2618 		return;
2619 
2620 	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2621 	lbk_link_start = hw->cgx_links;
2622 
2623 	for (schq = 0; schq < txsch->schq.max; schq++) {
2624 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2625 			continue;
2626 		/* Enable all LBK links with channel 63 by default so that
2627 		 * packets can be sent to LBK with an NPC TX MCAM rule.
2628 		 */
2629 		lbk_links = hw->lbk_links;
2630 		while (lbk_links--)
2631 			rvu_write64(rvu, blkaddr,
2632 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2633 							      lbk_link_start +
2634 							      lbk_links), cfg);
2635 	}
2636 }
2637 
2638 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2639 				    struct nix_txschq_config *req,
2640 				    struct nix_txschq_config *rsp)
2641 {
2642 	u64 reg, val, regval, schq_regbase, val_mask;
2643 	struct rvu_hwinfo *hw = rvu->hw;
2644 	u16 pcifunc = req->hdr.pcifunc;
2645 	struct nix_txsch *txsch;
2646 	struct nix_hw *nix_hw;
2647 	int blkaddr, idx, err;
2648 	int nixlf, schq;
2649 	u32 *pfvf_map;
2650 
2651 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2652 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
2653 		return NIX_AF_INVAL_TXSCHQ_CFG;
2654 
2655 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2656 	if (err)
2657 		return err;
2658 
2659 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2660 	if (!nix_hw)
2661 		return NIX_AF_ERR_INVALID_NIXBLK;
2662 
2663 	if (req->read)
2664 		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2665 
2666 	txsch = &nix_hw->txsch[req->lvl];
2667 	pfvf_map = txsch->pfvf_map;
2668 
2669 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2670 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2671 		mutex_lock(&rvu->rsrc_lock);
2672 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2673 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2674 		mutex_unlock(&rvu->rsrc_lock);
2675 		return 0;
2676 	}
2677 
2678 	for (idx = 0; idx < req->num_regs; idx++) {
2679 		reg = req->reg[idx];
2680 		reg &= NIX_TX_SCHQ_MASK;
2681 		regval = req->regval[idx];
2682 		schq_regbase = reg & 0xFFFF;
2683 		val_mask = req->regval_mask[idx];
2684 
2685 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2686 					       txsch->lvl, reg, regval))
2687 			return NIX_AF_INVAL_TXSCHQ_CFG;
2688 
2689 		/* Check if shaping and coloring are supported */
2690 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2691 			continue;
2692 
2693 		val = rvu_read64(rvu, blkaddr, reg);
2694 		regval = (val & val_mask) | (regval & ~val_mask);
2695 
2696 		/* Handle shaping state toggle specially */
2697 		if (hw->cap.nix_shaper_toggle_wait &&
2698 		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2699 						req->lvl, reg, regval))
2700 			continue;
2701 
2702 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2703 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2704 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2705 					   pcifunc, 0);
2706 			regval &= ~(0x7FULL << 24);
2707 			regval |= ((u64)nixlf << 24);
2708 		}
2709 
2710 		/* Clear 'BP_ENA' config, if it's not allowed */
2711 		if (!hw->cap.nix_tx_link_bp) {
2712 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2713 			    (schq_regbase & 0xFF00) ==
2714 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2715 				regval &= ~BIT_ULL(13);
2716 		}
2717 
2718 		/* Mark config as done for TL1 by PF */
2719 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2720 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2721 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2722 			mutex_lock(&rvu->rsrc_lock);
2723 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2724 							NIX_TXSCHQ_CFG_DONE);
2725 			mutex_unlock(&rvu->rsrc_lock);
2726 		}
2727 
2728 		/* SMQ flush is special, hence split the register write:
2729 		 * do the flush first and write the rest of the bits later.
2730 		 */
2731 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2732 		    (regval & BIT_ULL(49))) {
2733 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2734 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2735 			regval &= ~BIT_ULL(49);
2736 		}
2737 		rvu_write64(rvu, blkaddr, reg, regval);
2738 	}
2739 
2740 	return 0;
2741 }
2742 
2743 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2744 			   struct nix_vtag_config *req)
2745 {
2746 	u64 regval = req->vtag_size;
2747 
2748 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2749 	    req->vtag_size > VTAGSIZE_T8)
2750 		return -EINVAL;
2751 
2752 	/* RX VTAG Type 7 reserved for vf vlan */
2753 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2754 		return NIX_AF_ERR_RX_VTAG_INUSE;
2755 
2756 	if (req->rx.capture_vtag)
2757 		regval |= BIT_ULL(5);
2758 	if (req->rx.strip_vtag)
2759 		regval |= BIT_ULL(4);
2760 
2761 	rvu_write64(rvu, blkaddr,
2762 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2763 	return 0;
2764 }
2765 
2766 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2767 			    u16 pcifunc, int index)
2768 {
2769 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2770 	struct nix_txvlan *vlan;
2771 
2772 	if (!nix_hw)
2773 		return NIX_AF_ERR_INVALID_NIXBLK;
2774 
2775 	vlan = &nix_hw->txvlan;
2776 	if (vlan->entry2pfvf_map[index] != pcifunc)
2777 		return NIX_AF_ERR_PARAM;
2778 
2779 	rvu_write64(rvu, blkaddr,
2780 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2781 	rvu_write64(rvu, blkaddr,
2782 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2783 
2784 	vlan->entry2pfvf_map[index] = 0;
2785 	rvu_free_rsrc(&vlan->rsrc, index);
2786 
2787 	return 0;
2788 }
2789 
2790 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2791 {
2792 	struct nix_txvlan *vlan;
2793 	struct nix_hw *nix_hw;
2794 	int index, blkaddr;
2795 
2796 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2797 	if (blkaddr < 0)
2798 		return;
2799 
2800 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2801 	if (!nix_hw)
2802 		return;
2803 
2804 	vlan = &nix_hw->txvlan;
2805 
2806 	mutex_lock(&vlan->rsrc_lock);
2807 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2808 	for (index = 0; index < vlan->rsrc.max; index++) {
2809 		if (vlan->entry2pfvf_map[index] == pcifunc)
2810 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2811 	}
2812 	mutex_unlock(&vlan->rsrc_lock);
2813 }
2814 
2815 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2816 			     u64 vtag, u8 size)
2817 {
2818 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2819 	struct nix_txvlan *vlan;
2820 	u64 regval;
2821 	int index;
2822 
2823 	if (!nix_hw)
2824 		return NIX_AF_ERR_INVALID_NIXBLK;
2825 
2826 	vlan = &nix_hw->txvlan;
2827 
2828 	mutex_lock(&vlan->rsrc_lock);
2829 
2830 	index = rvu_alloc_rsrc(&vlan->rsrc);
2831 	if (index < 0) {
2832 		mutex_unlock(&vlan->rsrc_lock);
2833 		return index;
2834 	}
2835 
2836 	mutex_unlock(&vlan->rsrc_lock);
2837 
2838 	regval = size ? vtag : vtag << 32;
2839 
2840 	rvu_write64(rvu, blkaddr,
2841 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2842 	rvu_write64(rvu, blkaddr,
2843 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2844 
2845 	return index;
2846 }
2847 
2848 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2849 			     struct nix_vtag_config *req)
2850 {
2851 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2852 	u16 pcifunc = req->hdr.pcifunc;
2853 	int idx0 = req->tx.vtag0_idx;
2854 	int idx1 = req->tx.vtag1_idx;
2855 	struct nix_txvlan *vlan;
2856 	int err = 0;
2857 
2858 	if (!nix_hw)
2859 		return NIX_AF_ERR_INVALID_NIXBLK;
2860 
2861 	vlan = &nix_hw->txvlan;
2862 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2863 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2864 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2865 			return NIX_AF_ERR_PARAM;
2866 
2867 	mutex_lock(&vlan->rsrc_lock);
2868 
2869 	if (req->tx.free_vtag0) {
2870 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2871 		if (err)
2872 			goto exit;
2873 	}
2874 
2875 	if (req->tx.free_vtag1)
2876 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2877 
2878 exit:
2879 	mutex_unlock(&vlan->rsrc_lock);
2880 	return err;
2881 }
2882 
2883 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2884 			   struct nix_vtag_config *req,
2885 			   struct nix_vtag_config_rsp *rsp)
2886 {
2887 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2888 	struct nix_txvlan *vlan;
2889 	u16 pcifunc = req->hdr.pcifunc;
2890 
2891 	if (!nix_hw)
2892 		return NIX_AF_ERR_INVALID_NIXBLK;
2893 
2894 	vlan = &nix_hw->txvlan;
2895 	if (req->tx.cfg_vtag0) {
2896 		rsp->vtag0_idx =
2897 			nix_tx_vtag_alloc(rvu, blkaddr,
2898 					  req->tx.vtag0, req->vtag_size);
2899 
2900 		if (rsp->vtag0_idx < 0)
2901 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2902 
2903 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2904 	}
2905 
2906 	if (req->tx.cfg_vtag1) {
2907 		rsp->vtag1_idx =
2908 			nix_tx_vtag_alloc(rvu, blkaddr,
2909 					  req->tx.vtag1, req->vtag_size);
2910 
2911 		if (rsp->vtag1_idx < 0)
2912 			goto err_free;
2913 
2914 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2915 	}
2916 
2917 	return 0;
2918 
2919 err_free:
2920 	if (req->tx.cfg_vtag0)
2921 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2922 
2923 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2924 }
2925 
2926 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2927 				  struct nix_vtag_config *req,
2928 				  struct nix_vtag_config_rsp *rsp)
2929 {
2930 	u16 pcifunc = req->hdr.pcifunc;
2931 	int blkaddr, nixlf, err;
2932 
2933 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2934 	if (err)
2935 		return err;
2936 
2937 	if (req->cfg_type) {
2938 		/* rx vtag configuration */
2939 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2940 		if (err)
2941 			return NIX_AF_ERR_PARAM;
2942 	} else {
2943 		/* tx vtag configuration */
2944 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2945 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2946 			return NIX_AF_ERR_PARAM;
2947 
2948 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2949 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2950 
2951 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2952 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2953 	}
2954 
2955 	return 0;
2956 }
2957 
2958 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2959 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2960 {
2961 	struct nix_aq_enq_req aq_req;
2962 	int err;
2963 
2964 	aq_req.hdr.pcifunc = 0;
2965 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2966 	aq_req.op = op;
2967 	aq_req.qidx = mce;
2968 
2969 	/* Use RSS with RSS index 0 */
2970 	aq_req.mce.op = 1;
2971 	aq_req.mce.index = 0;
2972 	aq_req.mce.eol = eol;
2973 	aq_req.mce.pf_func = pcifunc;
2974 	aq_req.mce.next = next;
2975 
2976 	/* All fields valid */
2977 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2978 
2979 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2980 	if (err) {
2981 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2982 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2983 		return err;
2984 	}
2985 	return 0;
2986 }
2987 
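/* Add or remove a PF_FUNC in the software MCE list. Returns 0 if the
 * entry already exists (add) or is not found (delete).
 */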
2988 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2989 				     u16 pcifunc, bool add)
2990 {
2991 	struct mce *mce, *tail = NULL;
2992 	bool delete = false;
2993 
2994 	/* Scan through the current list */
2995 	hlist_for_each_entry(mce, &mce_list->head, node) {
2996 		/* If already exists, then delete */
2997 		if (mce->pcifunc == pcifunc && !add) {
2998 			delete = true;
2999 			break;
3000 		} else if (mce->pcifunc == pcifunc && add) {
3001 			/* entry already exists */
3002 			return 0;
3003 		}
3004 		tail = mce;
3005 	}
3006 
3007 	if (delete) {
3008 		hlist_del(&mce->node);
3009 		kfree(mce);
3010 		mce_list->count--;
3011 		return 0;
3012 	}
3013 
3014 	if (!add)
3015 		return 0;
3016 
3017 	/* Add a new one to the list, at the tail */
3018 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3019 	if (!mce)
3020 		return -ENOMEM;
3021 	mce->pcifunc = pcifunc;
3022 	if (!tail)
3023 		hlist_add_head(&mce->node, &mce_list->head);
3024 	else
3025 		hlist_add_behind(&mce->node, &tail->node);
3026 	mce_list->count++;
3027 	return 0;
3028 }
3029 
3030 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3031 			struct nix_mce_list *mce_list,
3032 			int mce_idx, int mcam_index, bool add)
3033 {
3034 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3035 	struct npc_mcam *mcam = &rvu->hw->mcam;
3036 	struct nix_mcast *mcast;
3037 	struct nix_hw *nix_hw;
3038 	struct mce *mce;
3039 
3040 	if (!mce_list)
3041 		return -EINVAL;
3042 
3043 	/* Get this PF/VF func's MCE index */
3044 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3045 
3046 	if (idx > (mce_idx + mce_list->max)) {
3047 		dev_err(rvu->dev,
3048 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3049 			__func__, idx, mce_list->max,
3050 			pcifunc >> RVU_PFVF_PF_SHIFT);
3051 		return -EINVAL;
3052 	}
3053 
3054 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3055 	if (err)
3056 		return err;
3057 
3058 	mcast = &nix_hw->mcast;
3059 	mutex_lock(&mcast->mce_lock);
3060 
3061 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3062 	if (err)
3063 		goto end;
3064 
3065 	/* Disable MCAM entry in NPC */
3066 	if (!mce_list->count) {
3067 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3068 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3069 		goto end;
3070 	}
3071 
3072 	/* Dump the updated list to HW */
3073 	idx = mce_idx;
3074 	last_idx = idx + mce_list->count - 1;
3075 	hlist_for_each_entry(mce, &mce_list->head, node) {
3076 		if (idx > last_idx)
3077 			break;
3078 
3079 		next_idx = idx + 1;
3080 		/* EOL should be set in last MCE */
3081 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3082 					mce->pcifunc, next_idx,
3083 					(next_idx > last_idx) ? true : false);
3084 		if (err)
3085 			goto end;
3086 		idx++;
3087 	}
3088 
3089 end:
3090 	mutex_unlock(&mcast->mce_lock);
3091 	return err;
3092 }
3093 
3094 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3095 		      struct nix_mce_list **mce_list, int *mce_idx)
3096 {
3097 	struct rvu_hwinfo *hw = rvu->hw;
3098 	struct rvu_pfvf *pfvf;
3099 
3100 	if (!hw->cap.nix_rx_multicast ||
3101 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3102 		*mce_list = NULL;
3103 		*mce_idx = 0;
3104 		return;
3105 	}
3106 
3107 	/* Get this PF/VF func's MCE index */
3108 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3109 
3110 	if (type == NIXLF_BCAST_ENTRY) {
3111 		*mce_list = &pfvf->bcast_mce_list;
3112 		*mce_idx = pfvf->bcast_mce_idx;
3113 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
3114 		*mce_list = &pfvf->mcast_mce_list;
3115 		*mce_idx = pfvf->mcast_mce_idx;
3116 	} else if (type == NIXLF_PROMISC_ENTRY) {
3117 		*mce_list = &pfvf->promisc_mce_list;
3118 		*mce_idx = pfvf->promisc_mce_idx;
3119 	}  else {
3120 		*mce_list = NULL;
3121 		*mce_idx = 0;
3122 	}
3123 }
3124 
3125 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3126 			       int type, bool add)
3127 {
3128 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3129 	struct npc_mcam *mcam = &rvu->hw->mcam;
3130 	struct rvu_hwinfo *hw = rvu->hw;
3131 	struct nix_mce_list *mce_list;
3132 	int pf;
3133 
3134 	/* skip multicast pkt replication for AF's VFs & SDP links */
3135 	if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
3136 		return 0;
3137 
3138 	if (!hw->cap.nix_rx_multicast)
3139 		return 0;
3140 
3141 	pf = rvu_get_pf(pcifunc);
3142 	if (!is_pf_cgxmapped(rvu, pf))
3143 		return 0;
3144 
3145 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3146 	if (blkaddr < 0)
3147 		return -EINVAL;
3148 
3149 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3150 	if (nixlf < 0)
3151 		return -EINVAL;
3152 
3153 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3154 
3155 	mcam_index = npc_get_nixlf_mcam_index(mcam,
3156 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
3157 					      nixlf, type);
3158 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
3159 				  mce_idx, mcam_index, add);
3160 	return err;
3161 }
3162 
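/* Pre-allocate broadcast, multicast and promisc MCE lists for every
 * CGX mapped PF and its VFs and populate them with dummy entries.
 */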
3163 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3164 {
3165 	struct nix_mcast *mcast = &nix_hw->mcast;
3166 	int err, pf, numvfs, idx;
3167 	struct rvu_pfvf *pfvf;
3168 	u16 pcifunc;
3169 	u64 cfg;
3170 
3171 	/* Skip PF0 (i.e AF) */
3172 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3173 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3174 		/* If PF is not enabled, nothing to do */
3175 		if (!((cfg >> 20) & 0x01))
3176 			continue;
3177 		/* Get numVFs attached to this PF */
3178 		numvfs = (cfg >> 12) & 0xFF;
3179 
3180 		pfvf = &rvu->pf[pf];
3181 
3182 		/* This NIX0/1 block mapped to PF ? */
3183 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3184 			continue;
3185 
3186 		/* save start idx of broadcast mce list */
3187 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3188 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3189 
3190 		/* save start idx of multicast mce list */
3191 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3192 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3193 
3194 		/* save the start idx of promisc mce list */
3195 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3196 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3197 
3198 		for (idx = 0; idx < (numvfs + 1); idx++) {
3199 			/* idx-0 is for PF, followed by VFs */
3200 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3201 			pcifunc |= idx;
3202 			/* Add dummy entries now, so that we don't have to check
3203 			 * for whether AQ_OP should be INIT/WRITE later on.
3204 			 * Will be updated when a NIXLF is attached/detached to
3205 			 * these PF/VFs.
3206 			 */
3207 			err = nix_blk_setup_mce(rvu, nix_hw,
3208 						pfvf->bcast_mce_idx + idx,
3209 						NIX_AQ_INSTOP_INIT,
3210 						pcifunc, 0, true);
3211 			if (err)
3212 				return err;
3213 
3214 			/* add dummy entries to multicast mce list */
3215 			err = nix_blk_setup_mce(rvu, nix_hw,
3216 						pfvf->mcast_mce_idx + idx,
3217 						NIX_AQ_INSTOP_INIT,
3218 						pcifunc, 0, true);
3219 			if (err)
3220 				return err;
3221 
3222 			/* add dummy entries to promisc mce list */
3223 			err = nix_blk_setup_mce(rvu, nix_hw,
3224 						pfvf->promisc_mce_idx + idx,
3225 						NIX_AQ_INSTOP_INIT,
3226 						pcifunc, 0, true);
3227 			if (err)
3228 				return err;
3229 		}
3230 	}
3231 	return 0;
3232 }
3233 
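/* Allocate memory for multicast/mirror replication entry and buffer
 * contexts, program their base addresses in HW and set up the MCE
 * tables.
 */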
3234 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3235 {
3236 	struct nix_mcast *mcast = &nix_hw->mcast;
3237 	struct rvu_hwinfo *hw = rvu->hw;
3238 	int err, size;
3239 
3240 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3241 	size = (1ULL << size);
3242 
3243 	/* Alloc memory for multicast/mirror replication entries */
3244 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3245 			 (256UL << MC_TBL_SIZE), size);
3246 	if (err)
3247 		return -ENOMEM;
3248 
3249 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3250 		    (u64)mcast->mce_ctx->iova);
3251 
3252 	/* Set max list length equal to max no of VFs per PF  + PF itself */
3253 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3254 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3255 
3256 	/* Alloc memory for multicast replication buffers */
3257 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3258 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3259 			 (8UL << MC_BUF_CNT), size);
3260 	if (err)
3261 		return -ENOMEM;
3262 
3263 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3264 		    (u64)mcast->mcast_buf->iova);
3265 
3266 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
3267 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3268 
3269 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3270 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
3271 		    BIT_ULL(20) | MC_BUF_CNT);
3272 
3273 	mutex_init(&mcast->mce_lock);
3274 
3275 	return nix_setup_mce_tables(rvu, nix_hw);
3276 }
3277 
3278 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3279 {
3280 	struct nix_txvlan *vlan = &nix_hw->txvlan;
3281 	int err;
3282 
3283 	/* Allocate resource bitmap for tx vtag def registers */
3284 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3285 	err = rvu_alloc_bitmap(&vlan->rsrc);
3286 	if (err)
3287 		return -ENOMEM;
3288 
3289 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3290 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3291 					    sizeof(u16), GFP_KERNEL);
3292 	if (!vlan->entry2pfvf_map)
3293 		goto free_mem;
3294 
3295 	mutex_init(&vlan->rsrc_lock);
3296 	return 0;
3297 
3298 free_mem:
3299 	kfree(vlan->rsrc.bmap);
3300 	return -ENOMEM;
3301 }
3302 
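/* Read per-level scheduler queue counts from HW, allocate bitmaps and
 * pcifunc mapping arrays for them, and program a default DWRR MTU.
 */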
3303 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3304 {
3305 	struct nix_txsch *txsch;
3306 	int err, lvl, schq;
3307 	u64 cfg, reg;
3308 
3309 	/* Get scheduler queue count of each type and alloc
3310 	 * bitmap for each for alloc/free/attach operations.
3311 	 */
3312 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3313 		txsch = &nix_hw->txsch[lvl];
3314 		txsch->lvl = lvl;
3315 		switch (lvl) {
3316 		case NIX_TXSCH_LVL_SMQ:
3317 			reg = NIX_AF_MDQ_CONST;
3318 			break;
3319 		case NIX_TXSCH_LVL_TL4:
3320 			reg = NIX_AF_TL4_CONST;
3321 			break;
3322 		case NIX_TXSCH_LVL_TL3:
3323 			reg = NIX_AF_TL3_CONST;
3324 			break;
3325 		case NIX_TXSCH_LVL_TL2:
3326 			reg = NIX_AF_TL2_CONST;
3327 			break;
3328 		case NIX_TXSCH_LVL_TL1:
3329 			reg = NIX_AF_TL1_CONST;
3330 			break;
3331 		}
3332 		cfg = rvu_read64(rvu, blkaddr, reg);
3333 		txsch->schq.max = cfg & 0xFFFF;
3334 		err = rvu_alloc_bitmap(&txsch->schq);
3335 		if (err)
3336 			return err;
3337 
3338 		/* Allocate memory for scheduler queues to
3339 		 * PF/VF pcifunc mapping info.
3340 		 */
3341 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3342 					       sizeof(u32), GFP_KERNEL);
3343 		if (!txsch->pfvf_map)
3344 			return -ENOMEM;
3345 		for (schq = 0; schq < txsch->schq.max; schq++)
3346 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3347 	}
3348 
3349 	/* Setup a default value of 8192 as DWRR MTU */
3350 	if (rvu->hw->cap.nix_common_dwrr_mtu ||
3351 	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
3352 		rvu_write64(rvu, blkaddr,
3353 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3354 			    convert_bytes_to_dwrr_mtu(8192));
3355 		rvu_write64(rvu, blkaddr,
3356 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3357 			    convert_bytes_to_dwrr_mtu(8192));
3358 		rvu_write64(rvu, blkaddr,
3359 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3360 			    convert_bytes_to_dwrr_mtu(8192));
3361 	}
3362 
3363 	return 0;
3364 }
3365 
3366 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3367 				int blkaddr, u32 cfg)
3368 {
3369 	int fmt_idx;
3370 
3371 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3372 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3373 			return fmt_idx;
3374 	}
3375 	if (fmt_idx >= nix_hw->mark_format.total)
3376 		return -ERANGE;
3377 
3378 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3379 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
3380 	nix_hw->mark_format.in_use++;
3381 	return fmt_idx;
3382 }
3383 
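/* Reserve the default IP DSCP, IP ECN and VLAN DEI packet marking
 * format entries.
 */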
3384 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3385 				    int blkaddr)
3386 {
3387 	u64 cfgs[] = {
3388 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
3389 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
3390 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
3391 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
3392 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
3393 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
3394 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
3395 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
3396 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3397 	};
3398 	int i, rc;
3399 	u64 total;
3400 
3401 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3402 	nix_hw->mark_format.total = (u8)total;
3403 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3404 					       GFP_KERNEL);
3405 	if (!nix_hw->mark_format.cfg)
3406 		return -ENOMEM;
3407 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3408 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3409 		if (rc < 0)
3410 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3411 				i, rc);
3412 	}
3413 
3414 	return 0;
3415 }
3416 
3417 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
3418 {
3419 	/* CN10K supports LBK FIFO size 72 KB */
3420 	if (rvu->hw->lbk_bufsize == 0x12000)
3421 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
3422 	else
3423 		*max_mtu = NIC_HW_MAX_FRS;
3424 }
3425 
3426 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3427 {
3428 	int fifo_size = rvu_cgx_get_fifolen(rvu);
3429 
3430 	/* RPM supports FIFO len 128 KB and RPM2 supports double the
3431 	 * FIFO len to accommodate 8 LMACS
3432 	 */
3433 	if (fifo_size == 0x20000 || fifo_size == 0x40000)
3434 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3435 	else
3436 		*max_mtu = NIC_HW_MAX_FRS;
3437 }
3438 
3439 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3440 				     struct nix_hw_info *rsp)
3441 {
3442 	u16 pcifunc = req->hdr.pcifunc;
3443 	u64 dwrr_mtu;
3444 	int blkaddr;
3445 
3446 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3447 	if (blkaddr < 0)
3448 		return NIX_AF_ERR_AF_LF_INVALID;
3449 
3450 	if (is_afvf(pcifunc))
3451 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3452 	else
3453 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3454 
3455 	rsp->min_mtu = NIC_HW_MIN_FRS;
3456 
3457 	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3458 	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3459 		/* Return '1' on OTx2 */
3460 		rsp->rpm_dwrr_mtu = 1;
3461 		rsp->sdp_dwrr_mtu = 1;
3462 		rsp->lbk_dwrr_mtu = 1;
3463 		return 0;
3464 	}
3465 
3466 	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3467 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3468 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3469 	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3470 
3471 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3472 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3473 	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3474 
3475 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3476 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3477 	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3478 
3479 	return 0;
3480 }
3481 
3482 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3483 				   struct msg_rsp *rsp)
3484 {
3485 	u16 pcifunc = req->hdr.pcifunc;
3486 	int i, nixlf, blkaddr, err;
3487 	u64 stats;
3488 
3489 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3490 	if (err)
3491 		return err;
3492 
3493 	/* Get stats count supported by HW */
3494 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3495 
3496 	/* Reset tx stats */
3497 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3498 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3499 
3500 	/* Reset rx stats */
3501 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3502 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3503 
3504 	return 0;
3505 }
3506 
3507 /* Returns the ALG index to be set into NPC_RX_ACTION */
3508 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3509 {
3510 	int i;
3511 
3512 	/* Scan over existing algo entries to find a match */
3513 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
3514 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3515 			return i;
3516 
3517 	return -ERANGE;
3518 }
3519 
3520 /* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
3521 #define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
3522 /* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
3523 #define NPC_LT_LC_IP_MATCH_MSK  ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
3524 
3525 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3526 {
3527 	int idx, nr_field, key_off, field_marker, keyoff_marker;
3528 	int max_key_off, max_bit_pos, group_member;
3529 	struct nix_rx_flowkey_alg *field;
3530 	struct nix_rx_flowkey_alg tmp;
3531 	u32 key_type, valid_key;
3532 	u32 l3_l4_src_dst;
3533 	int l4_key_offset = 0;
3534 
3535 	if (!alg)
3536 		return -EINVAL;
3537 
3538 #define FIELDS_PER_ALG  5
3539 #define MAX_KEY_OFF	40
3540 	/* Clear all fields */
3541 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3542 
3543 	/* Each of the 32 possible flow key algorithm definitions should
3544 	 * fall into above incremental config (except ALG0). Otherwise a
3545 	 * single NPC MCAM entry is not sufficient for supporting RSS.
3546 	 *
3547 	 * If a different definition or combination is needed then the NPC
3548 	 * MCAM has to be programmed to filter such pkts and its action should
3549 	 * point to this definition to calculate the flowtag or hash.
3550 	 *
3551 	 * The `for loop` goes over _all_ protocol fields and the following
3552 	 * variables depict the state machine's forward progress logic.
3553 	 *
3554 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
3555 	 * in field->key_offset update.
3556 	 * field_marker - Enabled when a new field needs to be selected.
3557 	 * group_member - Enabled when protocol is part of a group.
3558 	 */
3559 
3560 	/* Last 4 bits (31:28) are reserved to specify SRC, DST
3561 	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
3562 	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
3563 	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
3564 	 */
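	/* For example, flow_cfg = NIX_FLOW_KEY_TYPE_IPV4 |
	 * NIX_FLOW_KEY_TYPE_L3_SRC_ONLY results in hashing only the 4 byte
	 * SIP instead of SIP + DIP (see the IPV4 case below).
	 */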
3565 	l3_l4_src_dst = flow_cfg;
	/* Reset these 4 bits so that they won't be part of the key */
3567 	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
3568 
3569 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
3570 	nr_field = 0; key_off = 0; field_marker = 1;
3571 	field = &tmp; max_bit_pos = fls(flow_cfg);
3572 	for (idx = 0;
3573 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3574 	     key_off < MAX_KEY_OFF; idx++) {
3575 		key_type = BIT(idx);
3576 		valid_key = flow_cfg & key_type;
3577 		/* Found a field marker, reset the field values */
3578 		if (field_marker)
3579 			memset(&tmp, 0, sizeof(tmp));
3580 
3581 		field_marker = true;
3582 		keyoff_marker = true;
3583 		switch (key_type) {
3584 		case NIX_FLOW_KEY_TYPE_PORT:
3585 			field->sel_chan = true;
			/* This should be set to 1 when SEL_CHAN is set */
3587 			field->bytesm1 = 1;
3588 			break;
3589 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3590 			field->lid = NPC_LID_LC;
3591 			field->hdr_offset = 9; /* offset */
3592 			field->bytesm1 = 0; /* 1 byte */
3593 			field->ltype_match = NPC_LT_LC_IP;
3594 			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
3595 			break;
3596 		case NIX_FLOW_KEY_TYPE_IPV4:
3597 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3598 			field->lid = NPC_LID_LC;
3599 			field->ltype_match = NPC_LT_LC_IP;
3600 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3601 				field->lid = NPC_LID_LG;
3602 				field->ltype_match = NPC_LT_LG_TU_IP;
3603 			}
3604 			field->hdr_offset = 12; /* SIP offset */
3605 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3606 
3607 			/* Only SIP */
3608 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3609 				field->bytesm1 = 3; /* SIP, 4 bytes */
3610 
3611 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3612 				/* Both SIP + DIP */
3613 				if (field->bytesm1 == 3) {
3614 					field->bytesm1 = 7; /* SIP + DIP, 8B */
3615 				} else {
3616 					/* Only DIP */
3617 					field->hdr_offset = 16; /* DIP off */
3618 					field->bytesm1 = 3; /* DIP, 4 bytes */
3619 				}
3620 			}
3621 			field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
3622 			keyoff_marker = false;
3623 			break;
3624 		case NIX_FLOW_KEY_TYPE_IPV6:
3625 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3626 			field->lid = NPC_LID_LC;
3627 			field->ltype_match = NPC_LT_LC_IP6;
3628 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3629 				field->lid = NPC_LID_LG;
3630 				field->ltype_match = NPC_LT_LG_TU_IP6;
3631 			}
3632 			field->hdr_offset = 8; /* SIP offset */
3633 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3634 
3635 			/* Only SIP */
3636 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3637 				field->bytesm1 = 15; /* SIP, 16 bytes */
3638 
3639 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3640 				/* Both SIP + DIP */
3641 				if (field->bytesm1 == 15) {
3642 					/* SIP + DIP, 32 bytes */
3643 					field->bytesm1 = 31;
3644 				} else {
3645 					/* Only DIP */
3646 					field->hdr_offset = 24; /* DIP off */
3647 					field->bytesm1 = 15; /* DIP,16 bytes */
3648 				}
3649 			}
3650 			field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
3651 			break;
3652 		case NIX_FLOW_KEY_TYPE_TCP:
3653 		case NIX_FLOW_KEY_TYPE_UDP:
3654 		case NIX_FLOW_KEY_TYPE_SCTP:
3655 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
3656 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
3657 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3658 			field->lid = NPC_LID_LD;
3659 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3660 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3661 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3662 				field->lid = NPC_LID_LH;
3663 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3664 
3665 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
3666 				field->bytesm1 = 1; /* SRC, 2 bytes */
3667 
3668 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
3669 				/* Both SRC + DST */
3670 				if (field->bytesm1 == 1) {
3671 					/* SRC + DST, 4 bytes */
3672 					field->bytesm1 = 3;
3673 				} else {
					/* Only Dport */
3675 					field->hdr_offset = 2; /* DST off */
3676 					field->bytesm1 = 1; /* DST, 2 bytes */
3677 				}
3678 			}
3679 
			/* The ltype enum values for NPC_LID_LD and NPC_LID_LH
			 * are the same, so no need to change ltype_match,
			 * just change the lid for inner protocols.
			 */
3684 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3685 				     (int)NPC_LT_LH_TU_TCP);
3686 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3687 				     (int)NPC_LT_LH_TU_UDP);
3688 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3689 				     (int)NPC_LT_LH_TU_SCTP);
3690 
3691 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3692 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3693 			    valid_key) {
3694 				field->ltype_match |= NPC_LT_LD_TCP;
3695 				group_member = true;
3696 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3697 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3698 				   valid_key) {
3699 				field->ltype_match |= NPC_LT_LD_UDP;
3700 				group_member = true;
3701 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3702 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3703 				   valid_key) {
3704 				field->ltype_match |= NPC_LT_LD_SCTP;
3705 				group_member = true;
3706 			}
3707 			field->ltype_mask = ~field->ltype_match;
3708 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3709 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any item in the group
				 * is enabled but not the final one.
				 */
3713 				if (group_member) {
3714 					valid_key = true;
3715 					group_member = false;
3716 				}
3717 			} else {
3718 				field_marker = false;
3719 				keyoff_marker = false;
3720 			}
3721 
			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
			 * remember the TCP key offset within the 40 byte
			 * hash key.
			 */
3725 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3726 				l4_key_offset = key_off;
3727 			break;
3728 		case NIX_FLOW_KEY_TYPE_NVGRE:
3729 			field->lid = NPC_LID_LD;
3730 			field->hdr_offset = 4; /* VSID offset */
3731 			field->bytesm1 = 2;
3732 			field->ltype_match = NPC_LT_LD_NVGRE;
3733 			field->ltype_mask = 0xF;
3734 			break;
3735 		case NIX_FLOW_KEY_TYPE_VXLAN:
3736 		case NIX_FLOW_KEY_TYPE_GENEVE:
3737 			field->lid = NPC_LID_LE;
3738 			field->bytesm1 = 2;
3739 			field->hdr_offset = 4;
3740 			field->ltype_mask = 0xF;
3741 			field_marker = false;
3742 			keyoff_marker = false;
3743 
3744 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3745 				field->ltype_match |= NPC_LT_LE_VXLAN;
3746 				group_member = true;
3747 			}
3748 
3749 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
3750 				field->ltype_match |= NPC_LT_LE_GENEVE;
3751 				group_member = true;
3752 			}
3753 
3754 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
3755 				if (group_member) {
3756 					field->ltype_mask = ~field->ltype_match;
3757 					field_marker = true;
3758 					keyoff_marker = true;
3759 					valid_key = true;
3760 					group_member = false;
3761 				}
3762 			}
3763 			break;
3764 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
3765 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
3766 			field->lid = NPC_LID_LA;
3767 			field->ltype_match = NPC_LT_LA_ETHER;
3768 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
3769 				field->lid = NPC_LID_LF;
3770 				field->ltype_match = NPC_LT_LF_TU_ETHER;
3771 			}
3772 			field->hdr_offset = 0;
3773 			field->bytesm1 = 5; /* DMAC 6 Byte */
3774 			field->ltype_mask = 0xF;
3775 			break;
3776 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
3777 			field->lid = NPC_LID_LC;
3778 			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 byte ext hdr */
3780 			field->ltype_match = NPC_LT_LC_IP6_EXT;
3781 			field->ltype_mask = 0xF;
3782 			break;
3783 		case NIX_FLOW_KEY_TYPE_GTPU:
3784 			field->lid = NPC_LID_LE;
3785 			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TEID */
3787 			field->ltype_match = NPC_LT_LE_GTPU;
3788 			field->ltype_mask = 0xF;
3789 			break;
3790 		case NIX_FLOW_KEY_TYPE_VLAN:
3791 			field->lid = NPC_LID_LB;
3792 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3793 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3794 			field->ltype_match = NPC_LT_LB_CTAG;
3795 			field->ltype_mask = 0xF;
3796 			field->fn_mask = 1; /* Mask out the first nibble */
3797 			break;
3798 		case NIX_FLOW_KEY_TYPE_AH:
3799 		case NIX_FLOW_KEY_TYPE_ESP:
3800 			field->hdr_offset = 0;
3801 			field->bytesm1 = 7; /* SPI + sequence number */
3802 			field->ltype_mask = 0xF;
3803 			field->lid = NPC_LID_LE;
3804 			field->ltype_match = NPC_LT_LE_ESP;
3805 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3806 				field->lid = NPC_LID_LD;
3807 				field->ltype_match = NPC_LT_LD_AH;
3808 				field->hdr_offset = 4;
3809 				keyoff_marker = false;
3810 			}
3811 			break;
3812 		}
3813 		field->ena = 1;
3814 
3815 		/* Found a valid flow key type */
3816 		if (valid_key) {
3817 			/* Use the key offset of TCP/UDP/SCTP fields
3818 			 * for ESP/AH fields.
3819 			 */
3820 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3821 			    key_type == NIX_FLOW_KEY_TYPE_AH)
3822 				key_off = l4_key_offset;
3823 			field->key_offset = key_off;
3824 			memcpy(&alg[nr_field], field, sizeof(*field));
3825 			max_key_off = max(max_key_off, field->bytesm1 + 1);
3826 
3827 			/* Found a field marker, get the next field */
3828 			if (field_marker)
3829 				nr_field++;
3830 		}
3831 
3832 		/* Found a keyoff marker, update the new key_off */
3833 		if (keyoff_marker) {
3834 			key_off += max_key_off;
3835 			max_key_off = 0;
3836 		}
3837 	}
3838 	/* Processed all the flow key types */
3839 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3840 		return 0;
3841 	else
3842 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
3843 }
3844 
3845 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3846 {
3847 	u64 field[FIELDS_PER_ALG];
3848 	struct nix_hw *hw;
3849 	int fid, rc;
3850 
3851 	hw = get_nix_hw(rvu->hw, blkaddr);
3852 	if (!hw)
3853 		return NIX_AF_ERR_INVALID_NIXBLK;
3854 
	/* No room to add a new flow hash algorithm */
3856 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3857 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
3858 
3859 	/* Generate algo fields for the given flow_cfg */
3860 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3861 	if (rc)
3862 		return rc;
3863 
3864 	/* Update ALGX_FIELDX register with generated fields */
3865 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3866 		rvu_write64(rvu, blkaddr,
3867 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3868 							   fid), field[fid]);
3869 
	/* Store the flow_cfg for further lookups */
3871 	rc = hw->flowkey.in_use;
3872 	hw->flowkey.flowkey[rc] = flow_cfg;
3873 	hw->flowkey.in_use++;
3874 
3875 	return rc;
3876 }
3877 
3878 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3879 					 struct nix_rss_flowkey_cfg *req,
3880 					 struct nix_rss_flowkey_cfg_rsp *rsp)
3881 {
3882 	u16 pcifunc = req->hdr.pcifunc;
3883 	int alg_idx, nixlf, blkaddr;
3884 	struct nix_hw *nix_hw;
3885 	int err;
3886 
3887 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3888 	if (err)
3889 		return err;
3890 
3891 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3892 	if (!nix_hw)
3893 		return NIX_AF_ERR_INVALID_NIXBLK;
3894 
3895 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get an algo index from the existing list, reserve a new one */
3897 	if (alg_idx < 0) {
3898 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3899 						  req->flowkey_cfg);
3900 		if (alg_idx < 0)
3901 			return alg_idx;
3902 	}
3903 	rsp->alg_idx = alg_idx;
3904 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3905 				       alg_idx, req->mcam_index);
3906 	return 0;
3907 }
3908 
3909 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3910 {
3911 	u32 flowkey_cfg, minkey_cfg;
3912 	int alg, fid, rc;
3913 
3914 	/* Disable all flow key algx fieldx */
3915 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3916 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3917 			rvu_write64(rvu, blkaddr,
3918 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3919 				    0);
3920 	}
3921 
3922 	/* IPv4/IPv6 SIP/DIPs */
3923 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3924 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3925 	if (rc < 0)
3926 		return rc;
3927 
3928 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3929 	minkey_cfg = flowkey_cfg;
3930 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3931 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3932 	if (rc < 0)
3933 		return rc;
3934 
3935 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3936 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3937 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3938 	if (rc < 0)
3939 		return rc;
3940 
3941 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3942 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3943 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3944 	if (rc < 0)
3945 		return rc;
3946 
3947 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3948 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3949 			NIX_FLOW_KEY_TYPE_UDP;
3950 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3951 	if (rc < 0)
3952 		return rc;
3953 
3954 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3955 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3956 			NIX_FLOW_KEY_TYPE_SCTP;
3957 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3958 	if (rc < 0)
3959 		return rc;
3960 
3961 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3962 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3963 			NIX_FLOW_KEY_TYPE_SCTP;
3964 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3965 	if (rc < 0)
3966 		return rc;
3967 
3968 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3969 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3970 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3971 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3972 	if (rc < 0)
3973 		return rc;
3974 
3975 	return 0;
3976 }
3977 
3978 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3979 				      struct nix_set_mac_addr *req,
3980 				      struct msg_rsp *rsp)
3981 {
3982 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3983 	u16 pcifunc = req->hdr.pcifunc;
3984 	int blkaddr, nixlf, err;
3985 	struct rvu_pfvf *pfvf;
3986 
3987 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3988 	if (err)
3989 		return err;
3990 
3991 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3992 
3993 	/* untrusted VF can't overwrite admin(PF) changes */
3994 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3995 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
		dev_warn(rvu->dev,
			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF\n");
3998 		return -EPERM;
3999 	}
4000 
4001 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
4002 
4003 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
4004 				    pfvf->rx_chan_base, req->mac_addr);
4005 
4006 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
4007 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
4008 
4009 	rvu_switch_update_rules(rvu, pcifunc);
4010 
4011 	return 0;
4012 }
4013 
4014 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
4015 				      struct msg_req *req,
4016 				      struct nix_get_mac_addr_rsp *rsp)
4017 {
4018 	u16 pcifunc = req->hdr.pcifunc;
4019 	struct rvu_pfvf *pfvf;
4020 
4021 	if (!is_nixlf_attached(rvu, pcifunc))
4022 		return NIX_AF_ERR_AF_LF_INVALID;
4023 
4024 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4025 
4026 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4027 
4028 	return 0;
4029 }
4030 
4031 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4032 				     struct msg_rsp *rsp)
4033 {
4034 	bool allmulti, promisc, nix_rx_multicast;
4035 	u16 pcifunc = req->hdr.pcifunc;
4036 	struct rvu_pfvf *pfvf;
4037 	int nixlf, err;
4038 
4039 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4040 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4041 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4042 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4043 
4044 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4045 
4046 	if (is_vf(pcifunc) && !nix_rx_multicast &&
4047 	    (promisc || allmulti)) {
4048 		dev_warn_ratelimited(rvu->dev,
4049 				     "VF promisc/multicast not supported\n");
4050 		return 0;
4051 	}
4052 
4053 	/* untrusted VF can't configure promisc/allmulti */
4054 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4055 	    (promisc || allmulti))
4056 		return 0;
4057 
4058 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4059 	if (err)
4060 		return err;
4061 
4062 	if (nix_rx_multicast) {
4063 		/* add/del this PF_FUNC to/from mcast pkt replication list */
4064 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4065 					  allmulti);
4066 		if (err) {
4067 			dev_err(rvu->dev,
4068 				"Failed to update pcifunc 0x%x to multicast list\n",
4069 				pcifunc);
4070 			return err;
4071 		}
4072 
4073 		/* add/del this PF_FUNC to/from promisc pkt replication list */
4074 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4075 					  promisc);
4076 		if (err) {
4077 			dev_err(rvu->dev,
4078 				"Failed to update pcifunc 0x%x to promisc list\n",
4079 				pcifunc);
4080 			return err;
4081 		}
4082 	}
4083 
4084 	/* install/uninstall allmulti entry */
4085 	if (allmulti) {
4086 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4087 					       pfvf->rx_chan_base);
4088 	} else {
4089 		if (!nix_rx_multicast)
4090 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4091 	}
4092 
4093 	/* install/uninstall promisc entry */
4094 	if (promisc)
4095 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4096 					      pfvf->rx_chan_base,
4097 					      pfvf->rx_chan_cnt);
4098 	else
4099 		if (!nix_rx_multicast)
4100 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4101 
4102 	return 0;
4103 }
4104 
4105 static void nix_find_link_frs(struct rvu *rvu,
4106 			      struct nix_frs_cfg *req, u16 pcifunc)
4107 {
4108 	int pf = rvu_get_pf(pcifunc);
4109 	struct rvu_pfvf *pfvf;
4110 	int maxlen, minlen;
4111 	int numvfs, hwvf;
4112 	int vf;
4113 
4114 	/* Update with requester's min/max lengths */
4115 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4116 	pfvf->maxlen = req->maxlen;
4117 	if (req->update_minlen)
4118 		pfvf->minlen = req->minlen;
4119 
4120 	maxlen = req->maxlen;
4121 	minlen = req->update_minlen ? req->minlen : 0;
4122 
4123 	/* Get this PF's numVFs and starting hwvf */
4124 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4125 
4126 	/* For each VF, compare requested max/minlen */
4127 	for (vf = 0; vf < numvfs; vf++) {
4128 		pfvf =  &rvu->hwvf[hwvf + vf];
4129 		if (pfvf->maxlen > maxlen)
4130 			maxlen = pfvf->maxlen;
4131 		if (req->update_minlen &&
4132 		    pfvf->minlen && pfvf->minlen < minlen)
4133 			minlen = pfvf->minlen;
4134 	}
4135 
4136 	/* Compare requested max/minlen with PF's max/minlen */
4137 	pfvf = &rvu->pf[pf];
4138 	if (pfvf->maxlen > maxlen)
4139 		maxlen = pfvf->maxlen;
4140 	if (req->update_minlen &&
4141 	    pfvf->minlen && pfvf->minlen < minlen)
4142 		minlen = pfvf->minlen;
4143 
	/* Update the request with the max/min across the PF and its VFs */
4145 	req->maxlen = maxlen;
4146 	if (req->update_minlen)
4147 		req->minlen = minlen;
4148 }
4149 
4150 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4151 				    struct msg_rsp *rsp)
4152 {
4153 	struct rvu_hwinfo *hw = rvu->hw;
4154 	u16 pcifunc = req->hdr.pcifunc;
4155 	int pf = rvu_get_pf(pcifunc);
4156 	int blkaddr, link = -1;
4157 	struct nix_hw *nix_hw;
4158 	struct rvu_pfvf *pfvf;
4159 	u8 cgx = 0, lmac = 0;
4160 	u16 max_mtu;
4161 	u64 cfg;
4162 
4163 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4164 	if (blkaddr < 0)
4165 		return NIX_AF_ERR_AF_LF_INVALID;
4166 
4167 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4168 	if (!nix_hw)
4169 		return NIX_AF_ERR_INVALID_NIXBLK;
4170 
4171 	if (is_afvf(pcifunc))
4172 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4173 	else
4174 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4175 
4176 	if (!req->sdp_link && req->maxlen > max_mtu)
4177 		return NIX_AF_ERR_FRS_INVALID;
4178 
4179 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4180 		return NIX_AF_ERR_FRS_INVALID;
4181 
4182 	/* Check if config is for SDP link */
4183 	if (req->sdp_link) {
4184 		if (!hw->sdp_links)
4185 			return NIX_AF_ERR_RX_LINK_INVALID;
4186 		link = hw->cgx_links + hw->lbk_links;
4187 		goto linkcfg;
4188 	}
4189 
4190 	/* Check if the request is from CGX mapped RVU PF */
4191 	if (is_pf_cgxmapped(rvu, pf)) {
4192 		/* Get CGX and LMAC to which this PF is mapped and find link */
4193 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4194 		link = (cgx * hw->lmac_per_cgx) + lmac;
4195 	} else if (pf == 0) {
		/* For VFs of PF0, ingress is the LBK port, so config the LBK link */
4197 		pfvf = rvu_get_pfvf(rvu, pcifunc);
4198 		link = hw->cgx_links + pfvf->lbkid;
4199 	}
4200 
4201 	if (link < 0)
4202 		return NIX_AF_ERR_RX_LINK_INVALID;
4203 
4204 linkcfg:
4205 	nix_find_link_frs(rvu, req, pcifunc);
4206 
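	/* NIX_AF_RX_LINKX_CFG encodes MAXLEN in bits [31:16] and MINLEN in
	 * bits [15:0], as reflected by the shifts and masks below.
	 */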
4207 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4208 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4209 	if (req->update_minlen)
4210 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
4211 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4212 
4213 	return 0;
4214 }
4215 
4216 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4217 				    struct msg_rsp *rsp)
4218 {
4219 	int nixlf, blkaddr, err;
4220 	u64 cfg;
4221 
4222 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4223 	if (err)
4224 		return err;
4225 
4226 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4227 	/* Set the interface configuration */
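	/* The two len_verify bits enable HW packet length verification,
	 * the NIX_RX_DROP_RE bit drops pkts with receive errors and
	 * csum_verify bit 0 enables HW checksum verification.
	 */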
4228 	if (req->len_verify & BIT(0))
4229 		cfg |= BIT_ULL(41);
4230 	else
4231 		cfg &= ~BIT_ULL(41);
4232 
4233 	if (req->len_verify & BIT(1))
4234 		cfg |= BIT_ULL(40);
4235 	else
4236 		cfg &= ~BIT_ULL(40);
4237 
4238 	if (req->len_verify & NIX_RX_DROP_RE)
4239 		cfg |= BIT_ULL(32);
4240 	else
4241 		cfg &= ~BIT_ULL(32);
4242 
4243 	if (req->csum_verify & BIT(0))
4244 		cfg |= BIT_ULL(37);
4245 	else
4246 		cfg &= ~BIT_ULL(37);
4247 
4248 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4249 
4250 	return 0;
4251 }
4252 
4253 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4254 {
4255 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4256 }
4257 
4258 static void nix_link_config(struct rvu *rvu, int blkaddr,
4259 			    struct nix_hw *nix_hw)
4260 {
4261 	struct rvu_hwinfo *hw = rvu->hw;
4262 	int cgx, lmac_cnt, slink, link;
4263 	u16 lbk_max_frs, lmac_max_frs;
4264 	unsigned long lmac_bmap;
4265 	u64 tx_credits, cfg;
4266 	u64 lmac_fifo_len;
4267 	int iter;
4268 
4269 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4270 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4271 
4272 	/* Set default min/max packet lengths allowed on NIX Rx links.
4273 	 *
	 * With the HW reset minlen value of 60 bytes, HW would treat ARP
	 * pkts as undersize and report them to SW as error pkts, hence
	 * set minlen to 40 bytes.
4277 	 */
4278 	for (link = 0; link < hw->cgx_links; link++) {
4279 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4280 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4281 	}
4282 
	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
4284 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4285 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4286 	}
4287 	if (hw->sdp_links) {
4288 		link = hw->cgx_links + hw->lbk_links;
4289 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4290 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4291 	}
4292 
4293 	/* Get MCS external bypass status for CN10K-B */
4294 	if (mcs_get_blkcnt() == 1) {
4295 		/* Adjust for 2 credits when external bypass is disabled */
4296 		nix_hw->cc_mcs_cnt = is_mcs_bypass(0) ? 0 : 2;
4297 	}
4298 
4299 	/* Set credits for Tx links assuming max packet length allowed.
4300 	 * This will be reconfigured based on MTU set for PF/VF.
4301 	 */
4302 	for (cgx = 0; cgx < hw->cgx; cgx++) {
4303 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4304 		/* Skip when cgx is not available or lmac cnt is zero */
4305 		if (lmac_cnt <= 0)
4306 			continue;
4307 		slink = cgx * hw->lmac_per_cgx;
4308 
4309 		/* Get LMAC id's from bitmap */
4310 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4311 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4312 			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4313 			if (!lmac_fifo_len) {
4314 				dev_err(rvu->dev,
4315 					"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4316 					__func__, cgx, iter);
4317 				continue;
4318 			}
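			/* The "/ 16" converts the free FIFO space, less one
			 * max-sized frame, into HW credit units.
			 */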
4319 			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4320 			/* Enable credits and set credit pkt count to max allowed */
4321 			cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4322 			cfg |= FIELD_PREP(NIX_AF_LINKX_MCS_CNT_MASK, nix_hw->cc_mcs_cnt);
4323 
4324 			link = iter + slink;
4325 			nix_hw->tx_credits[link] = tx_credits;
4326 			rvu_write64(rvu, blkaddr,
4327 				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4328 		}
4329 	}
4330 
4331 	/* Set Tx credits for LBK link */
4332 	slink = hw->cgx_links;
4333 	for (link = slink; link < (slink + hw->lbk_links); link++) {
4334 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4335 		nix_hw->tx_credits[link] = tx_credits;
4336 		/* Enable credits and set credit pkt count to max allowed */
4337 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4338 		rvu_write64(rvu, blkaddr,
4339 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4340 	}
4341 }
4342 
4343 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4344 {
4345 	int idx, err;
4346 	u64 status;
4347 
4348 	/* Start X2P bus calibration */
4349 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4350 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4351 	/* Wait for calibration to complete */
4352 	err = rvu_poll_reg(rvu, blkaddr,
4353 			   NIX_AF_STATUS, BIT_ULL(10), false);
4354 	if (err) {
4355 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4356 		return err;
4357 	}
4358 
4359 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4360 	/* Check if CGX devices are ready */
4361 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4362 		/* Skip when cgx port is not available */
4363 		if (!rvu_cgx_pdata(idx, rvu) ||
4364 		    (status & (BIT_ULL(16 + idx))))
4365 			continue;
4366 		dev_err(rvu->dev,
4367 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
4368 		err = -EBUSY;
4369 	}
4370 
4371 	/* Check if LBK is ready */
4372 	if (!(status & BIT_ULL(19))) {
4373 		dev_err(rvu->dev,
4374 			"LBK didn't respond to NIX X2P calibration\n");
4375 		err = -EBUSY;
4376 	}
4377 
4378 	/* Clear 'calibrate_x2p' bit */
4379 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4380 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4381 	if (err || (status & 0x3FFULL))
4382 		dev_err(rvu->dev,
4383 			"NIX X2P calibration failed, status 0x%llx\n", status);
4384 	if (err)
4385 		return err;
4386 	return 0;
4387 }
4388 
4389 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4390 {
4391 	u64 cfg;
4392 	int err;
4393 
4394 	/* Set admin queue endianness */
4395 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4396 #ifdef __BIG_ENDIAN
4397 	cfg |= BIT_ULL(8);
4398 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4399 #else
4400 	cfg &= ~BIT_ULL(8);
4401 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4402 #endif
4403 
4404 	/* Do not bypass NDC cache */
4405 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4406 	cfg &= ~0x3FFEULL;
4407 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4408 	/* Disable caching of SQB aka SQEs */
4409 	cfg |= 0x04ULL;
4410 #endif
4411 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4412 
	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * the operation type. Alloc sufficient result memory for all
	 * operations.
	 */
4417 	err = rvu_aq_alloc(rvu, &block->aq,
4418 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4419 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4420 	if (err)
4421 		return err;
4422 
4423 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4424 	rvu_write64(rvu, block->addr,
4425 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4426 	return 0;
4427 }
4428 
4429 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4430 {
4431 	struct rvu_hwinfo *hw = rvu->hw;
4432 	u64 hw_const;
4433 
4434 	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4435 
	/* On OcteonTx2 the DWRR quantum is directly configured into each of
	 * the transmit scheduler queues, and PF/VF drivers were free to
	 * configure any value up to 2^24.
	 * On CN10K the HW is modified: the quantum configuration at the
	 * scheduler queues is in terms of weight, and SW needs to set up a
	 * base DWRR MTU at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW
	 * computes 'DWRR MTU * weight' to get the quantum.
	 *
	 * Check if HW uses a common MTU for all DWRR quantum configs.
	 * On OcteonTx2 this register field is '0'.
	 */
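	/* Illustration: with a base DWRR MTU of 1500 bytes and a scheduler
	 * queue RR_WEIGHT of 4, HW uses a quantum of 1500 * 4 = 6000 bytes.
	 */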
4447 	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4448 		hw->cap.nix_common_dwrr_mtu = true;
4449 
4450 	if (hw_const & BIT_ULL(61))
4451 		hw->cap.nix_multiple_dwrr_mtu = true;
4452 }
4453 
4454 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4455 {
4456 	const struct npc_lt_def_cfg *ltdefs;
4457 	struct rvu_hwinfo *hw = rvu->hw;
4458 	int blkaddr = nix_hw->blkaddr;
4459 	struct rvu_block *block;
4460 	int err;
4461 	u64 cfg;
4462 
4463 	block = &hw->block[blkaddr];
4464 
4465 	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
		 * its internal state when conditional clocks are turned off.
		 * Hence enable them.
		 */
4470 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4471 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4472 	}
4473 
4474 	/* Set chan/link to backpressure TL3 instead of TL2 */
4475 	rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4476 
4477 	/* Disable SQ manager's sticky mode operation (set TM6 = 0)
4478 	 * This sticky mode is known to cause SQ stalls when multiple
4479 	 * SQs are mapped to same SMQ and transmitting pkts at a time.
4480 	 */
4481 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4482 	cfg &= ~BIT_ULL(15);
4483 	rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4484 
4485 	ltdefs = rvu->kpu.lt_def;
4486 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
4487 	err = nix_calibrate_x2p(rvu, blkaddr);
4488 	if (err)
4489 		return err;
4490 
4491 	/* Setup capabilities of the NIX block */
4492 	rvu_nix_setup_capabilities(rvu, blkaddr);
4493 
4494 	/* Initialize admin queue */
4495 	err = nix_aq_init(rvu, block);
4496 	if (err)
4497 		return err;
4498 
4499 	/* Restore CINT timer delay to HW reset values */
4500 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4501 
4502 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4503 
	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
4505 	cfg |= 1ULL;
4506 	if (!is_rvu_otx2(rvu))
4507 		cfg |= NIX_PTP_1STEP_EN;
4508 
4509 	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4510 
4511 	if (!is_rvu_otx2(rvu))
4512 		rvu_nix_block_cn10k_init(rvu, nix_hw);
4513 
4514 	if (is_block_implemented(hw, blkaddr)) {
4515 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4516 		if (err)
4517 			return err;
4518 
4519 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4520 		if (err)
4521 			return err;
4522 
4523 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4524 		if (err)
4525 			return err;
4526 
4527 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4528 		if (err)
4529 			return err;
4530 
4531 		err = nix_setup_txvlan(rvu, nix_hw);
4532 		if (err)
4533 			return err;
4534 
4535 		/* Configure segmentation offload formats */
4536 		nix_setup_lso(rvu, nix_hw, blkaddr);
4537 
4538 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4539 		 * This helps HW protocol checker to identify headers
4540 		 * and validate length and checksums.
4541 		 */
4542 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4543 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4544 			    ltdefs->rx_ol2.ltype_mask);
4545 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4546 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4547 			    ltdefs->rx_oip4.ltype_mask);
4548 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4549 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4550 			    ltdefs->rx_iip4.ltype_mask);
4551 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4552 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4553 			    ltdefs->rx_oip6.ltype_mask);
4554 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4555 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4556 			    ltdefs->rx_iip6.ltype_mask);
4557 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4558 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4559 			    ltdefs->rx_otcp.ltype_mask);
4560 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4561 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4562 			    ltdefs->rx_itcp.ltype_mask);
4563 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4564 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4565 			    ltdefs->rx_oudp.ltype_mask);
4566 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4567 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4568 			    ltdefs->rx_iudp.ltype_mask);
4569 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4570 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4571 			    ltdefs->rx_osctp.ltype_mask);
4572 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4573 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4574 			    ltdefs->rx_isctp.ltype_mask);
4575 
4576 		if (!is_rvu_otx2(rvu)) {
4577 			/* Enable APAD calculation for other protocols
4578 			 * matching APAD0 and APAD1 lt def registers.
4579 			 */
4580 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4581 				    (ltdefs->rx_apad0.valid << 11) |
4582 				    (ltdefs->rx_apad0.lid << 8) |
4583 				    (ltdefs->rx_apad0.ltype_match << 4) |
4584 				    ltdefs->rx_apad0.ltype_mask);
4585 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4586 				    (ltdefs->rx_apad1.valid << 11) |
4587 				    (ltdefs->rx_apad1.lid << 8) |
4588 				    (ltdefs->rx_apad1.ltype_match << 4) |
4589 				    ltdefs->rx_apad1.ltype_mask);
4590 
			/* The receive ethertype definition registers define
			 * the layer information in NPC_RESULT_S used to
			 * identify the Ethertype location in the L2 header.
			 * Used for Ethertype overwriting in the inline IPsec
			 * flow.
			 */
4596 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4597 				    (ltdefs->rx_et[0].offset << 12) |
4598 				    (ltdefs->rx_et[0].valid << 11) |
4599 				    (ltdefs->rx_et[0].lid << 8) |
4600 				    (ltdefs->rx_et[0].ltype_match << 4) |
4601 				    ltdefs->rx_et[0].ltype_mask);
4602 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4603 				    (ltdefs->rx_et[1].offset << 12) |
4604 				    (ltdefs->rx_et[1].valid << 11) |
4605 				    (ltdefs->rx_et[1].lid << 8) |
4606 				    (ltdefs->rx_et[1].ltype_match << 4) |
4607 				    ltdefs->rx_et[1].ltype_mask);
4608 		}
4609 
4610 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
4611 		if (err)
4612 			return err;
4613 
4614 		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
4615 					     sizeof(u64), GFP_KERNEL);
4616 		if (!nix_hw->tx_credits)
4617 			return -ENOMEM;
4618 
4619 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
4620 		nix_link_config(rvu, blkaddr, nix_hw);
4621 
4622 		/* Enable Channel backpressure */
4623 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4624 	}
4625 	return 0;
4626 }
4627 
4628 int rvu_nix_init(struct rvu *rvu)
4629 {
4630 	struct rvu_hwinfo *hw = rvu->hw;
4631 	struct nix_hw *nix_hw;
4632 	int blkaddr = 0, err;
4633 	int i = 0;
4634 
4635 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4636 			       GFP_KERNEL);
4637 	if (!hw->nix)
4638 		return -ENOMEM;
4639 
4640 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4641 	while (blkaddr) {
4642 		nix_hw = &hw->nix[i];
4643 		nix_hw->rvu = rvu;
4644 		nix_hw->blkaddr = blkaddr;
4645 		err = rvu_nix_block_init(rvu, nix_hw);
4646 		if (err)
4647 			return err;
4648 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4649 		i++;
4650 	}
4651 
4652 	return 0;
4653 }
4654 
4655 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4656 				  struct rvu_block *block)
4657 {
4658 	struct nix_txsch *txsch;
4659 	struct nix_mcast *mcast;
4660 	struct nix_txvlan *vlan;
4661 	struct nix_hw *nix_hw;
4662 	int lvl;
4663 
4664 	rvu_aq_free(rvu, block->aq);
4665 
4666 	if (is_block_implemented(rvu->hw, blkaddr)) {
4667 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
4668 		if (!nix_hw)
4669 			return;
4670 
4671 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4672 			txsch = &nix_hw->txsch[lvl];
4673 			kfree(txsch->schq.bmap);
4674 		}
4675 
4676 		kfree(nix_hw->tx_credits);
4677 
4678 		nix_ipolicer_freemem(rvu, nix_hw);
4679 
4680 		vlan = &nix_hw->txvlan;
4681 		kfree(vlan->rsrc.bmap);
4682 		mutex_destroy(&vlan->rsrc_lock);
4683 
4684 		mcast = &nix_hw->mcast;
4685 		qmem_free(rvu->dev, mcast->mce_ctx);
4686 		qmem_free(rvu->dev, mcast->mcast_buf);
4687 		mutex_destroy(&mcast->mce_lock);
4688 	}
4689 }
4690 
4691 void rvu_nix_freemem(struct rvu *rvu)
4692 {
4693 	struct rvu_hwinfo *hw = rvu->hw;
4694 	struct rvu_block *block;
4695 	int blkaddr = 0;
4696 
4697 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4698 	while (blkaddr) {
4699 		block = &hw->block[blkaddr];
4700 		rvu_nix_block_freemem(rvu, blkaddr, block);
4701 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4702 	}
4703 }
4704 
4705 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
4706 				     struct msg_rsp *rsp)
4707 {
4708 	u16 pcifunc = req->hdr.pcifunc;
4709 	struct rvu_pfvf *pfvf;
4710 	int nixlf, err;
4711 
4712 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4713 	if (err)
4714 		return err;
4715 
4716 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
4717 
4718 	npc_mcam_enable_flows(rvu, pcifunc);
4719 
4720 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4721 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
4722 
4723 	rvu_switch_update_rules(rvu, pcifunc);
4724 
4725 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
4726 }
4727 
4728 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
4729 				    struct msg_rsp *rsp)
4730 {
4731 	u16 pcifunc = req->hdr.pcifunc;
4732 	struct rvu_pfvf *pfvf;
4733 	int nixlf, err;
4734 
4735 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4736 	if (err)
4737 		return err;
4738 
4739 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4740 
4741 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4742 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4743 
4744 	err = rvu_cgx_start_stop_io(rvu, pcifunc, false);
4745 	if (err)
4746 		return err;
4747 
4748 	rvu_cgx_tx_enable(rvu, pcifunc, true);
4749 
4750 	return 0;
4751 }
4752 
4753 #define RX_SA_BASE  GENMASK_ULL(52, 7)
4754 
4755 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
4756 {
4757 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
4758 	struct hwctx_disable_req ctx_req;
4759 	int pf = rvu_get_pf(pcifunc);
4760 	struct mac_ops *mac_ops;
4761 	u8 cgx_id, lmac_id;
4762 	u64 sa_base;
4763 	void *cgxd;
4764 	int err;
4765 
4766 	ctx_req.hdr.pcifunc = pcifunc;
4767 
4768 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
4769 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4770 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
4771 	nix_interface_deinit(rvu, pcifunc, nixlf);
4772 	nix_rx_sync(rvu, blkaddr);
4773 	nix_txschq_free(rvu, pcifunc);
4774 
4775 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4776 
4777 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
4778 
4779 	if (pfvf->sq_ctx) {
4780 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
4781 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
4782 		if (err)
4783 			dev_err(rvu->dev, "SQ ctx disable failed\n");
4784 	}
4785 
4786 	if (pfvf->rq_ctx) {
4787 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
4788 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
4789 		if (err)
4790 			dev_err(rvu->dev, "RQ ctx disable failed\n");
4791 	}
4792 
4793 	if (pfvf->cq_ctx) {
4794 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
4795 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
4796 		if (err)
4797 			dev_err(rvu->dev, "CQ ctx disable failed\n");
4798 	}
4799 
4800 	/* reset HW config done for Switch headers */
4801 	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
4802 			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
4803 
4804 	/* Disabling CGX and NPC config done for PTP */
4805 	if (pfvf->hw_rx_tstamp_en) {
4806 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4807 		cgxd = rvu_cgx_pdata(cgx_id, rvu);
4808 		mac_ops = get_mac_ops(cgxd);
4809 		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
4810 		/* Undo NPC config done for PTP */
4811 		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
4812 			dev_err(rvu->dev, "NPC config for PTP failed\n");
4813 		pfvf->hw_rx_tstamp_en = false;
4814 	}
4815 
4816 	/* reset priority flow control config */
4817 	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
4818 
4819 	/* reset 802.3x flow control config */
4820 	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
4821 
4822 	nix_ctx_free(rvu, pfvf);
4823 
4824 	nix_free_all_bandprof(rvu, pcifunc);
4825 
4826 	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
4827 	if (FIELD_GET(RX_SA_BASE, sa_base)) {
4828 		err = rvu_cpt_ctx_flush(rvu, pcifunc);
4829 		if (err)
4830 			dev_err(rvu->dev,
4831 				"CPT ctx flush failed with error: %d\n", err);
4832 	}
4833 }
4834 
4835 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
4836 
4837 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
4838 {
4839 	struct rvu_hwinfo *hw = rvu->hw;
4840 	struct rvu_block *block;
4841 	int blkaddr, pf;
4842 	int nixlf;
4843 	u64 cfg;
4844 
4845 	pf = rvu_get_pf(pcifunc);
4846 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
4847 		return 0;
4848 
4849 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4850 	if (blkaddr < 0)
4851 		return NIX_AF_ERR_AF_LF_INVALID;
4852 
4853 	block = &hw->block[blkaddr];
4854 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
4855 	if (nixlf < 0)
4856 		return NIX_AF_ERR_AF_LF_INVALID;
4857 
4858 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
4859 
4860 	if (enable)
4861 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
4862 	else
4863 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
4864 
4865 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
4866 
4867 	return 0;
4868 }
4869 
4870 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4871 					  struct msg_rsp *rsp)
4872 {
4873 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4874 }
4875 
4876 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4877 					   struct msg_rsp *rsp)
4878 {
4879 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4880 }
4881 
4882 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4883 					struct nix_lso_format_cfg *req,
4884 					struct nix_lso_format_cfg_rsp *rsp)
4885 {
4886 	u16 pcifunc = req->hdr.pcifunc;
4887 	struct nix_hw *nix_hw;
4888 	struct rvu_pfvf *pfvf;
4889 	int blkaddr, idx, f;
4890 	u64 reg;
4891 
4892 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4893 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4894 	if (!pfvf->nixlf || blkaddr < 0)
4895 		return NIX_AF_ERR_AF_LF_INVALID;
4896 
4897 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4898 	if (!nix_hw)
4899 		return NIX_AF_ERR_INVALID_NIXBLK;
4900 
4901 	/* Find existing matching LSO format, if any */
4902 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4903 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4904 			reg = rvu_read64(rvu, blkaddr,
4905 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4906 			if (req->fields[f] != (reg & req->field_mask))
4907 				break;
4908 		}
4909 
4910 		if (f == NIX_LSO_FIELD_MAX)
4911 			break;
4912 	}
4913 
4914 	if (idx < nix_hw->lso.in_use) {
4915 		/* Match found */
4916 		rsp->lso_format_idx = idx;
4917 		return 0;
4918 	}
4919 
4920 	if (nix_hw->lso.in_use == nix_hw->lso.total)
4921 		return NIX_AF_ERR_LSO_CFG_FAIL;
4922 
4923 	rsp->lso_format_idx = nix_hw->lso.in_use++;
4924 
4925 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4926 		rvu_write64(rvu, blkaddr,
4927 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4928 			    req->fields[f]);
4929 
4930 	return 0;
4931 }
4932 
4933 #define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
4934 #define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
4935 #define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
4936 #define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
4937 
4938 #define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
4939 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
4940 #define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
4941 
4942 #define CPT_INST_CREDIT_TH    GENMASK_ULL(53, 32)
4943 #define CPT_INST_CREDIT_BPID  GENMASK_ULL(30, 22)
4944 #define CPT_INST_CREDIT_CNT   GENMASK_ULL(21, 0)
4945 
4946 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
4947 				 int blkaddr)
4948 {
4949 	u8 cpt_idx, cpt_blkaddr;
4950 	u64 val;
4951 
4952 	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
4953 	if (req->enable) {
4954 		val = 0;
4955 		/* Enable context prefetching */
4956 		if (!is_rvu_otx2(rvu))
4957 			val |= BIT_ULL(51);
4958 
4959 		/* Set OPCODE and EGRP */
4960 		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
4961 		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
4962 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
4963 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
4964 
4965 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
4966 
4967 		/* Set CPT queue for inline IPSec */
4968 		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
4969 		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
4970 				  req->inst_qsel.cpt_pf_func);
4971 
4972 		if (!is_rvu_otx2(rvu)) {
4973 			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
4974 						       BLKADDR_CPT1;
4975 			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
4976 		}
4977 
4978 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
4979 			    val);
4980 
4981 		/* Set CPT credit */
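		/* The credit count field is 22 bits wide; the writes below
		 * first restore the counter to its maximum (0x3FFFFF) and
		 * then program the requested threshold/BPID/credit values.
		 */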
4982 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
4983 		if ((val & 0x3FFFFF) != 0x3FFFFF)
4984 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
4985 				    0x3FFFFF - val);
4986 
4987 		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
4988 		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
4989 		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
4990 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
4991 	} else {
4992 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
4993 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
4994 			    0x0);
4995 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
4996 		if ((val & 0x3FFFFF) != 0x3FFFFF)
4997 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
4998 				    0x3FFFFF - val);
4999 	}
5000 }
5001 
5002 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5003 					  struct nix_inline_ipsec_cfg *req,
5004 					  struct msg_rsp *rsp)
5005 {
5006 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5007 		return 0;
5008 
5009 	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5010 	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5011 		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5012 
5013 	return 0;
5014 }
5015 
5016 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5017 					       struct msg_req *req,
5018 					       struct nix_inline_ipsec_cfg *rsp)
5019 
5020 {
5021 	u64 val;
5022 
5023 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5024 		return 0;
5025 
5026 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5027 	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5028 	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5029 	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5030 	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5031 
5032 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5033 	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5034 	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5035 	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5036 
5037 	return 0;
5038 }
5039 
5040 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5041 					     struct nix_inline_ipsec_lf_cfg *req,
5042 					     struct msg_rsp *rsp)
5043 {
5044 	int lf, blkaddr, err;
5045 	u64 val;
5046 
5047 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5048 		return 0;
5049 
5050 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5051 	if (err)
5052 		return err;
5053 
5054 	if (req->enable) {
5055 		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5056 		val = (u64)req->ipsec_cfg0.tt << 44 |
5057 		      (u64)req->ipsec_cfg0.tag_const << 20 |
5058 		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5059 		      req->ipsec_cfg0.lenm1_max;
5060 
5061 		if (blkaddr == BLKADDR_NIX1)
5062 			val |= BIT_ULL(46);
5063 
5064 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5065 
5066 		/* Set SA_IDX_W and SA_IDX_MAX */
5067 		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5068 		      req->ipsec_cfg1.sa_idx_max;
5069 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5070 
5071 		/* Set SA base address */
5072 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5073 			    req->sa_base_addr);
5074 	} else {
5075 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5076 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5077 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5078 			    0x0);
5079 	}
5080 
5081 	return 0;
5082 }
5083 
5084 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5085 {
5086 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5087 
5088 	/* overwrite vf mac address with default_mac */
5089 	if (from_vf)
5090 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5091 }
5092 
5093 /* NIX ingress policers or bandwidth profiles APIs */
5094 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5095 {
5096 	struct npc_lt_def_cfg defs, *ltdefs;
5097 
5098 	ltdefs = &defs;
5099 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5100 
	/* Extract the PCP and DEI fields from the outer VLAN at byte
	 * offset 2 from the start of LB_PTR (i.e. the TAG).
	 * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
	 * fields are considered when 'Tunnel enable' is set in the profile.
	 */
5106 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5107 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5108 		    (ltdefs->ovlan.ltype_match << 4) |
5109 		    ltdefs->ovlan.ltype_mask);
5110 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5111 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5112 		    (ltdefs->ivlan.ltype_match << 4) |
5113 		    ltdefs->ivlan.ltype_mask);
5114 
5115 	/* DSCP field in outer and tunneled IPv4 packets */
5116 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5117 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5118 		    (ltdefs->rx_oip4.ltype_match << 4) |
5119 		    ltdefs->rx_oip4.ltype_mask);
5120 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5121 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5122 		    (ltdefs->rx_iip4.ltype_match << 4) |
5123 		    ltdefs->rx_iip4.ltype_mask);
5124 
5125 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5126 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5127 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5128 		    (ltdefs->rx_oip6.ltype_match << 4) |
5129 		    ltdefs->rx_oip6.ltype_mask);
5130 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5131 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5132 		    (ltdefs->rx_iip6.ltype_match << 4) |
5133 		    ltdefs->rx_iip6.ltype_mask);
5134 }
5135 
5136 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5137 				    int layer, int prof_idx)
5138 {
5139 	struct nix_cn10k_aq_enq_req aq_req;
5140 	int rc;
5141 
5142 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5143 
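	/* Bandwidth profile AQ index: bits [13:0] select the profile and
	 * bits [15:14] select the layer (leaf/mid/top).
	 */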
5144 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5145 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5146 	aq_req.op = NIX_AQ_INSTOP_INIT;
5147 
5148 	/* Context is all zeros, submit to AQ */
5149 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5150 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5151 	if (rc)
5152 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5153 			layer, prof_idx);
5154 	return rc;
5155 }
5156 
5157 static int nix_setup_ipolicers(struct rvu *rvu,
5158 			       struct nix_hw *nix_hw, int blkaddr)
5159 {
5160 	struct rvu_hwinfo *hw = rvu->hw;
5161 	struct nix_ipolicer *ipolicer;
5162 	int err, layer, prof_idx;
5163 	u64 cfg;
5164 
5165 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5166 	if (!(cfg & BIT_ULL(61))) {
5167 		hw->cap.ipolicer = false;
5168 		return 0;
5169 	}
5170 
5171 	hw->cap.ipolicer = true;
5172 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5173 					sizeof(*ipolicer), GFP_KERNEL);
5174 	if (!nix_hw->ipolicer)
5175 		return -ENOMEM;
5176 
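	/* NIX_AF_PL_CONST reports the number of profiles per layer:
	 * bits [15:0] leaf, [31:16] mid and [47:32] top, as decoded below.
	 */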
5177 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5178 
5179 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5180 		ipolicer = &nix_hw->ipolicer[layer];
5181 		switch (layer) {
5182 		case BAND_PROF_LEAF_LAYER:
5183 			ipolicer->band_prof.max = cfg & 0XFFFF;
5184 			break;
5185 		case BAND_PROF_MID_LAYER:
5186 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5187 			break;
5188 		case BAND_PROF_TOP_LAYER:
5189 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5190 			break;
5191 		}
5192 
5193 		if (!ipolicer->band_prof.max)
5194 			continue;
5195 
5196 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
5197 		if (err)
5198 			return err;
5199 
5200 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5201 						  ipolicer->band_prof.max,
5202 						  sizeof(u16), GFP_KERNEL);
5203 		if (!ipolicer->pfvf_map)
5204 			return -ENOMEM;
5205 
5206 		ipolicer->match_id = devm_kcalloc(rvu->dev,
5207 						  ipolicer->band_prof.max,
5208 						  sizeof(u16), GFP_KERNEL);
5209 		if (!ipolicer->match_id)
5210 			return -ENOMEM;
5211 
5212 		for (prof_idx = 0;
5213 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
5214 			/* Set AF as current owner for INIT ops to succeed */
5215 			ipolicer->pfvf_map[prof_idx] = 0x00;
5216 
			/* There is no enable bit in the profile context,
			 * hence no context disable either. INIT the contexts
			 * here so that PF/VF later only need to do a WRITE
			 * to set up policer rates and config.
			 */
5222 			err = nix_init_policer_context(rvu, nix_hw,
5223 						       layer, prof_idx);
5224 			if (err)
5225 				return err;
5226 		}
5227 
		/* Allocate memory for maintaining ref_counts for MID level
		 * profiles; this is needed for aggregation of leaf layer
		 * profiles.
		 */
5232 		if (layer != BAND_PROF_MID_LAYER)
5233 			continue;
5234 
5235 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
5236 						   ipolicer->band_prof.max,
5237 						   sizeof(u16), GFP_KERNEL);
5238 		if (!ipolicer->ref_count)
5239 			return -ENOMEM;
5240 	}
5241 
	/* Set policer timeunit to 2us i.e. (19 + 1) * 100 nsec = 2us */
5243 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5244 
5245 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5246 
5247 	return 0;
5248 }
5249 
5250 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5251 {
5252 	struct nix_ipolicer *ipolicer;
5253 	int layer;
5254 
5255 	if (!rvu->hw->cap.ipolicer)
5256 		return;
5257 
5258 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5259 		ipolicer = &nix_hw->ipolicer[layer];
5260 
5261 		if (!ipolicer->band_prof.max)
5262 			continue;
5263 
5264 		kfree(ipolicer->band_prof.bmap);
5265 	}
5266 }
5267 
5268 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5269 			       struct nix_hw *nix_hw, u16 pcifunc)
5270 {
5271 	struct nix_ipolicer *ipolicer;
5272 	int layer, hi_layer, prof_idx;
5273 
5274 	/* Bits [15:14] in profile index represent layer */
5275 	layer = (req->qidx >> 14) & 0x03;
5276 	prof_idx = req->qidx & 0x3FFF;
5277 
5278 	ipolicer = &nix_hw->ipolicer[layer];
5279 	if (prof_idx >= ipolicer->band_prof.max)
5280 		return -EINVAL;
5281 
	/* Check whether the profile is allocated to the requesting PCIFUNC.
	 * AF is an exception and is allowed to read and update any context.
	 */
5285 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5286 		return -EINVAL;
5287 
	/* If this profile is linked to a higher layer profile, check
	 * whether that profile is also allocated to the requesting
	 * PCIFUNC.
	 */
5292 	if (!req->prof.hl_en)
5293 		return 0;
5294 
	/* A leaf layer profile can link only to a mid layer profile,
	 * and a mid layer profile only to a top layer profile.
	 */
5298 	if (layer == BAND_PROF_LEAF_LAYER)
5299 		hi_layer = BAND_PROF_MID_LAYER;
5300 	else if (layer == BAND_PROF_MID_LAYER)
5301 		hi_layer = BAND_PROF_TOP_LAYER;
5302 	else
5303 		return -EINVAL;
5304 
5305 	ipolicer = &nix_hw->ipolicer[hi_layer];
5306 	prof_idx = req->prof.band_prof_id;
5307 	if (prof_idx >= ipolicer->band_prof.max ||
5308 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
5309 		return -EINVAL;
5310 
5311 	return 0;
5312 }
5313 
5314 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5315 					struct nix_bandprof_alloc_req *req,
5316 					struct nix_bandprof_alloc_rsp *rsp)
5317 {
5318 	int blkaddr, layer, prof, idx, err;
5319 	u16 pcifunc = req->hdr.pcifunc;
5320 	struct nix_ipolicer *ipolicer;
5321 	struct nix_hw *nix_hw;
5322 
5323 	if (!rvu->hw->cap.ipolicer)
5324 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5325 
5326 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5327 	if (err)
5328 		return err;
5329 
5330 	mutex_lock(&rvu->rsrc_lock);
5331 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5332 		if (layer == BAND_PROF_INVAL_LAYER)
5333 			continue;
5334 		if (!req->prof_count[layer])
5335 			continue;
5336 
5337 		ipolicer = &nix_hw->ipolicer[layer];
5338 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5339 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5340 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5341 				break;
5342 
5343 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5344 			if (prof < 0)
5345 				break;
5346 			rsp->prof_count[layer]++;
5347 			rsp->prof_idx[layer][idx] = prof;
5348 			ipolicer->pfvf_map[prof] = pcifunc;
5349 		}
5350 	}
5351 	mutex_unlock(&rvu->rsrc_lock);
5352 	return 0;
5353 }
5354 
5355 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5356 {
5357 	int blkaddr, layer, prof_idx, err;
5358 	struct nix_ipolicer *ipolicer;
5359 	struct nix_hw *nix_hw;
5360 
5361 	if (!rvu->hw->cap.ipolicer)
5362 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5363 
5364 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5365 	if (err)
5366 		return err;
5367 
5368 	mutex_lock(&rvu->rsrc_lock);
5369 	/* Free all the profiles allocated to the PCIFUNC */
5370 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5371 		if (layer == BAND_PROF_INVAL_LAYER)
5372 			continue;
5373 		ipolicer = &nix_hw->ipolicer[layer];
5374 
5375 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5376 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5377 				continue;
5378 
5379 			/* Clear ratelimit aggregation, if any */
5380 			if (layer == BAND_PROF_LEAF_LAYER &&
5381 			    ipolicer->match_id[prof_idx])
5382 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5383 
5384 			ipolicer->pfvf_map[prof_idx] = 0x00;
5385 			ipolicer->match_id[prof_idx] = 0;
5386 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5387 		}
5388 	}
5389 	mutex_unlock(&rvu->rsrc_lock);
5390 	return 0;
5391 }
5392 
5393 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5394 				       struct nix_bandprof_free_req *req,
5395 				       struct msg_rsp *rsp)
5396 {
5397 	int blkaddr, layer, prof_idx, idx, err;
5398 	u16 pcifunc = req->hdr.pcifunc;
5399 	struct nix_ipolicer *ipolicer;
5400 	struct nix_hw *nix_hw;
5401 
5402 	if (req->free_all)
5403 		return nix_free_all_bandprof(rvu, pcifunc);
5404 
5405 	if (!rvu->hw->cap.ipolicer)
5406 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5407 
5408 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5409 	if (err)
5410 		return err;
5411 
5412 	mutex_lock(&rvu->rsrc_lock);
5413 	/* Free the requested profile indices */
5414 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5415 		if (layer == BAND_PROF_INVAL_LAYER)
5416 			continue;
5417 		if (!req->prof_count[layer])
5418 			continue;
5419 
5420 		ipolicer = &nix_hw->ipolicer[layer];
5421 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5422 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5423 				break;
5424 			prof_idx = req->prof_idx[layer][idx];
5425 			if (prof_idx >= ipolicer->band_prof.max ||
5426 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
5427 				continue;
5428 
5429 			/* Clear ratelimit aggregation, if any */
5430 			if (layer == BAND_PROF_LEAF_LAYER &&
5431 			    ipolicer->match_id[prof_idx])
5432 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5433 
5434 			ipolicer->pfvf_map[prof_idx] = 0x00;
5435 			ipolicer->match_id[prof_idx] = 0;
5436 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5437 		}
5438 	}
5439 	mutex_unlock(&rvu->rsrc_lock);
5440 	return 0;
5441 }
5442 
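/* Read a NIX context of the given type via the AF's admin queue */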
5443 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5444 			struct nix_cn10k_aq_enq_req *aq_req,
5445 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
5446 			u16 pcifunc, u8 ctype, u32 qidx)
5447 {
5448 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5449 	aq_req->hdr.pcifunc = pcifunc;
5450 	aq_req->ctype = ctype;
5451 	aq_req->op = NIX_AQ_INSTOP_READ;
5452 	aq_req->qidx = qidx;
5453 
5454 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5455 				       (struct nix_aq_enq_req *)aq_req,
5456 				       (struct nix_aq_enq_rsp *)aq_rsp);
5457 }
5458 
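/* Attach a leaf bandwidth profile to a mid layer profile by writing
 * band_prof_id and setting hl_en in the leaf profile's context.
 */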
5459 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
5460 					  struct nix_hw *nix_hw,
5461 					  struct nix_cn10k_aq_enq_req *aq_req,
5462 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
5463 					  u32 leaf_prof, u16 mid_prof)
5464 {
5465 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5466 	aq_req->hdr.pcifunc = 0x00;
5467 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
5468 	aq_req->op = NIX_AQ_INSTOP_WRITE;
5469 	aq_req->qidx = leaf_prof;
5470 
5471 	aq_req->prof.band_prof_id = mid_prof;
5472 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
5473 	aq_req->prof.hl_en = 1;
5474 	aq_req->prof_mask.hl_en = 1;
5475 
5476 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5477 				       (struct nix_aq_enq_req *)aq_req,
5478 				       (struct nix_aq_enq_rsp *)aq_rsp);
5479 }
5480 
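/* Flows steered to different RQs but marked with the same match_id are
 * rate limited as one aggregate by attaching their leaf bandwidth
 * profiles to a common mid layer profile.
 */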
5481 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
5482 				 u16 rq_idx, u16 match_id)
5483 {
5484 	int leaf_prof, mid_prof, leaf_match;
5485 	struct nix_cn10k_aq_enq_req aq_req;
5486 	struct nix_cn10k_aq_enq_rsp aq_rsp;
5487 	struct nix_ipolicer *ipolicer;
5488 	struct nix_hw *nix_hw;
5489 	int blkaddr, idx, rc;
5490 
5491 	if (!rvu->hw->cap.ipolicer)
5492 		return 0;
5493 
5494 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5495 	if (rc)
5496 		return rc;
5497 
5498 	/* Fetch the RQ's context to see if policing is enabled */
5499 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
5500 				 NIX_AQ_CTYPE_RQ, rq_idx);
5501 	if (rc) {
5502 		dev_err(rvu->dev,
5503 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5504 			__func__, rq_idx, pcifunc);
5505 		return rc;
5506 	}
5507 
5508 	if (!aq_rsp.rq.policer_ena)
5509 		return 0;
5510 
5511 	/* Get the bandwidth profile ID mapped to this RQ */
5512 	leaf_prof = aq_rsp.rq.band_prof_id;
5513 
5514 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5515 	ipolicer->match_id[leaf_prof] = match_id;
5516 
	/* Check if any other leaf profile is marked with the same match_id */
5518 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5519 		if (idx == leaf_prof)
5520 			continue;
5521 		if (ipolicer->match_id[idx] != match_id)
5522 			continue;
5523 
5524 		leaf_match = idx;
5525 		break;
5526 	}
5527 
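	/* No other leaf profile shares this match_id, nothing to aggregate */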
5528 	if (idx == ipolicer->band_prof.max)
5529 		return 0;
5530 
5531 	/* Fetch the matching profile's context to check if it's already
5532 	 * mapped to a mid level profile.
5533 	 */
5534 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5535 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
5536 	if (rc) {
5537 		dev_err(rvu->dev,
5538 			"%s: Failed to fetch context of leaf profile %d\n",
5539 			__func__, leaf_match);
5540 		return rc;
5541 	}
5542 
5543 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5544 	if (aq_rsp.prof.hl_en) {
		/* Get the mid layer profile index and map leaf_prof to it
		 * as well, so that flows steered to different RQs but
		 * marked with the same match_id are rate limited in an
		 * aggregate fashion.
		 */
5550 		mid_prof = aq_rsp.prof.band_prof_id;
5551 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5552 						    &aq_req, &aq_rsp,
5553 						    leaf_prof, mid_prof);
5554 		if (rc) {
5555 			dev_err(rvu->dev,
5556 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5557 				__func__, leaf_prof, mid_prof);
5558 			goto exit;
5559 		}
5560 
5561 		mutex_lock(&rvu->rsrc_lock);
5562 		ipolicer->ref_count[mid_prof]++;
5563 		mutex_unlock(&rvu->rsrc_lock);
5564 		goto exit;
5565 	}
5566 
5567 	/* Allocate a mid layer profile and
5568 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
5569 	 */
5570 	mutex_lock(&rvu->rsrc_lock);
5571 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5572 	if (mid_prof < 0) {
5573 		dev_err(rvu->dev,
5574 			"%s: Unable to allocate mid layer profile\n", __func__);
5575 		mutex_unlock(&rvu->rsrc_lock);
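		/* rc is still 0 from the last successful AQ read, so failing
		 * to aggregate is not reported as an error to the caller.
		 */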
5576 		goto exit;
5577 	}
5578 	mutex_unlock(&rvu->rsrc_lock);
5579 	ipolicer->pfvf_map[mid_prof] = 0x00;
5580 	ipolicer->ref_count[mid_prof] = 0;
5581 
5582 	/* Initialize mid layer profile same as 'leaf_prof' */
5583 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5584 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5585 	if (rc) {
5586 		dev_err(rvu->dev,
5587 			"%s: Failed to fetch context of leaf profile %d\n",
5588 			__func__, leaf_prof);
5589 		goto exit;
5590 	}
5591 
5592 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5593 	aq_req.hdr.pcifunc = 0x00;
5594 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
5595 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5596 	aq_req.op = NIX_AQ_INSTOP_WRITE;
5597 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
5598 	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
5599 	/* Clear higher layer enable bit in the mid profile, just in case */
5600 	aq_req.prof.hl_en = 0;
5601 	aq_req.prof_mask.hl_en = 1;
5602 
5603 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5604 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5605 	if (rc) {
5606 		dev_err(rvu->dev,
5607 			"%s: Failed to INIT context of mid layer profile %d\n",
5608 			__func__, mid_prof);
5609 		goto exit;
5610 	}
5611 
5612 	/* Map both leaf profiles to this mid layer profile */
5613 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5614 					    &aq_req, &aq_rsp,
5615 					    leaf_prof, mid_prof);
5616 	if (rc) {
5617 		dev_err(rvu->dev,
5618 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5619 			__func__, leaf_prof, mid_prof);
5620 		goto exit;
5621 	}
5622 
5623 	mutex_lock(&rvu->rsrc_lock);
5624 	ipolicer->ref_count[mid_prof]++;
5625 	mutex_unlock(&rvu->rsrc_lock);
5626 
5627 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5628 					    &aq_req, &aq_rsp,
5629 					    leaf_match, mid_prof);
5630 	if (rc) {
5631 		dev_err(rvu->dev,
5632 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5633 			__func__, leaf_match, mid_prof);
5634 		ipolicer->ref_count[mid_prof]--;
5635 		goto exit;
5636 	}
5637 
5638 	mutex_lock(&rvu->rsrc_lock);
5639 	ipolicer->ref_count[mid_prof]++;
5640 	mutex_unlock(&rvu->rsrc_lock);
5641 
5642 exit:
5643 	return rc;
5644 }
5645 
/* Called with the rsrc_lock mutex held */
5647 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
5648 				     u32 leaf_prof)
5649 {
5650 	struct nix_cn10k_aq_enq_req aq_req;
5651 	struct nix_cn10k_aq_enq_rsp aq_rsp;
5652 	struct nix_ipolicer *ipolicer;
5653 	u16 mid_prof;
5654 	int rc;
5655 
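	/* Temporarily drop rsrc_lock while reading the leaf profile's
	 * context through the AQ; it is re-acquired below.
	 */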
5656 	mutex_unlock(&rvu->rsrc_lock);
5657 
5658 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5659 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5660 
5661 	mutex_lock(&rvu->rsrc_lock);
5662 	if (rc) {
5663 		dev_err(rvu->dev,
5664 			"%s: Failed to fetch context of leaf profile %d\n",
5665 			__func__, leaf_prof);
5666 		return;
5667 	}
5668 
5669 	if (!aq_rsp.prof.hl_en)
5670 		return;
5671 
5672 	mid_prof = aq_rsp.prof.band_prof_id;
5673 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5674 	ipolicer->ref_count[mid_prof]--;
	/* If ref_count drops to zero, free the mid layer profile */
5676 	if (!ipolicer->ref_count[mid_prof]) {
5677 		ipolicer->pfvf_map[mid_prof] = 0x00;
5678 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
5679 	}
5680 }
5681 
5682 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
5683 					     struct nix_bandprof_get_hwinfo_rsp *rsp)
5684 {
5685 	struct nix_ipolicer *ipolicer;
5686 	int blkaddr, layer, err;
5687 	struct nix_hw *nix_hw;
5688 	u64 tu;
5689 
5690 	if (!rvu->hw->cap.ipolicer)
5691 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5692 
5693 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
5694 	if (err)
5695 		return err;
5696 
	/* Return the number of free bandwidth profiles at each layer */
5698 	mutex_lock(&rvu->rsrc_lock);
5699 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5700 		if (layer == BAND_PROF_INVAL_LAYER)
5701 			continue;
5702 
5703 		ipolicer = &nix_hw->ipolicer[layer];
5704 		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
5705 	}
5706 	mutex_unlock(&rvu->rsrc_lock);
5707 
	/* Report the policer timeunit in nanoseconds */
5709 	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
5710 	rsp->policer_timeunit = (tu + 1) * 100;
5711 
5712 	return 0;
5713 }
5714