1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell.
5  *
6  */
7 
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 
11 #include "rvu_struct.h"
12 #include "rvu_reg.h"
13 #include "rvu.h"
14 #include "npc.h"
15 #include "cgx.h"
16 #include "lmac_common.h"
17 #include "rvu_npc_hash.h"
18 
19 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
20 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
21 			    int type, int chan_id);
22 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
23 			       int type, bool add);
24 static int nix_setup_ipolicers(struct rvu *rvu,
25 			       struct nix_hw *nix_hw, int blkaddr);
26 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
27 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
28 			       struct nix_hw *nix_hw, u16 pcifunc);
29 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
30 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
31 				     u32 leaf_prof);
32 static const char *nix_get_ctx_name(int ctype);
33 
34 enum mc_tbl_sz {
35 	MC_TBL_SZ_256,
36 	MC_TBL_SZ_512,
37 	MC_TBL_SZ_1K,
38 	MC_TBL_SZ_2K,
39 	MC_TBL_SZ_4K,
40 	MC_TBL_SZ_8K,
41 	MC_TBL_SZ_16K,
42 	MC_TBL_SZ_32K,
43 	MC_TBL_SZ_64K,
44 };
45 
46 enum mc_buf_cnt {
47 	MC_BUF_CNT_8,
48 	MC_BUF_CNT_16,
49 	MC_BUF_CNT_32,
50 	MC_BUF_CNT_64,
51 	MC_BUF_CNT_128,
52 	MC_BUF_CNT_256,
53 	MC_BUF_CNT_512,
54 	MC_BUF_CNT_1024,
55 	MC_BUF_CNT_2048,
56 };
57 
58 enum nix_makr_fmt_indexes {
59 	NIX_MARK_CFG_IP_DSCP_RED,
60 	NIX_MARK_CFG_IP_DSCP_YELLOW,
61 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
62 	NIX_MARK_CFG_IP_ECN_RED,
63 	NIX_MARK_CFG_IP_ECN_YELLOW,
64 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
65 	NIX_MARK_CFG_VLAN_DEI_RED,
66 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
67 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
68 	NIX_MARK_CFG_MAX,
69 };
70 
71 /* For now considering MC resources needed for broadcast
72  * pkt replication only. i.e 256 HWVFs + 12 PFs.
73  */
74 #define MC_TBL_SIZE	MC_TBL_SZ_512
75 #define MC_BUF_CNT	MC_BUF_CNT_128
76 
77 struct mce {
78 	struct hlist_node	node;
79 	u16			pcifunc;
80 };
81 
82 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
83 {
84 	int i = 0;
85 
86 	/*If blkaddr is 0, return the first nix block address*/
87 	if (blkaddr == 0)
88 		return rvu->nix_blkaddr[blkaddr];
89 
90 	while (i + 1 < MAX_NIX_BLKS) {
91 		if (rvu->nix_blkaddr[i] == blkaddr)
92 			return rvu->nix_blkaddr[i + 1];
93 		i++;
94 	}
95 
96 	return 0;
97 }
98 
99 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
100 {
101 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
102 	int blkaddr;
103 
104 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
105 	if (!pfvf->nixlf || blkaddr < 0)
106 		return false;
107 	return true;
108 }
109 
110 int rvu_get_nixlf_count(struct rvu *rvu)
111 {
112 	int blkaddr = 0, max = 0;
113 	struct rvu_block *block;
114 
115 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
116 	while (blkaddr) {
117 		block = &rvu->hw->block[blkaddr];
118 		max += block->lf.max;
119 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
120 	}
121 	return max;
122 }
123 
124 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
125 {
126 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
127 	struct rvu_hwinfo *hw = rvu->hw;
128 	int blkaddr;
129 
130 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
131 	if (!pfvf->nixlf || blkaddr < 0)
132 		return NIX_AF_ERR_AF_LF_INVALID;
133 
134 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
135 	if (*nixlf < 0)
136 		return NIX_AF_ERR_AF_LF_INVALID;
137 
138 	if (nix_blkaddr)
139 		*nix_blkaddr = blkaddr;
140 
141 	return 0;
142 }
143 
144 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
145 			struct nix_hw **nix_hw, int *blkaddr)
146 {
147 	struct rvu_pfvf *pfvf;
148 
149 	pfvf = rvu_get_pfvf(rvu, pcifunc);
150 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
151 	if (!pfvf->nixlf || *blkaddr < 0)
152 		return NIX_AF_ERR_AF_LF_INVALID;
153 
154 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
155 	if (!*nix_hw)
156 		return NIX_AF_ERR_INVALID_NIXBLK;
157 	return 0;
158 }
159 
160 static void nix_mce_list_init(struct nix_mce_list *list, int max)
161 {
162 	INIT_HLIST_HEAD(&list->head);
163 	list->count = 0;
164 	list->max = max;
165 }
166 
167 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
168 {
169 	int idx;
170 
171 	if (!mcast)
172 		return 0;
173 
174 	idx = mcast->next_free_mce;
175 	mcast->next_free_mce += count;
176 	return idx;
177 }
178 
179 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
180 {
181 	int nix_blkaddr = 0, i = 0;
182 	struct rvu *rvu = hw->rvu;
183 
184 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
185 	while (nix_blkaddr) {
186 		if (blkaddr == nix_blkaddr && hw->nix)
187 			return &hw->nix[i];
188 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
189 		i++;
190 	}
191 	return NULL;
192 }
193 
194 int nix_get_dwrr_mtu_reg(struct rvu_hwinfo *hw, int smq_link_type)
195 {
196 	if (hw->cap.nix_multiple_dwrr_mtu)
197 		return NIX_AF_DWRR_MTUX(smq_link_type);
198 
199 	if (smq_link_type == SMQ_LINK_TYPE_SDP)
200 		return NIX_AF_DWRR_SDP_MTU;
201 
202 	/* Here it's same reg for RPM and LBK */
203 	return NIX_AF_DWRR_RPM_MTU;
204 }
205 
206 u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu)
207 {
208 	dwrr_mtu &= 0x1FULL;
209 
210 	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
211 	 * Value of 4 is reserved for MTU value of 9728 bytes.
212 	 * Value of 5 is reserved for MTU value of 10240 bytes.
213 	 */
214 	switch (dwrr_mtu) {
215 	case 4:
216 		return 9728;
217 	case 5:
218 		return 10240;
219 	default:
220 		return BIT_ULL(dwrr_mtu);
221 	}
222 
223 	return 0;
224 }
225 
226 u32 convert_bytes_to_dwrr_mtu(u32 bytes)
227 {
228 	/* MTU used for DWRR calculation is in power of 2 up until 64K bytes.
229 	 * Value of 4 is reserved for MTU value of 9728 bytes.
230 	 * Value of 5 is reserved for MTU value of 10240 bytes.
231 	 */
232 	if (bytes > BIT_ULL(16))
233 		return 0;
234 
235 	switch (bytes) {
236 	case 9728:
237 		return 4;
238 	case 10240:
239 		return 5;
240 	default:
241 		return ilog2(bytes);
242 	}
243 
244 	return 0;
245 }
246 
247 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
248 {
249 	int err;
250 
251 	/* Sync all in flight RX packets to LLC/DRAM */
252 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
253 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
254 	if (err)
255 		dev_err(rvu->dev, "SYNC1: NIX RX software sync failed\n");
256 
257 	/* SW_SYNC ensures all existing transactions are finished and pkts
258 	 * are written to LLC/DRAM, queues should be teared down after
259 	 * successful SW_SYNC. Due to a HW errata, in some rare scenarios
260 	 * an existing transaction might end after SW_SYNC operation. To
261 	 * ensure operation is fully done, do the SW_SYNC twice.
262 	 */
263 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
264 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
265 	if (err)
266 		dev_err(rvu->dev, "SYNC2: NIX RX software sync failed\n");
267 }
268 
269 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
270 			    int lvl, u16 pcifunc, u16 schq)
271 {
272 	struct rvu_hwinfo *hw = rvu->hw;
273 	struct nix_txsch *txsch;
274 	struct nix_hw *nix_hw;
275 	u16 map_func;
276 
277 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
278 	if (!nix_hw)
279 		return false;
280 
281 	txsch = &nix_hw->txsch[lvl];
282 	/* Check out of bounds */
283 	if (schq >= txsch->schq.max)
284 		return false;
285 
286 	mutex_lock(&rvu->rsrc_lock);
287 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
288 	mutex_unlock(&rvu->rsrc_lock);
289 
290 	/* TLs aggegating traffic are shared across PF and VFs */
291 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
292 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
293 			return false;
294 		else
295 			return true;
296 	}
297 
298 	if (map_func != pcifunc)
299 		return false;
300 
301 	return true;
302 }
303 
304 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
305 			      struct nix_lf_alloc_rsp *rsp, bool loop)
306 {
307 	struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
308 	u16 req_chan_base, req_chan_end, req_chan_cnt;
309 	struct rvu_hwinfo *hw = rvu->hw;
310 	struct sdp_node_info *sdp_info;
311 	int pkind, pf, vf, lbkid, vfid;
312 	u8 cgx_id, lmac_id;
313 	bool from_vf;
314 	int err;
315 
316 	pf = rvu_get_pf(pcifunc);
317 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
318 	    type != NIX_INTF_TYPE_SDP)
319 		return 0;
320 
321 	switch (type) {
322 	case NIX_INTF_TYPE_CGX:
323 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
324 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
325 
326 		pkind = rvu_npc_get_pkind(rvu, pf);
327 		if (pkind < 0) {
328 			dev_err(rvu->dev,
329 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
330 			return -EINVAL;
331 		}
332 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
333 		pfvf->tx_chan_base = pfvf->rx_chan_base;
334 		pfvf->rx_chan_cnt = 1;
335 		pfvf->tx_chan_cnt = 1;
336 		rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id;
337 
338 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
339 		rvu_npc_set_pkind(rvu, pkind, pfvf);
340 
341 		break;
342 	case NIX_INTF_TYPE_LBK:
343 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
344 
345 		/* If NIX1 block is present on the silicon then NIXes are
346 		 * assigned alternatively for lbk interfaces. NIX0 should
347 		 * send packets on lbk link 1 channels and NIX1 should send
348 		 * on lbk link 0 channels for the communication between
349 		 * NIX0 and NIX1.
350 		 */
351 		lbkid = 0;
352 		if (rvu->hw->lbk_links > 1)
353 			lbkid = vf & 0x1 ? 0 : 1;
354 
355 		/* By default NIX0 is configured to send packet on lbk link 1
356 		 * (which corresponds to LBK1), same packet will receive on
357 		 * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0
358 		 * (which corresponds to LBK2) packet will receive on NIX0 lbk
359 		 * link 1.
360 		 * But if lbk links for NIX0 and NIX1 are negated, i.e NIX0
361 		 * transmits and receives on lbk link 0, whick corresponds
362 		 * to LBK1 block, back to back connectivity between NIX and
363 		 * LBK can be achieved (which is similar to 96xx)
364 		 *
365 		 *			RX		TX
366 		 * NIX0 lbk link	1 (LBK2)	1 (LBK1)
367 		 * NIX0 lbk link	0 (LBK0)	0 (LBK0)
368 		 * NIX1 lbk link	0 (LBK1)	0 (LBK2)
369 		 * NIX1 lbk link	1 (LBK3)	1 (LBK3)
370 		 */
371 		if (loop)
372 			lbkid = !lbkid;
373 
374 		/* Note that AF's VFs work in pairs and talk over consecutive
375 		 * loopback channels.Therefore if odd number of AF VFs are
376 		 * enabled then the last VF remains with no pair.
377 		 */
378 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
379 		pfvf->tx_chan_base = vf & 0x1 ?
380 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
381 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
382 		pfvf->rx_chan_cnt = 1;
383 		pfvf->tx_chan_cnt = 1;
384 		rsp->tx_link = hw->cgx_links + lbkid;
385 		pfvf->lbkid = lbkid;
386 		rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
387 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
388 					      pfvf->rx_chan_base,
389 					      pfvf->rx_chan_cnt);
390 
391 		break;
392 	case NIX_INTF_TYPE_SDP:
393 		from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
394 		parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
395 		sdp_info = parent_pf->sdp_info;
396 		if (!sdp_info) {
397 			dev_err(rvu->dev, "Invalid sdp_info pointer\n");
398 			return -EINVAL;
399 		}
400 		if (from_vf) {
401 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
402 				sdp_info->num_pf_rings;
403 			vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
404 			for (vfid = 0; vfid < vf; vfid++)
405 				req_chan_base += sdp_info->vf_rings[vfid];
406 			req_chan_cnt = sdp_info->vf_rings[vf];
407 			req_chan_end = req_chan_base + req_chan_cnt - 1;
408 			if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
409 			    req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
410 				dev_err(rvu->dev,
411 					"PF_Func 0x%x: Invalid channel base and count\n",
412 					pcifunc);
413 				return -EINVAL;
414 			}
415 		} else {
416 			req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
417 			req_chan_cnt = sdp_info->num_pf_rings;
418 		}
419 
420 		pfvf->rx_chan_base = req_chan_base;
421 		pfvf->rx_chan_cnt = req_chan_cnt;
422 		pfvf->tx_chan_base = pfvf->rx_chan_base;
423 		pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
424 
425 		rsp->tx_link = hw->cgx_links + hw->lbk_links;
426 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
427 					      pfvf->rx_chan_base,
428 					      pfvf->rx_chan_cnt);
429 		break;
430 	}
431 
432 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
433 	 * RVU PF/VF's MAC address.
434 	 */
435 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
436 				    pfvf->rx_chan_base, pfvf->mac_addr);
437 
438 	/* Add this PF_FUNC to bcast pkt replication list */
439 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
440 	if (err) {
441 		dev_err(rvu->dev,
442 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
443 			pcifunc);
444 		return err;
445 	}
446 	/* Install MCAM rule matching Ethernet broadcast mac address */
447 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
448 					  nixlf, pfvf->rx_chan_base);
449 
450 	pfvf->maxlen = NIC_HW_MIN_FRS;
451 	pfvf->minlen = NIC_HW_MIN_FRS;
452 
453 	return 0;
454 }
455 
456 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
457 {
458 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
459 	int err;
460 
461 	pfvf->maxlen = 0;
462 	pfvf->minlen = 0;
463 
464 	/* Remove this PF_FUNC from bcast pkt replication list */
465 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
466 	if (err) {
467 		dev_err(rvu->dev,
468 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
469 			pcifunc);
470 	}
471 
472 	/* Free and disable any MCAM entries used by this NIX LF */
473 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
474 
475 	/* Disable DMAC filters used */
476 	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
477 }
478 
479 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
480 				    struct nix_bp_cfg_req *req,
481 				    struct msg_rsp *rsp)
482 {
483 	u16 pcifunc = req->hdr.pcifunc;
484 	struct rvu_pfvf *pfvf;
485 	int blkaddr, pf, type;
486 	u16 chan_base, chan;
487 	u64 cfg;
488 
489 	pf = rvu_get_pf(pcifunc);
490 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
491 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
492 		return 0;
493 
494 	pfvf = rvu_get_pfvf(rvu, pcifunc);
495 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
496 
497 	chan_base = pfvf->rx_chan_base + req->chan_base;
498 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
499 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
500 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
501 			    cfg & ~BIT_ULL(16));
502 	}
503 	return 0;
504 }
505 
506 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
507 			    int type, int chan_id)
508 {
509 	int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
510 	u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
511 	struct rvu_hwinfo *hw = rvu->hw;
512 	struct rvu_pfvf *pfvf;
513 	u8 cgx_id, lmac_id;
514 	u64 cfg;
515 
516 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
517 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
518 	lmac_chan_cnt = cfg & 0xFF;
519 
520 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
521 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
522 
523 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
524 	sdp_chan_cnt = cfg & 0xFFF;
525 	sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
526 
527 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
528 
529 	/* Backpressure IDs range division
530 	 * CGX channles are mapped to (0 - 191) BPIDs
531 	 * LBK channles are mapped to (192 - 255) BPIDs
532 	 * SDP channles are mapped to (256 - 511) BPIDs
533 	 *
534 	 * Lmac channles and bpids mapped as follows
535 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
536 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
537 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
538 	 */
539 	switch (type) {
540 	case NIX_INTF_TYPE_CGX:
541 		if ((req->chan_base + req->chan_cnt) > 16)
542 			return -EINVAL;
543 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
544 		/* Assign bpid based on cgx, lmac and chan id */
545 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
546 			(lmac_id * lmac_chan_cnt) + req->chan_base;
547 
548 		if (req->bpid_per_chan)
549 			bpid += chan_id;
550 		if (bpid > cgx_bpid_cnt)
551 			return -EINVAL;
552 		break;
553 
554 	case NIX_INTF_TYPE_LBK:
555 		if ((req->chan_base + req->chan_cnt) > 63)
556 			return -EINVAL;
557 		bpid = cgx_bpid_cnt + req->chan_base;
558 		if (req->bpid_per_chan)
559 			bpid += chan_id;
560 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
561 			return -EINVAL;
562 		break;
563 	case NIX_INTF_TYPE_SDP:
564 		if ((req->chan_base + req->chan_cnt) > 255)
565 			return -EINVAL;
566 
567 		bpid = sdp_bpid_cnt + req->chan_base;
568 		if (req->bpid_per_chan)
569 			bpid += chan_id;
570 
571 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
572 			return -EINVAL;
573 		break;
574 	default:
575 		return -EINVAL;
576 	}
577 	return bpid;
578 }
579 
580 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
581 				   struct nix_bp_cfg_req *req,
582 				   struct nix_bp_cfg_rsp *rsp)
583 {
584 	int blkaddr, pf, type, chan_id = 0;
585 	u16 pcifunc = req->hdr.pcifunc;
586 	struct rvu_pfvf *pfvf;
587 	u16 chan_base, chan;
588 	s16 bpid, bpid_base;
589 	u64 cfg;
590 
591 	pf = rvu_get_pf(pcifunc);
592 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
593 	if (is_sdp_pfvf(pcifunc))
594 		type = NIX_INTF_TYPE_SDP;
595 
596 	/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
597 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
598 	    type != NIX_INTF_TYPE_SDP)
599 		return 0;
600 
601 	pfvf = rvu_get_pfvf(rvu, pcifunc);
602 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
603 
604 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
605 	chan_base = pfvf->rx_chan_base + req->chan_base;
606 	bpid = bpid_base;
607 
608 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
609 		if (bpid < 0) {
610 			dev_warn(rvu->dev, "Fail to enable backpressure\n");
611 			return -EINVAL;
612 		}
613 
614 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
615 		cfg &= ~GENMASK_ULL(8, 0);
616 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
617 			    cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
618 		chan_id++;
619 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
620 	}
621 
622 	for (chan = 0; chan < req->chan_cnt; chan++) {
623 		/* Map channel and bpid assign to it */
624 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
625 					(bpid_base & 0x3FF);
626 		if (req->bpid_per_chan)
627 			bpid_base++;
628 	}
629 	rsp->chan_cnt = req->chan_cnt;
630 
631 	return 0;
632 }
633 
634 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
635 				 u64 format, bool v4, u64 *fidx)
636 {
637 	struct nix_lso_format field = {0};
638 
639 	/* IP's Length field */
640 	field.layer = NIX_TXLAYER_OL3;
641 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
642 	field.offset = v4 ? 2 : 4;
643 	field.sizem1 = 1; /* i.e 2 bytes */
644 	field.alg = NIX_LSOALG_ADD_PAYLEN;
645 	rvu_write64(rvu, blkaddr,
646 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
647 		    *(u64 *)&field);
648 
649 	/* No ID field in IPv6 header */
650 	if (!v4)
651 		return;
652 
653 	/* IP's ID field */
654 	field.layer = NIX_TXLAYER_OL3;
655 	field.offset = 4;
656 	field.sizem1 = 1; /* i.e 2 bytes */
657 	field.alg = NIX_LSOALG_ADD_SEGNUM;
658 	rvu_write64(rvu, blkaddr,
659 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
660 		    *(u64 *)&field);
661 }
662 
663 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
664 				 u64 format, u64 *fidx)
665 {
666 	struct nix_lso_format field = {0};
667 
668 	/* TCP's sequence number field */
669 	field.layer = NIX_TXLAYER_OL4;
670 	field.offset = 4;
671 	field.sizem1 = 3; /* i.e 4 bytes */
672 	field.alg = NIX_LSOALG_ADD_OFFSET;
673 	rvu_write64(rvu, blkaddr,
674 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
675 		    *(u64 *)&field);
676 
677 	/* TCP's flags field */
678 	field.layer = NIX_TXLAYER_OL4;
679 	field.offset = 12;
680 	field.sizem1 = 1; /* 2 bytes */
681 	field.alg = NIX_LSOALG_TCP_FLAGS;
682 	rvu_write64(rvu, blkaddr,
683 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
684 		    *(u64 *)&field);
685 }
686 
687 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
688 {
689 	u64 cfg, idx, fidx = 0;
690 
691 	/* Get max HW supported format indices */
692 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
693 	nix_hw->lso.total = cfg;
694 
695 	/* Enable LSO */
696 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
697 	/* For TSO, set first and middle segment flags to
698 	 * mask out PSH, RST & FIN flags in TCP packet
699 	 */
700 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
701 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
702 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
703 
704 	/* Setup default static LSO formats
705 	 *
706 	 * Configure format fields for TCPv4 segmentation offload
707 	 */
708 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
709 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
710 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
711 
712 	/* Set rest of the fields to NOP */
713 	for (; fidx < 8; fidx++) {
714 		rvu_write64(rvu, blkaddr,
715 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
716 	}
717 	nix_hw->lso.in_use++;
718 
719 	/* Configure format fields for TCPv6 segmentation offload */
720 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
721 	fidx = 0;
722 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
723 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
724 
725 	/* Set rest of the fields to NOP */
726 	for (; fidx < 8; fidx++) {
727 		rvu_write64(rvu, blkaddr,
728 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
729 	}
730 	nix_hw->lso.in_use++;
731 }
732 
733 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
734 {
735 	kfree(pfvf->rq_bmap);
736 	kfree(pfvf->sq_bmap);
737 	kfree(pfvf->cq_bmap);
738 	if (pfvf->rq_ctx)
739 		qmem_free(rvu->dev, pfvf->rq_ctx);
740 	if (pfvf->sq_ctx)
741 		qmem_free(rvu->dev, pfvf->sq_ctx);
742 	if (pfvf->cq_ctx)
743 		qmem_free(rvu->dev, pfvf->cq_ctx);
744 	if (pfvf->rss_ctx)
745 		qmem_free(rvu->dev, pfvf->rss_ctx);
746 	if (pfvf->nix_qints_ctx)
747 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
748 	if (pfvf->cq_ints_ctx)
749 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
750 
751 	pfvf->rq_bmap = NULL;
752 	pfvf->cq_bmap = NULL;
753 	pfvf->sq_bmap = NULL;
754 	pfvf->rq_ctx = NULL;
755 	pfvf->sq_ctx = NULL;
756 	pfvf->cq_ctx = NULL;
757 	pfvf->rss_ctx = NULL;
758 	pfvf->nix_qints_ctx = NULL;
759 	pfvf->cq_ints_ctx = NULL;
760 }
761 
762 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
763 			      struct rvu_pfvf *pfvf, int nixlf,
764 			      int rss_sz, int rss_grps, int hwctx_size,
765 			      u64 way_mask, bool tag_lsb_as_adder)
766 {
767 	int err, grp, num_indices;
768 	u64 val;
769 
770 	/* RSS is not requested for this NIXLF */
771 	if (!rss_sz)
772 		return 0;
773 	num_indices = rss_sz * rss_grps;
774 
775 	/* Alloc NIX RSS HW context memory and config the base */
776 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
777 	if (err)
778 		return err;
779 
780 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
781 		    (u64)pfvf->rss_ctx->iova);
782 
783 	/* Config full RSS table size, enable RSS and caching */
784 	val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 |
785 			ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE);
786 
787 	if (tag_lsb_as_adder)
788 		val |= BIT_ULL(5);
789 
790 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val);
791 	/* Config RSS group offset and sizes */
792 	for (grp = 0; grp < rss_grps; grp++)
793 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
794 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
795 	return 0;
796 }
797 
798 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
799 			       struct nix_aq_inst_s *inst)
800 {
801 	struct admin_queue *aq = block->aq;
802 	struct nix_aq_res_s *result;
803 	int timeout = 1000;
804 	u64 reg, head;
805 	int ret;
806 
807 	result = (struct nix_aq_res_s *)aq->res->base;
808 
809 	/* Get current head pointer where to append this instruction */
810 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
811 	head = (reg >> 4) & AQ_PTR_MASK;
812 
813 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
814 	       (void *)inst, aq->inst->entry_sz);
815 	memset(result, 0, sizeof(*result));
816 	/* sync into memory */
817 	wmb();
818 
819 	/* Ring the doorbell and wait for result */
820 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
821 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
822 		cpu_relax();
823 		udelay(1);
824 		timeout--;
825 		if (!timeout)
826 			return -EBUSY;
827 	}
828 
829 	if (result->compcode != NIX_AQ_COMP_GOOD) {
830 		/* TODO: Replace this with some error code */
831 		if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
832 		    result->compcode == NIX_AQ_COMP_LOCKERR ||
833 		    result->compcode == NIX_AQ_COMP_CTX_POISON) {
834 			ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
835 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
836 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
837 			ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
838 			if (ret)
839 				dev_err(rvu->dev,
840 					"%s: Not able to unlock cachelines\n", __func__);
841 		}
842 
843 		return -EBUSY;
844 	}
845 
846 	return 0;
847 }
848 
849 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
850 				   struct nix_aq_enq_req *req,
851 				   struct nix_aq_enq_rsp *rsp)
852 {
853 	struct rvu_hwinfo *hw = rvu->hw;
854 	u16 pcifunc = req->hdr.pcifunc;
855 	int nixlf, blkaddr, rc = 0;
856 	struct nix_aq_inst_s inst;
857 	struct rvu_block *block;
858 	struct admin_queue *aq;
859 	struct rvu_pfvf *pfvf;
860 	void *ctx, *mask;
861 	bool ena;
862 	u64 cfg;
863 
864 	blkaddr = nix_hw->blkaddr;
865 	block = &hw->block[blkaddr];
866 	aq = block->aq;
867 	if (!aq) {
868 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
869 		return NIX_AF_ERR_AQ_ENQUEUE;
870 	}
871 
872 	pfvf = rvu_get_pfvf(rvu, pcifunc);
873 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
874 
875 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
876 	 * operations done by AF itself.
877 	 */
878 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
879 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
880 		if (!pfvf->nixlf || nixlf < 0)
881 			return NIX_AF_ERR_AF_LF_INVALID;
882 	}
883 
884 	switch (req->ctype) {
885 	case NIX_AQ_CTYPE_RQ:
886 		/* Check if index exceeds max no of queues */
887 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
888 			rc = NIX_AF_ERR_AQ_ENQUEUE;
889 		break;
890 	case NIX_AQ_CTYPE_SQ:
891 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
892 			rc = NIX_AF_ERR_AQ_ENQUEUE;
893 		break;
894 	case NIX_AQ_CTYPE_CQ:
895 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
896 			rc = NIX_AF_ERR_AQ_ENQUEUE;
897 		break;
898 	case NIX_AQ_CTYPE_RSS:
899 		/* Check if RSS is enabled and qidx is within range */
900 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
901 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
902 		    (req->qidx >= (256UL << (cfg & 0xF))))
903 			rc = NIX_AF_ERR_AQ_ENQUEUE;
904 		break;
905 	case NIX_AQ_CTYPE_MCE:
906 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
907 
908 		/* Check if index exceeds MCE list length */
909 		if (!nix_hw->mcast.mce_ctx ||
910 		    (req->qidx >= (256UL << (cfg & 0xF))))
911 			rc = NIX_AF_ERR_AQ_ENQUEUE;
912 
913 		/* Adding multicast lists for requests from PF/VFs is not
914 		 * yet supported, so ignore this.
915 		 */
916 		if (rsp)
917 			rc = NIX_AF_ERR_AQ_ENQUEUE;
918 		break;
919 	case NIX_AQ_CTYPE_BANDPROF:
920 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
921 					nix_hw, pcifunc))
922 			rc = NIX_AF_ERR_INVALID_BANDPROF;
923 		break;
924 	default:
925 		rc = NIX_AF_ERR_AQ_ENQUEUE;
926 	}
927 
928 	if (rc)
929 		return rc;
930 
931 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
932 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
933 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
934 	     (req->op == NIX_AQ_INSTOP_WRITE &&
935 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
936 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
937 				     pcifunc, req->sq.smq))
938 			return NIX_AF_ERR_AQ_ENQUEUE;
939 	}
940 
941 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
942 	inst.lf = nixlf;
943 	inst.cindex = req->qidx;
944 	inst.ctype = req->ctype;
945 	inst.op = req->op;
946 	/* Currently we are not supporting enqueuing multiple instructions,
947 	 * so always choose first entry in result memory.
948 	 */
949 	inst.res_addr = (u64)aq->res->iova;
950 
951 	/* Hardware uses same aq->res->base for updating result of
952 	 * previous instruction hence wait here till it is done.
953 	 */
954 	spin_lock(&aq->lock);
955 
956 	/* Clean result + context memory */
957 	memset(aq->res->base, 0, aq->res->entry_sz);
958 	/* Context needs to be written at RES_ADDR + 128 */
959 	ctx = aq->res->base + 128;
960 	/* Mask needs to be written at RES_ADDR + 256 */
961 	mask = aq->res->base + 256;
962 
963 	switch (req->op) {
964 	case NIX_AQ_INSTOP_WRITE:
965 		if (req->ctype == NIX_AQ_CTYPE_RQ)
966 			memcpy(mask, &req->rq_mask,
967 			       sizeof(struct nix_rq_ctx_s));
968 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
969 			memcpy(mask, &req->sq_mask,
970 			       sizeof(struct nix_sq_ctx_s));
971 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
972 			memcpy(mask, &req->cq_mask,
973 			       sizeof(struct nix_cq_ctx_s));
974 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
975 			memcpy(mask, &req->rss_mask,
976 			       sizeof(struct nix_rsse_s));
977 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
978 			memcpy(mask, &req->mce_mask,
979 			       sizeof(struct nix_rx_mce_s));
980 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
981 			memcpy(mask, &req->prof_mask,
982 			       sizeof(struct nix_bandprof_s));
983 		fallthrough;
984 	case NIX_AQ_INSTOP_INIT:
985 		if (req->ctype == NIX_AQ_CTYPE_RQ)
986 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
987 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
988 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
989 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
990 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
991 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
992 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
993 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
994 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
995 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
996 			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
997 		break;
998 	case NIX_AQ_INSTOP_NOP:
999 	case NIX_AQ_INSTOP_READ:
1000 	case NIX_AQ_INSTOP_LOCK:
1001 	case NIX_AQ_INSTOP_UNLOCK:
1002 		break;
1003 	default:
1004 		rc = NIX_AF_ERR_AQ_ENQUEUE;
1005 		spin_unlock(&aq->lock);
1006 		return rc;
1007 	}
1008 
1009 	/* Submit the instruction to AQ */
1010 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
1011 	if (rc) {
1012 		spin_unlock(&aq->lock);
1013 		return rc;
1014 	}
1015 
1016 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
1017 	if (req->op == NIX_AQ_INSTOP_INIT) {
1018 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
1019 			__set_bit(req->qidx, pfvf->rq_bmap);
1020 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
1021 			__set_bit(req->qidx, pfvf->sq_bmap);
1022 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
1023 			__set_bit(req->qidx, pfvf->cq_bmap);
1024 	}
1025 
1026 	if (req->op == NIX_AQ_INSTOP_WRITE) {
1027 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
1028 			ena = (req->rq.ena & req->rq_mask.ena) |
1029 				(test_bit(req->qidx, pfvf->rq_bmap) &
1030 				~req->rq_mask.ena);
1031 			if (ena)
1032 				__set_bit(req->qidx, pfvf->rq_bmap);
1033 			else
1034 				__clear_bit(req->qidx, pfvf->rq_bmap);
1035 		}
1036 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
1037 			ena = (req->rq.ena & req->sq_mask.ena) |
1038 				(test_bit(req->qidx, pfvf->sq_bmap) &
1039 				~req->sq_mask.ena);
1040 			if (ena)
1041 				__set_bit(req->qidx, pfvf->sq_bmap);
1042 			else
1043 				__clear_bit(req->qidx, pfvf->sq_bmap);
1044 		}
1045 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
1046 			ena = (req->rq.ena & req->cq_mask.ena) |
1047 				(test_bit(req->qidx, pfvf->cq_bmap) &
1048 				~req->cq_mask.ena);
1049 			if (ena)
1050 				__set_bit(req->qidx, pfvf->cq_bmap);
1051 			else
1052 				__clear_bit(req->qidx, pfvf->cq_bmap);
1053 		}
1054 	}
1055 
1056 	if (rsp) {
1057 		/* Copy read context into mailbox */
1058 		if (req->op == NIX_AQ_INSTOP_READ) {
1059 			if (req->ctype == NIX_AQ_CTYPE_RQ)
1060 				memcpy(&rsp->rq, ctx,
1061 				       sizeof(struct nix_rq_ctx_s));
1062 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
1063 				memcpy(&rsp->sq, ctx,
1064 				       sizeof(struct nix_sq_ctx_s));
1065 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
1066 				memcpy(&rsp->cq, ctx,
1067 				       sizeof(struct nix_cq_ctx_s));
1068 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
1069 				memcpy(&rsp->rss, ctx,
1070 				       sizeof(struct nix_rsse_s));
1071 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
1072 				memcpy(&rsp->mce, ctx,
1073 				       sizeof(struct nix_rx_mce_s));
1074 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
1075 				memcpy(&rsp->prof, ctx,
1076 				       sizeof(struct nix_bandprof_s));
1077 		}
1078 	}
1079 
1080 	spin_unlock(&aq->lock);
1081 	return 0;
1082 }
1083 
1084 static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
1085 				 struct nix_aq_enq_req *req, u8 ctype)
1086 {
1087 	struct nix_cn10k_aq_enq_req aq_req;
1088 	struct nix_cn10k_aq_enq_rsp aq_rsp;
1089 	int rc, word;
1090 
1091 	if (req->ctype != NIX_AQ_CTYPE_CQ)
1092 		return 0;
1093 
1094 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
1095 				 req->hdr.pcifunc, ctype, req->qidx);
1096 	if (rc) {
1097 		dev_err(rvu->dev,
1098 			"%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
1099 			__func__, nix_get_ctx_name(ctype), req->qidx,
1100 			req->hdr.pcifunc);
1101 		return rc;
1102 	}
1103 
1104 	/* Make copy of original context & mask which are required
1105 	 * for resubmission
1106 	 */
1107 	memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
1108 	memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
1109 
1110 	/* exclude fields which HW can update */
1111 	aq_req.cq_mask.cq_err       = 0;
1112 	aq_req.cq_mask.wrptr        = 0;
1113 	aq_req.cq_mask.tail         = 0;
1114 	aq_req.cq_mask.head	    = 0;
1115 	aq_req.cq_mask.avg_level    = 0;
1116 	aq_req.cq_mask.update_time  = 0;
1117 	aq_req.cq_mask.substream    = 0;
1118 
1119 	/* Context mask (cq_mask) holds mask value of fields which
1120 	 * are changed in AQ WRITE operation.
1121 	 * for example cq.drop = 0xa;
1122 	 *	       cq_mask.drop = 0xff;
1123 	 * Below logic performs '&' between cq and cq_mask so that non
1124 	 * updated fields are masked out for request and response
1125 	 * comparison
1126 	 */
1127 	for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
1128 	     word++) {
1129 		*(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
1130 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1131 		*(u64 *)((u8 *)&aq_req.cq + word * 8) &=
1132 			(*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
1133 	}
1134 
1135 	if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
1136 		return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
1137 
1138 	return 0;
1139 }
1140 
1141 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
1142 			       struct nix_aq_enq_rsp *rsp)
1143 {
1144 	struct nix_hw *nix_hw;
1145 	int err, retries = 5;
1146 	int blkaddr;
1147 
1148 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
1149 	if (blkaddr < 0)
1150 		return NIX_AF_ERR_AF_LF_INVALID;
1151 
1152 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
1153 	if (!nix_hw)
1154 		return NIX_AF_ERR_INVALID_NIXBLK;
1155 
1156 retry:
1157 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
1158 
1159 	/* HW errata 'AQ Modification to CQ could be discarded on heavy traffic'
1160 	 * As a work around perfrom CQ context read after each AQ write. If AQ
1161 	 * read shows AQ write is not updated perform AQ write again.
1162 	 */
1163 	if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
1164 		err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
1165 		if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
1166 			if (retries--)
1167 				goto retry;
1168 			else
1169 				return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
1170 		}
1171 	}
1172 
1173 	return err;
1174 }
1175 
1176 static const char *nix_get_ctx_name(int ctype)
1177 {
1178 	switch (ctype) {
1179 	case NIX_AQ_CTYPE_CQ:
1180 		return "CQ";
1181 	case NIX_AQ_CTYPE_SQ:
1182 		return "SQ";
1183 	case NIX_AQ_CTYPE_RQ:
1184 		return "RQ";
1185 	case NIX_AQ_CTYPE_RSS:
1186 		return "RSS";
1187 	}
1188 	return "";
1189 }
1190 
1191 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
1192 {
1193 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
1194 	struct nix_aq_enq_req aq_req;
1195 	unsigned long *bmap;
1196 	int qidx, q_cnt = 0;
1197 	int err = 0, rc;
1198 
1199 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
1200 		return NIX_AF_ERR_AQ_ENQUEUE;
1201 
1202 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
1203 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
1204 
1205 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
1206 		aq_req.cq.ena = 0;
1207 		aq_req.cq_mask.ena = 1;
1208 		aq_req.cq.bp_ena = 0;
1209 		aq_req.cq_mask.bp_ena = 1;
1210 		q_cnt = pfvf->cq_ctx->qsize;
1211 		bmap = pfvf->cq_bmap;
1212 	}
1213 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
1214 		aq_req.sq.ena = 0;
1215 		aq_req.sq_mask.ena = 1;
1216 		q_cnt = pfvf->sq_ctx->qsize;
1217 		bmap = pfvf->sq_bmap;
1218 	}
1219 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
1220 		aq_req.rq.ena = 0;
1221 		aq_req.rq_mask.ena = 1;
1222 		q_cnt = pfvf->rq_ctx->qsize;
1223 		bmap = pfvf->rq_bmap;
1224 	}
1225 
1226 	aq_req.ctype = req->ctype;
1227 	aq_req.op = NIX_AQ_INSTOP_WRITE;
1228 
1229 	for (qidx = 0; qidx < q_cnt; qidx++) {
1230 		if (!test_bit(qidx, bmap))
1231 			continue;
1232 		aq_req.qidx = qidx;
1233 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1234 		if (rc) {
1235 			err = rc;
1236 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1237 				nix_get_ctx_name(req->ctype), qidx);
1238 		}
1239 	}
1240 
1241 	return err;
1242 }
1243 
1244 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1245 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1246 {
1247 	struct nix_aq_enq_req lock_ctx_req;
1248 	int err;
1249 
1250 	if (req->op != NIX_AQ_INSTOP_INIT)
1251 		return 0;
1252 
1253 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1254 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1255 		return 0;
1256 
1257 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1258 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1259 	lock_ctx_req.ctype = req->ctype;
1260 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1261 	lock_ctx_req.qidx = req->qidx;
1262 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1263 	if (err)
1264 		dev_err(rvu->dev,
1265 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1266 			req->hdr.pcifunc,
1267 			nix_get_ctx_name(req->ctype), req->qidx);
1268 	return err;
1269 }
1270 
1271 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1272 				struct nix_aq_enq_req *req,
1273 				struct nix_aq_enq_rsp *rsp)
1274 {
1275 	int err;
1276 
1277 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1278 	if (!err)
1279 		err = nix_lf_hwctx_lockdown(rvu, req);
1280 	return err;
1281 }
1282 #else
1283 
1284 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1285 				struct nix_aq_enq_req *req,
1286 				struct nix_aq_enq_rsp *rsp)
1287 {
1288 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1289 }
1290 #endif
1291 /* CN10K mbox handler */
1292 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1293 				      struct nix_cn10k_aq_enq_req *req,
1294 				      struct nix_cn10k_aq_enq_rsp *rsp)
1295 {
1296 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1297 				  (struct nix_aq_enq_rsp *)rsp);
1298 }
1299 
1300 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1301 				       struct hwctx_disable_req *req,
1302 				       struct msg_rsp *rsp)
1303 {
1304 	return nix_lf_hwctx_disable(rvu, req);
1305 }
1306 
1307 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1308 				  struct nix_lf_alloc_req *req,
1309 				  struct nix_lf_alloc_rsp *rsp)
1310 {
1311 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1312 	struct rvu_hwinfo *hw = rvu->hw;
1313 	u16 pcifunc = req->hdr.pcifunc;
1314 	struct rvu_block *block;
1315 	struct rvu_pfvf *pfvf;
1316 	u64 cfg, ctx_cfg;
1317 	int blkaddr;
1318 
1319 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1320 		return NIX_AF_ERR_PARAM;
1321 
1322 	if (req->way_mask)
1323 		req->way_mask &= 0xFFFF;
1324 
1325 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1326 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1327 	if (!pfvf->nixlf || blkaddr < 0)
1328 		return NIX_AF_ERR_AF_LF_INVALID;
1329 
1330 	block = &hw->block[blkaddr];
1331 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1332 	if (nixlf < 0)
1333 		return NIX_AF_ERR_AF_LF_INVALID;
1334 
1335 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1336 	if (req->npa_func) {
1337 		/* If default, use 'this' NIXLF's PFFUNC */
1338 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1339 			req->npa_func = pcifunc;
1340 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1341 			return NIX_AF_INVAL_NPA_PF_FUNC;
1342 	}
1343 
1344 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1345 	if (req->sso_func) {
1346 		/* If default, use 'this' NIXLF's PFFUNC */
1347 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1348 			req->sso_func = pcifunc;
1349 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1350 			return NIX_AF_INVAL_SSO_PF_FUNC;
1351 	}
1352 
1353 	/* If RSS is being enabled, check if requested config is valid.
1354 	 * RSS table size should be power of two, otherwise
1355 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1356 	 * won't be able to use entire table.
1357 	 */
1358 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1359 			    !is_power_of_2(req->rss_sz)))
1360 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1361 
1362 	if (req->rss_sz &&
1363 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1364 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1365 
1366 	/* Reset this NIX LF */
1367 	err = rvu_lf_reset(rvu, block, nixlf);
1368 	if (err) {
1369 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1370 			block->addr - BLKADDR_NIX0, nixlf);
1371 		return NIX_AF_ERR_LF_RESET;
1372 	}
1373 
1374 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1375 
1376 	/* Alloc NIX RQ HW context memory and config the base */
1377 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1378 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1379 	if (err)
1380 		goto free_mem;
1381 
1382 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1383 	if (!pfvf->rq_bmap)
1384 		goto free_mem;
1385 
1386 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1387 		    (u64)pfvf->rq_ctx->iova);
1388 
1389 	/* Set caching and queue count in HW */
1390 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1391 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1392 
1393 	/* Alloc NIX SQ HW context memory and config the base */
1394 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1395 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1396 	if (err)
1397 		goto free_mem;
1398 
1399 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1400 	if (!pfvf->sq_bmap)
1401 		goto free_mem;
1402 
1403 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1404 		    (u64)pfvf->sq_ctx->iova);
1405 
1406 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1407 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1408 
1409 	/* Alloc NIX CQ HW context memory and config the base */
1410 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1411 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1412 	if (err)
1413 		goto free_mem;
1414 
1415 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1416 	if (!pfvf->cq_bmap)
1417 		goto free_mem;
1418 
1419 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1420 		    (u64)pfvf->cq_ctx->iova);
1421 
1422 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1423 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1424 
1425 	/* Initialize receive side scaling (RSS) */
1426 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1427 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1428 				 req->rss_grps, hwctx_size, req->way_mask,
1429 				 !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER));
1430 	if (err)
1431 		goto free_mem;
1432 
1433 	/* Alloc memory for CQINT's HW contexts */
1434 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1435 	qints = (cfg >> 24) & 0xFFF;
1436 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1437 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1438 	if (err)
1439 		goto free_mem;
1440 
1441 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1442 		    (u64)pfvf->cq_ints_ctx->iova);
1443 
1444 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1445 		    BIT_ULL(36) | req->way_mask << 20);
1446 
1447 	/* Alloc memory for QINT's HW contexts */
1448 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1449 	qints = (cfg >> 12) & 0xFFF;
1450 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1451 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1452 	if (err)
1453 		goto free_mem;
1454 
1455 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1456 		    (u64)pfvf->nix_qints_ctx->iova);
1457 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1458 		    BIT_ULL(36) | req->way_mask << 20);
1459 
1460 	/* Setup VLANX TPID's.
1461 	 * Use VLAN1 for 802.1Q
1462 	 * and VLAN0 for 802.1AD.
1463 	 */
1464 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1465 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1466 
1467 	/* Enable LMTST for this NIX LF */
1468 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1469 
1470 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1471 	if (req->npa_func)
1472 		cfg = req->npa_func;
1473 	if (req->sso_func)
1474 		cfg |= (u64)req->sso_func << 16;
1475 
1476 	cfg |= (u64)req->xqe_sz << 33;
1477 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1478 
1479 	/* Config Rx pkt length, csum checks and apad  enable / disable */
1480 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1481 
1482 	/* Configure pkind for TX parse config */
1483 	cfg = NPC_TX_DEF_PKIND;
1484 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1485 
1486 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1487 	if (is_sdp_pfvf(pcifunc))
1488 		intf = NIX_INTF_TYPE_SDP;
1489 
1490 	err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
1491 				 !!(req->flags & NIX_LF_LBK_BLK_SEL));
1492 	if (err)
1493 		goto free_mem;
1494 
1495 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1496 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1497 
1498 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1499 	rvu_write64(rvu, blkaddr,
1500 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1501 		    VTAGSIZE_T4 | VTAG_STRIP);
1502 
1503 	goto exit;
1504 
1505 free_mem:
1506 	nix_ctx_free(rvu, pfvf);
1507 	rc = -ENOMEM;
1508 
1509 exit:
1510 	/* Set macaddr of this PF/VF */
1511 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1512 
1513 	/* set SQB size info */
1514 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1515 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1516 	rsp->rx_chan_base = pfvf->rx_chan_base;
1517 	rsp->tx_chan_base = pfvf->tx_chan_base;
1518 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1519 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1520 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1521 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1522 	/* Get HW supported stat count */
1523 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1524 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1525 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1526 	/* Get count of CQ IRQs and error IRQs supported per LF */
1527 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1528 	rsp->qints = ((cfg >> 12) & 0xFFF);
1529 	rsp->cints = ((cfg >> 24) & 0xFFF);
1530 	rsp->cgx_links = hw->cgx_links;
1531 	rsp->lbk_links = hw->lbk_links;
1532 	rsp->sdp_links = hw->sdp_links;
1533 
1534 	return rc;
1535 }
1536 
1537 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1538 				 struct msg_rsp *rsp)
1539 {
1540 	struct rvu_hwinfo *hw = rvu->hw;
1541 	u16 pcifunc = req->hdr.pcifunc;
1542 	struct rvu_block *block;
1543 	int blkaddr, nixlf, err;
1544 	struct rvu_pfvf *pfvf;
1545 
1546 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1547 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1548 	if (!pfvf->nixlf || blkaddr < 0)
1549 		return NIX_AF_ERR_AF_LF_INVALID;
1550 
1551 	block = &hw->block[blkaddr];
1552 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1553 	if (nixlf < 0)
1554 		return NIX_AF_ERR_AF_LF_INVALID;
1555 
1556 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1557 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1558 	else
1559 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1560 
1561 	/* Free any tx vtag def entries used by this NIX LF */
1562 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1563 		nix_free_tx_vtag_entries(rvu, pcifunc);
1564 
1565 	nix_interface_deinit(rvu, pcifunc, nixlf);
1566 
1567 	/* Reset this NIX LF */
1568 	err = rvu_lf_reset(rvu, block, nixlf);
1569 	if (err) {
1570 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1571 			block->addr - BLKADDR_NIX0, nixlf);
1572 		return NIX_AF_ERR_LF_RESET;
1573 	}
1574 
1575 	nix_ctx_free(rvu, pfvf);
1576 
1577 	return 0;
1578 }
1579 
1580 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1581 					 struct nix_mark_format_cfg  *req,
1582 					 struct nix_mark_format_cfg_rsp *rsp)
1583 {
1584 	u16 pcifunc = req->hdr.pcifunc;
1585 	struct nix_hw *nix_hw;
1586 	struct rvu_pfvf *pfvf;
1587 	int blkaddr, rc;
1588 	u32 cfg;
1589 
1590 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1591 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1592 	if (!pfvf->nixlf || blkaddr < 0)
1593 		return NIX_AF_ERR_AF_LF_INVALID;
1594 
1595 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1596 	if (!nix_hw)
1597 		return NIX_AF_ERR_INVALID_NIXBLK;
1598 
1599 	cfg = (((u32)req->offset & 0x7) << 16) |
1600 	      (((u32)req->y_mask & 0xF) << 12) |
1601 	      (((u32)req->y_val & 0xF) << 8) |
1602 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1603 
1604 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1605 	if (rc < 0) {
1606 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1607 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1608 		return NIX_AF_ERR_MARK_CFG_FAIL;
1609 	}
1610 
1611 	rsp->mark_format_idx = rc;
1612 	return 0;
1613 }
1614 
1615 /* Handle shaper update specially for few revisions */
1616 static bool
1617 handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf,
1618 			    int lvl, u64 reg, u64 regval)
1619 {
1620 	u64 regbase, oldval, sw_xoff = 0;
1621 	u64 dbgval, md_debug0 = 0;
1622 	unsigned long poll_tmo;
1623 	bool rate_reg = 0;
1624 	u32 schq;
1625 
1626 	regbase = reg & 0xFFFF;
1627 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1628 
1629 	/* Check for rate register */
1630 	switch (lvl) {
1631 	case NIX_TXSCH_LVL_TL1:
1632 		md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq);
1633 		sw_xoff = NIX_AF_TL1X_SW_XOFF(schq);
1634 
1635 		rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0));
1636 		break;
1637 	case NIX_TXSCH_LVL_TL2:
1638 		md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq);
1639 		sw_xoff = NIX_AF_TL2X_SW_XOFF(schq);
1640 
1641 		rate_reg = (regbase == NIX_AF_TL2X_CIR(0) ||
1642 			    regbase == NIX_AF_TL2X_PIR(0));
1643 		break;
1644 	case NIX_TXSCH_LVL_TL3:
1645 		md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq);
1646 		sw_xoff = NIX_AF_TL3X_SW_XOFF(schq);
1647 
1648 		rate_reg = (regbase == NIX_AF_TL3X_CIR(0) ||
1649 			    regbase == NIX_AF_TL3X_PIR(0));
1650 		break;
1651 	case NIX_TXSCH_LVL_TL4:
1652 		md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq);
1653 		sw_xoff = NIX_AF_TL4X_SW_XOFF(schq);
1654 
1655 		rate_reg = (regbase == NIX_AF_TL4X_CIR(0) ||
1656 			    regbase == NIX_AF_TL4X_PIR(0));
1657 		break;
1658 	case NIX_TXSCH_LVL_MDQ:
1659 		sw_xoff = NIX_AF_MDQX_SW_XOFF(schq);
1660 		rate_reg = (regbase == NIX_AF_MDQX_CIR(0) ||
1661 			    regbase == NIX_AF_MDQX_PIR(0));
1662 		break;
1663 	}
1664 
1665 	if (!rate_reg)
1666 		return false;
1667 
1668 	/* Nothing special to do when state is not toggled */
1669 	oldval = rvu_read64(rvu, blkaddr, reg);
1670 	if ((oldval & 0x1) == (regval & 0x1)) {
1671 		rvu_write64(rvu, blkaddr, reg, regval);
1672 		return true;
1673 	}
1674 
1675 	/* PIR/CIR disable */
1676 	if (!(regval & 0x1)) {
1677 		rvu_write64(rvu, blkaddr, sw_xoff, 1);
1678 		rvu_write64(rvu, blkaddr, reg, 0);
1679 		udelay(4);
1680 		rvu_write64(rvu, blkaddr, sw_xoff, 0);
1681 		return true;
1682 	}
1683 
1684 	/* PIR/CIR enable */
1685 	rvu_write64(rvu, blkaddr, sw_xoff, 1);
1686 	if (md_debug0) {
1687 		poll_tmo = jiffies + usecs_to_jiffies(10000);
1688 		/* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */
1689 		do {
1690 			if (time_after(jiffies, poll_tmo)) {
1691 				dev_err(rvu->dev,
1692 					"NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n",
1693 					nixlf, schq, lvl);
1694 				goto exit;
1695 			}
1696 			usleep_range(1, 5);
1697 			dbgval = rvu_read64(rvu, blkaddr, md_debug0);
1698 		} while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48)));
1699 	}
1700 	rvu_write64(rvu, blkaddr, reg, regval);
1701 exit:
1702 	rvu_write64(rvu, blkaddr, sw_xoff, 0);
1703 	return true;
1704 }
1705 
1706 static void nix_reset_tx_schedule(struct rvu *rvu, int blkaddr,
1707 				  int lvl, int schq)
1708 {
1709 	u64 tlx_parent = 0, tlx_schedule = 0;
1710 
1711 	switch (lvl) {
1712 	case NIX_TXSCH_LVL_TL2:
1713 		tlx_parent   = NIX_AF_TL2X_PARENT(schq);
1714 		tlx_schedule = NIX_AF_TL2X_SCHEDULE(schq);
1715 		break;
1716 	case NIX_TXSCH_LVL_TL3:
1717 		tlx_parent   = NIX_AF_TL3X_PARENT(schq);
1718 		tlx_schedule = NIX_AF_TL3X_SCHEDULE(schq);
1719 		break;
1720 	case NIX_TXSCH_LVL_TL4:
1721 		tlx_parent   = NIX_AF_TL4X_PARENT(schq);
1722 		tlx_schedule = NIX_AF_TL4X_SCHEDULE(schq);
1723 		break;
1724 	case NIX_TXSCH_LVL_MDQ:
1725 		/* no need to reset SMQ_CFG as HW clears this CSR
1726 		 * on SMQ flush
1727 		 */
1728 		tlx_parent   = NIX_AF_MDQX_PARENT(schq);
1729 		tlx_schedule = NIX_AF_MDQX_SCHEDULE(schq);
1730 		break;
1731 	default:
1732 		return;
1733 	}
1734 
1735 	if (tlx_parent)
1736 		rvu_write64(rvu, blkaddr, tlx_parent, 0x0);
1737 
1738 	if (tlx_schedule)
1739 		rvu_write64(rvu, blkaddr, tlx_schedule, 0x0);
1740 }
1741 
1742 /* Disable shaping of pkts by a scheduler queue
1743  * at a given scheduler level.
1744  */
1745 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1746 				 int nixlf, int lvl, int schq)
1747 {
1748 	struct rvu_hwinfo *hw = rvu->hw;
1749 	u64  cir_reg = 0, pir_reg = 0;
1750 	u64  cfg;
1751 
1752 	switch (lvl) {
1753 	case NIX_TXSCH_LVL_TL1:
1754 		cir_reg = NIX_AF_TL1X_CIR(schq);
1755 		pir_reg = 0; /* PIR not available at TL1 */
1756 		break;
1757 	case NIX_TXSCH_LVL_TL2:
1758 		cir_reg = NIX_AF_TL2X_CIR(schq);
1759 		pir_reg = NIX_AF_TL2X_PIR(schq);
1760 		break;
1761 	case NIX_TXSCH_LVL_TL3:
1762 		cir_reg = NIX_AF_TL3X_CIR(schq);
1763 		pir_reg = NIX_AF_TL3X_PIR(schq);
1764 		break;
1765 	case NIX_TXSCH_LVL_TL4:
1766 		cir_reg = NIX_AF_TL4X_CIR(schq);
1767 		pir_reg = NIX_AF_TL4X_PIR(schq);
1768 		break;
1769 	case NIX_TXSCH_LVL_MDQ:
1770 		cir_reg = NIX_AF_MDQX_CIR(schq);
1771 		pir_reg = NIX_AF_MDQX_PIR(schq);
1772 		break;
1773 	}
1774 
1775 	/* Shaper state toggle needs wait/poll */
1776 	if (hw->cap.nix_shaper_toggle_wait) {
1777 		if (cir_reg)
1778 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1779 						    lvl, cir_reg, 0);
1780 		if (pir_reg)
1781 			handle_txschq_shaper_update(rvu, blkaddr, nixlf,
1782 						    lvl, pir_reg, 0);
1783 		return;
1784 	}
1785 
1786 	if (!cir_reg)
1787 		return;
1788 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1789 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1790 
1791 	if (!pir_reg)
1792 		return;
1793 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1794 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1795 }
1796 
1797 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1798 				 int lvl, int schq)
1799 {
1800 	struct rvu_hwinfo *hw = rvu->hw;
1801 	int link_level;
1802 	int link;
1803 
1804 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1805 		return;
1806 
1807 	/* Reset TL4's SDP link config */
1808 	if (lvl == NIX_TXSCH_LVL_TL4)
1809 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1810 
1811 	link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1812 			NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1813 	if (lvl != link_level)
1814 		return;
1815 
1816 	/* Reset TL2's CGX or LBK link config */
1817 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1818 		rvu_write64(rvu, blkaddr,
1819 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1820 }
1821 
1822 static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr,
1823 			      int lvl, int schq)
1824 {
1825 	struct rvu_hwinfo *hw = rvu->hw;
1826 	u64 reg;
1827 
1828 	/* Skip this if shaping is not supported */
1829 	if (!hw->cap.nix_shaping)
1830 		return;
1831 
1832 	/* Clear level specific SW_XOFF */
1833 	switch (lvl) {
1834 	case NIX_TXSCH_LVL_TL1:
1835 		reg = NIX_AF_TL1X_SW_XOFF(schq);
1836 		break;
1837 	case NIX_TXSCH_LVL_TL2:
1838 		reg = NIX_AF_TL2X_SW_XOFF(schq);
1839 		break;
1840 	case NIX_TXSCH_LVL_TL3:
1841 		reg = NIX_AF_TL3X_SW_XOFF(schq);
1842 		break;
1843 	case NIX_TXSCH_LVL_TL4:
1844 		reg = NIX_AF_TL4X_SW_XOFF(schq);
1845 		break;
1846 	case NIX_TXSCH_LVL_MDQ:
1847 		reg = NIX_AF_MDQX_SW_XOFF(schq);
1848 		break;
1849 	default:
1850 		return;
1851 	}
1852 
1853 	rvu_write64(rvu, blkaddr, reg, 0x0);
1854 }
1855 
1856 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1857 {
1858 	struct rvu_hwinfo *hw = rvu->hw;
1859 	int pf = rvu_get_pf(pcifunc);
1860 	u8 cgx_id = 0, lmac_id = 0;
1861 
1862 	if (is_afvf(pcifunc)) {/* LBK links */
1863 		return hw->cgx_links;
1864 	} else if (is_pf_cgxmapped(rvu, pf)) {
1865 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1866 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1867 	}
1868 
1869 	/* SDP link */
1870 	return hw->cgx_links + hw->lbk_links;
1871 }
1872 
1873 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1874 				 int link, int *start, int *end)
1875 {
1876 	struct rvu_hwinfo *hw = rvu->hw;
1877 	int pf = rvu_get_pf(pcifunc);
1878 
1879 	if (is_afvf(pcifunc)) { /* LBK links */
1880 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1881 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1882 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1883 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1884 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1885 	} else { /* SDP link */
1886 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1887 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1888 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1889 	}
1890 }
1891 
1892 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1893 				      struct nix_hw *nix_hw,
1894 				      struct nix_txsch_alloc_req *req)
1895 {
1896 	struct rvu_hwinfo *hw = rvu->hw;
1897 	int schq, req_schq, free_cnt;
1898 	struct nix_txsch *txsch;
1899 	int link, start, end;
1900 
1901 	txsch = &nix_hw->txsch[lvl];
1902 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1903 
1904 	if (!req_schq)
1905 		return 0;
1906 
1907 	link = nix_get_tx_link(rvu, pcifunc);
1908 
1909 	/* For traffic aggregating scheduler level, one queue is enough */
1910 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1911 		if (req_schq != 1)
1912 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1913 		return 0;
1914 	}
1915 
1916 	/* Get free SCHQ count and check if request can be accomodated */
1917 	if (hw->cap.nix_fixed_txschq_mapping) {
1918 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1919 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1920 		if (end <= txsch->schq.max && schq < end &&
1921 		    !test_bit(schq, txsch->schq.bmap))
1922 			free_cnt = 1;
1923 		else
1924 			free_cnt = 0;
1925 	} else {
1926 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1927 	}
1928 
1929 	if (free_cnt < req_schq || req->schq[lvl] > MAX_TXSCHQ_PER_FUNC ||
1930 	    req->schq_contig[lvl] > MAX_TXSCHQ_PER_FUNC)
1931 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1932 
1933 	/* If contiguous queues are needed, check for availability */
1934 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1935 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1936 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1937 
1938 	return 0;
1939 }
1940 
1941 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1942 			    struct nix_txsch_alloc_rsp *rsp,
1943 			    int lvl, int start, int end)
1944 {
1945 	struct rvu_hwinfo *hw = rvu->hw;
1946 	u16 pcifunc = rsp->hdr.pcifunc;
1947 	int idx, schq;
1948 
1949 	/* For traffic aggregating levels, queue alloc is based
1950 	 * on transmit link to which PF_FUNC is mapped to.
1951 	 */
1952 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1953 		/* A single TL queue is allocated */
1954 		if (rsp->schq_contig[lvl]) {
1955 			rsp->schq_contig[lvl] = 1;
1956 			rsp->schq_contig_list[lvl][0] = start;
1957 		}
1958 
1959 		/* Both contig and non-contig reqs doesn't make sense here */
1960 		if (rsp->schq_contig[lvl])
1961 			rsp->schq[lvl] = 0;
1962 
1963 		if (rsp->schq[lvl]) {
1964 			rsp->schq[lvl] = 1;
1965 			rsp->schq_list[lvl][0] = start;
1966 		}
1967 		return;
1968 	}
1969 
1970 	/* Adjust the queue request count if HW supports
1971 	 * only one queue per level configuration.
1972 	 */
1973 	if (hw->cap.nix_fixed_txschq_mapping) {
1974 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1975 		schq = start + idx;
1976 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1977 			rsp->schq_contig[lvl] = 0;
1978 			rsp->schq[lvl] = 0;
1979 			return;
1980 		}
1981 
1982 		if (rsp->schq_contig[lvl]) {
1983 			rsp->schq_contig[lvl] = 1;
1984 			set_bit(schq, txsch->schq.bmap);
1985 			rsp->schq_contig_list[lvl][0] = schq;
1986 			rsp->schq[lvl] = 0;
1987 		} else if (rsp->schq[lvl]) {
1988 			rsp->schq[lvl] = 1;
1989 			set_bit(schq, txsch->schq.bmap);
1990 			rsp->schq_list[lvl][0] = schq;
1991 		}
1992 		return;
1993 	}
1994 
1995 	/* Allocate contiguous queue indices requesty first */
1996 	if (rsp->schq_contig[lvl]) {
1997 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1998 						  txsch->schq.max, start,
1999 						  rsp->schq_contig[lvl], 0);
2000 		if (schq >= end)
2001 			rsp->schq_contig[lvl] = 0;
2002 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
2003 			set_bit(schq, txsch->schq.bmap);
2004 			rsp->schq_contig_list[lvl][idx] = schq;
2005 			schq++;
2006 		}
2007 	}
2008 
2009 	/* Allocate non-contiguous queue indices */
2010 	if (rsp->schq[lvl]) {
2011 		idx = 0;
2012 		for (schq = start; schq < end; schq++) {
2013 			if (!test_bit(schq, txsch->schq.bmap)) {
2014 				set_bit(schq, txsch->schq.bmap);
2015 				rsp->schq_list[lvl][idx++] = schq;
2016 			}
2017 			if (idx == rsp->schq[lvl])
2018 				break;
2019 		}
2020 		/* Update how many were allocated */
2021 		rsp->schq[lvl] = idx;
2022 	}
2023 }
2024 
2025 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
2026 				     struct nix_txsch_alloc_req *req,
2027 				     struct nix_txsch_alloc_rsp *rsp)
2028 {
2029 	struct rvu_hwinfo *hw = rvu->hw;
2030 	u16 pcifunc = req->hdr.pcifunc;
2031 	int link, blkaddr, rc = 0;
2032 	int lvl, idx, start, end;
2033 	struct nix_txsch *txsch;
2034 	struct nix_hw *nix_hw;
2035 	u32 *pfvf_map;
2036 	int nixlf;
2037 	u16 schq;
2038 
2039 	rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2040 	if (rc)
2041 		return rc;
2042 
2043 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2044 	if (!nix_hw)
2045 		return NIX_AF_ERR_INVALID_NIXBLK;
2046 
2047 	mutex_lock(&rvu->rsrc_lock);
2048 
2049 	/* Check if request is valid as per HW capabilities
2050 	 * and can be accomodated.
2051 	 */
2052 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2053 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
2054 		if (rc)
2055 			goto err;
2056 	}
2057 
2058 	/* Allocate requested Tx scheduler queues */
2059 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2060 		txsch = &nix_hw->txsch[lvl];
2061 		pfvf_map = txsch->pfvf_map;
2062 
2063 		if (!req->schq[lvl] && !req->schq_contig[lvl])
2064 			continue;
2065 
2066 		rsp->schq[lvl] = req->schq[lvl];
2067 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
2068 
2069 		link = nix_get_tx_link(rvu, pcifunc);
2070 
2071 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
2072 			start = link;
2073 			end = link;
2074 		} else if (hw->cap.nix_fixed_txschq_mapping) {
2075 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
2076 		} else {
2077 			start = 0;
2078 			end = txsch->schq.max;
2079 		}
2080 
2081 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
2082 
2083 		/* Reset queue config */
2084 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
2085 			schq = rsp->schq_contig_list[lvl][idx];
2086 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2087 			    NIX_TXSCHQ_CFG_DONE))
2088 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2089 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2090 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2091 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2092 		}
2093 
2094 		for (idx = 0; idx < req->schq[lvl]; idx++) {
2095 			schq = rsp->schq_list[lvl][idx];
2096 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
2097 			    NIX_TXSCHQ_CFG_DONE))
2098 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
2099 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2100 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2101 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2102 		}
2103 	}
2104 
2105 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
2106 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
2107 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
2108 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
2109 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
2110 	goto exit;
2111 err:
2112 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
2113 exit:
2114 	mutex_unlock(&rvu->rsrc_lock);
2115 	return rc;
2116 }
2117 
2118 static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
2119 				   struct nix_smq_flush_ctx *smq_flush_ctx)
2120 {
2121 	struct nix_smq_tree_ctx *smq_tree_ctx;
2122 	u64 parent_off, regval;
2123 	u16 schq;
2124 	int lvl;
2125 
2126 	smq_flush_ctx->smq = smq;
2127 
2128 	schq = smq;
2129 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2130 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2131 		if (lvl == NIX_TXSCH_LVL_TL1) {
2132 			smq_flush_ctx->tl1_schq = schq;
2133 			smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
2134 			smq_tree_ctx->pir_off = 0;
2135 			smq_tree_ctx->pir_val = 0;
2136 			parent_off = 0;
2137 		} else if (lvl == NIX_TXSCH_LVL_TL2) {
2138 			smq_flush_ctx->tl2_schq = schq;
2139 			smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
2140 			smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
2141 			parent_off = NIX_AF_TL2X_PARENT(schq);
2142 		} else if (lvl == NIX_TXSCH_LVL_TL3) {
2143 			smq_tree_ctx->cir_off = NIX_AF_TL3X_CIR(schq);
2144 			smq_tree_ctx->pir_off = NIX_AF_TL3X_PIR(schq);
2145 			parent_off = NIX_AF_TL3X_PARENT(schq);
2146 		} else if (lvl == NIX_TXSCH_LVL_TL4) {
2147 			smq_tree_ctx->cir_off = NIX_AF_TL4X_CIR(schq);
2148 			smq_tree_ctx->pir_off = NIX_AF_TL4X_PIR(schq);
2149 			parent_off = NIX_AF_TL4X_PARENT(schq);
2150 		} else if (lvl == NIX_TXSCH_LVL_MDQ) {
2151 			smq_tree_ctx->cir_off = NIX_AF_MDQX_CIR(schq);
2152 			smq_tree_ctx->pir_off = NIX_AF_MDQX_PIR(schq);
2153 			parent_off = NIX_AF_MDQX_PARENT(schq);
2154 		}
2155 		/* save cir/pir register values */
2156 		smq_tree_ctx->cir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->cir_off);
2157 		if (smq_tree_ctx->pir_off)
2158 			smq_tree_ctx->pir_val = rvu_read64(rvu, blkaddr, smq_tree_ctx->pir_off);
2159 
2160 		/* get parent txsch node */
2161 		if (parent_off) {
2162 			regval = rvu_read64(rvu, blkaddr, parent_off);
2163 			schq = (regval >> 16) & 0x1FF;
2164 		}
2165 	}
2166 }
2167 
2168 static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
2169 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2170 {
2171 	struct nix_txsch *txsch;
2172 	struct nix_hw *nix_hw;
2173 	u64 regoff;
2174 	int tl2;
2175 
2176 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2177 	if (!nix_hw)
2178 		return;
2179 
2180 	/* loop through all TL2s with matching PF_FUNC */
2181 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
2182 	for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
2183 		/* skip the smq(flush) TL2 */
2184 		if (tl2 == smq_flush_ctx->tl2_schq)
2185 			continue;
2186 		/* skip unused TL2s */
2187 		if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
2188 			continue;
2189 		/* skip if PF_FUNC doesn't match */
2190 		if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
2191 		    (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
2192 				    ~RVU_PFVF_FUNC_MASK)))
2193 			continue;
2194 		/* enable/disable XOFF */
2195 		regoff = NIX_AF_TL2X_SW_XOFF(tl2);
2196 		if (enable)
2197 			rvu_write64(rvu, blkaddr, regoff, 0x1);
2198 		else
2199 			rvu_write64(rvu, blkaddr, regoff, 0x0);
2200 	}
2201 }
2202 
2203 static void nix_smq_flush_enadis_rate(struct rvu *rvu, int blkaddr,
2204 				      struct nix_smq_flush_ctx *smq_flush_ctx, bool enable)
2205 {
2206 	u64 cir_off, pir_off, cir_val, pir_val;
2207 	struct nix_smq_tree_ctx *smq_tree_ctx;
2208 	int lvl;
2209 
2210 	for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
2211 		smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
2212 		cir_off = smq_tree_ctx->cir_off;
2213 		cir_val = smq_tree_ctx->cir_val;
2214 		pir_off = smq_tree_ctx->pir_off;
2215 		pir_val = smq_tree_ctx->pir_val;
2216 
2217 		if (enable) {
2218 			rvu_write64(rvu, blkaddr, cir_off, cir_val);
2219 			if (lvl != NIX_TXSCH_LVL_TL1)
2220 				rvu_write64(rvu, blkaddr, pir_off, pir_val);
2221 		} else {
2222 			rvu_write64(rvu, blkaddr, cir_off, 0x0);
2223 			if (lvl != NIX_TXSCH_LVL_TL1)
2224 				rvu_write64(rvu, blkaddr, pir_off, 0x0);
2225 		}
2226 	}
2227 }
2228 
2229 static int nix_smq_flush(struct rvu *rvu, int blkaddr,
2230 			 int smq, u16 pcifunc, int nixlf)
2231 {
2232 	struct nix_smq_flush_ctx *smq_flush_ctx;
2233 	int pf = rvu_get_pf(pcifunc);
2234 	u8 cgx_id = 0, lmac_id = 0;
2235 	int err, restore_tx_en = 0;
2236 	u64 cfg;
2237 
2238 	if (!is_rvu_otx2(rvu)) {
2239 		/* Skip SMQ flush if pkt count is zero */
2240 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
2241 		if (!cfg)
2242 			return 0;
2243 	}
2244 
2245 	/* enable cgx tx if disabled */
2246 	if (is_pf_cgxmapped(rvu, pf)) {
2247 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
2248 		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
2249 						   lmac_id, true);
2250 	}
2251 
2252 	/* XOFF all TL2s whose parent TL1 matches SMQ tree TL1 */
2253 	smq_flush_ctx = kzalloc(sizeof(*smq_flush_ctx), GFP_KERNEL);
2254 	if (!smq_flush_ctx)
2255 		return -ENOMEM;
2256 	nix_smq_flush_fill_ctx(rvu, blkaddr, smq, smq_flush_ctx);
2257 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
2258 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
2259 
2260 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
2261 	/* Do SMQ flush and set enqueue xoff */
2262 	cfg |= BIT_ULL(50) | BIT_ULL(49);
2263 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
2264 
2265 	/* Disable backpressure from physical link,
2266 	 * otherwise SMQ flush may stall.
2267 	 */
2268 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
2269 
2270 	/* Wait for flush to complete */
2271 	err = rvu_poll_reg(rvu, blkaddr,
2272 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
2273 	if (err)
2274 		dev_info(rvu->dev,
2275 			 "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
2276 			 nixlf, smq);
2277 
2278 	/* clear XOFF on TL2s */
2279 	nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
2280 	nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
2281 	kfree(smq_flush_ctx);
2282 
2283 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
2284 	/* restore cgx tx state */
2285 	if (restore_tx_en)
2286 		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
2287 	return err;
2288 }
2289 
2290 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
2291 {
2292 	int blkaddr, nixlf, lvl, schq, err;
2293 	struct rvu_hwinfo *hw = rvu->hw;
2294 	struct nix_txsch *txsch;
2295 	struct nix_hw *nix_hw;
2296 	u16 map_func;
2297 
2298 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2299 	if (blkaddr < 0)
2300 		return NIX_AF_ERR_AF_LF_INVALID;
2301 
2302 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2303 	if (!nix_hw)
2304 		return NIX_AF_ERR_INVALID_NIXBLK;
2305 
2306 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2307 	if (nixlf < 0)
2308 		return NIX_AF_ERR_AF_LF_INVALID;
2309 
2310 	/* Disable TL2/3 queue links and all XOFF's before SMQ flush*/
2311 	mutex_lock(&rvu->rsrc_lock);
2312 	for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2313 		txsch = &nix_hw->txsch[lvl];
2314 
2315 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2316 			continue;
2317 
2318 		for (schq = 0; schq < txsch->schq.max; schq++) {
2319 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2320 				continue;
2321 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2322 			nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2323 			nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2324 		}
2325 	}
2326 	nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1,
2327 			  nix_get_tx_link(rvu, pcifunc));
2328 
2329 	/* On PF cleanup, clear cfg done flag as
2330 	 * PF would have changed default config.
2331 	 */
2332 	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
2333 		txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
2334 		schq = nix_get_tx_link(rvu, pcifunc);
2335 		/* Do not clear pcifunc in txsch->pfvf_map[schq] because
2336 		 * VF might be using this TL1 queue
2337 		 */
2338 		map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
2339 		txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0);
2340 	}
2341 
2342 	/* Flush SMQs */
2343 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
2344 	for (schq = 0; schq < txsch->schq.max; schq++) {
2345 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2346 			continue;
2347 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2348 	}
2349 
2350 	/* Now free scheduler queues to free pool */
2351 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2352 		 /* TLs above aggregation level are shared across all PF
2353 		  * and it's VFs, hence skip freeing them.
2354 		  */
2355 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
2356 			continue;
2357 
2358 		txsch = &nix_hw->txsch[lvl];
2359 		for (schq = 0; schq < txsch->schq.max; schq++) {
2360 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2361 				continue;
2362 			nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2363 			rvu_free_rsrc(&txsch->schq, schq);
2364 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2365 		}
2366 	}
2367 	mutex_unlock(&rvu->rsrc_lock);
2368 
2369 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
2370 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
2371 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
2372 	if (err)
2373 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
2374 
2375 	return 0;
2376 }
2377 
2378 static int nix_txschq_free_one(struct rvu *rvu,
2379 			       struct nix_txsch_free_req *req)
2380 {
2381 	struct rvu_hwinfo *hw = rvu->hw;
2382 	u16 pcifunc = req->hdr.pcifunc;
2383 	int lvl, schq, nixlf, blkaddr;
2384 	struct nix_txsch *txsch;
2385 	struct nix_hw *nix_hw;
2386 	u32 *pfvf_map;
2387 	int rc;
2388 
2389 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2390 	if (blkaddr < 0)
2391 		return NIX_AF_ERR_AF_LF_INVALID;
2392 
2393 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2394 	if (!nix_hw)
2395 		return NIX_AF_ERR_INVALID_NIXBLK;
2396 
2397 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2398 	if (nixlf < 0)
2399 		return NIX_AF_ERR_AF_LF_INVALID;
2400 
2401 	lvl = req->schq_lvl;
2402 	schq = req->schq;
2403 	txsch = &nix_hw->txsch[lvl];
2404 
2405 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
2406 		return 0;
2407 
2408 	pfvf_map = txsch->pfvf_map;
2409 	mutex_lock(&rvu->rsrc_lock);
2410 
2411 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
2412 		rc = NIX_AF_ERR_TLX_INVALID;
2413 		goto err;
2414 	}
2415 
2416 	/* Clear SW_XOFF of this resource only.
2417 	 * For SMQ level, all path XOFF's
2418 	 * need to be made clear by user
2419 	 */
2420 	nix_clear_tx_xoff(rvu, blkaddr, lvl, schq);
2421 
2422 	nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
2423 	nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq);
2424 
2425 	/* Flush if it is a SMQ. Onus of disabling
2426 	 * TL2/3 queue links before SMQ flush is on user
2427 	 */
2428 	if (lvl == NIX_TXSCH_LVL_SMQ &&
2429 	    nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) {
2430 		rc = NIX_AF_SMQ_FLUSH_FAILED;
2431 		goto err;
2432 	}
2433 
2434 	nix_reset_tx_schedule(rvu, blkaddr, lvl, schq);
2435 
2436 	/* Free the resource */
2437 	rvu_free_rsrc(&txsch->schq, schq);
2438 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2439 	mutex_unlock(&rvu->rsrc_lock);
2440 	return 0;
2441 err:
2442 	mutex_unlock(&rvu->rsrc_lock);
2443 	return rc;
2444 }
2445 
2446 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
2447 				    struct nix_txsch_free_req *req,
2448 				    struct msg_rsp *rsp)
2449 {
2450 	if (req->flags & TXSCHQ_FREE_ALL)
2451 		return nix_txschq_free(rvu, req->hdr.pcifunc);
2452 	else
2453 		return nix_txschq_free_one(rvu, req);
2454 }
2455 
2456 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
2457 				      int lvl, u64 reg, u64 regval)
2458 {
2459 	u64 regbase = reg & 0xFFFF;
2460 	u16 schq, parent;
2461 
2462 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
2463 		return false;
2464 
2465 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2466 	/* Check if this schq belongs to this PF/VF or not */
2467 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
2468 		return false;
2469 
2470 	parent = (regval >> 16) & 0x1FF;
2471 	/* Validate MDQ's TL4 parent */
2472 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
2473 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
2474 		return false;
2475 
2476 	/* Validate TL4's TL3 parent */
2477 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
2478 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
2479 		return false;
2480 
2481 	/* Validate TL3's TL2 parent */
2482 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
2483 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
2484 		return false;
2485 
2486 	/* Validate TL2's TL1 parent */
2487 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
2488 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
2489 		return false;
2490 
2491 	return true;
2492 }
2493 
2494 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
2495 {
2496 	u64 regbase;
2497 
2498 	if (hw->cap.nix_shaping)
2499 		return true;
2500 
2501 	/* If shaping and coloring is not supported, then
2502 	 * *_CIR and *_PIR registers should not be configured.
2503 	 */
2504 	regbase = reg & 0xFFFF;
2505 
2506 	switch (lvl) {
2507 	case NIX_TXSCH_LVL_TL1:
2508 		if (regbase == NIX_AF_TL1X_CIR(0))
2509 			return false;
2510 		break;
2511 	case NIX_TXSCH_LVL_TL2:
2512 		if (regbase == NIX_AF_TL2X_CIR(0) ||
2513 		    regbase == NIX_AF_TL2X_PIR(0))
2514 			return false;
2515 		break;
2516 	case NIX_TXSCH_LVL_TL3:
2517 		if (regbase == NIX_AF_TL3X_CIR(0) ||
2518 		    regbase == NIX_AF_TL3X_PIR(0))
2519 			return false;
2520 		break;
2521 	case NIX_TXSCH_LVL_TL4:
2522 		if (regbase == NIX_AF_TL4X_CIR(0) ||
2523 		    regbase == NIX_AF_TL4X_PIR(0))
2524 			return false;
2525 		break;
2526 	case NIX_TXSCH_LVL_MDQ:
2527 		if (regbase == NIX_AF_MDQX_CIR(0) ||
2528 		    regbase == NIX_AF_MDQX_PIR(0))
2529 			return false;
2530 		break;
2531 	}
2532 	return true;
2533 }
2534 
2535 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
2536 				u16 pcifunc, int blkaddr)
2537 {
2538 	u32 *pfvf_map;
2539 	int schq;
2540 
2541 	schq = nix_get_tx_link(rvu, pcifunc);
2542 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
2543 	/* Skip if PF has already done the config */
2544 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
2545 		return;
2546 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
2547 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
2548 
2549 	/* On OcteonTx2 the config was in bytes and newer silcons
2550 	 * it's changed to weight.
2551 	 */
2552 	if (!rvu->hw->cap.nix_common_dwrr_mtu)
2553 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2554 			    TXSCH_TL1_DFLT_RR_QTM);
2555 	else
2556 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
2557 			    CN10K_MAX_DWRR_WEIGHT);
2558 
2559 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
2560 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
2561 }
2562 
2563 /* Register offset - [15:0]
2564  * Scheduler Queue number - [25:16]
2565  */
2566 #define NIX_TX_SCHQ_MASK	GENMASK_ULL(25, 0)
2567 
2568 static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw,
2569 			       int blkaddr, struct nix_txschq_config *req,
2570 			       struct nix_txschq_config *rsp)
2571 {
2572 	u16 pcifunc = req->hdr.pcifunc;
2573 	int idx, schq;
2574 	u64 reg;
2575 
2576 	for (idx = 0; idx < req->num_regs; idx++) {
2577 		reg = req->reg[idx];
2578 		reg &= NIX_TX_SCHQ_MASK;
2579 		schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2580 		if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) ||
2581 		    !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq))
2582 			return NIX_AF_INVAL_TXSCHQ_CFG;
2583 		rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg);
2584 	}
2585 	rsp->lvl = req->lvl;
2586 	rsp->num_regs = req->num_regs;
2587 	return 0;
2588 }
2589 
2590 void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
2591 			struct nix_txsch *txsch, bool enable)
2592 {
2593 	struct rvu_hwinfo *hw = rvu->hw;
2594 	int lbk_link_start, lbk_links;
2595 	u8 pf = rvu_get_pf(pcifunc);
2596 	int schq;
2597 	u64 cfg;
2598 
2599 	if (!is_pf_cgxmapped(rvu, pf))
2600 		return;
2601 
2602 	cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
2603 	lbk_link_start = hw->cgx_links;
2604 
2605 	for (schq = 0; schq < txsch->schq.max; schq++) {
2606 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
2607 			continue;
2608 		/* Enable all LBK links with channel 63 by default so that
2609 		 * packets can be sent to LBK with a NPC TX MCAM rule
2610 		 */
2611 		lbk_links = hw->lbk_links;
2612 		while (lbk_links--)
2613 			rvu_write64(rvu, blkaddr,
2614 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
2615 							      lbk_link_start +
2616 							      lbk_links), cfg);
2617 	}
2618 }
2619 
2620 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
2621 				    struct nix_txschq_config *req,
2622 				    struct nix_txschq_config *rsp)
2623 {
2624 	u64 reg, val, regval, schq_regbase, val_mask;
2625 	struct rvu_hwinfo *hw = rvu->hw;
2626 	u16 pcifunc = req->hdr.pcifunc;
2627 	struct nix_txsch *txsch;
2628 	struct nix_hw *nix_hw;
2629 	int blkaddr, idx, err;
2630 	int nixlf, schq;
2631 	u32 *pfvf_map;
2632 
2633 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
2634 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
2635 		return NIX_AF_INVAL_TXSCHQ_CFG;
2636 
2637 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2638 	if (err)
2639 		return err;
2640 
2641 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2642 	if (!nix_hw)
2643 		return NIX_AF_ERR_INVALID_NIXBLK;
2644 
2645 	if (req->read)
2646 		return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp);
2647 
2648 	txsch = &nix_hw->txsch[req->lvl];
2649 	pfvf_map = txsch->pfvf_map;
2650 
2651 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2652 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2653 		mutex_lock(&rvu->rsrc_lock);
2654 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2655 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2656 		mutex_unlock(&rvu->rsrc_lock);
2657 		return 0;
2658 	}
2659 
2660 	for (idx = 0; idx < req->num_regs; idx++) {
2661 		reg = req->reg[idx];
2662 		reg &= NIX_TX_SCHQ_MASK;
2663 		regval = req->regval[idx];
2664 		schq_regbase = reg & 0xFFFF;
2665 		val_mask = req->regval_mask[idx];
2666 
2667 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2668 					       txsch->lvl, reg, regval))
2669 			return NIX_AF_INVAL_TXSCHQ_CFG;
2670 
2671 		/* Check if shaping and coloring is supported */
2672 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2673 			continue;
2674 
2675 		val = rvu_read64(rvu, blkaddr, reg);
2676 		regval = (val & val_mask) | (regval & ~val_mask);
2677 
2678 		/* Handle shaping state toggle specially */
2679 		if (hw->cap.nix_shaper_toggle_wait &&
2680 		    handle_txschq_shaper_update(rvu, blkaddr, nixlf,
2681 						req->lvl, reg, regval))
2682 			continue;
2683 
2684 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2685 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2686 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2687 					   pcifunc, 0);
2688 			regval &= ~(0x7FULL << 24);
2689 			regval |= ((u64)nixlf << 24);
2690 		}
2691 
2692 		/* Clear 'BP_ENA' config, if it's not allowed */
2693 		if (!hw->cap.nix_tx_link_bp) {
2694 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2695 			    (schq_regbase & 0xFF00) ==
2696 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2697 				regval &= ~BIT_ULL(13);
2698 		}
2699 
2700 		/* Mark config as done for TL1 by PF */
2701 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2702 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2703 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2704 			mutex_lock(&rvu->rsrc_lock);
2705 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2706 							NIX_TXSCHQ_CFG_DONE);
2707 			mutex_unlock(&rvu->rsrc_lock);
2708 		}
2709 
2710 		/* SMQ flush is special hence split register writes such
2711 		 * that flush first and write rest of the bits later.
2712 		 */
2713 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2714 		    (regval & BIT_ULL(49))) {
2715 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2716 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2717 			regval &= ~BIT_ULL(49);
2718 		}
2719 		rvu_write64(rvu, blkaddr, reg, regval);
2720 	}
2721 
2722 	return 0;
2723 }
2724 
2725 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2726 			   struct nix_vtag_config *req)
2727 {
2728 	u64 regval = req->vtag_size;
2729 
2730 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2731 	    req->vtag_size > VTAGSIZE_T8)
2732 		return -EINVAL;
2733 
2734 	/* RX VTAG Type 7 reserved for vf vlan */
2735 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2736 		return NIX_AF_ERR_RX_VTAG_INUSE;
2737 
2738 	if (req->rx.capture_vtag)
2739 		regval |= BIT_ULL(5);
2740 	if (req->rx.strip_vtag)
2741 		regval |= BIT_ULL(4);
2742 
2743 	rvu_write64(rvu, blkaddr,
2744 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2745 	return 0;
2746 }
2747 
2748 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2749 			    u16 pcifunc, int index)
2750 {
2751 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2752 	struct nix_txvlan *vlan;
2753 
2754 	if (!nix_hw)
2755 		return NIX_AF_ERR_INVALID_NIXBLK;
2756 
2757 	vlan = &nix_hw->txvlan;
2758 	if (vlan->entry2pfvf_map[index] != pcifunc)
2759 		return NIX_AF_ERR_PARAM;
2760 
2761 	rvu_write64(rvu, blkaddr,
2762 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2763 	rvu_write64(rvu, blkaddr,
2764 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2765 
2766 	vlan->entry2pfvf_map[index] = 0;
2767 	rvu_free_rsrc(&vlan->rsrc, index);
2768 
2769 	return 0;
2770 }
2771 
2772 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2773 {
2774 	struct nix_txvlan *vlan;
2775 	struct nix_hw *nix_hw;
2776 	int index, blkaddr;
2777 
2778 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2779 	if (blkaddr < 0)
2780 		return;
2781 
2782 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2783 	if (!nix_hw)
2784 		return;
2785 
2786 	vlan = &nix_hw->txvlan;
2787 
2788 	mutex_lock(&vlan->rsrc_lock);
2789 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2790 	for (index = 0; index < vlan->rsrc.max; index++) {
2791 		if (vlan->entry2pfvf_map[index] == pcifunc)
2792 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2793 	}
2794 	mutex_unlock(&vlan->rsrc_lock);
2795 }
2796 
2797 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2798 			     u64 vtag, u8 size)
2799 {
2800 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2801 	struct nix_txvlan *vlan;
2802 	u64 regval;
2803 	int index;
2804 
2805 	if (!nix_hw)
2806 		return NIX_AF_ERR_INVALID_NIXBLK;
2807 
2808 	vlan = &nix_hw->txvlan;
2809 
2810 	mutex_lock(&vlan->rsrc_lock);
2811 
2812 	index = rvu_alloc_rsrc(&vlan->rsrc);
2813 	if (index < 0) {
2814 		mutex_unlock(&vlan->rsrc_lock);
2815 		return index;
2816 	}
2817 
2818 	mutex_unlock(&vlan->rsrc_lock);
2819 
2820 	regval = size ? vtag : vtag << 32;
2821 
2822 	rvu_write64(rvu, blkaddr,
2823 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2824 	rvu_write64(rvu, blkaddr,
2825 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2826 
2827 	return index;
2828 }
2829 
2830 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2831 			     struct nix_vtag_config *req)
2832 {
2833 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2834 	u16 pcifunc = req->hdr.pcifunc;
2835 	int idx0 = req->tx.vtag0_idx;
2836 	int idx1 = req->tx.vtag1_idx;
2837 	struct nix_txvlan *vlan;
2838 	int err = 0;
2839 
2840 	if (!nix_hw)
2841 		return NIX_AF_ERR_INVALID_NIXBLK;
2842 
2843 	vlan = &nix_hw->txvlan;
2844 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2845 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2846 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2847 			return NIX_AF_ERR_PARAM;
2848 
2849 	mutex_lock(&vlan->rsrc_lock);
2850 
2851 	if (req->tx.free_vtag0) {
2852 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2853 		if (err)
2854 			goto exit;
2855 	}
2856 
2857 	if (req->tx.free_vtag1)
2858 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2859 
2860 exit:
2861 	mutex_unlock(&vlan->rsrc_lock);
2862 	return err;
2863 }
2864 
2865 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2866 			   struct nix_vtag_config *req,
2867 			   struct nix_vtag_config_rsp *rsp)
2868 {
2869 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2870 	struct nix_txvlan *vlan;
2871 	u16 pcifunc = req->hdr.pcifunc;
2872 
2873 	if (!nix_hw)
2874 		return NIX_AF_ERR_INVALID_NIXBLK;
2875 
2876 	vlan = &nix_hw->txvlan;
2877 	if (req->tx.cfg_vtag0) {
2878 		rsp->vtag0_idx =
2879 			nix_tx_vtag_alloc(rvu, blkaddr,
2880 					  req->tx.vtag0, req->vtag_size);
2881 
2882 		if (rsp->vtag0_idx < 0)
2883 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2884 
2885 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2886 	}
2887 
2888 	if (req->tx.cfg_vtag1) {
2889 		rsp->vtag1_idx =
2890 			nix_tx_vtag_alloc(rvu, blkaddr,
2891 					  req->tx.vtag1, req->vtag_size);
2892 
2893 		if (rsp->vtag1_idx < 0)
2894 			goto err_free;
2895 
2896 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2897 	}
2898 
2899 	return 0;
2900 
2901 err_free:
2902 	if (req->tx.cfg_vtag0)
2903 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2904 
2905 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2906 }
2907 
2908 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2909 				  struct nix_vtag_config *req,
2910 				  struct nix_vtag_config_rsp *rsp)
2911 {
2912 	u16 pcifunc = req->hdr.pcifunc;
2913 	int blkaddr, nixlf, err;
2914 
2915 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2916 	if (err)
2917 		return err;
2918 
2919 	if (req->cfg_type) {
2920 		/* rx vtag configuration */
2921 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2922 		if (err)
2923 			return NIX_AF_ERR_PARAM;
2924 	} else {
2925 		/* tx vtag configuration */
2926 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2927 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2928 			return NIX_AF_ERR_PARAM;
2929 
2930 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2931 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2932 
2933 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2934 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2935 	}
2936 
2937 	return 0;
2938 }
2939 
2940 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2941 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2942 {
2943 	struct nix_aq_enq_req aq_req;
2944 	int err;
2945 
2946 	aq_req.hdr.pcifunc = 0;
2947 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2948 	aq_req.op = op;
2949 	aq_req.qidx = mce;
2950 
2951 	/* Use RSS with RSS index 0 */
2952 	aq_req.mce.op = 1;
2953 	aq_req.mce.index = 0;
2954 	aq_req.mce.eol = eol;
2955 	aq_req.mce.pf_func = pcifunc;
2956 	aq_req.mce.next = next;
2957 
2958 	/* All fields valid */
2959 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2960 
2961 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2962 	if (err) {
2963 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2964 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2965 		return err;
2966 	}
2967 	return 0;
2968 }
2969 
2970 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2971 				     u16 pcifunc, bool add)
2972 {
2973 	struct mce *mce, *tail = NULL;
2974 	bool delete = false;
2975 
2976 	/* Scan through the current list */
2977 	hlist_for_each_entry(mce, &mce_list->head, node) {
2978 		/* If already exists, then delete */
2979 		if (mce->pcifunc == pcifunc && !add) {
2980 			delete = true;
2981 			break;
2982 		} else if (mce->pcifunc == pcifunc && add) {
2983 			/* entry already exists */
2984 			return 0;
2985 		}
2986 		tail = mce;
2987 	}
2988 
2989 	if (delete) {
2990 		hlist_del(&mce->node);
2991 		kfree(mce);
2992 		mce_list->count--;
2993 		return 0;
2994 	}
2995 
2996 	if (!add)
2997 		return 0;
2998 
2999 	/* Add a new one to the list, at the tail */
3000 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
3001 	if (!mce)
3002 		return -ENOMEM;
3003 	mce->pcifunc = pcifunc;
3004 	if (!tail)
3005 		hlist_add_head(&mce->node, &mce_list->head);
3006 	else
3007 		hlist_add_behind(&mce->node, &tail->node);
3008 	mce_list->count++;
3009 	return 0;
3010 }
3011 
3012 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
3013 			struct nix_mce_list *mce_list,
3014 			int mce_idx, int mcam_index, bool add)
3015 {
3016 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
3017 	struct npc_mcam *mcam = &rvu->hw->mcam;
3018 	struct nix_mcast *mcast;
3019 	struct nix_hw *nix_hw;
3020 	struct mce *mce;
3021 
3022 	if (!mce_list)
3023 		return -EINVAL;
3024 
3025 	/* Get this PF/VF func's MCE index */
3026 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
3027 
3028 	if (idx > (mce_idx + mce_list->max)) {
3029 		dev_err(rvu->dev,
3030 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
3031 			__func__, idx, mce_list->max,
3032 			pcifunc >> RVU_PFVF_PF_SHIFT);
3033 		return -EINVAL;
3034 	}
3035 
3036 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
3037 	if (err)
3038 		return err;
3039 
3040 	mcast = &nix_hw->mcast;
3041 	mutex_lock(&mcast->mce_lock);
3042 
3043 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
3044 	if (err)
3045 		goto end;
3046 
3047 	/* Disable MCAM entry in NPC */
3048 	if (!mce_list->count) {
3049 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
3050 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
3051 		goto end;
3052 	}
3053 
3054 	/* Dump the updated list to HW */
3055 	idx = mce_idx;
3056 	last_idx = idx + mce_list->count - 1;
3057 	hlist_for_each_entry(mce, &mce_list->head, node) {
3058 		if (idx > last_idx)
3059 			break;
3060 
3061 		next_idx = idx + 1;
3062 		/* EOL should be set in last MCE */
3063 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
3064 					mce->pcifunc, next_idx,
3065 					(next_idx > last_idx) ? true : false);
3066 		if (err)
3067 			goto end;
3068 		idx++;
3069 	}
3070 
3071 end:
3072 	mutex_unlock(&mcast->mce_lock);
3073 	return err;
3074 }
3075 
3076 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
3077 		      struct nix_mce_list **mce_list, int *mce_idx)
3078 {
3079 	struct rvu_hwinfo *hw = rvu->hw;
3080 	struct rvu_pfvf *pfvf;
3081 
3082 	if (!hw->cap.nix_rx_multicast ||
3083 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
3084 		*mce_list = NULL;
3085 		*mce_idx = 0;
3086 		return;
3087 	}
3088 
3089 	/* Get this PF/VF func's MCE index */
3090 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
3091 
3092 	if (type == NIXLF_BCAST_ENTRY) {
3093 		*mce_list = &pfvf->bcast_mce_list;
3094 		*mce_idx = pfvf->bcast_mce_idx;
3095 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
3096 		*mce_list = &pfvf->mcast_mce_list;
3097 		*mce_idx = pfvf->mcast_mce_idx;
3098 	} else if (type == NIXLF_PROMISC_ENTRY) {
3099 		*mce_list = &pfvf->promisc_mce_list;
3100 		*mce_idx = pfvf->promisc_mce_idx;
3101 	}  else {
3102 		*mce_list = NULL;
3103 		*mce_idx = 0;
3104 	}
3105 }
3106 
3107 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
3108 			       int type, bool add)
3109 {
3110 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
3111 	struct npc_mcam *mcam = &rvu->hw->mcam;
3112 	struct rvu_hwinfo *hw = rvu->hw;
3113 	struct nix_mce_list *mce_list;
3114 	int pf;
3115 
3116 	/* skip multicast pkt replication for AF's VFs & SDP links */
3117 	if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
3118 		return 0;
3119 
3120 	if (!hw->cap.nix_rx_multicast)
3121 		return 0;
3122 
3123 	pf = rvu_get_pf(pcifunc);
3124 	if (!is_pf_cgxmapped(rvu, pf))
3125 		return 0;
3126 
3127 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3128 	if (blkaddr < 0)
3129 		return -EINVAL;
3130 
3131 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
3132 	if (nixlf < 0)
3133 		return -EINVAL;
3134 
3135 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
3136 
3137 	mcam_index = npc_get_nixlf_mcam_index(mcam,
3138 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
3139 					      nixlf, type);
3140 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
3141 				  mce_idx, mcam_index, add);
3142 	return err;
3143 }
3144 
3145 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
3146 {
3147 	struct nix_mcast *mcast = &nix_hw->mcast;
3148 	int err, pf, numvfs, idx;
3149 	struct rvu_pfvf *pfvf;
3150 	u16 pcifunc;
3151 	u64 cfg;
3152 
3153 	/* Skip PF0 (i.e AF) */
3154 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
3155 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
3156 		/* If PF is not enabled, nothing to do */
3157 		if (!((cfg >> 20) & 0x01))
3158 			continue;
3159 		/* Get numVFs attached to this PF */
3160 		numvfs = (cfg >> 12) & 0xFF;
3161 
3162 		pfvf = &rvu->pf[pf];
3163 
3164 		/* This NIX0/1 block mapped to PF ? */
3165 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
3166 			continue;
3167 
3168 		/* save start idx of broadcast mce list */
3169 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3170 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
3171 
3172 		/* save start idx of multicast mce list */
3173 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3174 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
3175 
3176 		/* save the start idx of promisc mce list */
3177 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
3178 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
3179 
3180 		for (idx = 0; idx < (numvfs + 1); idx++) {
3181 			/* idx-0 is for PF, followed by VFs */
3182 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
3183 			pcifunc |= idx;
3184 			/* Add dummy entries now, so that we don't have to check
3185 			 * for whether AQ_OP should be INIT/WRITE later on.
3186 			 * Will be updated when a NIXLF is attached/detached to
3187 			 * these PF/VFs.
3188 			 */
3189 			err = nix_blk_setup_mce(rvu, nix_hw,
3190 						pfvf->bcast_mce_idx + idx,
3191 						NIX_AQ_INSTOP_INIT,
3192 						pcifunc, 0, true);
3193 			if (err)
3194 				return err;
3195 
3196 			/* add dummy entries to multicast mce list */
3197 			err = nix_blk_setup_mce(rvu, nix_hw,
3198 						pfvf->mcast_mce_idx + idx,
3199 						NIX_AQ_INSTOP_INIT,
3200 						pcifunc, 0, true);
3201 			if (err)
3202 				return err;
3203 
3204 			/* add dummy entries to promisc mce list */
3205 			err = nix_blk_setup_mce(rvu, nix_hw,
3206 						pfvf->promisc_mce_idx + idx,
3207 						NIX_AQ_INSTOP_INIT,
3208 						pcifunc, 0, true);
3209 			if (err)
3210 				return err;
3211 		}
3212 	}
3213 	return 0;
3214 }
3215 
3216 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3217 {
3218 	struct nix_mcast *mcast = &nix_hw->mcast;
3219 	struct rvu_hwinfo *hw = rvu->hw;
3220 	int err, size;
3221 
3222 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
3223 	size = (1ULL << size);
3224 
3225 	/* Alloc memory for multicast/mirror replication entries */
3226 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
3227 			 (256UL << MC_TBL_SIZE), size);
3228 	if (err)
3229 		return -ENOMEM;
3230 
3231 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
3232 		    (u64)mcast->mce_ctx->iova);
3233 
3234 	/* Set max list length equal to max no of VFs per PF  + PF itself */
3235 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
3236 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
3237 
3238 	/* Alloc memory for multicast replication buffers */
3239 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
3240 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
3241 			 (8UL << MC_BUF_CNT), size);
3242 	if (err)
3243 		return -ENOMEM;
3244 
3245 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
3246 		    (u64)mcast->mcast_buf->iova);
3247 
3248 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
3249 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
3250 
3251 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
3252 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
3253 		    BIT_ULL(20) | MC_BUF_CNT);
3254 
3255 	mutex_init(&mcast->mce_lock);
3256 
3257 	return nix_setup_mce_tables(rvu, nix_hw);
3258 }
3259 
3260 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
3261 {
3262 	struct nix_txvlan *vlan = &nix_hw->txvlan;
3263 	int err;
3264 
3265 	/* Allocate resource bimap for tx vtag def registers*/
3266 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
3267 	err = rvu_alloc_bitmap(&vlan->rsrc);
3268 	if (err)
3269 		return -ENOMEM;
3270 
3271 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
3272 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
3273 					    sizeof(u16), GFP_KERNEL);
3274 	if (!vlan->entry2pfvf_map)
3275 		goto free_mem;
3276 
3277 	mutex_init(&vlan->rsrc_lock);
3278 	return 0;
3279 
3280 free_mem:
3281 	kfree(vlan->rsrc.bmap);
3282 	return -ENOMEM;
3283 }
3284 
3285 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
3286 {
3287 	struct nix_txsch *txsch;
3288 	int err, lvl, schq;
3289 	u64 cfg, reg;
3290 
3291 	/* Get scheduler queue count of each type and alloc
3292 	 * bitmap for each for alloc/free/attach operations.
3293 	 */
3294 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3295 		txsch = &nix_hw->txsch[lvl];
3296 		txsch->lvl = lvl;
3297 		switch (lvl) {
3298 		case NIX_TXSCH_LVL_SMQ:
3299 			reg = NIX_AF_MDQ_CONST;
3300 			break;
3301 		case NIX_TXSCH_LVL_TL4:
3302 			reg = NIX_AF_TL4_CONST;
3303 			break;
3304 		case NIX_TXSCH_LVL_TL3:
3305 			reg = NIX_AF_TL3_CONST;
3306 			break;
3307 		case NIX_TXSCH_LVL_TL2:
3308 			reg = NIX_AF_TL2_CONST;
3309 			break;
3310 		case NIX_TXSCH_LVL_TL1:
3311 			reg = NIX_AF_TL1_CONST;
3312 			break;
3313 		}
3314 		cfg = rvu_read64(rvu, blkaddr, reg);
3315 		txsch->schq.max = cfg & 0xFFFF;
3316 		err = rvu_alloc_bitmap(&txsch->schq);
3317 		if (err)
3318 			return err;
3319 
3320 		/* Allocate memory for scheduler queues to
3321 		 * PF/VF pcifunc mapping info.
3322 		 */
3323 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
3324 					       sizeof(u32), GFP_KERNEL);
3325 		if (!txsch->pfvf_map)
3326 			return -ENOMEM;
3327 		for (schq = 0; schq < txsch->schq.max; schq++)
3328 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
3329 	}
3330 
3331 	/* Setup a default value of 8192 as DWRR MTU */
3332 	if (rvu->hw->cap.nix_common_dwrr_mtu ||
3333 	    rvu->hw->cap.nix_multiple_dwrr_mtu) {
3334 		rvu_write64(rvu, blkaddr,
3335 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
3336 			    convert_bytes_to_dwrr_mtu(8192));
3337 		rvu_write64(rvu, blkaddr,
3338 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK),
3339 			    convert_bytes_to_dwrr_mtu(8192));
3340 		rvu_write64(rvu, blkaddr,
3341 			    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP),
3342 			    convert_bytes_to_dwrr_mtu(8192));
3343 	}
3344 
3345 	return 0;
3346 }
3347 
3348 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
3349 				int blkaddr, u32 cfg)
3350 {
3351 	int fmt_idx;
3352 
3353 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
3354 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
3355 			return fmt_idx;
3356 	}
3357 	if (fmt_idx >= nix_hw->mark_format.total)
3358 		return -ERANGE;
3359 
3360 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
3361 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
3362 	nix_hw->mark_format.in_use++;
3363 	return fmt_idx;
3364 }
3365 
3366 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
3367 				    int blkaddr)
3368 {
3369 	u64 cfgs[] = {
3370 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
3371 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
3372 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
3373 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
3374 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
3375 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
3376 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
3377 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
3378 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
3379 	};
3380 	int i, rc;
3381 	u64 total;
3382 
3383 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
3384 	nix_hw->mark_format.total = (u8)total;
3385 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
3386 					       GFP_KERNEL);
3387 	if (!nix_hw->mark_format.cfg)
3388 		return -ENOMEM;
3389 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
3390 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
3391 		if (rc < 0)
3392 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
3393 				i, rc);
3394 	}
3395 
3396 	return 0;
3397 }
3398 
3399 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
3400 {
3401 	/* CN10K supports LBK FIFO size 72 KB */
3402 	if (rvu->hw->lbk_bufsize == 0x12000)
3403 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
3404 	else
3405 		*max_mtu = NIC_HW_MAX_FRS;
3406 }
3407 
3408 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
3409 {
3410 	int fifo_size = rvu_cgx_get_fifolen(rvu);
3411 
3412 	/* RPM supports FIFO len 128 KB and RPM2 supports double the
3413 	 * FIFO len to accommodate 8 LMACS
3414 	 */
3415 	if (fifo_size == 0x20000 || fifo_size == 0x40000)
3416 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
3417 	else
3418 		*max_mtu = NIC_HW_MAX_FRS;
3419 }
3420 
3421 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
3422 				     struct nix_hw_info *rsp)
3423 {
3424 	u16 pcifunc = req->hdr.pcifunc;
3425 	u64 dwrr_mtu;
3426 	int blkaddr;
3427 
3428 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3429 	if (blkaddr < 0)
3430 		return NIX_AF_ERR_AF_LF_INVALID;
3431 
3432 	if (is_afvf(pcifunc))
3433 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
3434 	else
3435 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
3436 
3437 	rsp->min_mtu = NIC_HW_MIN_FRS;
3438 
3439 	if (!rvu->hw->cap.nix_common_dwrr_mtu &&
3440 	    !rvu->hw->cap.nix_multiple_dwrr_mtu) {
3441 		/* Return '1' on OTx2 */
3442 		rsp->rpm_dwrr_mtu = 1;
3443 		rsp->sdp_dwrr_mtu = 1;
3444 		rsp->lbk_dwrr_mtu = 1;
3445 		return 0;
3446 	}
3447 
3448 	/* Return DWRR_MTU for TLx_SCHEDULE[RR_WEIGHT] config */
3449 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3450 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
3451 	rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3452 
3453 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3454 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_SDP));
3455 	rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3456 
3457 	dwrr_mtu = rvu_read64(rvu, blkaddr,
3458 			      nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_LBK));
3459 	rsp->lbk_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu);
3460 
3461 	return 0;
3462 }
3463 
3464 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
3465 				   struct msg_rsp *rsp)
3466 {
3467 	u16 pcifunc = req->hdr.pcifunc;
3468 	int i, nixlf, blkaddr, err;
3469 	u64 stats;
3470 
3471 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3472 	if (err)
3473 		return err;
3474 
3475 	/* Get stats count supported by HW */
3476 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
3477 
3478 	/* Reset tx stats */
3479 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
3480 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
3481 
3482 	/* Reset rx stats */
3483 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
3484 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
3485 
3486 	return 0;
3487 }
3488 
3489 /* Returns the ALG index to be set into NPC_RX_ACTION */
3490 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
3491 {
3492 	int i;
3493 
3494 	/* Scan over exiting algo entries to find a match */
3495 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
3496 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
3497 			return i;
3498 
3499 	return -ERANGE;
3500 }
3501 
3502 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
3503 {
3504 	int idx, nr_field, key_off, field_marker, keyoff_marker;
3505 	int max_key_off, max_bit_pos, group_member;
3506 	struct nix_rx_flowkey_alg *field;
3507 	struct nix_rx_flowkey_alg tmp;
3508 	u32 key_type, valid_key;
3509 	u32 l3_l4_src_dst;
3510 	int l4_key_offset = 0;
3511 
3512 	if (!alg)
3513 		return -EINVAL;
3514 
3515 #define FIELDS_PER_ALG  5
3516 #define MAX_KEY_OFF	40
3517 	/* Clear all fields */
3518 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
3519 
3520 	/* Each of the 32 possible flow key algorithm definitions should
3521 	 * fall into above incremental config (except ALG0). Otherwise a
3522 	 * single NPC MCAM entry is not sufficient for supporting RSS.
3523 	 *
3524 	 * If a different definition or combination needed then NPC MCAM
3525 	 * has to be programmed to filter such pkts and it's action should
3526 	 * point to this definition to calculate flowtag or hash.
3527 	 *
3528 	 * The `for loop` goes over _all_ protocol field and the following
3529 	 * variables depicts the state machine forward progress logic.
3530 	 *
3531 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
3532 	 * in field->key_offset update.
3533 	 * field_marker - Enabled when a new field needs to be selected.
3534 	 * group_member - Enabled when protocol is part of a group.
3535 	 */
3536 
3537 	/* Last 4 bits (31:28) are reserved to specify SRC, DST
3538 	 * selection for L3, L4 i.e IPV[4,6]_SRC, IPV[4,6]_DST,
3539 	 * [TCP,UDP,SCTP]_SRC, [TCP,UDP,SCTP]_DST
3540 	 * 31 => L3_SRC, 30 => L3_DST, 29 => L4_SRC, 28 => L4_DST
3541 	 */
3542 	l3_l4_src_dst = flow_cfg;
3543 	/* Reset these 4 bits, so that these won't be part of key */
3544 	flow_cfg &= NIX_FLOW_KEY_TYPE_L3_L4_MASK;
3545 
3546 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
3547 	nr_field = 0; key_off = 0; field_marker = 1;
3548 	field = &tmp; max_bit_pos = fls(flow_cfg);
3549 	for (idx = 0;
3550 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
3551 	     key_off < MAX_KEY_OFF; idx++) {
3552 		key_type = BIT(idx);
3553 		valid_key = flow_cfg & key_type;
3554 		/* Found a field marker, reset the field values */
3555 		if (field_marker)
3556 			memset(&tmp, 0, sizeof(tmp));
3557 
3558 		field_marker = true;
3559 		keyoff_marker = true;
3560 		switch (key_type) {
3561 		case NIX_FLOW_KEY_TYPE_PORT:
3562 			field->sel_chan = true;
3563 			/* This should be set to 1, when SEL_CHAN is set */
3564 			field->bytesm1 = 1;
3565 			break;
3566 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
3567 			field->lid = NPC_LID_LC;
3568 			field->hdr_offset = 9; /* offset */
3569 			field->bytesm1 = 0; /* 1 byte */
3570 			field->ltype_match = NPC_LT_LC_IP;
3571 			field->ltype_mask = 0xF;
3572 			break;
3573 		case NIX_FLOW_KEY_TYPE_IPV4:
3574 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
3575 			field->lid = NPC_LID_LC;
3576 			field->ltype_match = NPC_LT_LC_IP;
3577 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
3578 				field->lid = NPC_LID_LG;
3579 				field->ltype_match = NPC_LT_LG_TU_IP;
3580 			}
3581 			field->hdr_offset = 12; /* SIP offset */
3582 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
3583 
3584 			/* Only SIP */
3585 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3586 				field->bytesm1 = 3; /* SIP, 4 bytes */
3587 
3588 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3589 				/* Both SIP + DIP */
3590 				if (field->bytesm1 == 3) {
3591 					field->bytesm1 = 7; /* SIP + DIP, 8B */
3592 				} else {
3593 					/* Only DIP */
3594 					field->hdr_offset = 16; /* DIP off */
3595 					field->bytesm1 = 3; /* DIP, 4 bytes */
3596 				}
3597 			}
3598 
3599 			field->ltype_mask = 0xF; /* Match only IPv4 */
3600 			keyoff_marker = false;
3601 			break;
3602 		case NIX_FLOW_KEY_TYPE_IPV6:
3603 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
3604 			field->lid = NPC_LID_LC;
3605 			field->ltype_match = NPC_LT_LC_IP6;
3606 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
3607 				field->lid = NPC_LID_LG;
3608 				field->ltype_match = NPC_LT_LG_TU_IP6;
3609 			}
3610 			field->hdr_offset = 8; /* SIP offset */
3611 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
3612 
3613 			/* Only SIP */
3614 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_SRC_ONLY)
3615 				field->bytesm1 = 15; /* SIP, 16 bytes */
3616 
3617 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L3_DST_ONLY) {
3618 				/* Both SIP + DIP */
3619 				if (field->bytesm1 == 15) {
3620 					/* SIP + DIP, 32 bytes */
3621 					field->bytesm1 = 31;
3622 				} else {
3623 					/* Only DIP */
3624 					field->hdr_offset = 24; /* DIP off */
3625 					field->bytesm1 = 15; /* DIP,16 bytes */
3626 				}
3627 			}
3628 			field->ltype_mask = 0xF; /* Match only IPv6 */
3629 			break;
3630 		case NIX_FLOW_KEY_TYPE_TCP:
3631 		case NIX_FLOW_KEY_TYPE_UDP:
3632 		case NIX_FLOW_KEY_TYPE_SCTP:
3633 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
3634 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
3635 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
3636 			field->lid = NPC_LID_LD;
3637 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
3638 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
3639 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
3640 				field->lid = NPC_LID_LH;
3641 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
3642 
3643 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_SRC_ONLY)
3644 				field->bytesm1 = 1; /* SRC, 2 bytes */
3645 
3646 			if (l3_l4_src_dst & NIX_FLOW_KEY_TYPE_L4_DST_ONLY) {
3647 				/* Both SRC + DST */
3648 				if (field->bytesm1 == 1) {
3649 					/* SRC + DST, 4 bytes */
3650 					field->bytesm1 = 3;
3651 				} else {
3652 					/* Only DIP */
3653 					field->hdr_offset = 2; /* DST off */
3654 					field->bytesm1 = 1; /* DST, 2 bytes */
3655 				}
3656 			}
3657 
3658 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
3659 			 * so no need to change the ltype_match, just change
3660 			 * the lid for inner protocols
3661 			 */
3662 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
3663 				     (int)NPC_LT_LH_TU_TCP);
3664 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
3665 				     (int)NPC_LT_LH_TU_UDP);
3666 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
3667 				     (int)NPC_LT_LH_TU_SCTP);
3668 
3669 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
3670 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
3671 			    valid_key) {
3672 				field->ltype_match |= NPC_LT_LD_TCP;
3673 				group_member = true;
3674 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
3675 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
3676 				   valid_key) {
3677 				field->ltype_match |= NPC_LT_LD_UDP;
3678 				group_member = true;
3679 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3680 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
3681 				   valid_key) {
3682 				field->ltype_match |= NPC_LT_LD_SCTP;
3683 				group_member = true;
3684 			}
3685 			field->ltype_mask = ~field->ltype_match;
3686 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
3687 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
3688 				/* Handle the case where any of the group item
3689 				 * is enabled in the group but not the final one
3690 				 */
3691 				if (group_member) {
3692 					valid_key = true;
3693 					group_member = false;
3694 				}
3695 			} else {
3696 				field_marker = false;
3697 				keyoff_marker = false;
3698 			}
3699 
3700 			/* TCP/UDP/SCTP and ESP/AH falls at same offset so
3701 			 * remember the TCP key offset of 40 byte hash key.
3702 			 */
3703 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
3704 				l4_key_offset = key_off;
3705 			break;
3706 		case NIX_FLOW_KEY_TYPE_NVGRE:
3707 			field->lid = NPC_LID_LD;
3708 			field->hdr_offset = 4; /* VSID offset */
3709 			field->bytesm1 = 2;
3710 			field->ltype_match = NPC_LT_LD_NVGRE;
3711 			field->ltype_mask = 0xF;
3712 			break;
3713 		case NIX_FLOW_KEY_TYPE_VXLAN:
3714 		case NIX_FLOW_KEY_TYPE_GENEVE:
3715 			field->lid = NPC_LID_LE;
3716 			field->bytesm1 = 2;
3717 			field->hdr_offset = 4;
3718 			field->ltype_mask = 0xF;
3719 			field_marker = false;
3720 			keyoff_marker = false;
3721 
3722 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
3723 				field->ltype_match |= NPC_LT_LE_VXLAN;
3724 				group_member = true;
3725 			}
3726 
3727 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
3728 				field->ltype_match |= NPC_LT_LE_GENEVE;
3729 				group_member = true;
3730 			}
3731 
3732 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
3733 				if (group_member) {
3734 					field->ltype_mask = ~field->ltype_match;
3735 					field_marker = true;
3736 					keyoff_marker = true;
3737 					valid_key = true;
3738 					group_member = false;
3739 				}
3740 			}
3741 			break;
3742 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
3743 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
3744 			field->lid = NPC_LID_LA;
3745 			field->ltype_match = NPC_LT_LA_ETHER;
3746 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
3747 				field->lid = NPC_LID_LF;
3748 				field->ltype_match = NPC_LT_LF_TU_ETHER;
3749 			}
3750 			field->hdr_offset = 0;
3751 			field->bytesm1 = 5; /* DMAC 6 Byte */
3752 			field->ltype_mask = 0xF;
3753 			break;
3754 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
3755 			field->lid = NPC_LID_LC;
3756 			field->hdr_offset = 40; /* IPV6 hdr */
3757 			field->bytesm1 = 0; /* 1 Byte ext hdr*/
3758 			field->ltype_match = NPC_LT_LC_IP6_EXT;
3759 			field->ltype_mask = 0xF;
3760 			break;
3761 		case NIX_FLOW_KEY_TYPE_GTPU:
3762 			field->lid = NPC_LID_LE;
3763 			field->hdr_offset = 4;
3764 			field->bytesm1 = 3; /* 4 bytes TID*/
3765 			field->ltype_match = NPC_LT_LE_GTPU;
3766 			field->ltype_mask = 0xF;
3767 			break;
3768 		case NIX_FLOW_KEY_TYPE_VLAN:
3769 			field->lid = NPC_LID_LB;
3770 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
3771 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3772 			field->ltype_match = NPC_LT_LB_CTAG;
3773 			field->ltype_mask = 0xF;
3774 			field->fn_mask = 1; /* Mask out the first nibble */
3775 			break;
3776 		case NIX_FLOW_KEY_TYPE_AH:
3777 		case NIX_FLOW_KEY_TYPE_ESP:
3778 			field->hdr_offset = 0;
3779 			field->bytesm1 = 7; /* SPI + sequence number */
3780 			field->ltype_mask = 0xF;
3781 			field->lid = NPC_LID_LE;
3782 			field->ltype_match = NPC_LT_LE_ESP;
3783 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3784 				field->lid = NPC_LID_LD;
3785 				field->ltype_match = NPC_LT_LD_AH;
3786 				field->hdr_offset = 4;
3787 				keyoff_marker = false;
3788 			}
3789 			break;
3790 		}
3791 		field->ena = 1;
3792 
3793 		/* Found a valid flow key type */
3794 		if (valid_key) {
3795 			/* Use the key offset of TCP/UDP/SCTP fields
3796 			 * for ESP/AH fields.
3797 			 */
3798 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3799 			    key_type == NIX_FLOW_KEY_TYPE_AH)
3800 				key_off = l4_key_offset;
3801 			field->key_offset = key_off;
3802 			memcpy(&alg[nr_field], field, sizeof(*field));
3803 			max_key_off = max(max_key_off, field->bytesm1 + 1);
3804 
3805 			/* Found a field marker, get the next field */
3806 			if (field_marker)
3807 				nr_field++;
3808 		}
3809 
3810 		/* Found a keyoff marker, update the new key_off */
3811 		if (keyoff_marker) {
3812 			key_off += max_key_off;
3813 			max_key_off = 0;
3814 		}
3815 	}
3816 	/* Processed all the flow key types */
3817 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3818 		return 0;
3819 	else
3820 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
3821 }
3822 
3823 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3824 {
3825 	u64 field[FIELDS_PER_ALG];
3826 	struct nix_hw *hw;
3827 	int fid, rc;
3828 
3829 	hw = get_nix_hw(rvu->hw, blkaddr);
3830 	if (!hw)
3831 		return NIX_AF_ERR_INVALID_NIXBLK;
3832 
3833 	/* No room to add new flow hash algoritham */
3834 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3835 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
3836 
3837 	/* Generate algo fields for the given flow_cfg */
3838 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3839 	if (rc)
3840 		return rc;
3841 
3842 	/* Update ALGX_FIELDX register with generated fields */
3843 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3844 		rvu_write64(rvu, blkaddr,
3845 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3846 							   fid), field[fid]);
3847 
3848 	/* Store the flow_cfg for futher lookup */
3849 	rc = hw->flowkey.in_use;
3850 	hw->flowkey.flowkey[rc] = flow_cfg;
3851 	hw->flowkey.in_use++;
3852 
3853 	return rc;
3854 }
3855 
3856 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3857 					 struct nix_rss_flowkey_cfg *req,
3858 					 struct nix_rss_flowkey_cfg_rsp *rsp)
3859 {
3860 	u16 pcifunc = req->hdr.pcifunc;
3861 	int alg_idx, nixlf, blkaddr;
3862 	struct nix_hw *nix_hw;
3863 	int err;
3864 
3865 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3866 	if (err)
3867 		return err;
3868 
3869 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3870 	if (!nix_hw)
3871 		return NIX_AF_ERR_INVALID_NIXBLK;
3872 
3873 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3874 	/* Failed to get algo index from the exiting list, reserve new  */
3875 	if (alg_idx < 0) {
3876 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3877 						  req->flowkey_cfg);
3878 		if (alg_idx < 0)
3879 			return alg_idx;
3880 	}
3881 	rsp->alg_idx = alg_idx;
3882 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3883 				       alg_idx, req->mcam_index);
3884 	return 0;
3885 }
3886 
3887 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3888 {
3889 	u32 flowkey_cfg, minkey_cfg;
3890 	int alg, fid, rc;
3891 
3892 	/* Disable all flow key algx fieldx */
3893 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3894 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3895 			rvu_write64(rvu, blkaddr,
3896 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3897 				    0);
3898 	}
3899 
3900 	/* IPv4/IPv6 SIP/DIPs */
3901 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3902 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3903 	if (rc < 0)
3904 		return rc;
3905 
3906 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3907 	minkey_cfg = flowkey_cfg;
3908 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3909 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3910 	if (rc < 0)
3911 		return rc;
3912 
3913 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3914 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3915 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3916 	if (rc < 0)
3917 		return rc;
3918 
3919 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3920 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3921 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3922 	if (rc < 0)
3923 		return rc;
3924 
3925 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3926 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3927 			NIX_FLOW_KEY_TYPE_UDP;
3928 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3929 	if (rc < 0)
3930 		return rc;
3931 
3932 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3933 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3934 			NIX_FLOW_KEY_TYPE_SCTP;
3935 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3936 	if (rc < 0)
3937 		return rc;
3938 
3939 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3940 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3941 			NIX_FLOW_KEY_TYPE_SCTP;
3942 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3943 	if (rc < 0)
3944 		return rc;
3945 
3946 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3947 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3948 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3949 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3950 	if (rc < 0)
3951 		return rc;
3952 
3953 	return 0;
3954 }
3955 
3956 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3957 				      struct nix_set_mac_addr *req,
3958 				      struct msg_rsp *rsp)
3959 {
3960 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3961 	u16 pcifunc = req->hdr.pcifunc;
3962 	int blkaddr, nixlf, err;
3963 	struct rvu_pfvf *pfvf;
3964 
3965 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3966 	if (err)
3967 		return err;
3968 
3969 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3970 
3971 	/* untrusted VF can't overwrite admin(PF) changes */
3972 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3973 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3974 		dev_warn(rvu->dev,
3975 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3976 		return -EPERM;
3977 	}
3978 
3979 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3980 
3981 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3982 				    pfvf->rx_chan_base, req->mac_addr);
3983 
3984 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3985 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
3986 
3987 	rvu_switch_update_rules(rvu, pcifunc);
3988 
3989 	return 0;
3990 }
3991 
3992 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3993 				      struct msg_req *req,
3994 				      struct nix_get_mac_addr_rsp *rsp)
3995 {
3996 	u16 pcifunc = req->hdr.pcifunc;
3997 	struct rvu_pfvf *pfvf;
3998 
3999 	if (!is_nixlf_attached(rvu, pcifunc))
4000 		return NIX_AF_ERR_AF_LF_INVALID;
4001 
4002 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4003 
4004 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
4005 
4006 	return 0;
4007 }
4008 
4009 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
4010 				     struct msg_rsp *rsp)
4011 {
4012 	bool allmulti, promisc, nix_rx_multicast;
4013 	u16 pcifunc = req->hdr.pcifunc;
4014 	struct rvu_pfvf *pfvf;
4015 	int nixlf, err;
4016 
4017 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4018 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
4019 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
4020 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
4021 
4022 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
4023 
4024 	if (is_vf(pcifunc) && !nix_rx_multicast &&
4025 	    (promisc || allmulti)) {
4026 		dev_warn_ratelimited(rvu->dev,
4027 				     "VF promisc/multicast not supported\n");
4028 		return 0;
4029 	}
4030 
4031 	/* untrusted VF can't configure promisc/allmulti */
4032 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
4033 	    (promisc || allmulti))
4034 		return 0;
4035 
4036 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4037 	if (err)
4038 		return err;
4039 
4040 	if (nix_rx_multicast) {
4041 		/* add/del this PF_FUNC to/from mcast pkt replication list */
4042 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
4043 					  allmulti);
4044 		if (err) {
4045 			dev_err(rvu->dev,
4046 				"Failed to update pcifunc 0x%x to multicast list\n",
4047 				pcifunc);
4048 			return err;
4049 		}
4050 
4051 		/* add/del this PF_FUNC to/from promisc pkt replication list */
4052 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
4053 					  promisc);
4054 		if (err) {
4055 			dev_err(rvu->dev,
4056 				"Failed to update pcifunc 0x%x to promisc list\n",
4057 				pcifunc);
4058 			return err;
4059 		}
4060 	}
4061 
4062 	/* install/uninstall allmulti entry */
4063 	if (allmulti) {
4064 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
4065 					       pfvf->rx_chan_base);
4066 	} else {
4067 		if (!nix_rx_multicast)
4068 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
4069 	}
4070 
4071 	/* install/uninstall promisc entry */
4072 	if (promisc) {
4073 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
4074 					      pfvf->rx_chan_base,
4075 					      pfvf->rx_chan_cnt);
4076 
4077 		if (rvu_npc_exact_has_match_table(rvu))
4078 			rvu_npc_exact_promisc_enable(rvu, pcifunc);
4079 	} else {
4080 		if (!nix_rx_multicast)
4081 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
4082 
4083 		if (rvu_npc_exact_has_match_table(rvu))
4084 			rvu_npc_exact_promisc_disable(rvu, pcifunc);
4085 	}
4086 
4087 	return 0;
4088 }
4089 
4090 static void nix_find_link_frs(struct rvu *rvu,
4091 			      struct nix_frs_cfg *req, u16 pcifunc)
4092 {
4093 	int pf = rvu_get_pf(pcifunc);
4094 	struct rvu_pfvf *pfvf;
4095 	int maxlen, minlen;
4096 	int numvfs, hwvf;
4097 	int vf;
4098 
4099 	/* Update with requester's min/max lengths */
4100 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4101 	pfvf->maxlen = req->maxlen;
4102 	if (req->update_minlen)
4103 		pfvf->minlen = req->minlen;
4104 
4105 	maxlen = req->maxlen;
4106 	minlen = req->update_minlen ? req->minlen : 0;
4107 
4108 	/* Get this PF's numVFs and starting hwvf */
4109 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
4110 
4111 	/* For each VF, compare requested max/minlen */
4112 	for (vf = 0; vf < numvfs; vf++) {
4113 		pfvf =  &rvu->hwvf[hwvf + vf];
4114 		if (pfvf->maxlen > maxlen)
4115 			maxlen = pfvf->maxlen;
4116 		if (req->update_minlen &&
4117 		    pfvf->minlen && pfvf->minlen < minlen)
4118 			minlen = pfvf->minlen;
4119 	}
4120 
4121 	/* Compare requested max/minlen with PF's max/minlen */
4122 	pfvf = &rvu->pf[pf];
4123 	if (pfvf->maxlen > maxlen)
4124 		maxlen = pfvf->maxlen;
4125 	if (req->update_minlen &&
4126 	    pfvf->minlen && pfvf->minlen < minlen)
4127 		minlen = pfvf->minlen;
4128 
4129 	/* Update the request with max/min PF's and it's VF's max/min */
4130 	req->maxlen = maxlen;
4131 	if (req->update_minlen)
4132 		req->minlen = minlen;
4133 }
4134 
4135 static int
4136 nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
4137 			u16 pcifunc, u64 tx_credits)
4138 {
4139 	struct rvu_hwinfo *hw = rvu->hw;
4140 	int pf = rvu_get_pf(pcifunc);
4141 	u8 cgx_id = 0, lmac_id = 0;
4142 	unsigned long poll_tmo;
4143 	bool restore_tx_en = 0;
4144 	struct nix_hw *nix_hw;
4145 	u64 cfg, sw_xoff = 0;
4146 	u32 schq = 0;
4147 	u32 credits;
4148 	int rc;
4149 
4150 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4151 	if (!nix_hw)
4152 		return NIX_AF_ERR_INVALID_NIXBLK;
4153 
4154 	if (tx_credits == nix_hw->tx_credits[link])
4155 		return 0;
4156 
4157 	/* Enable cgx tx if disabled for credits to be back */
4158 	if (is_pf_cgxmapped(rvu, pf)) {
4159 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4160 		restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
4161 						    lmac_id, true);
4162 	}
4163 
4164 	mutex_lock(&rvu->rsrc_lock);
4165 	/* Disable new traffic to link */
4166 	if (hw->cap.nix_shaping) {
4167 		schq = nix_get_tx_link(rvu, pcifunc);
4168 		sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq));
4169 		rvu_write64(rvu, blkaddr,
4170 			    NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
4171 	}
4172 
4173 	rc = NIX_AF_ERR_LINK_CREDITS;
4174 	poll_tmo = jiffies + usecs_to_jiffies(200000);
4175 	/* Wait for credits to return */
4176 	do {
4177 		if (time_after(jiffies, poll_tmo))
4178 			goto exit;
4179 		usleep_range(100, 200);
4180 
4181 		cfg = rvu_read64(rvu, blkaddr,
4182 				 NIX_AF_TX_LINKX_NORM_CREDIT(link));
4183 		credits = (cfg >> 12) & 0xFFFFFULL;
4184 	} while (credits != nix_hw->tx_credits[link]);
4185 
4186 	cfg &= ~(0xFFFFFULL << 12);
4187 	cfg |= (tx_credits << 12);
4188 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4189 	rc = 0;
4190 
4191 	nix_hw->tx_credits[link] = tx_credits;
4192 
4193 exit:
4194 	/* Enable traffic back */
4195 	if (hw->cap.nix_shaping && !sw_xoff)
4196 		rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0);
4197 
4198 	/* Restore state of cgx tx */
4199 	if (restore_tx_en)
4200 		rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
4201 
4202 	mutex_unlock(&rvu->rsrc_lock);
4203 	return rc;
4204 }
4205 
4206 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
4207 				    struct msg_rsp *rsp)
4208 {
4209 	struct rvu_hwinfo *hw = rvu->hw;
4210 	u16 pcifunc = req->hdr.pcifunc;
4211 	int pf = rvu_get_pf(pcifunc);
4212 	int blkaddr, schq, link = -1;
4213 	struct nix_txsch *txsch;
4214 	u64 cfg, lmac_fifo_len;
4215 	struct nix_hw *nix_hw;
4216 	struct rvu_pfvf *pfvf;
4217 	u8 cgx = 0, lmac = 0;
4218 	u16 max_mtu;
4219 
4220 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4221 	if (blkaddr < 0)
4222 		return NIX_AF_ERR_AF_LF_INVALID;
4223 
4224 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4225 	if (!nix_hw)
4226 		return NIX_AF_ERR_INVALID_NIXBLK;
4227 
4228 	if (is_afvf(pcifunc))
4229 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
4230 	else
4231 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
4232 
4233 	if (!req->sdp_link && req->maxlen > max_mtu)
4234 		return NIX_AF_ERR_FRS_INVALID;
4235 
4236 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
4237 		return NIX_AF_ERR_FRS_INVALID;
4238 
4239 	/* Check if requester wants to update SMQ's */
4240 	if (!req->update_smq)
4241 		goto rx_frscfg;
4242 
4243 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
4244 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
4245 	mutex_lock(&rvu->rsrc_lock);
4246 	for (schq = 0; schq < txsch->schq.max; schq++) {
4247 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
4248 			continue;
4249 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
4250 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
4251 		if (req->update_minlen)
4252 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
4253 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
4254 	}
4255 	mutex_unlock(&rvu->rsrc_lock);
4256 
4257 rx_frscfg:
4258 	/* Check if config is for SDP link */
4259 	if (req->sdp_link) {
4260 		if (!hw->sdp_links)
4261 			return NIX_AF_ERR_RX_LINK_INVALID;
4262 		link = hw->cgx_links + hw->lbk_links;
4263 		goto linkcfg;
4264 	}
4265 
4266 	/* Check if the request is from CGX mapped RVU PF */
4267 	if (is_pf_cgxmapped(rvu, pf)) {
4268 		/* Get CGX and LMAC to which this PF is mapped and find link */
4269 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
4270 		link = (cgx * hw->lmac_per_cgx) + lmac;
4271 	} else if (pf == 0) {
4272 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
4273 		pfvf = rvu_get_pfvf(rvu, pcifunc);
4274 		link = hw->cgx_links + pfvf->lbkid;
4275 	}
4276 
4277 	if (link < 0)
4278 		return NIX_AF_ERR_RX_LINK_INVALID;
4279 
4280 	nix_find_link_frs(rvu, req, pcifunc);
4281 
4282 linkcfg:
4283 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
4284 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
4285 	if (req->update_minlen)
4286 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
4287 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
4288 
4289 	if (req->sdp_link || pf == 0)
4290 		return 0;
4291 
4292 	/* Update transmit credits for CGX links */
4293 	lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
4294 	if (!lmac_fifo_len) {
4295 		dev_err(rvu->dev,
4296 			"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4297 			__func__, cgx, lmac);
4298 		return 0;
4299 	}
4300 	return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
4301 				       (lmac_fifo_len - req->maxlen) / 16);
4302 }
4303 
4304 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
4305 				    struct msg_rsp *rsp)
4306 {
4307 	int nixlf, blkaddr, err;
4308 	u64 cfg;
4309 
4310 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
4311 	if (err)
4312 		return err;
4313 
4314 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
4315 	/* Set the interface configuration */
4316 	if (req->len_verify & BIT(0))
4317 		cfg |= BIT_ULL(41);
4318 	else
4319 		cfg &= ~BIT_ULL(41);
4320 
4321 	if (req->len_verify & BIT(1))
4322 		cfg |= BIT_ULL(40);
4323 	else
4324 		cfg &= ~BIT_ULL(40);
4325 
4326 	if (req->len_verify & NIX_RX_DROP_RE)
4327 		cfg |= BIT_ULL(32);
4328 	else
4329 		cfg &= ~BIT_ULL(32);
4330 
4331 	if (req->csum_verify & BIT(0))
4332 		cfg |= BIT_ULL(37);
4333 	else
4334 		cfg &= ~BIT_ULL(37);
4335 
4336 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
4337 
4338 	return 0;
4339 }
4340 
4341 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
4342 {
4343 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
4344 }
4345 
4346 static void nix_link_config(struct rvu *rvu, int blkaddr,
4347 			    struct nix_hw *nix_hw)
4348 {
4349 	struct rvu_hwinfo *hw = rvu->hw;
4350 	int cgx, lmac_cnt, slink, link;
4351 	u16 lbk_max_frs, lmac_max_frs;
4352 	unsigned long lmac_bmap;
4353 	u64 tx_credits, cfg;
4354 	u64 lmac_fifo_len;
4355 	int iter;
4356 
4357 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
4358 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
4359 
4360 	/* Set default min/max packet lengths allowed on NIX Rx links.
4361 	 *
4362 	 * With HW reset minlen value of 60byte, HW will treat ARP pkts
4363 	 * as undersize and report them to SW as error pkts, hence
4364 	 * setting it to 40 bytes.
4365 	 */
4366 	for (link = 0; link < hw->cgx_links; link++) {
4367 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4368 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
4369 	}
4370 
4371 	for (link = hw->cgx_links; link < hw->lbk_links; link++) {
4372 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4373 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
4374 	}
4375 	if (hw->sdp_links) {
4376 		link = hw->cgx_links + hw->lbk_links;
4377 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
4378 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
4379 	}
4380 
4381 	/* Set credits for Tx links assuming max packet length allowed.
4382 	 * This will be reconfigured based on MTU set for PF/VF.
4383 	 */
4384 	for (cgx = 0; cgx < hw->cgx; cgx++) {
4385 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
4386 		/* Skip when cgx is not available or lmac cnt is zero */
4387 		if (lmac_cnt <= 0)
4388 			continue;
4389 		slink = cgx * hw->lmac_per_cgx;
4390 
4391 		/* Get LMAC id's from bitmap */
4392 		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
4393 		for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
4394 			lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
4395 			if (!lmac_fifo_len) {
4396 				dev_err(rvu->dev,
4397 					"%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
4398 					__func__, cgx, iter);
4399 				continue;
4400 			}
4401 			tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
4402 			/* Enable credits and set credit pkt count to max allowed */
4403 			cfg =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4404 
4405 			link = iter + slink;
4406 			nix_hw->tx_credits[link] = tx_credits;
4407 			rvu_write64(rvu, blkaddr,
4408 				    NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
4409 		}
4410 	}
4411 
4412 	/* Set Tx credits for LBK link */
4413 	slink = hw->cgx_links;
4414 	for (link = slink; link < (slink + hw->lbk_links); link++) {
4415 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
4416 		nix_hw->tx_credits[link] = tx_credits;
4417 		/* Enable credits and set credit pkt count to max allowed */
4418 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
4419 		rvu_write64(rvu, blkaddr,
4420 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
4421 	}
4422 }
4423 
4424 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
4425 {
4426 	int idx, err;
4427 	u64 status;
4428 
4429 	/* Start X2P bus calibration */
4430 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4431 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
4432 	/* Wait for calibration to complete */
4433 	err = rvu_poll_reg(rvu, blkaddr,
4434 			   NIX_AF_STATUS, BIT_ULL(10), false);
4435 	if (err) {
4436 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
4437 		return err;
4438 	}
4439 
4440 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
4441 	/* Check if CGX devices are ready */
4442 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
4443 		/* Skip when cgx port is not available */
4444 		if (!rvu_cgx_pdata(idx, rvu) ||
4445 		    (status & (BIT_ULL(16 + idx))))
4446 			continue;
4447 		dev_err(rvu->dev,
4448 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
4449 		err = -EBUSY;
4450 	}
4451 
4452 	/* Check if LBK is ready */
4453 	if (!(status & BIT_ULL(19))) {
4454 		dev_err(rvu->dev,
4455 			"LBK didn't respond to NIX X2P calibration\n");
4456 		err = -EBUSY;
4457 	}
4458 
4459 	/* Clear 'calibrate_x2p' bit */
4460 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4461 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
4462 	if (err || (status & 0x3FFULL))
4463 		dev_err(rvu->dev,
4464 			"NIX X2P calibration failed, status 0x%llx\n", status);
4465 	if (err)
4466 		return err;
4467 	return 0;
4468 }
4469 
4470 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
4471 {
4472 	u64 cfg;
4473 	int err;
4474 
4475 	/* Set admin queue endianness */
4476 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
4477 #ifdef __BIG_ENDIAN
4478 	cfg |= BIT_ULL(8);
4479 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4480 #else
4481 	cfg &= ~BIT_ULL(8);
4482 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
4483 #endif
4484 
4485 	/* Do not bypass NDC cache */
4486 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
4487 	cfg &= ~0x3FFEULL;
4488 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
4489 	/* Disable caching of SQB aka SQEs */
4490 	cfg |= 0x04ULL;
4491 #endif
4492 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
4493 
4494 	/* Result structure can be followed by RQ/SQ/CQ context at
4495 	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
4496 	 * operation type. Alloc sufficient result memory for all operations.
4497 	 */
4498 	err = rvu_aq_alloc(rvu, &block->aq,
4499 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
4500 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
4501 	if (err)
4502 		return err;
4503 
4504 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
4505 	rvu_write64(rvu, block->addr,
4506 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
4507 	return 0;
4508 }
4509 
4510 static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr)
4511 {
4512 	struct rvu_hwinfo *hw = rvu->hw;
4513 	u64 hw_const;
4514 
4515 	hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4516 
4517 	/* On OcteonTx2 DWRR quantum is directly configured into each of
4518 	 * the transmit scheduler queues. And PF/VF drivers were free to
4519 	 * config any value upto 2^24.
4520 	 * On CN10K, HW is modified, the quantum configuration at scheduler
4521 	 * queues is in terms of weight. And SW needs to setup a base DWRR MTU
4522 	 * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do
4523 	 * 'DWRR MTU * weight' to get the quantum.
4524 	 *
4525 	 * Check if HW uses a common MTU for all DWRR quantum configs.
4526 	 * On OcteonTx2 this register field is '0'.
4527 	 */
4528 	if ((((hw_const >> 56) & 0x10) == 0x10) && !(hw_const & BIT_ULL(61)))
4529 		hw->cap.nix_common_dwrr_mtu = true;
4530 
4531 	if (hw_const & BIT_ULL(61))
4532 		hw->cap.nix_multiple_dwrr_mtu = true;
4533 }
4534 
4535 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
4536 {
4537 	const struct npc_lt_def_cfg *ltdefs;
4538 	struct rvu_hwinfo *hw = rvu->hw;
4539 	int blkaddr = nix_hw->blkaddr;
4540 	struct rvu_block *block;
4541 	int err;
4542 	u64 cfg;
4543 
4544 	block = &hw->block[blkaddr];
4545 
4546 	if (is_rvu_96xx_B0(rvu)) {
4547 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
4548 		 * internal state when conditional clocks are turned off.
4549 		 * Hence enable them.
4550 		 */
4551 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
4552 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
4553 
4554 		/* Set chan/link to backpressure TL3 instead of TL2 */
4555 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
4556 
4557 		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
4558 		 * This sticky mode is known to cause SQ stalls when multiple
4559 		 * SQs are mapped to same SMQ and transmitting pkts at a time.
4560 		 */
4561 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
4562 		cfg &= ~BIT_ULL(15);
4563 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
4564 	}
4565 
4566 	ltdefs = rvu->kpu.lt_def;
4567 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
4568 	err = nix_calibrate_x2p(rvu, blkaddr);
4569 	if (err)
4570 		return err;
4571 
4572 	/* Setup capabilities of the NIX block */
4573 	rvu_nix_setup_capabilities(rvu, blkaddr);
4574 
4575 	/* Initialize admin queue */
4576 	err = nix_aq_init(rvu, block);
4577 	if (err)
4578 		return err;
4579 
4580 	/* Restore CINT timer delay to HW reset values */
4581 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
4582 
4583 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
4584 
4585 	/* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
4586 	cfg |= 1ULL;
4587 	if (!is_rvu_otx2(rvu))
4588 		cfg |= NIX_PTP_1STEP_EN;
4589 
4590 	rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
4591 
4592 	if (!is_rvu_otx2(rvu))
4593 		rvu_nix_block_cn10k_init(rvu, nix_hw);
4594 
4595 	if (is_block_implemented(hw, blkaddr)) {
4596 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
4597 		if (err)
4598 			return err;
4599 
4600 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
4601 		if (err)
4602 			return err;
4603 
4604 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
4605 		if (err)
4606 			return err;
4607 
4608 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
4609 		if (err)
4610 			return err;
4611 
4612 		err = nix_setup_txvlan(rvu, nix_hw);
4613 		if (err)
4614 			return err;
4615 
4616 		/* Configure segmentation offload formats */
4617 		nix_setup_lso(rvu, nix_hw, blkaddr);
4618 
4619 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
4620 		 * This helps HW protocol checker to identify headers
4621 		 * and validate length and checksums.
4622 		 */
4623 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
4624 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
4625 			    ltdefs->rx_ol2.ltype_mask);
4626 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
4627 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
4628 			    ltdefs->rx_oip4.ltype_mask);
4629 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
4630 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
4631 			    ltdefs->rx_iip4.ltype_mask);
4632 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
4633 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
4634 			    ltdefs->rx_oip6.ltype_mask);
4635 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
4636 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
4637 			    ltdefs->rx_iip6.ltype_mask);
4638 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
4639 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
4640 			    ltdefs->rx_otcp.ltype_mask);
4641 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
4642 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
4643 			    ltdefs->rx_itcp.ltype_mask);
4644 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
4645 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
4646 			    ltdefs->rx_oudp.ltype_mask);
4647 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
4648 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
4649 			    ltdefs->rx_iudp.ltype_mask);
4650 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
4651 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
4652 			    ltdefs->rx_osctp.ltype_mask);
4653 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
4654 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
4655 			    ltdefs->rx_isctp.ltype_mask);
4656 
4657 		if (!is_rvu_otx2(rvu)) {
4658 			/* Enable APAD calculation for other protocols
4659 			 * matching APAD0 and APAD1 lt def registers.
4660 			 */
4661 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
4662 				    (ltdefs->rx_apad0.valid << 11) |
4663 				    (ltdefs->rx_apad0.lid << 8) |
4664 				    (ltdefs->rx_apad0.ltype_match << 4) |
4665 				    ltdefs->rx_apad0.ltype_mask);
4666 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
4667 				    (ltdefs->rx_apad1.valid << 11) |
4668 				    (ltdefs->rx_apad1.lid << 8) |
4669 				    (ltdefs->rx_apad1.ltype_match << 4) |
4670 				    ltdefs->rx_apad1.ltype_mask);
4671 
4672 			/* Receive ethertype defination register defines layer
4673 			 * information in NPC_RESULT_S to identify the Ethertype
4674 			 * location in L2 header. Used for Ethertype overwriting
4675 			 * in inline IPsec flow.
4676 			 */
4677 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
4678 				    (ltdefs->rx_et[0].offset << 12) |
4679 				    (ltdefs->rx_et[0].valid << 11) |
4680 				    (ltdefs->rx_et[0].lid << 8) |
4681 				    (ltdefs->rx_et[0].ltype_match << 4) |
4682 				    ltdefs->rx_et[0].ltype_mask);
4683 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
4684 				    (ltdefs->rx_et[1].offset << 12) |
4685 				    (ltdefs->rx_et[1].valid << 11) |
4686 				    (ltdefs->rx_et[1].lid << 8) |
4687 				    (ltdefs->rx_et[1].ltype_match << 4) |
4688 				    ltdefs->rx_et[1].ltype_mask);
4689 		}
4690 
4691 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
4692 		if (err)
4693 			return err;
4694 
4695 		nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links,
4696 					     sizeof(u64), GFP_KERNEL);
4697 		if (!nix_hw->tx_credits)
4698 			return -ENOMEM;
4699 
4700 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
4701 		nix_link_config(rvu, blkaddr, nix_hw);
4702 
4703 		/* Enable Channel backpressure */
4704 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
4705 	}
4706 	return 0;
4707 }
4708 
4709 int rvu_nix_init(struct rvu *rvu)
4710 {
4711 	struct rvu_hwinfo *hw = rvu->hw;
4712 	struct nix_hw *nix_hw;
4713 	int blkaddr = 0, err;
4714 	int i = 0;
4715 
4716 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
4717 			       GFP_KERNEL);
4718 	if (!hw->nix)
4719 		return -ENOMEM;
4720 
4721 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4722 	while (blkaddr) {
4723 		nix_hw = &hw->nix[i];
4724 		nix_hw->rvu = rvu;
4725 		nix_hw->blkaddr = blkaddr;
4726 		err = rvu_nix_block_init(rvu, nix_hw);
4727 		if (err)
4728 			return err;
4729 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4730 		i++;
4731 	}
4732 
4733 	return 0;
4734 }
4735 
4736 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
4737 				  struct rvu_block *block)
4738 {
4739 	struct nix_txsch *txsch;
4740 	struct nix_mcast *mcast;
4741 	struct nix_txvlan *vlan;
4742 	struct nix_hw *nix_hw;
4743 	int lvl;
4744 
4745 	rvu_aq_free(rvu, block->aq);
4746 
4747 	if (is_block_implemented(rvu->hw, blkaddr)) {
4748 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
4749 		if (!nix_hw)
4750 			return;
4751 
4752 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
4753 			txsch = &nix_hw->txsch[lvl];
4754 			kfree(txsch->schq.bmap);
4755 		}
4756 
4757 		kfree(nix_hw->tx_credits);
4758 
4759 		nix_ipolicer_freemem(rvu, nix_hw);
4760 
4761 		vlan = &nix_hw->txvlan;
4762 		kfree(vlan->rsrc.bmap);
4763 		mutex_destroy(&vlan->rsrc_lock);
4764 
4765 		mcast = &nix_hw->mcast;
4766 		qmem_free(rvu->dev, mcast->mce_ctx);
4767 		qmem_free(rvu->dev, mcast->mcast_buf);
4768 		mutex_destroy(&mcast->mce_lock);
4769 	}
4770 }
4771 
4772 void rvu_nix_freemem(struct rvu *rvu)
4773 {
4774 	struct rvu_hwinfo *hw = rvu->hw;
4775 	struct rvu_block *block;
4776 	int blkaddr = 0;
4777 
4778 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4779 	while (blkaddr) {
4780 		block = &hw->block[blkaddr];
4781 		rvu_nix_block_freemem(rvu, blkaddr, block);
4782 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
4783 	}
4784 }
4785 
4786 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
4787 				     struct msg_rsp *rsp)
4788 {
4789 	u16 pcifunc = req->hdr.pcifunc;
4790 	struct rvu_pfvf *pfvf;
4791 	int nixlf, err;
4792 
4793 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4794 	if (err)
4795 		return err;
4796 
4797 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
4798 
4799 	npc_mcam_enable_flows(rvu, pcifunc);
4800 
4801 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4802 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
4803 
4804 	rvu_switch_update_rules(rvu, pcifunc);
4805 
4806 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
4807 }
4808 
4809 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
4810 				    struct msg_rsp *rsp)
4811 {
4812 	u16 pcifunc = req->hdr.pcifunc;
4813 	struct rvu_pfvf *pfvf;
4814 	int nixlf, err;
4815 
4816 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
4817 	if (err)
4818 		return err;
4819 
4820 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4821 
4822 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4823 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4824 
4825 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
4826 }
4827 
4828 #define RX_SA_BASE  GENMASK_ULL(52, 7)
4829 
4830 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
4831 {
4832 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
4833 	struct hwctx_disable_req ctx_req;
4834 	int pf = rvu_get_pf(pcifunc);
4835 	struct mac_ops *mac_ops;
4836 	u8 cgx_id, lmac_id;
4837 	u64 sa_base;
4838 	void *cgxd;
4839 	int err;
4840 
4841 	ctx_req.hdr.pcifunc = pcifunc;
4842 
4843 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
4844 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
4845 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
4846 	nix_interface_deinit(rvu, pcifunc, nixlf);
4847 	nix_rx_sync(rvu, blkaddr);
4848 	nix_txschq_free(rvu, pcifunc);
4849 
4850 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
4851 
4852 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
4853 
4854 	if (pfvf->sq_ctx) {
4855 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
4856 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
4857 		if (err)
4858 			dev_err(rvu->dev, "SQ ctx disable failed\n");
4859 	}
4860 
4861 	if (pfvf->rq_ctx) {
4862 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
4863 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
4864 		if (err)
4865 			dev_err(rvu->dev, "RQ ctx disable failed\n");
4866 	}
4867 
4868 	if (pfvf->cq_ctx) {
4869 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
4870 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
4871 		if (err)
4872 			dev_err(rvu->dev, "CQ ctx disable failed\n");
4873 	}
4874 
4875 	/* reset HW config done for Switch headers */
4876 	rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
4877 			       (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
4878 
4879 	/* Disabling CGX and NPC config done for PTP */
4880 	if (pfvf->hw_rx_tstamp_en) {
4881 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
4882 		cgxd = rvu_cgx_pdata(cgx_id, rvu);
4883 		mac_ops = get_mac_ops(cgxd);
4884 		mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
4885 		/* Undo NPC config done for PTP */
4886 		if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
4887 			dev_err(rvu->dev, "NPC config for PTP failed\n");
4888 		pfvf->hw_rx_tstamp_en = false;
4889 	}
4890 
4891 	/* reset priority flow control config */
4892 	rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
4893 
4894 	/* reset 802.3x flow control config */
4895 	rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
4896 
4897 	nix_ctx_free(rvu, pfvf);
4898 
4899 	nix_free_all_bandprof(rvu, pcifunc);
4900 
4901 	sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
4902 	if (FIELD_GET(RX_SA_BASE, sa_base)) {
4903 		err = rvu_cpt_ctx_flush(rvu, pcifunc);
4904 		if (err)
4905 			dev_err(rvu->dev,
4906 				"CPT ctx flush failed with error: %d\n", err);
4907 	}
4908 }
4909 
4910 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
4911 
4912 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
4913 {
4914 	struct rvu_hwinfo *hw = rvu->hw;
4915 	struct rvu_block *block;
4916 	int blkaddr, pf;
4917 	int nixlf;
4918 	u64 cfg;
4919 
4920 	pf = rvu_get_pf(pcifunc);
4921 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
4922 		return 0;
4923 
4924 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4925 	if (blkaddr < 0)
4926 		return NIX_AF_ERR_AF_LF_INVALID;
4927 
4928 	block = &hw->block[blkaddr];
4929 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
4930 	if (nixlf < 0)
4931 		return NIX_AF_ERR_AF_LF_INVALID;
4932 
4933 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
4934 
4935 	if (enable)
4936 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
4937 	else
4938 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
4939 
4940 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
4941 
4942 	return 0;
4943 }
4944 
4945 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
4946 					  struct msg_rsp *rsp)
4947 {
4948 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
4949 }
4950 
4951 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
4952 					   struct msg_rsp *rsp)
4953 {
4954 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
4955 }
4956 
4957 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
4958 					struct nix_lso_format_cfg *req,
4959 					struct nix_lso_format_cfg_rsp *rsp)
4960 {
4961 	u16 pcifunc = req->hdr.pcifunc;
4962 	struct nix_hw *nix_hw;
4963 	struct rvu_pfvf *pfvf;
4964 	int blkaddr, idx, f;
4965 	u64 reg;
4966 
4967 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4968 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4969 	if (!pfvf->nixlf || blkaddr < 0)
4970 		return NIX_AF_ERR_AF_LF_INVALID;
4971 
4972 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4973 	if (!nix_hw)
4974 		return NIX_AF_ERR_INVALID_NIXBLK;
4975 
4976 	/* Find existing matching LSO format, if any */
4977 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4978 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4979 			reg = rvu_read64(rvu, blkaddr,
4980 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4981 			if (req->fields[f] != (reg & req->field_mask))
4982 				break;
4983 		}
4984 
4985 		if (f == NIX_LSO_FIELD_MAX)
4986 			break;
4987 	}
4988 
4989 	if (idx < nix_hw->lso.in_use) {
4990 		/* Match found */
4991 		rsp->lso_format_idx = idx;
4992 		return 0;
4993 	}
4994 
4995 	if (nix_hw->lso.in_use == nix_hw->lso.total)
4996 		return NIX_AF_ERR_LSO_CFG_FAIL;
4997 
4998 	rsp->lso_format_idx = nix_hw->lso.in_use++;
4999 
5000 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
5001 		rvu_write64(rvu, blkaddr,
5002 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
5003 			    req->fields[f]);
5004 
5005 	return 0;
5006 }
5007 
5008 #define IPSEC_GEN_CFG_EGRP    GENMASK_ULL(50, 48)
5009 #define IPSEC_GEN_CFG_OPCODE  GENMASK_ULL(47, 32)
5010 #define IPSEC_GEN_CFG_PARAM1  GENMASK_ULL(31, 16)
5011 #define IPSEC_GEN_CFG_PARAM2  GENMASK_ULL(15, 0)
5012 
5013 #define CPT_INST_QSEL_BLOCK   GENMASK_ULL(28, 24)
5014 #define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
5015 #define CPT_INST_QSEL_SLOT    GENMASK_ULL(7, 0)
5016 
5017 #define CPT_INST_CREDIT_TH    GENMASK_ULL(53, 32)
5018 #define CPT_INST_CREDIT_BPID  GENMASK_ULL(30, 22)
5019 #define CPT_INST_CREDIT_CNT   GENMASK_ULL(21, 0)
5020 
5021 static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
5022 				 int blkaddr)
5023 {
5024 	u8 cpt_idx, cpt_blkaddr;
5025 	u64 val;
5026 
5027 	cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
5028 	if (req->enable) {
5029 		val = 0;
5030 		/* Enable context prefetching */
5031 		if (!is_rvu_otx2(rvu))
5032 			val |= BIT_ULL(51);
5033 
5034 		/* Set OPCODE and EGRP */
5035 		val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
5036 		val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
5037 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
5038 		val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
5039 
5040 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
5041 
5042 		/* Set CPT queue for inline IPSec */
5043 		val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
5044 		val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
5045 				  req->inst_qsel.cpt_pf_func);
5046 
5047 		if (!is_rvu_otx2(rvu)) {
5048 			cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
5049 						       BLKADDR_CPT1;
5050 			val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
5051 		}
5052 
5053 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5054 			    val);
5055 
5056 		/* Set CPT credit */
5057 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5058 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5059 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5060 				    0x3FFFFF - val);
5061 
5062 		val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
5063 		val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
5064 		val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
5065 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
5066 	} else {
5067 		rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
5068 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
5069 			    0x0);
5070 		val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
5071 		if ((val & 0x3FFFFF) != 0x3FFFFF)
5072 			rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
5073 				    0x3FFFFF - val);
5074 	}
5075 }
5076 
5077 int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
5078 					  struct nix_inline_ipsec_cfg *req,
5079 					  struct msg_rsp *rsp)
5080 {
5081 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5082 		return 0;
5083 
5084 	nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
5085 	if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
5086 		nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
5087 
5088 	return 0;
5089 }
5090 
5091 int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
5092 					       struct msg_req *req,
5093 					       struct nix_inline_ipsec_cfg *rsp)
5094 
5095 {
5096 	u64 val;
5097 
5098 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5099 		return 0;
5100 
5101 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
5102 	rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
5103 	rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
5104 	rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
5105 	rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
5106 
5107 	val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
5108 	rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
5109 	rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
5110 	rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
5111 
5112 	return 0;
5113 }
5114 
5115 int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
5116 					     struct nix_inline_ipsec_lf_cfg *req,
5117 					     struct msg_rsp *rsp)
5118 {
5119 	int lf, blkaddr, err;
5120 	u64 val;
5121 
5122 	if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
5123 		return 0;
5124 
5125 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
5126 	if (err)
5127 		return err;
5128 
5129 	if (req->enable) {
5130 		/* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
5131 		val = (u64)req->ipsec_cfg0.tt << 44 |
5132 		      (u64)req->ipsec_cfg0.tag_const << 20 |
5133 		      (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
5134 		      req->ipsec_cfg0.lenm1_max;
5135 
5136 		if (blkaddr == BLKADDR_NIX1)
5137 			val |= BIT_ULL(46);
5138 
5139 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
5140 
5141 		/* Set SA_IDX_W and SA_IDX_MAX */
5142 		val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
5143 		      req->ipsec_cfg1.sa_idx_max;
5144 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
5145 
5146 		/* Set SA base address */
5147 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5148 			    req->sa_base_addr);
5149 	} else {
5150 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
5151 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
5152 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
5153 			    0x0);
5154 	}
5155 
5156 	return 0;
5157 }
5158 
5159 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
5160 {
5161 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
5162 
5163 	/* overwrite vf mac address with default_mac */
5164 	if (from_vf)
5165 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
5166 }
5167 
5168 /* NIX ingress policers or bandwidth profiles APIs */
5169 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
5170 {
5171 	struct npc_lt_def_cfg defs, *ltdefs;
5172 
5173 	ltdefs = &defs;
5174 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
5175 
5176 	/* Extract PCP and DEI fields from outer VLAN from byte offset
5177 	 * 2 from the start of LB_PTR (ie TAG).
5178 	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
5179 	 * fields are considered when 'Tunnel enable' is set in profile.
5180 	 */
5181 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
5182 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
5183 		    (ltdefs->ovlan.ltype_match << 4) |
5184 		    ltdefs->ovlan.ltype_mask);
5185 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
5186 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
5187 		    (ltdefs->ivlan.ltype_match << 4) |
5188 		    ltdefs->ivlan.ltype_mask);
5189 
5190 	/* DSCP field in outer and tunneled IPv4 packets */
5191 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
5192 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
5193 		    (ltdefs->rx_oip4.ltype_match << 4) |
5194 		    ltdefs->rx_oip4.ltype_mask);
5195 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
5196 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
5197 		    (ltdefs->rx_iip4.ltype_match << 4) |
5198 		    ltdefs->rx_iip4.ltype_mask);
5199 
5200 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
5201 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
5202 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
5203 		    (ltdefs->rx_oip6.ltype_match << 4) |
5204 		    ltdefs->rx_oip6.ltype_mask);
5205 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
5206 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
5207 		    (ltdefs->rx_iip6.ltype_match << 4) |
5208 		    ltdefs->rx_iip6.ltype_mask);
5209 }
5210 
5211 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
5212 				    int layer, int prof_idx)
5213 {
5214 	struct nix_cn10k_aq_enq_req aq_req;
5215 	int rc;
5216 
5217 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5218 
5219 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
5220 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5221 	aq_req.op = NIX_AQ_INSTOP_INIT;
5222 
5223 	/* Context is all zeros, submit to AQ */
5224 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5225 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5226 	if (rc)
5227 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
5228 			layer, prof_idx);
5229 	return rc;
5230 }
5231 
5232 static int nix_setup_ipolicers(struct rvu *rvu,
5233 			       struct nix_hw *nix_hw, int blkaddr)
5234 {
5235 	struct rvu_hwinfo *hw = rvu->hw;
5236 	struct nix_ipolicer *ipolicer;
5237 	int err, layer, prof_idx;
5238 	u64 cfg;
5239 
5240 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5241 	if (!(cfg & BIT_ULL(61))) {
5242 		hw->cap.ipolicer = false;
5243 		return 0;
5244 	}
5245 
5246 	hw->cap.ipolicer = true;
5247 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
5248 					sizeof(*ipolicer), GFP_KERNEL);
5249 	if (!nix_hw->ipolicer)
5250 		return -ENOMEM;
5251 
5252 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
5253 
5254 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5255 		ipolicer = &nix_hw->ipolicer[layer];
5256 		switch (layer) {
5257 		case BAND_PROF_LEAF_LAYER:
5258 			ipolicer->band_prof.max = cfg & 0XFFFF;
5259 			break;
5260 		case BAND_PROF_MID_LAYER:
5261 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
5262 			break;
5263 		case BAND_PROF_TOP_LAYER:
5264 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
5265 			break;
5266 		}
5267 
5268 		if (!ipolicer->band_prof.max)
5269 			continue;
5270 
5271 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
5272 		if (err)
5273 			return err;
5274 
5275 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
5276 						  ipolicer->band_prof.max,
5277 						  sizeof(u16), GFP_KERNEL);
5278 		if (!ipolicer->pfvf_map)
5279 			return -ENOMEM;
5280 
5281 		ipolicer->match_id = devm_kcalloc(rvu->dev,
5282 						  ipolicer->band_prof.max,
5283 						  sizeof(u16), GFP_KERNEL);
5284 		if (!ipolicer->match_id)
5285 			return -ENOMEM;
5286 
5287 		for (prof_idx = 0;
5288 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
5289 			/* Set AF as current owner for INIT ops to succeed */
5290 			ipolicer->pfvf_map[prof_idx] = 0x00;
5291 
5292 			/* There is no enable bit in the profile context,
5293 			 * so no context disable. So let's INIT them here
5294 			 * so that PF/VF later on have to just do WRITE to
5295 			 * setup policer rates and config.
5296 			 */
5297 			err = nix_init_policer_context(rvu, nix_hw,
5298 						       layer, prof_idx);
5299 			if (err)
5300 				return err;
5301 		}
5302 
5303 		/* Allocate memory for maintaining ref_counts for MID level
5304 		 * profiles, this will be needed for leaf layer profiles'
5305 		 * aggregation.
5306 		 */
5307 		if (layer != BAND_PROF_MID_LAYER)
5308 			continue;
5309 
5310 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
5311 						   ipolicer->band_prof.max,
5312 						   sizeof(u16), GFP_KERNEL);
5313 		if (!ipolicer->ref_count)
5314 			return -ENOMEM;
5315 	}
5316 
5317 	/* Set policer timeunit to 2us ie  (19 + 1) * 100 nsec = 2us */
5318 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
5319 
5320 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
5321 
5322 	return 0;
5323 }
5324 
5325 static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
5326 {
5327 	struct nix_ipolicer *ipolicer;
5328 	int layer;
5329 
5330 	if (!rvu->hw->cap.ipolicer)
5331 		return;
5332 
5333 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5334 		ipolicer = &nix_hw->ipolicer[layer];
5335 
5336 		if (!ipolicer->band_prof.max)
5337 			continue;
5338 
5339 		kfree(ipolicer->band_prof.bmap);
5340 	}
5341 }
5342 
5343 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5344 			       struct nix_hw *nix_hw, u16 pcifunc)
5345 {
5346 	struct nix_ipolicer *ipolicer;
5347 	int layer, hi_layer, prof_idx;
5348 
5349 	/* Bits [15:14] in profile index represent layer */
5350 	layer = (req->qidx >> 14) & 0x03;
5351 	prof_idx = req->qidx & 0x3FFF;
5352 
5353 	ipolicer = &nix_hw->ipolicer[layer];
5354 	if (prof_idx >= ipolicer->band_prof.max)
5355 		return -EINVAL;
5356 
5357 	/* Check if the profile is allocated to the requesting PCIFUNC or not
5358 	 * with the exception of AF. AF is allowed to read and update contexts.
5359 	 */
5360 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
5361 		return -EINVAL;
5362 
5363 	/* If this profile is linked to higher layer profile then check
5364 	 * if that profile is also allocated to the requesting PCIFUNC
5365 	 * or not.
5366 	 */
5367 	if (!req->prof.hl_en)
5368 		return 0;
5369 
5370 	/* Leaf layer profile can link only to mid layer and
5371 	 * mid layer to top layer.
5372 	 */
5373 	if (layer == BAND_PROF_LEAF_LAYER)
5374 		hi_layer = BAND_PROF_MID_LAYER;
5375 	else if (layer == BAND_PROF_MID_LAYER)
5376 		hi_layer = BAND_PROF_TOP_LAYER;
5377 	else
5378 		return -EINVAL;
5379 
5380 	ipolicer = &nix_hw->ipolicer[hi_layer];
5381 	prof_idx = req->prof.band_prof_id;
5382 	if (prof_idx >= ipolicer->band_prof.max ||
5383 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
5384 		return -EINVAL;
5385 
5386 	return 0;
5387 }
5388 
5389 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
5390 					struct nix_bandprof_alloc_req *req,
5391 					struct nix_bandprof_alloc_rsp *rsp)
5392 {
5393 	int blkaddr, layer, prof, idx, err;
5394 	u16 pcifunc = req->hdr.pcifunc;
5395 	struct nix_ipolicer *ipolicer;
5396 	struct nix_hw *nix_hw;
5397 
5398 	if (!rvu->hw->cap.ipolicer)
5399 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5400 
5401 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5402 	if (err)
5403 		return err;
5404 
5405 	mutex_lock(&rvu->rsrc_lock);
5406 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5407 		if (layer == BAND_PROF_INVAL_LAYER)
5408 			continue;
5409 		if (!req->prof_count[layer])
5410 			continue;
5411 
5412 		ipolicer = &nix_hw->ipolicer[layer];
5413 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5414 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
5415 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5416 				break;
5417 
5418 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5419 			if (prof < 0)
5420 				break;
5421 			rsp->prof_count[layer]++;
5422 			rsp->prof_idx[layer][idx] = prof;
5423 			ipolicer->pfvf_map[prof] = pcifunc;
5424 		}
5425 	}
5426 	mutex_unlock(&rvu->rsrc_lock);
5427 	return 0;
5428 }
5429 
5430 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
5431 {
5432 	int blkaddr, layer, prof_idx, err;
5433 	struct nix_ipolicer *ipolicer;
5434 	struct nix_hw *nix_hw;
5435 
5436 	if (!rvu->hw->cap.ipolicer)
5437 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5438 
5439 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5440 	if (err)
5441 		return err;
5442 
5443 	mutex_lock(&rvu->rsrc_lock);
5444 	/* Free all the profiles allocated to the PCIFUNC */
5445 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5446 		if (layer == BAND_PROF_INVAL_LAYER)
5447 			continue;
5448 		ipolicer = &nix_hw->ipolicer[layer];
5449 
5450 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
5451 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
5452 				continue;
5453 
5454 			/* Clear ratelimit aggregation, if any */
5455 			if (layer == BAND_PROF_LEAF_LAYER &&
5456 			    ipolicer->match_id[prof_idx])
5457 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5458 
5459 			ipolicer->pfvf_map[prof_idx] = 0x00;
5460 			ipolicer->match_id[prof_idx] = 0;
5461 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5462 		}
5463 	}
5464 	mutex_unlock(&rvu->rsrc_lock);
5465 	return 0;
5466 }
5467 
5468 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
5469 				       struct nix_bandprof_free_req *req,
5470 				       struct msg_rsp *rsp)
5471 {
5472 	int blkaddr, layer, prof_idx, idx, err;
5473 	u16 pcifunc = req->hdr.pcifunc;
5474 	struct nix_ipolicer *ipolicer;
5475 	struct nix_hw *nix_hw;
5476 
5477 	if (req->free_all)
5478 		return nix_free_all_bandprof(rvu, pcifunc);
5479 
5480 	if (!rvu->hw->cap.ipolicer)
5481 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5482 
5483 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5484 	if (err)
5485 		return err;
5486 
5487 	mutex_lock(&rvu->rsrc_lock);
5488 	/* Free the requested profile indices */
5489 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5490 		if (layer == BAND_PROF_INVAL_LAYER)
5491 			continue;
5492 		if (!req->prof_count[layer])
5493 			continue;
5494 
5495 		ipolicer = &nix_hw->ipolicer[layer];
5496 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
5497 			prof_idx = req->prof_idx[layer][idx];
5498 			if (prof_idx >= ipolicer->band_prof.max ||
5499 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
5500 				continue;
5501 
5502 			/* Clear ratelimit aggregation, if any */
5503 			if (layer == BAND_PROF_LEAF_LAYER &&
5504 			    ipolicer->match_id[prof_idx])
5505 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
5506 
5507 			ipolicer->pfvf_map[prof_idx] = 0x00;
5508 			ipolicer->match_id[prof_idx] = 0;
5509 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
5510 			if (idx == MAX_BANDPROF_PER_PFFUNC)
5511 				break;
5512 		}
5513 	}
5514 	mutex_unlock(&rvu->rsrc_lock);
5515 	return 0;
5516 }
5517 
5518 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
5519 			struct nix_cn10k_aq_enq_req *aq_req,
5520 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
5521 			u16 pcifunc, u8 ctype, u32 qidx)
5522 {
5523 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5524 	aq_req->hdr.pcifunc = pcifunc;
5525 	aq_req->ctype = ctype;
5526 	aq_req->op = NIX_AQ_INSTOP_READ;
5527 	aq_req->qidx = qidx;
5528 
5529 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5530 				       (struct nix_aq_enq_req *)aq_req,
5531 				       (struct nix_aq_enq_rsp *)aq_rsp);
5532 }
5533 
5534 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
5535 					  struct nix_hw *nix_hw,
5536 					  struct nix_cn10k_aq_enq_req *aq_req,
5537 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
5538 					  u32 leaf_prof, u16 mid_prof)
5539 {
5540 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5541 	aq_req->hdr.pcifunc = 0x00;
5542 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
5543 	aq_req->op = NIX_AQ_INSTOP_WRITE;
5544 	aq_req->qidx = leaf_prof;
5545 
5546 	aq_req->prof.band_prof_id = mid_prof;
5547 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
5548 	aq_req->prof.hl_en = 1;
5549 	aq_req->prof_mask.hl_en = 1;
5550 
5551 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5552 				       (struct nix_aq_enq_req *)aq_req,
5553 				       (struct nix_aq_enq_rsp *)aq_rsp);
5554 }
5555 
5556 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
5557 				 u16 rq_idx, u16 match_id)
5558 {
5559 	int leaf_prof, mid_prof, leaf_match;
5560 	struct nix_cn10k_aq_enq_req aq_req;
5561 	struct nix_cn10k_aq_enq_rsp aq_rsp;
5562 	struct nix_ipolicer *ipolicer;
5563 	struct nix_hw *nix_hw;
5564 	int blkaddr, idx, rc;
5565 
5566 	if (!rvu->hw->cap.ipolicer)
5567 		return 0;
5568 
5569 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
5570 	if (rc)
5571 		return rc;
5572 
5573 	/* Fetch the RQ's context to see if policing is enabled */
5574 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
5575 				 NIX_AQ_CTYPE_RQ, rq_idx);
5576 	if (rc) {
5577 		dev_err(rvu->dev,
5578 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
5579 			__func__, rq_idx, pcifunc);
5580 		return rc;
5581 	}
5582 
5583 	if (!aq_rsp.rq.policer_ena)
5584 		return 0;
5585 
5586 	/* Get the bandwidth profile ID mapped to this RQ */
5587 	leaf_prof = aq_rsp.rq.band_prof_id;
5588 
5589 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
5590 	ipolicer->match_id[leaf_prof] = match_id;
5591 
5592 	/* Check if any other leaf profile is marked with same match_id */
5593 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
5594 		if (idx == leaf_prof)
5595 			continue;
5596 		if (ipolicer->match_id[idx] != match_id)
5597 			continue;
5598 
5599 		leaf_match = idx;
5600 		break;
5601 	}
5602 
5603 	if (idx == ipolicer->band_prof.max)
5604 		return 0;
5605 
5606 	/* Fetch the matching profile's context to check if it's already
5607 	 * mapped to a mid level profile.
5608 	 */
5609 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5610 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
5611 	if (rc) {
5612 		dev_err(rvu->dev,
5613 			"%s: Failed to fetch context of leaf profile %d\n",
5614 			__func__, leaf_match);
5615 		return rc;
5616 	}
5617 
5618 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5619 	if (aq_rsp.prof.hl_en) {
5620 		/* Get Mid layer prof index and map leaf_prof index
5621 		 * also such that flows that are being steered
5622 		 * to different RQs and marked with same match_id
5623 		 * are rate limited in a aggregate fashion
5624 		 */
5625 		mid_prof = aq_rsp.prof.band_prof_id;
5626 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5627 						    &aq_req, &aq_rsp,
5628 						    leaf_prof, mid_prof);
5629 		if (rc) {
5630 			dev_err(rvu->dev,
5631 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5632 				__func__, leaf_prof, mid_prof);
5633 			goto exit;
5634 		}
5635 
5636 		mutex_lock(&rvu->rsrc_lock);
5637 		ipolicer->ref_count[mid_prof]++;
5638 		mutex_unlock(&rvu->rsrc_lock);
5639 		goto exit;
5640 	}
5641 
5642 	/* Allocate a mid layer profile and
5643 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
5644 	 */
5645 	mutex_lock(&rvu->rsrc_lock);
5646 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
5647 	if (mid_prof < 0) {
5648 		dev_err(rvu->dev,
5649 			"%s: Unable to allocate mid layer profile\n", __func__);
5650 		mutex_unlock(&rvu->rsrc_lock);
5651 		goto exit;
5652 	}
5653 	mutex_unlock(&rvu->rsrc_lock);
5654 	ipolicer->pfvf_map[mid_prof] = 0x00;
5655 	ipolicer->ref_count[mid_prof] = 0;
5656 
5657 	/* Initialize mid layer profile same as 'leaf_prof' */
5658 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5659 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5660 	if (rc) {
5661 		dev_err(rvu->dev,
5662 			"%s: Failed to fetch context of leaf profile %d\n",
5663 			__func__, leaf_prof);
5664 		goto exit;
5665 	}
5666 
5667 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
5668 	aq_req.hdr.pcifunc = 0x00;
5669 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
5670 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
5671 	aq_req.op = NIX_AQ_INSTOP_WRITE;
5672 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
5673 	memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
5674 	/* Clear higher layer enable bit in the mid profile, just in case */
5675 	aq_req.prof.hl_en = 0;
5676 	aq_req.prof_mask.hl_en = 1;
5677 
5678 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
5679 				     (struct nix_aq_enq_req *)&aq_req, NULL);
5680 	if (rc) {
5681 		dev_err(rvu->dev,
5682 			"%s: Failed to INIT context of mid layer profile %d\n",
5683 			__func__, mid_prof);
5684 		goto exit;
5685 	}
5686 
5687 	/* Map both leaf profiles to this mid layer profile */
5688 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5689 					    &aq_req, &aq_rsp,
5690 					    leaf_prof, mid_prof);
5691 	if (rc) {
5692 		dev_err(rvu->dev,
5693 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5694 			__func__, leaf_prof, mid_prof);
5695 		goto exit;
5696 	}
5697 
5698 	mutex_lock(&rvu->rsrc_lock);
5699 	ipolicer->ref_count[mid_prof]++;
5700 	mutex_unlock(&rvu->rsrc_lock);
5701 
5702 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
5703 					    &aq_req, &aq_rsp,
5704 					    leaf_match, mid_prof);
5705 	if (rc) {
5706 		dev_err(rvu->dev,
5707 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
5708 			__func__, leaf_match, mid_prof);
5709 		ipolicer->ref_count[mid_prof]--;
5710 		goto exit;
5711 	}
5712 
5713 	mutex_lock(&rvu->rsrc_lock);
5714 	ipolicer->ref_count[mid_prof]++;
5715 	mutex_unlock(&rvu->rsrc_lock);
5716 
5717 exit:
5718 	return rc;
5719 }
5720 
5721 /* Called with mutex rsrc_lock */
5722 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
5723 				     u32 leaf_prof)
5724 {
5725 	struct nix_cn10k_aq_enq_req aq_req;
5726 	struct nix_cn10k_aq_enq_rsp aq_rsp;
5727 	struct nix_ipolicer *ipolicer;
5728 	u16 mid_prof;
5729 	int rc;
5730 
5731 	mutex_unlock(&rvu->rsrc_lock);
5732 
5733 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
5734 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
5735 
5736 	mutex_lock(&rvu->rsrc_lock);
5737 	if (rc) {
5738 		dev_err(rvu->dev,
5739 			"%s: Failed to fetch context of leaf profile %d\n",
5740 			__func__, leaf_prof);
5741 		return;
5742 	}
5743 
5744 	if (!aq_rsp.prof.hl_en)
5745 		return;
5746 
5747 	mid_prof = aq_rsp.prof.band_prof_id;
5748 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
5749 	ipolicer->ref_count[mid_prof]--;
5750 	/* If ref_count is zero, free mid layer profile */
5751 	if (!ipolicer->ref_count[mid_prof]) {
5752 		ipolicer->pfvf_map[mid_prof] = 0x00;
5753 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
5754 	}
5755 }
5756 
5757 int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req,
5758 					     struct nix_bandprof_get_hwinfo_rsp *rsp)
5759 {
5760 	struct nix_ipolicer *ipolicer;
5761 	int blkaddr, layer, err;
5762 	struct nix_hw *nix_hw;
5763 	u64 tu;
5764 
5765 	if (!rvu->hw->cap.ipolicer)
5766 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
5767 
5768 	err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
5769 	if (err)
5770 		return err;
5771 
5772 	/* Return number of bandwidth profiles free at each layer */
5773 	mutex_lock(&rvu->rsrc_lock);
5774 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
5775 		if (layer == BAND_PROF_INVAL_LAYER)
5776 			continue;
5777 
5778 		ipolicer = &nix_hw->ipolicer[layer];
5779 		rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof);
5780 	}
5781 	mutex_unlock(&rvu->rsrc_lock);
5782 
5783 	/* Set the policer timeunit in nanosec */
5784 	tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & GENMASK_ULL(9, 0);
5785 	rsp->policer_timeunit = (tu + 1) * 100;
5786 
5787 	return 0;
5788 }
5789