1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 			    int type, int chan_id);
24 
25 enum mc_tbl_sz {
26 	MC_TBL_SZ_256,
27 	MC_TBL_SZ_512,
28 	MC_TBL_SZ_1K,
29 	MC_TBL_SZ_2K,
30 	MC_TBL_SZ_4K,
31 	MC_TBL_SZ_8K,
32 	MC_TBL_SZ_16K,
33 	MC_TBL_SZ_32K,
34 	MC_TBL_SZ_64K,
35 };
36 
37 enum mc_buf_cnt {
38 	MC_BUF_CNT_8,
39 	MC_BUF_CNT_16,
40 	MC_BUF_CNT_32,
41 	MC_BUF_CNT_64,
42 	MC_BUF_CNT_128,
43 	MC_BUF_CNT_256,
44 	MC_BUF_CNT_512,
45 	MC_BUF_CNT_1024,
46 	MC_BUF_CNT_2048,
47 };
48 
49 enum nix_makr_fmt_indexes {
50 	NIX_MARK_CFG_IP_DSCP_RED,
51 	NIX_MARK_CFG_IP_DSCP_YELLOW,
52 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
53 	NIX_MARK_CFG_IP_ECN_RED,
54 	NIX_MARK_CFG_IP_ECN_YELLOW,
55 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
56 	NIX_MARK_CFG_VLAN_DEI_RED,
57 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
58 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
59 	NIX_MARK_CFG_MAX,
60 };
61 
62 /* For now, consider only the MC resources needed for broadcast
63  * pkt replication, i.e. 256 HWVFs + 12 PFs.
64  */
65 #define MC_TBL_SIZE	MC_TBL_SZ_512
66 #define MC_BUF_CNT	MC_BUF_CNT_128
67 
68 struct mce {
69 	struct hlist_node	node;
70 	u16			pcifunc;
71 };
72 
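/* NIX blocks are walked by passing the previously returned block address;
 * passing 0 returns the first NIX block and a return value of 0 ends the
 * walk (see rvu_get_nixlf_count() below).
 */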
73 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
74 {
75 	int i = 0;
76 
77 	/* If blkaddr is 0, return the first NIX block address */
78 	if (blkaddr == 0)
79 		return rvu->nix_blkaddr[blkaddr];
80 
81 	while (i + 1 < MAX_NIX_BLKS) {
82 		if (rvu->nix_blkaddr[i] == blkaddr)
83 			return rvu->nix_blkaddr[i + 1];
84 		i++;
85 	}
86 
87 	return 0;
88 }
89 
90 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
91 {
92 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
93 	int blkaddr;
94 
95 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
96 	if (!pfvf->nixlf || blkaddr < 0)
97 		return false;
98 	return true;
99 }
100 
101 int rvu_get_nixlf_count(struct rvu *rvu)
102 {
103 	int blkaddr = 0, max = 0;
104 	struct rvu_block *block;
105 
106 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
107 	while (blkaddr) {
108 		block = &rvu->hw->block[blkaddr];
109 		max += block->lf.max;
110 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
111 	}
112 	return max;
113 }
114 
115 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
116 {
117 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
118 	struct rvu_hwinfo *hw = rvu->hw;
119 	int blkaddr;
120 
121 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
122 	if (!pfvf->nixlf || blkaddr < 0)
123 		return NIX_AF_ERR_AF_LF_INVALID;
124 
125 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
126 	if (*nixlf < 0)
127 		return NIX_AF_ERR_AF_LF_INVALID;
128 
129 	if (nix_blkaddr)
130 		*nix_blkaddr = blkaddr;
131 
132 	return 0;
133 }
134 
135 static void nix_mce_list_init(struct nix_mce_list *list, int max)
136 {
137 	INIT_HLIST_HEAD(&list->head);
138 	list->count = 0;
139 	list->max = max;
140 }
141 
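/* Reserve 'count' consecutive MCE list entries by simply bumping next_free_mce */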
142 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
143 {
144 	int idx;
145 
146 	if (!mcast)
147 		return 0;
148 
149 	idx = mcast->next_free_mce;
150 	mcast->next_free_mce += count;
151 	return idx;
152 }
153 
154 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
155 {
156 	int nix_blkaddr = 0, i = 0;
157 	struct rvu *rvu = hw->rvu;
158 
159 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
160 	while (nix_blkaddr) {
161 		if (blkaddr == nix_blkaddr && hw->nix)
162 			return &hw->nix[i];
163 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
164 		i++;
165 	}
166 	return NULL;
167 }
168 
169 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
170 {
171 	int err;
172 
173 	/* Sync all in-flight RX packets to LLC/DRAM */
174 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
175 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
176 	if (err)
177 		dev_err(rvu->dev, "NIX RX software sync failed\n");
178 }
179 
180 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
181 			    int lvl, u16 pcifunc, u16 schq)
182 {
183 	struct rvu_hwinfo *hw = rvu->hw;
184 	struct nix_txsch *txsch;
185 	struct nix_hw *nix_hw;
186 	u16 map_func;
187 
188 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
189 	if (!nix_hw)
190 		return false;
191 
192 	txsch = &nix_hw->txsch[lvl];
193 	/* Check out of bounds */
194 	if (schq >= txsch->schq.max)
195 		return false;
196 
197 	mutex_lock(&rvu->rsrc_lock);
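	/* pfvf_map[] packs the owning PF_FUNC and config flags for each schq */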
198 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
199 	mutex_unlock(&rvu->rsrc_lock);
200 
201 	/* TLs aggregating traffic are shared across the PF and its VFs */
202 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
203 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
204 			return false;
205 		else
206 			return true;
207 	}
208 
209 	if (map_func != pcifunc)
210 		return false;
211 
212 	return true;
213 }
214 
215 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
216 {
217 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
218 	struct mac_ops *mac_ops;
219 	int pkind, pf, vf, lbkid;
220 	u8 cgx_id, lmac_id;
221 	int err;
222 
223 	pf = rvu_get_pf(pcifunc);
224 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
225 		return 0;
226 
227 	switch (type) {
228 	case NIX_INTF_TYPE_CGX:
229 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
230 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
231 
232 		pkind = rvu_npc_get_pkind(rvu, pf);
233 		if (pkind < 0) {
234 			dev_err(rvu->dev,
235 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
236 			return -EINVAL;
237 		}
238 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
239 		pfvf->tx_chan_base = pfvf->rx_chan_base;
240 		pfvf->rx_chan_cnt = 1;
241 		pfvf->tx_chan_cnt = 1;
242 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
243 		rvu_npc_set_pkind(rvu, pkind, pfvf);
244 
245 		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
246 		/* By default we enable pause frames */
247 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
248 			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
249 								    rvu),
250 						      lmac_id, true, true);
251 		break;
252 	case NIX_INTF_TYPE_LBK:
253 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
254 
255 		/* If the NIX1 block is present on the silicon then NIX blocks
256 		 * are assigned alternately to LBK interfaces. NIX0 should
257 		 * send packets on lbk link 1 channels and NIX1 should send
258 		 * on lbk link 0 channels for the communication between
259 		 * NIX0 and NIX1.
260 		 */
261 		lbkid = 0;
262 		if (rvu->hw->lbk_links > 1)
263 			lbkid = vf & 0x1 ? 0 : 1;
264 
265 		/* Note that AF's VFs work in pairs and talk over consecutive
266 		 * loopback channels. Therefore, if an odd number of AF VFs is
267 		 * enabled, the last VF is left without a pair.
268 		 */
269 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
270 		pfvf->tx_chan_base = vf & 0x1 ?
271 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
272 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
273 		pfvf->rx_chan_cnt = 1;
274 		pfvf->tx_chan_cnt = 1;
275 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
276 					      pfvf->rx_chan_base, false);
277 		break;
278 	}
279 
280 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
281 	 * RVU PF/VF's MAC address.
282 	 */
283 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
284 				    pfvf->rx_chan_base, pfvf->mac_addr);
285 
286 	/* Add this PF_FUNC to bcast pkt replication list */
287 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
288 	if (err) {
289 		dev_err(rvu->dev,
290 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
291 			pcifunc);
292 		return err;
293 	}
294 
295 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
296 					  nixlf, pfvf->rx_chan_base);
297 	pfvf->maxlen = NIC_HW_MIN_FRS;
298 	pfvf->minlen = NIC_HW_MIN_FRS;
299 
300 	return 0;
301 }
302 
303 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
304 {
305 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
306 	int err;
307 
308 	pfvf->maxlen = 0;
309 	pfvf->minlen = 0;
310 
311 	/* Remove this PF_FUNC from bcast pkt replication list */
312 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
313 	if (err) {
314 		dev_err(rvu->dev,
315 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
316 			pcifunc);
317 	}
318 
319 	/* Free and disable any MCAM entries used by this NIX LF */
320 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
321 }
322 
323 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
324 				    struct nix_bp_cfg_req *req,
325 				    struct msg_rsp *rsp)
326 {
327 	u16 pcifunc = req->hdr.pcifunc;
328 	struct rvu_pfvf *pfvf;
329 	int blkaddr, pf, type;
330 	u16 chan_base, chan;
331 	u64 cfg;
332 
333 	pf = rvu_get_pf(pcifunc);
334 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
335 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
336 		return 0;
337 
338 	pfvf = rvu_get_pfvf(rvu, pcifunc);
339 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
340 
341 	chan_base = pfvf->rx_chan_base + req->chan_base;
342 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
343 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
344 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
345 			    cfg & ~BIT_ULL(16));
346 	}
347 	return 0;
348 }
349 
350 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
351 			    int type, int chan_id)
352 {
353 	int bpid, blkaddr, lmac_chan_cnt;
354 	struct rvu_hwinfo *hw = rvu->hw;
355 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
356 	struct rvu_pfvf *pfvf;
357 	u8 cgx_id, lmac_id;
358 	u64 cfg;
359 
360 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
361 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
362 	lmac_chan_cnt = cfg & 0xFF;
363 
364 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
365 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
366 
367 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
368 
369 	/* Backpressure ID range division
370 	 * CGX channels are mapped to BPIDs (0 - 191)
371 	 * LBK channels are mapped to BPIDs (192 - 255)
372 	 * SDP channels are mapped to BPIDs (256 - 511)
373 	 *
374 	 * LMAC channels and BPIDs are mapped as follows
375 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
376 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
377 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
378 	 */
379 	switch (type) {
380 	case NIX_INTF_TYPE_CGX:
381 		if ((req->chan_base + req->chan_cnt) > 15)
382 			return -EINVAL;
383 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
384 		/* Assign bpid based on cgx, lmac and chan id */
385 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
386 			(lmac_id * lmac_chan_cnt) + req->chan_base;
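		/* e.g. with 16 channels per LMAC and 4 LMACs per CGX (as in the
		 * mapping above), cgx_id = 1, lmac_id = 0, chan_base = 0 gives
		 * bpid = 1 * 4 * 16 = 64.
		 */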
387 
388 		if (req->bpid_per_chan)
389 			bpid += chan_id;
390 		if (bpid > cgx_bpid_cnt)
391 			return -EINVAL;
392 		break;
393 
394 	case NIX_INTF_TYPE_LBK:
395 		if ((req->chan_base + req->chan_cnt) > 63)
396 			return -EINVAL;
397 		bpid = cgx_bpid_cnt + req->chan_base;
398 		if (req->bpid_per_chan)
399 			bpid += chan_id;
400 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
401 			return -EINVAL;
402 		break;
403 	default:
404 		return -EINVAL;
405 	}
406 	return bpid;
407 }
408 
409 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
410 				   struct nix_bp_cfg_req *req,
411 				   struct nix_bp_cfg_rsp *rsp)
412 {
413 	int blkaddr, pf, type, chan_id = 0;
414 	u16 pcifunc = req->hdr.pcifunc;
415 	struct rvu_pfvf *pfvf;
416 	u16 chan_base, chan;
417 	s16 bpid, bpid_base;
418 	u64 cfg;
419 
420 	pf = rvu_get_pf(pcifunc);
421 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
422 
423 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
424 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
425 		return 0;
426 
427 	pfvf = rvu_get_pfvf(rvu, pcifunc);
428 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
429 
430 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
431 	chan_base = pfvf->rx_chan_base + req->chan_base;
432 	bpid = bpid_base;
433 
434 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
435 		if (bpid < 0) {
436 			dev_warn(rvu->dev, "Failed to enable backpressure\n");
437 			return -EINVAL;
438 		}
439 
440 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
441 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
442 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
443 		chan_id++;
444 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
445 	}
446 
447 	for (chan = 0; chan < req->chan_cnt; chan++) {
448 		/* Map each channel to the BPID assigned to it */
449 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
450 					(bpid_base & 0x3FF);
451 		if (req->bpid_per_chan)
452 			bpid_base++;
453 	}
454 	rsp->chan_cnt = req->chan_cnt;
455 
456 	return 0;
457 }
458 
459 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
460 				 u64 format, bool v4, u64 *fidx)
461 {
462 	struct nix_lso_format field = {0};
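	/* Each entry below is a bit-packed struct nix_lso_format written to the
	 * LSO format register as a raw 64-bit field descriptor.
	 */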
463 
464 	/* IP's Length field */
465 	field.layer = NIX_TXLAYER_OL3;
466 	/* In IPv4 the length field is at byte offset 2; for IPv6 it is at offset 4 */
467 	field.offset = v4 ? 2 : 4;
468 	field.sizem1 = 1; /* i.e. 2 bytes */
469 	field.alg = NIX_LSOALG_ADD_PAYLEN;
470 	rvu_write64(rvu, blkaddr,
471 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
472 		    *(u64 *)&field);
473 
474 	/* No ID field in IPv6 header */
475 	if (!v4)
476 		return;
477 
478 	/* IP's ID field */
479 	field.layer = NIX_TXLAYER_OL3;
480 	field.offset = 4;
481 	field.sizem1 = 1; /* i.e. 2 bytes */
482 	field.alg = NIX_LSOALG_ADD_SEGNUM;
483 	rvu_write64(rvu, blkaddr,
484 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
485 		    *(u64 *)&field);
486 }
487 
488 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
489 				 u64 format, u64 *fidx)
490 {
491 	struct nix_lso_format field = {0};
492 
493 	/* TCP's sequence number field */
494 	field.layer = NIX_TXLAYER_OL4;
495 	field.offset = 4;
496 	field.sizem1 = 3; /* i.e. 4 bytes */
497 	field.alg = NIX_LSOALG_ADD_OFFSET;
498 	rvu_write64(rvu, blkaddr,
499 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
500 		    *(u64 *)&field);
501 
502 	/* TCP's flags field */
503 	field.layer = NIX_TXLAYER_OL4;
504 	field.offset = 12;
505 	field.sizem1 = 1; /* 2 bytes */
506 	field.alg = NIX_LSOALG_TCP_FLAGS;
507 	rvu_write64(rvu, blkaddr,
508 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
509 		    *(u64 *)&field);
510 }
511 
512 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
513 {
514 	u64 cfg, idx, fidx = 0;
515 
516 	/* Get max HW supported format indices */
517 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
518 	nix_hw->lso.total = cfg;
519 
520 	/* Enable LSO */
521 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
522 	/* For TSO, set first and middle segment flags to
523 	 * mask out PSH, RST & FIN flags in TCP packet
524 	 */
525 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
526 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
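	/* 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) in the TCP
	 * flags; all other flag bits pass through unchanged.
	 */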
527 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
528 
529 	/* Setup default static LSO formats
530 	 *
531 	 * Configure format fields for TCPv4 segmentation offload
532 	 */
533 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
534 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
535 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
536 
537 	/* Set rest of the fields to NOP */
538 	for (; fidx < 8; fidx++) {
539 		rvu_write64(rvu, blkaddr,
540 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
541 	}
542 	nix_hw->lso.in_use++;
543 
544 	/* Configure format fields for TCPv6 segmentation offload */
545 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
546 	fidx = 0;
547 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
548 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
549 
550 	/* Set rest of the fields to NOP */
551 	for (; fidx < 8; fidx++) {
552 		rvu_write64(rvu, blkaddr,
553 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
554 	}
555 	nix_hw->lso.in_use++;
556 }
557 
558 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
559 {
560 	kfree(pfvf->rq_bmap);
561 	kfree(pfvf->sq_bmap);
562 	kfree(pfvf->cq_bmap);
563 	if (pfvf->rq_ctx)
564 		qmem_free(rvu->dev, pfvf->rq_ctx);
565 	if (pfvf->sq_ctx)
566 		qmem_free(rvu->dev, pfvf->sq_ctx);
567 	if (pfvf->cq_ctx)
568 		qmem_free(rvu->dev, pfvf->cq_ctx);
569 	if (pfvf->rss_ctx)
570 		qmem_free(rvu->dev, pfvf->rss_ctx);
571 	if (pfvf->nix_qints_ctx)
572 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
573 	if (pfvf->cq_ints_ctx)
574 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
575 
576 	pfvf->rq_bmap = NULL;
577 	pfvf->cq_bmap = NULL;
578 	pfvf->sq_bmap = NULL;
579 	pfvf->rq_ctx = NULL;
580 	pfvf->sq_ctx = NULL;
581 	pfvf->cq_ctx = NULL;
582 	pfvf->rss_ctx = NULL;
583 	pfvf->nix_qints_ctx = NULL;
584 	pfvf->cq_ints_ctx = NULL;
585 }
586 
587 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
588 			      struct rvu_pfvf *pfvf, int nixlf,
589 			      int rss_sz, int rss_grps, int hwctx_size,
590 			      u64 way_mask)
591 {
592 	int err, grp, num_indices;
593 
594 	/* RSS is not requested for this NIXLF */
595 	if (!rss_sz)
596 		return 0;
597 	num_indices = rss_sz * rss_grps;
598 
599 	/* Alloc NIX RSS HW context memory and config the base */
600 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
601 	if (err)
602 		return err;
603 
604 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
605 		    (u64)pfvf->rss_ctx->iova);
606 
607 	/* Config full RSS table size, enable RSS and caching */
608 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
609 		    BIT_ULL(36) | BIT_ULL(4) |
610 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
611 		    way_mask << 20);
612 	/* Config RSS group offset and sizes */
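	/* e.g. rss_sz = 256, grp = 1: size field = ilog2(256) - 1 = 7,
	 * group offset = 256 * 1 = 256
	 */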
613 	for (grp = 0; grp < rss_grps; grp++)
614 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
615 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
616 	return 0;
617 }
618 
619 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
620 			       struct nix_aq_inst_s *inst)
621 {
622 	struct admin_queue *aq = block->aq;
623 	struct nix_aq_res_s *result;
624 	int timeout = 1000;
625 	u64 reg, head;
626 
627 	result = (struct nix_aq_res_s *)aq->res->base;
628 
629 	/* Get current head pointer where to append this instruction */
630 	/* Get the current head pointer, where this instruction will be appended */
631 	head = (reg >> 4) & AQ_PTR_MASK;
632 
633 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
634 	       (void *)inst, aq->inst->entry_sz);
635 	memset(result, 0, sizeof(*result));
636 	/* sync into memory */
637 	wmb();
638 
639 	/* Ring the doorbell and wait for result */
640 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
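	/* Completion is detected by polling the result's compcode, roughly
	 * 1us per iteration, for up to ~1ms.
	 */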
641 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
642 		cpu_relax();
643 		udelay(1);
644 		timeout--;
645 		if (!timeout)
646 			return -EBUSY;
647 	}
648 
649 	if (result->compcode != NIX_AQ_COMP_GOOD)
650 		/* TODO: Replace this with some error code */
651 		return -EBUSY;
652 
653 	return 0;
654 }
655 
656 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
657 				   struct nix_aq_enq_req *req,
658 				   struct nix_aq_enq_rsp *rsp)
659 {
660 	struct rvu_hwinfo *hw = rvu->hw;
661 	u16 pcifunc = req->hdr.pcifunc;
662 	int nixlf, blkaddr, rc = 0;
663 	struct nix_aq_inst_s inst;
664 	struct rvu_block *block;
665 	struct admin_queue *aq;
666 	struct rvu_pfvf *pfvf;
667 	void *ctx, *mask;
668 	bool ena;
669 	u64 cfg;
670 
671 	blkaddr = nix_hw->blkaddr;
672 	block = &hw->block[blkaddr];
673 	aq = block->aq;
674 	if (!aq) {
675 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
676 		return NIX_AF_ERR_AQ_ENQUEUE;
677 	}
678 
679 	pfvf = rvu_get_pfvf(rvu, pcifunc);
680 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
681 
682 	/* Skip NIXLF check for broadcast MCE entry init */
683 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
684 		if (!pfvf->nixlf || nixlf < 0)
685 			return NIX_AF_ERR_AF_LF_INVALID;
686 	}
687 
688 	switch (req->ctype) {
689 	case NIX_AQ_CTYPE_RQ:
690 		/* Check if index exceeds max no of queues */
691 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
692 			rc = NIX_AF_ERR_AQ_ENQUEUE;
693 		break;
694 	case NIX_AQ_CTYPE_SQ:
695 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
696 			rc = NIX_AF_ERR_AQ_ENQUEUE;
697 		break;
698 	case NIX_AQ_CTYPE_CQ:
699 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
700 			rc = NIX_AF_ERR_AQ_ENQUEUE;
701 		break;
702 	case NIX_AQ_CTYPE_RSS:
703 		/* Check if RSS is enabled and qidx is within range */
704 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
705 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
706 		    (req->qidx >= (256UL << (cfg & 0xF))))
707 			rc = NIX_AF_ERR_AQ_ENQUEUE;
708 		break;
709 	case NIX_AQ_CTYPE_MCE:
710 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
711 
712 		/* Check if index exceeds MCE list length */
713 		if (!nix_hw->mcast.mce_ctx ||
714 		    (req->qidx >= (256UL << (cfg & 0xF))))
715 			rc = NIX_AF_ERR_AQ_ENQUEUE;
716 
717 		/* Adding multicast lists for requests from PF/VFs is not
718 		 * yet supported, so ignore this.
719 		 */
720 		if (rsp)
721 			rc = NIX_AF_ERR_AQ_ENQUEUE;
722 		break;
723 	default:
724 		rc = NIX_AF_ERR_AQ_ENQUEUE;
725 	}
726 
727 	if (rc)
728 		return rc;
729 
730 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
731 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
732 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
733 	     (req->op == NIX_AQ_INSTOP_WRITE &&
734 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
735 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
736 				     pcifunc, req->sq.smq))
737 			return NIX_AF_ERR_AQ_ENQUEUE;
738 	}
739 
740 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
741 	inst.lf = nixlf;
742 	inst.cindex = req->qidx;
743 	inst.ctype = req->ctype;
744 	inst.op = req->op;
745 	/* Enqueuing multiple instructions at a time is not supported,
746 	 * so always use the first entry in the result memory.
747 	 */
748 	inst.res_addr = (u64)aq->res->iova;
749 
750 	/* Hardware uses the same aq->res->base to update the result of the
751 	 * previous instruction, hence serialize accesses with aq->lock.
752 	 */
753 	spin_lock(&aq->lock);
754 
755 	/* Clean result + context memory */
756 	memset(aq->res->base, 0, aq->res->entry_sz);
757 	/* Context needs to be written at RES_ADDR + 128 */
758 	ctx = aq->res->base + 128;
759 	/* Mask needs to be written at RES_ADDR + 256 */
760 	mask = aq->res->base + 256;
761 
762 	switch (req->op) {
763 	case NIX_AQ_INSTOP_WRITE:
764 		if (req->ctype == NIX_AQ_CTYPE_RQ)
765 			memcpy(mask, &req->rq_mask,
766 			       sizeof(struct nix_rq_ctx_s));
767 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
768 			memcpy(mask, &req->sq_mask,
769 			       sizeof(struct nix_sq_ctx_s));
770 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
771 			memcpy(mask, &req->cq_mask,
772 			       sizeof(struct nix_cq_ctx_s));
773 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
774 			memcpy(mask, &req->rss_mask,
775 			       sizeof(struct nix_rsse_s));
776 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
777 			memcpy(mask, &req->mce_mask,
778 			       sizeof(struct nix_rx_mce_s));
779 		fallthrough;
780 	case NIX_AQ_INSTOP_INIT:
781 		if (req->ctype == NIX_AQ_CTYPE_RQ)
782 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
783 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
784 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
785 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
786 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
787 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
788 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
789 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
790 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
791 		break;
792 	case NIX_AQ_INSTOP_NOP:
793 	case NIX_AQ_INSTOP_READ:
794 	case NIX_AQ_INSTOP_LOCK:
795 	case NIX_AQ_INSTOP_UNLOCK:
796 		break;
797 	default:
798 		rc = NIX_AF_ERR_AQ_ENQUEUE;
799 		spin_unlock(&aq->lock);
800 		return rc;
801 	}
802 
803 	/* Submit the instruction to AQ */
804 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
805 	if (rc) {
806 		spin_unlock(&aq->lock);
807 		return rc;
808 	}
809 
810 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
811 	if (req->op == NIX_AQ_INSTOP_INIT) {
812 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
813 			__set_bit(req->qidx, pfvf->rq_bmap);
814 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
815 			__set_bit(req->qidx, pfvf->sq_bmap);
816 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
817 			__set_bit(req->qidx, pfvf->cq_bmap);
818 	}
819 
820 	if (req->op == NIX_AQ_INSTOP_WRITE) {
821 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
822 			ena = (req->rq.ena & req->rq_mask.ena) |
823 				(test_bit(req->qidx, pfvf->rq_bmap) &
824 				~req->rq_mask.ena);
825 			if (ena)
826 				__set_bit(req->qidx, pfvf->rq_bmap);
827 			else
828 				__clear_bit(req->qidx, pfvf->rq_bmap);
829 		}
830 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
831 			ena = (req->sq.ena & req->sq_mask.ena) |
832 				(test_bit(req->qidx, pfvf->sq_bmap) &
833 				~req->sq_mask.ena);
834 			if (ena)
835 				__set_bit(req->qidx, pfvf->sq_bmap);
836 			else
837 				__clear_bit(req->qidx, pfvf->sq_bmap);
838 		}
839 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
840 			ena = (req->cq.ena & req->cq_mask.ena) |
841 				(test_bit(req->qidx, pfvf->cq_bmap) &
842 				~req->cq_mask.ena);
843 			if (ena)
844 				__set_bit(req->qidx, pfvf->cq_bmap);
845 			else
846 				__clear_bit(req->qidx, pfvf->cq_bmap);
847 		}
848 	}
849 
850 	if (rsp) {
851 		/* Copy read context into mailbox */
852 		if (req->op == NIX_AQ_INSTOP_READ) {
853 			if (req->ctype == NIX_AQ_CTYPE_RQ)
854 				memcpy(&rsp->rq, ctx,
855 				       sizeof(struct nix_rq_ctx_s));
856 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
857 				memcpy(&rsp->sq, ctx,
858 				       sizeof(struct nix_sq_ctx_s));
859 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
860 				memcpy(&rsp->cq, ctx,
861 				       sizeof(struct nix_cq_ctx_s));
862 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
863 				memcpy(&rsp->rss, ctx,
864 				       sizeof(struct nix_rsse_s));
865 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
866 				memcpy(&rsp->mce, ctx,
867 				       sizeof(struct nix_rx_mce_s));
868 		}
869 	}
870 
871 	spin_unlock(&aq->lock);
872 	return 0;
873 }
874 
875 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
876 			       struct nix_aq_enq_rsp *rsp)
877 {
878 	struct nix_hw *nix_hw;
879 	int blkaddr;
880 
881 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
882 	if (blkaddr < 0)
883 		return NIX_AF_ERR_AF_LF_INVALID;
884 
885 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
886 	if (!nix_hw)
887 		return -EINVAL;
888 
889 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
890 }
891 
892 static const char *nix_get_ctx_name(int ctype)
893 {
894 	switch (ctype) {
895 	case NIX_AQ_CTYPE_CQ:
896 		return "CQ";
897 	case NIX_AQ_CTYPE_SQ:
898 		return "SQ";
899 	case NIX_AQ_CTYPE_RQ:
900 		return "RQ";
901 	case NIX_AQ_CTYPE_RSS:
902 		return "RSS";
903 	}
904 	return "";
905 }
906 
907 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
908 {
909 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
910 	struct nix_aq_enq_req aq_req;
911 	unsigned long *bmap;
912 	int qidx, q_cnt = 0;
913 	int err = 0, rc;
914 
915 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
916 		return NIX_AF_ERR_AQ_ENQUEUE;
917 
918 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
919 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
920 
921 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
922 		aq_req.cq.ena = 0;
923 		aq_req.cq_mask.ena = 1;
924 		aq_req.cq.bp_ena = 0;
925 		aq_req.cq_mask.bp_ena = 1;
926 		q_cnt = pfvf->cq_ctx->qsize;
927 		bmap = pfvf->cq_bmap;
928 	}
929 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
930 		aq_req.sq.ena = 0;
931 		aq_req.sq_mask.ena = 1;
932 		q_cnt = pfvf->sq_ctx->qsize;
933 		bmap = pfvf->sq_bmap;
934 	}
935 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
936 		aq_req.rq.ena = 0;
937 		aq_req.rq_mask.ena = 1;
938 		q_cnt = pfvf->rq_ctx->qsize;
939 		bmap = pfvf->rq_bmap;
940 	}
941 
942 	aq_req.ctype = req->ctype;
943 	aq_req.op = NIX_AQ_INSTOP_WRITE;
944 
945 	for (qidx = 0; qidx < q_cnt; qidx++) {
946 		if (!test_bit(qidx, bmap))
947 			continue;
948 		aq_req.qidx = qidx;
949 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
950 		if (rc) {
951 			err = rc;
952 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
953 				nix_get_ctx_name(req->ctype), qidx);
954 		}
955 	}
956 
957 	return err;
958 }
959 
960 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
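/* With dynamic NDC caching disabled, explicitly lock each newly initialized
 * context into the NDC via a NIX_AQ_INSTOP_LOCK instruction.
 */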
961 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
962 {
963 	struct nix_aq_enq_req lock_ctx_req;
964 	int err;
965 
966 	if (req->op != NIX_AQ_INSTOP_INIT)
967 		return 0;
968 
969 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
970 	    req->ctype == NIX_AQ_CTYPE_DYNO)
971 		return 0;
972 
973 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
974 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
975 	lock_ctx_req.ctype = req->ctype;
976 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
977 	lock_ctx_req.qidx = req->qidx;
978 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
979 	if (err)
980 		dev_err(rvu->dev,
981 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
982 			req->hdr.pcifunc,
983 			nix_get_ctx_name(req->ctype), req->qidx);
984 	return err;
985 }
986 
987 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
988 				struct nix_aq_enq_req *req,
989 				struct nix_aq_enq_rsp *rsp)
990 {
991 	int err;
992 
993 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
994 	if (!err)
995 		err = nix_lf_hwctx_lockdown(rvu, req);
996 	return err;
997 }
998 #else
999 
1000 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1001 				struct nix_aq_enq_req *req,
1002 				struct nix_aq_enq_rsp *rsp)
1003 {
1004 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1005 }
1006 #endif
1007 /* CN10K mbox handler */
1008 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1009 				      struct nix_cn10k_aq_enq_req *req,
1010 				      struct nix_cn10k_aq_enq_rsp *rsp)
1011 {
1012 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1013 				  (struct nix_aq_enq_rsp *)rsp);
1014 }
1015 
1016 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1017 				       struct hwctx_disable_req *req,
1018 				       struct msg_rsp *rsp)
1019 {
1020 	return nix_lf_hwctx_disable(rvu, req);
1021 }
1022 
1023 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1024 				  struct nix_lf_alloc_req *req,
1025 				  struct nix_lf_alloc_rsp *rsp)
1026 {
1027 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1028 	struct rvu_hwinfo *hw = rvu->hw;
1029 	u16 pcifunc = req->hdr.pcifunc;
1030 	struct rvu_block *block;
1031 	struct rvu_pfvf *pfvf;
1032 	u64 cfg, ctx_cfg;
1033 	int blkaddr;
1034 
1035 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1036 		return NIX_AF_ERR_PARAM;
1037 
1038 	if (req->way_mask)
1039 		req->way_mask &= 0xFFFF;
1040 
1041 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1042 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1043 	if (!pfvf->nixlf || blkaddr < 0)
1044 		return NIX_AF_ERR_AF_LF_INVALID;
1045 
1046 	block = &hw->block[blkaddr];
1047 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1048 	if (nixlf < 0)
1049 		return NIX_AF_ERR_AF_LF_INVALID;
1050 
1051 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1052 	if (req->npa_func) {
1053 		/* If default, use 'this' NIXLF's PFFUNC */
1054 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1055 			req->npa_func = pcifunc;
1056 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1057 			return NIX_AF_INVAL_NPA_PF_FUNC;
1058 	}
1059 
1060 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1061 	if (req->sso_func) {
1062 		/* If default, use 'this' NIXLF's PFFUNC */
1063 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1064 			req->sso_func = pcifunc;
1065 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1066 			return NIX_AF_INVAL_SSO_PF_FUNC;
1067 	}
1068 
1069 	/* If RSS is being enabled, check if requested config is valid.
1070 	 * RSS table size should be a power of two, otherwise
1071 	 * RSS_GRP::OFFSET + adder might go beyond that group, or the
1072 	 * entire table can't be used.
1073 	 */
1074 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1075 			    !is_power_of_2(req->rss_sz)))
1076 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1077 
1078 	if (req->rss_sz &&
1079 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1080 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1081 
1082 	/* Reset this NIX LF */
1083 	err = rvu_lf_reset(rvu, block, nixlf);
1084 	if (err) {
1085 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1086 			block->addr - BLKADDR_NIX0, nixlf);
1087 		return NIX_AF_ERR_LF_RESET;
1088 	}
1089 
1090 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
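	/* NIX_AF_CONST3 carries the log2 sizes of the various HW contexts;
	 * each context's size is decoded from its own nibble below.
	 */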
1091 
1092 	/* Alloc NIX RQ HW context memory and config the base */
1093 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1094 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1095 	if (err)
1096 		goto free_mem;
1097 
1098 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1099 	if (!pfvf->rq_bmap)
1100 		goto free_mem;
1101 
1102 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1103 		    (u64)pfvf->rq_ctx->iova);
1104 
1105 	/* Set caching and queue count in HW */
1106 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1107 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1108 
1109 	/* Alloc NIX SQ HW context memory and config the base */
1110 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1111 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1112 	if (err)
1113 		goto free_mem;
1114 
1115 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1116 	if (!pfvf->sq_bmap)
1117 		goto free_mem;
1118 
1119 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1120 		    (u64)pfvf->sq_ctx->iova);
1121 
1122 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1123 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1124 
1125 	/* Alloc NIX CQ HW context memory and config the base */
1126 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1127 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1128 	if (err)
1129 		goto free_mem;
1130 
1131 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1132 	if (!pfvf->cq_bmap)
1133 		goto free_mem;
1134 
1135 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1136 		    (u64)pfvf->cq_ctx->iova);
1137 
1138 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1139 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1140 
1141 	/* Initialize receive side scaling (RSS) */
1142 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1143 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1144 				 req->rss_grps, hwctx_size, req->way_mask);
1145 	if (err)
1146 		goto free_mem;
1147 
1148 	/* Alloc memory for CQINT's HW contexts */
1149 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1150 	qints = (cfg >> 24) & 0xFFF;
1151 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1152 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1153 	if (err)
1154 		goto free_mem;
1155 
1156 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1157 		    (u64)pfvf->cq_ints_ctx->iova);
1158 
1159 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1160 		    BIT_ULL(36) | req->way_mask << 20);
1161 
1162 	/* Alloc memory for QINT's HW contexts */
1163 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1164 	qints = (cfg >> 12) & 0xFFF;
1165 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1166 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1167 	if (err)
1168 		goto free_mem;
1169 
1170 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1171 		    (u64)pfvf->nix_qints_ctx->iova);
1172 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1173 		    BIT_ULL(36) | req->way_mask << 20);
1174 
1175 	/* Set up the VLANX TPIDs.
1176 	 * Use VLAN1 for 802.1Q
1177 	 * and VLAN0 for 802.1AD.
1178 	 */
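	/* As written below, the 802.1Q TPID (0x8100) lands in bits [31:16]
	 * and the 802.1AD TPID (0x88A8) in bits [15:0] of NIX_AF_LFX_TX_CFG.
	 */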
1179 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1180 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1181 
1182 	/* Enable LMTST for this NIX LF */
1183 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1184 
1185 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1186 	if (req->npa_func)
1187 		cfg = req->npa_func;
1188 	if (req->sso_func)
1189 		cfg |= (u64)req->sso_func << 16;
1190 
1191 	cfg |= (u64)req->xqe_sz << 33;
1192 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1193 
1194 	/* Config Rx pkt length, csum checks and apad enable/disable */
1195 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1196 
1197 	/* Configure pkind for TX parse config */
1198 	cfg = NPC_TX_DEF_PKIND;
1199 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1200 
1201 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1202 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1203 	if (err)
1204 		goto free_mem;
1205 
1206 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1207 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1208 
1209 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1210 	rvu_write64(rvu, blkaddr,
1211 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1212 		    VTAGSIZE_T4 | VTAG_STRIP);
1213 
1214 	goto exit;
1215 
1216 free_mem:
1217 	nix_ctx_free(rvu, pfvf);
1218 	rc = -ENOMEM;
1219 
1220 exit:
1221 	/* Set macaddr of this PF/VF */
1222 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1223 
1224 	/* set SQB size info */
1225 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1226 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1227 	rsp->rx_chan_base = pfvf->rx_chan_base;
1228 	rsp->tx_chan_base = pfvf->tx_chan_base;
1229 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1230 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1231 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1232 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1233 	/* Get HW supported stat count */
1234 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1235 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1236 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1237 	/* Get count of CQ IRQs and error IRQs supported per LF */
1238 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1239 	rsp->qints = ((cfg >> 12) & 0xFFF);
1240 	rsp->cints = ((cfg >> 24) & 0xFFF);
1241 	rsp->cgx_links = hw->cgx_links;
1242 	rsp->lbk_links = hw->lbk_links;
1243 	rsp->sdp_links = hw->sdp_links;
1244 
1245 	return rc;
1246 }
1247 
1248 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1249 				 struct msg_rsp *rsp)
1250 {
1251 	struct rvu_hwinfo *hw = rvu->hw;
1252 	u16 pcifunc = req->hdr.pcifunc;
1253 	struct rvu_block *block;
1254 	int blkaddr, nixlf, err;
1255 	struct rvu_pfvf *pfvf;
1256 
1257 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1258 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1259 	if (!pfvf->nixlf || blkaddr < 0)
1260 		return NIX_AF_ERR_AF_LF_INVALID;
1261 
1262 	block = &hw->block[blkaddr];
1263 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1264 	if (nixlf < 0)
1265 		return NIX_AF_ERR_AF_LF_INVALID;
1266 
1267 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1268 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1269 	else
1270 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1271 
1272 	/* Free any tx vtag def entries used by this NIX LF */
1273 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1274 		nix_free_tx_vtag_entries(rvu, pcifunc);
1275 
1276 	nix_interface_deinit(rvu, pcifunc, nixlf);
1277 
1278 	/* Reset this NIX LF */
1279 	err = rvu_lf_reset(rvu, block, nixlf);
1280 	if (err) {
1281 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1282 			block->addr - BLKADDR_NIX0, nixlf);
1283 		return NIX_AF_ERR_LF_RESET;
1284 	}
1285 
1286 	nix_ctx_free(rvu, pfvf);
1287 
1288 	return 0;
1289 }
1290 
1291 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1292 					 struct nix_mark_format_cfg  *req,
1293 					 struct nix_mark_format_cfg_rsp *rsp)
1294 {
1295 	u16 pcifunc = req->hdr.pcifunc;
1296 	struct nix_hw *nix_hw;
1297 	struct rvu_pfvf *pfvf;
1298 	int blkaddr, rc;
1299 	u32 cfg;
1300 
1301 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1302 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1303 	if (!pfvf->nixlf || blkaddr < 0)
1304 		return NIX_AF_ERR_AF_LF_INVALID;
1305 
1306 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1307 	if (!nix_hw)
1308 		return -EINVAL;
1309 
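	/* Pack the mark format: offset in bits [18:16], y_mask in [15:12],
	 * y_val in [11:8], r_mask in [7:4] and r_val in [3:0].
	 */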
1310 	cfg = (((u32)req->offset & 0x7) << 16) |
1311 	      (((u32)req->y_mask & 0xF) << 12) |
1312 	      (((u32)req->y_val & 0xF) << 8) |
1313 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1314 
1315 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1316 	if (rc < 0) {
1317 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)\n",
1318 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1319 		return NIX_AF_ERR_MARK_CFG_FAIL;
1320 	}
1321 
1322 	rsp->mark_format_idx = rc;
1323 	return 0;
1324 }
1325 
1326 /* Disable shaping of pkts by a scheduler queue
1327  * at a given scheduler level.
1328  */
1329 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1330 				 int lvl, int schq)
1331 {
1332 	u64  cir_reg = 0, pir_reg = 0;
1333 	u64  cfg;
1334 
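	/* CIR (committed rate) and PIR (peak rate) shapers are disabled by
	 * clearing their enable bit (bit 0).
	 */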
1335 	switch (lvl) {
1336 	case NIX_TXSCH_LVL_TL1:
1337 		cir_reg = NIX_AF_TL1X_CIR(schq);
1338 		pir_reg = 0; /* PIR not available at TL1 */
1339 		break;
1340 	case NIX_TXSCH_LVL_TL2:
1341 		cir_reg = NIX_AF_TL2X_CIR(schq);
1342 		pir_reg = NIX_AF_TL2X_PIR(schq);
1343 		break;
1344 	case NIX_TXSCH_LVL_TL3:
1345 		cir_reg = NIX_AF_TL3X_CIR(schq);
1346 		pir_reg = NIX_AF_TL3X_PIR(schq);
1347 		break;
1348 	case NIX_TXSCH_LVL_TL4:
1349 		cir_reg = NIX_AF_TL4X_CIR(schq);
1350 		pir_reg = NIX_AF_TL4X_PIR(schq);
1351 		break;
1352 	}
1353 
1354 	if (!cir_reg)
1355 		return;
1356 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1357 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1358 
1359 	if (!pir_reg)
1360 		return;
1361 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1362 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1363 }
1364 
1365 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1366 				 int lvl, int schq)
1367 {
1368 	struct rvu_hwinfo *hw = rvu->hw;
1369 	int link;
1370 
1371 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1372 		return;
1373 
1374 	/* Reset TL4's SDP link config */
1375 	if (lvl == NIX_TXSCH_LVL_TL4)
1376 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1377 
1378 	if (lvl != NIX_TXSCH_LVL_TL2)
1379 		return;
1380 
1381 	/* Reset TL2's CGX or LBK link config */
1382 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1383 		rvu_write64(rvu, blkaddr,
1384 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1385 }
1386 
1387 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1388 {
1389 	struct rvu_hwinfo *hw = rvu->hw;
1390 	int pf = rvu_get_pf(pcifunc);
1391 	u8 cgx_id = 0, lmac_id = 0;
1392 
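	/* Transmit link numbering: CGX LMAC links come first, then LBK links,
	 * then the SDP link at the end.
	 */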
1393 	if (is_afvf(pcifunc)) { /* LBK links */
1394 		return hw->cgx_links;
1395 	} else if (is_pf_cgxmapped(rvu, pf)) {
1396 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1397 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1398 	}
1399 
1400 	/* SDP link */
1401 	return hw->cgx_links + hw->lbk_links;
1402 }
1403 
1404 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1405 				 int link, int *start, int *end)
1406 {
1407 	struct rvu_hwinfo *hw = rvu->hw;
1408 	int pf = rvu_get_pf(pcifunc);
1409 
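	/* TXSCHQs are carved out per transmit link: CGX LMAC ranges first,
	 * then LBK ranges, with the SDP range at the end.
	 */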
1410 	if (is_afvf(pcifunc)) { /* LBK links */
1411 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1412 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1413 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1414 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1415 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1416 	} else { /* SDP link */
1417 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1418 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1419 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1420 	}
1421 }
1422 
1423 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1424 				      struct nix_hw *nix_hw,
1425 				      struct nix_txsch_alloc_req *req)
1426 {
1427 	struct rvu_hwinfo *hw = rvu->hw;
1428 	int schq, req_schq, free_cnt;
1429 	struct nix_txsch *txsch;
1430 	int link, start, end;
1431 
1432 	txsch = &nix_hw->txsch[lvl];
1433 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1434 
1435 	if (!req_schq)
1436 		return 0;
1437 
1438 	link = nix_get_tx_link(rvu, pcifunc);
1439 
1440 	/* For traffic aggregating scheduler level, one queue is enough */
1441 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1442 		if (req_schq != 1)
1443 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1444 		return 0;
1445 	}
1446 
1447 	/* Get free SCHQ count and check if the request can be accommodated */
1448 	if (hw->cap.nix_fixed_txschq_mapping) {
1449 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1450 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1451 		if (end <= txsch->schq.max && schq < end &&
1452 		    !test_bit(schq, txsch->schq.bmap))
1453 			free_cnt = 1;
1454 		else
1455 			free_cnt = 0;
1456 	} else {
1457 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1458 	}
1459 
1460 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1461 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1462 
1463 	/* If contiguous queues are needed, check for availability */
1464 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1465 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1466 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1467 
1468 	return 0;
1469 }
1470 
1471 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1472 			    struct nix_txsch_alloc_rsp *rsp,
1473 			    int lvl, int start, int end)
1474 {
1475 	struct rvu_hwinfo *hw = rvu->hw;
1476 	u16 pcifunc = rsp->hdr.pcifunc;
1477 	int idx, schq;
1478 
1479 	/* For traffic aggregating levels, queue alloc is based
1480 	 * on the transmit link to which the PF_FUNC is mapped.
1481 	 */
1482 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1483 		/* A single TL queue is allocated */
1484 		if (rsp->schq_contig[lvl]) {
1485 			rsp->schq_contig[lvl] = 1;
1486 			rsp->schq_contig_list[lvl][0] = start;
1487 		}
1488 
1489 		/* Requesting both contig and non-contig queues doesn't make sense here */
1490 		if (rsp->schq_contig[lvl])
1491 			rsp->schq[lvl] = 0;
1492 
1493 		if (rsp->schq[lvl]) {
1494 			rsp->schq[lvl] = 1;
1495 			rsp->schq_list[lvl][0] = start;
1496 		}
1497 		return;
1498 	}
1499 
1500 	/* Adjust the queue request count if HW supports
1501 	 * only one queue per level configuration.
1502 	 */
1503 	if (hw->cap.nix_fixed_txschq_mapping) {
1504 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1505 		schq = start + idx;
1506 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1507 			rsp->schq_contig[lvl] = 0;
1508 			rsp->schq[lvl] = 0;
1509 			return;
1510 		}
1511 
1512 		if (rsp->schq_contig[lvl]) {
1513 			rsp->schq_contig[lvl] = 1;
1514 			set_bit(schq, txsch->schq.bmap);
1515 			rsp->schq_contig_list[lvl][0] = schq;
1516 			rsp->schq[lvl] = 0;
1517 		} else if (rsp->schq[lvl]) {
1518 			rsp->schq[lvl] = 1;
1519 			set_bit(schq, txsch->schq.bmap);
1520 			rsp->schq_list[lvl][0] = schq;
1521 		}
1522 		return;
1523 	}
1524 
1525 	/* Allocate the requested contiguous queue indices first */
1526 	if (rsp->schq_contig[lvl]) {
1527 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1528 						  txsch->schq.max, start,
1529 						  rsp->schq_contig[lvl], 0);
1530 		if (schq >= end)
1531 			rsp->schq_contig[lvl] = 0;
1532 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1533 			set_bit(schq, txsch->schq.bmap);
1534 			rsp->schq_contig_list[lvl][idx] = schq;
1535 			schq++;
1536 		}
1537 	}
1538 
1539 	/* Allocate non-contiguous queue indices */
1540 	if (rsp->schq[lvl]) {
1541 		idx = 0;
1542 		for (schq = start; schq < end; schq++) {
1543 			if (!test_bit(schq, txsch->schq.bmap)) {
1544 				set_bit(schq, txsch->schq.bmap);
1545 				rsp->schq_list[lvl][idx++] = schq;
1546 			}
1547 			if (idx == rsp->schq[lvl])
1548 				break;
1549 		}
1550 		/* Update how many were allocated */
1551 		rsp->schq[lvl] = idx;
1552 	}
1553 }
1554 
1555 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1556 				     struct nix_txsch_alloc_req *req,
1557 				     struct nix_txsch_alloc_rsp *rsp)
1558 {
1559 	struct rvu_hwinfo *hw = rvu->hw;
1560 	u16 pcifunc = req->hdr.pcifunc;
1561 	int link, blkaddr, rc = 0;
1562 	int lvl, idx, start, end;
1563 	struct nix_txsch *txsch;
1564 	struct rvu_pfvf *pfvf;
1565 	struct nix_hw *nix_hw;
1566 	u32 *pfvf_map;
1567 	u16 schq;
1568 
1569 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1570 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1571 	if (!pfvf->nixlf || blkaddr < 0)
1572 		return NIX_AF_ERR_AF_LF_INVALID;
1573 
1574 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1575 	if (!nix_hw)
1576 		return -EINVAL;
1577 
1578 	mutex_lock(&rvu->rsrc_lock);
1579 
1580 	/* Check if the request is valid as per HW capabilities
1581 	 * and can be accommodated.
1582 	 */
1583 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1584 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1585 		if (rc)
1586 			goto err;
1587 	}
1588 
1589 	/* Allocate requested Tx scheduler queues */
1590 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1591 		txsch = &nix_hw->txsch[lvl];
1592 		pfvf_map = txsch->pfvf_map;
1593 
1594 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1595 			continue;
1596 
1597 		rsp->schq[lvl] = req->schq[lvl];
1598 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1599 
1600 		link = nix_get_tx_link(rvu, pcifunc);
1601 
1602 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1603 			start = link;
1604 			end = link;
1605 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1606 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1607 		} else {
1608 			start = 0;
1609 			end = txsch->schq.max;
1610 		}
1611 
1612 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1613 
1614 		/* Reset queue config */
1615 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1616 			schq = rsp->schq_contig_list[lvl][idx];
1617 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1618 			    NIX_TXSCHQ_CFG_DONE))
1619 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1620 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1621 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1622 		}
1623 
1624 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1625 			schq = rsp->schq_list[lvl][idx];
1626 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1627 			    NIX_TXSCHQ_CFG_DONE))
1628 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1629 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1630 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1631 		}
1632 	}
1633 
1634 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1635 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1636 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1637 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1638 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1639 	goto exit;
1640 err:
1641 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1642 exit:
1643 	mutex_unlock(&rvu->rsrc_lock);
1644 	return rc;
1645 }
1646 
1647 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1648 			  int smq, u16 pcifunc, int nixlf)
1649 {
1650 	int pf = rvu_get_pf(pcifunc);
1651 	u8 cgx_id = 0, lmac_id = 0;
1652 	int err, restore_tx_en = 0;
1653 	u64 cfg;
1654 
1655 	/* enable cgx tx if disabled */
1656 	if (is_pf_cgxmapped(rvu, pf)) {
1657 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1658 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1659 						    lmac_id, true);
1660 	}
1661 
1662 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1663 	/* Do SMQ flush and set enqueue xoff */
1664 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1665 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1666 
1667 	/* Disable backpressure from physical link,
1668 	 * otherwise SMQ flush may stall.
1669 	 */
1670 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1671 
1672 	/* Wait for flush to complete */
1673 	err = rvu_poll_reg(rvu, blkaddr,
1674 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1675 	if (err)
1676 		dev_err(rvu->dev,
1677 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1678 
1679 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1680 	/* restore cgx tx state */
1681 	if (restore_tx_en)
1682 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1683 }
1684 
1685 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1686 {
1687 	int blkaddr, nixlf, lvl, schq, err;
1688 	struct rvu_hwinfo *hw = rvu->hw;
1689 	struct nix_txsch *txsch;
1690 	struct nix_hw *nix_hw;
1691 
1692 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1693 	if (blkaddr < 0)
1694 		return NIX_AF_ERR_AF_LF_INVALID;
1695 
1696 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1697 	if (!nix_hw)
1698 		return -EINVAL;
1699 
1700 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1701 	if (nixlf < 0)
1702 		return NIX_AF_ERR_AF_LF_INVALID;
1703 
1704 	/* Disable TL2/3 queue links before SMQ flush */
1705 	mutex_lock(&rvu->rsrc_lock);
1706 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1707 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1708 			continue;
1709 
1710 		txsch = &nix_hw->txsch[lvl];
1711 		for (schq = 0; schq < txsch->schq.max; schq++) {
1712 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1713 				continue;
1714 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1715 		}
1716 	}
1717 
1718 	/* Flush SMQs */
1719 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1720 	for (schq = 0; schq < txsch->schq.max; schq++) {
1721 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1722 			continue;
1723 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1724 	}
1725 
1726 	/* Now free scheduler queues to free pool */
1727 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1728 		 /* TLs at or above the aggregation level are shared across a PF
1729 		  * and its VFs, hence skip freeing them.
1730 		  */
1731 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1732 			continue;
1733 
1734 		txsch = &nix_hw->txsch[lvl];
1735 		for (schq = 0; schq < txsch->schq.max; schq++) {
1736 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1737 				continue;
1738 			rvu_free_rsrc(&txsch->schq, schq);
1739 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1740 		}
1741 	}
1742 	mutex_unlock(&rvu->rsrc_lock);
1743 
1744 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1745 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
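	/* Bit 12 triggers the sync for the NIXLF in the lower bits and
	 * self-clears once the sync is complete.
	 */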
1746 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1747 	if (err)
1748 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1749 
1750 	return 0;
1751 }
1752 
1753 static int nix_txschq_free_one(struct rvu *rvu,
1754 			       struct nix_txsch_free_req *req)
1755 {
1756 	struct rvu_hwinfo *hw = rvu->hw;
1757 	u16 pcifunc = req->hdr.pcifunc;
1758 	int lvl, schq, nixlf, blkaddr;
1759 	struct nix_txsch *txsch;
1760 	struct nix_hw *nix_hw;
1761 	u32 *pfvf_map;
1762 
1763 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1764 	if (blkaddr < 0)
1765 		return NIX_AF_ERR_AF_LF_INVALID;
1766 
1767 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1768 	if (!nix_hw)
1769 		return -EINVAL;
1770 
1771 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1772 	if (nixlf < 0)
1773 		return NIX_AF_ERR_AF_LF_INVALID;
1774 
1775 	lvl = req->schq_lvl;
1776 	schq = req->schq;
1777 	txsch = &nix_hw->txsch[lvl];
1778 
1779 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1780 		return 0;
1781 
1782 	pfvf_map = txsch->pfvf_map;
1783 	mutex_lock(&rvu->rsrc_lock);
1784 
1785 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1786 		mutex_unlock(&rvu->rsrc_lock);
1787 		goto err;
1788 	}
1789 
1790 	/* Flush if it is an SMQ. The onus of disabling
1791 	 * TL2/3 queue links before the SMQ flush is on the user.
1792 	 */
1793 	if (lvl == NIX_TXSCH_LVL_SMQ)
1794 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1795 
1796 	/* Free the resource */
1797 	rvu_free_rsrc(&txsch->schq, schq);
1798 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1799 	mutex_unlock(&rvu->rsrc_lock);
1800 	return 0;
1801 err:
1802 	return NIX_AF_ERR_TLX_INVALID;
1803 }
1804 
1805 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1806 				    struct nix_txsch_free_req *req,
1807 				    struct msg_rsp *rsp)
1808 {
1809 	if (req->flags & TXSCHQ_FREE_ALL)
1810 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1811 	else
1812 		return nix_txschq_free_one(rvu, req);
1813 }
1814 
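/* Validate a TX scheduler queue register write from a PF/VF: the register
 * offset must be valid for the given level, the target queue must be owned
 * by the requester, and for *_PARENT registers the parent queue at the next
 * level up must also belong to the requester.
 */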
1815 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1816 				      int lvl, u64 reg, u64 regval)
1817 {
1818 	u64 regbase = reg & 0xFFFF;
1819 	u16 schq, parent;
1820 
1821 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1822 		return false;
1823 
1824 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1825 	/* Check if this schq belongs to this PF/VF or not */
1826 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1827 		return false;
1828 
1829 	parent = (regval >> 16) & 0x1FF;
1830 	/* Validate MDQ's TL4 parent */
1831 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1832 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1833 		return false;
1834 
1835 	/* Validate TL4's TL3 parent */
1836 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1837 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1838 		return false;
1839 
1840 	/* Validate TL3's TL2 parent */
1841 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1842 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1843 		return false;
1844 
1845 	/* Validate TL2's TL1 parent */
1846 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1847 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1848 		return false;
1849 
1850 	return true;
1851 }
1852 
1853 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1854 {
1855 	u64 regbase;
1856 
1857 	if (hw->cap.nix_shaping)
1858 		return true;
1859 
	/* If shaping and coloring are not supported, then
	 * *_CIR and *_PIR registers should not be configured.
	 */
1863 	regbase = reg & 0xFFFF;
1864 
1865 	switch (lvl) {
1866 	case NIX_TXSCH_LVL_TL1:
1867 		if (regbase == NIX_AF_TL1X_CIR(0))
1868 			return false;
1869 		break;
1870 	case NIX_TXSCH_LVL_TL2:
1871 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1872 		    regbase == NIX_AF_TL2X_PIR(0))
1873 			return false;
1874 		break;
1875 	case NIX_TXSCH_LVL_TL3:
1876 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1877 		    regbase == NIX_AF_TL3X_PIR(0))
1878 			return false;
1879 		break;
1880 	case NIX_TXSCH_LVL_TL4:
1881 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1882 		    regbase == NIX_AF_TL4X_PIR(0))
1883 			return false;
1884 		break;
1885 	}
1886 	return true;
1887 }
1888 
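/* Program a default round-robin TL1 schedule for the TX link used by this
 * PF and mark the queue as configured, so the defaults are applied only
 * once per link.
 */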
1889 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1890 				u16 pcifunc, int blkaddr)
1891 {
1892 	u32 *pfvf_map;
1893 	int schq;
1894 
1895 	schq = nix_get_tx_link(rvu, pcifunc);
1896 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1897 	/* Skip if PF has already done the config */
1898 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1899 		return;
1900 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1901 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1902 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1903 		    TXSCH_TL1_DFLT_RR_QTM);
1904 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1905 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1906 }
1907 
1908 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1909 				    struct nix_txschq_config *req,
1910 				    struct msg_rsp *rsp)
1911 {
1912 	struct rvu_hwinfo *hw = rvu->hw;
1913 	u16 pcifunc = req->hdr.pcifunc;
1914 	u64 reg, regval, schq_regbase;
1915 	struct nix_txsch *txsch;
1916 	struct nix_hw *nix_hw;
1917 	int blkaddr, idx, err;
1918 	int nixlf, schq;
1919 	u32 *pfvf_map;
1920 
1921 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1922 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1923 		return NIX_AF_INVAL_TXSCHQ_CFG;
1924 
1925 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1926 	if (err)
1927 		return err;
1928 
1929 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1930 	if (!nix_hw)
1931 		return -EINVAL;
1932 
1933 	txsch = &nix_hw->txsch[req->lvl];
1934 	pfvf_map = txsch->pfvf_map;
1935 
1936 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1937 	    pcifunc & RVU_PFVF_FUNC_MASK) {
1938 		mutex_lock(&rvu->rsrc_lock);
1939 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1940 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1941 		mutex_unlock(&rvu->rsrc_lock);
1942 		return 0;
1943 	}
1944 
1945 	for (idx = 0; idx < req->num_regs; idx++) {
1946 		reg = req->reg[idx];
1947 		regval = req->regval[idx];
1948 		schq_regbase = reg & 0xFFFF;
1949 
1950 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1951 					       txsch->lvl, reg, regval))
1952 			return NIX_AF_INVAL_TXSCHQ_CFG;
1953 
		/* Check if shaping and coloring are supported */
1955 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1956 			continue;
1957 
1958 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1959 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1960 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1961 					   pcifunc, 0);
1962 			regval &= ~(0x7FULL << 24);
1963 			regval |= ((u64)nixlf << 24);
1964 		}
1965 
1966 		/* Clear 'BP_ENA' config, if it's not allowed */
1967 		if (!hw->cap.nix_tx_link_bp) {
1968 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1969 			    (schq_regbase & 0xFF00) ==
1970 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1971 				regval &= ~BIT_ULL(13);
1972 		}
1973 
1974 		/* Mark config as done for TL1 by PF */
1975 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1976 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1977 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1978 			mutex_lock(&rvu->rsrc_lock);
1979 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
1980 							NIX_TXSCHQ_CFG_DONE);
1981 			mutex_unlock(&rvu->rsrc_lock);
1982 		}
1983 
		/* SMQ flush is special, hence split the register write such
		 * that the flush is triggered first and the rest of the bits
		 * are written later.
		 */
1987 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1988 		    (regval & BIT_ULL(49))) {
1989 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1990 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1991 			regval &= ~BIT_ULL(49);
1992 		}
1993 		rvu_write64(rvu, blkaddr, reg, regval);
1994 	}
1995 
1996 	return 0;
1997 }
1998 
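/* Program a per-LF RX VTAG type: the vtag size plus the optional strip and
 * capture bits are written to NIX_AF_LFX_RX_VTAG_TYPEX of the requested type.
 */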
1999 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2000 			   struct nix_vtag_config *req)
2001 {
2002 	u64 regval = req->vtag_size;
2003 
2004 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2005 	    req->vtag_size > VTAGSIZE_T8)
2006 		return -EINVAL;
2007 
	/* RX VTAG Type 7 is reserved for VF VLAN */
2009 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2010 		return NIX_AF_ERR_RX_VTAG_INUSE;
2011 
2012 	if (req->rx.capture_vtag)
2013 		regval |= BIT_ULL(5);
2014 	if (req->rx.strip_vtag)
2015 		regval |= BIT_ULL(4);
2016 
2017 	rvu_write64(rvu, blkaddr,
2018 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2019 	return 0;
2020 }
2021 
2022 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2023 			    u16 pcifunc, int index)
2024 {
2025 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2026 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2027 
2028 	if (vlan->entry2pfvf_map[index] != pcifunc)
2029 		return NIX_AF_ERR_PARAM;
2030 
2031 	rvu_write64(rvu, blkaddr,
2032 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2033 	rvu_write64(rvu, blkaddr,
2034 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2035 
2036 	vlan->entry2pfvf_map[index] = 0;
2037 	rvu_free_rsrc(&vlan->rsrc, index);
2038 
2039 	return 0;
2040 }
2041 
2042 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2043 {
2044 	struct nix_txvlan *vlan;
2045 	struct nix_hw *nix_hw;
2046 	int index, blkaddr;
2047 
2048 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2049 	if (blkaddr < 0)
2050 		return;
2051 
	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

	vlan = &nix_hw->txvlan;
2054 
2055 	mutex_lock(&vlan->rsrc_lock);
2056 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2057 	for (index = 0; index < vlan->rsrc.max; index++) {
2058 		if (vlan->entry2pfvf_map[index] == pcifunc)
2059 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2060 	}
2061 	mutex_unlock(&vlan->rsrc_lock);
2062 }
2063 
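/* Allocate a free TX VTAG definition entry and program its DATA/CTL
 * registers with the given vtag value and size. Returns the entry index
 * or a negative value if no entry is free.
 */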
2064 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2065 			     u64 vtag, u8 size)
2066 {
2067 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2068 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2069 	u64 regval;
2070 	int index;
2071 
2072 	mutex_lock(&vlan->rsrc_lock);
2073 
2074 	index = rvu_alloc_rsrc(&vlan->rsrc);
2075 	if (index < 0) {
2076 		mutex_unlock(&vlan->rsrc_lock);
2077 		return index;
2078 	}
2079 
2080 	mutex_unlock(&vlan->rsrc_lock);
2081 
2082 	regval = size ? vtag : vtag << 32;
2083 
2084 	rvu_write64(rvu, blkaddr,
2085 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2086 	rvu_write64(rvu, blkaddr,
2087 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2088 
2089 	return index;
2090 }
2091 
2092 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2093 			     struct nix_vtag_config *req)
2094 {
2095 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2096 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2097 	u16 pcifunc = req->hdr.pcifunc;
2098 	int idx0 = req->tx.vtag0_idx;
2099 	int idx1 = req->tx.vtag1_idx;
2100 	int err = 0;
2101 
2102 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2103 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2104 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2105 			return NIX_AF_ERR_PARAM;
2106 
2107 	mutex_lock(&vlan->rsrc_lock);
2108 
2109 	if (req->tx.free_vtag0) {
2110 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2111 		if (err)
2112 			goto exit;
2113 	}
2114 
2115 	if (req->tx.free_vtag1)
2116 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2117 
2118 exit:
2119 	mutex_unlock(&vlan->rsrc_lock);
2120 	return err;
2121 }
2122 
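/* Allocate TX VTAG entries for vtag0 and/or vtag1 as requested, record the
 * owner in entry2pfvf_map and return the indices via the mbox response.
 * If vtag1 allocation fails, any vtag0 entry allocated here is freed.
 */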
2123 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2124 			   struct nix_vtag_config *req,
2125 			   struct nix_vtag_config_rsp *rsp)
2126 {
2127 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2128 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2129 	u16 pcifunc = req->hdr.pcifunc;
2130 
2131 	if (req->tx.cfg_vtag0) {
2132 		rsp->vtag0_idx =
2133 			nix_tx_vtag_alloc(rvu, blkaddr,
2134 					  req->tx.vtag0, req->vtag_size);
2135 
2136 		if (rsp->vtag0_idx < 0)
2137 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2138 
2139 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2140 	}
2141 
2142 	if (req->tx.cfg_vtag1) {
2143 		rsp->vtag1_idx =
2144 			nix_tx_vtag_alloc(rvu, blkaddr,
2145 					  req->tx.vtag1, req->vtag_size);
2146 
2147 		if (rsp->vtag1_idx < 0)
2148 			goto err_free;
2149 
2150 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2151 	}
2152 
2153 	return 0;
2154 
2155 err_free:
2156 	if (req->tx.cfg_vtag0)
2157 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2158 
2159 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2160 }
2161 
2162 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2163 				  struct nix_vtag_config *req,
2164 				  struct nix_vtag_config_rsp *rsp)
2165 {
2166 	u16 pcifunc = req->hdr.pcifunc;
2167 	int blkaddr, nixlf, err;
2168 
2169 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2170 	if (err)
2171 		return err;
2172 
2173 	if (req->cfg_type) {
2174 		/* rx vtag configuration */
2175 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2176 		if (err)
2177 			return NIX_AF_ERR_PARAM;
2178 	} else {
2179 		/* tx vtag configuration */
2180 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2181 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2182 			return NIX_AF_ERR_PARAM;
2183 
2184 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2185 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2186 
2187 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2188 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2189 	}
2190 
2191 	return 0;
2192 }
2193 
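/* Init or update a single multicast entry (MCE) via the NIX admin queue.
 * Replicated pkts are steered to RQ0 of the given pcifunc; 'next' and 'eol'
 * chain the entry into the per-PF MCE list.
 */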
2194 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2195 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2196 {
2197 	struct nix_aq_enq_req aq_req;
2198 	int err;
2199 
2200 	aq_req.hdr.pcifunc = 0;
2201 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2202 	aq_req.op = op;
2203 	aq_req.qidx = mce;
2204 
2205 	/* Forward bcast pkts to RQ0, RSS not needed */
2206 	aq_req.mce.op = 0;
2207 	aq_req.mce.index = 0;
2208 	aq_req.mce.eol = eol;
2209 	aq_req.mce.pf_func = pcifunc;
2210 	aq_req.mce.next = next;
2211 
2212 	/* All fields valid */
2213 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2214 
2215 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2216 	if (err) {
2217 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2218 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2219 		return err;
2220 	}
2221 	return 0;
2222 }
2223 
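/* Maintain the software MCE list for a PF's broadcast replication group:
 * add 'pcifunc' at the tail, or remove it if it is already present.
 */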
2224 static int nix_update_mce_list(struct nix_mce_list *mce_list,
2225 			       u16 pcifunc, bool add)
2226 {
2227 	struct mce *mce, *tail = NULL;
2228 	bool delete = false;
2229 
2230 	/* Scan through the current list */
2231 	hlist_for_each_entry(mce, &mce_list->head, node) {
2232 		/* If already exists, then delete */
2233 		if (mce->pcifunc == pcifunc && !add) {
2234 			delete = true;
2235 			break;
2236 		}
2237 		tail = mce;
2238 	}
2239 
2240 	if (delete) {
2241 		hlist_del(&mce->node);
2242 		kfree(mce);
2243 		mce_list->count--;
2244 		return 0;
2245 	}
2246 
2247 	if (!add)
2248 		return 0;
2249 
2250 	/* Add a new one to the list, at the tail */
2251 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2252 	if (!mce)
2253 		return -ENOMEM;
2254 	mce->pcifunc = pcifunc;
2255 	if (!tail)
2256 		hlist_add_head(&mce->node, &mce_list->head);
2257 	else
2258 		hlist_add_behind(&mce->node, &tail->node);
2259 	mce_list->count++;
2260 	return 0;
2261 }
2262 
2263 int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
2264 {
2265 	int err = 0, idx, next_idx, last_idx;
2266 	struct nix_mce_list *mce_list;
2267 	struct nix_mcast *mcast;
2268 	struct nix_hw *nix_hw;
2269 	struct rvu_pfvf *pfvf;
2270 	struct mce *mce;
2271 	int blkaddr;
2272 
2273 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
2274 	if (is_afvf(pcifunc))
2275 		return 0;
2276 
2277 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2278 	if (blkaddr < 0)
2279 		return 0;
2280 
2281 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2282 	if (!nix_hw)
2283 		return 0;
2284 
2285 	mcast = &nix_hw->mcast;
2286 
2287 	/* Get this PF/VF func's MCE index */
2288 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2289 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2290 
2291 	mce_list = &pfvf->bcast_mce_list;
2292 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
2293 		dev_err(rvu->dev,
2294 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2295 			__func__, idx, mce_list->max,
2296 			pcifunc >> RVU_PFVF_PF_SHIFT);
2297 		return -EINVAL;
2298 	}
2299 
2300 	mutex_lock(&mcast->mce_lock);
2301 
2302 	err = nix_update_mce_list(mce_list, pcifunc, add);
2303 	if (err)
2304 		goto end;
2305 
2306 	/* Disable MCAM entry in NPC */
2307 	if (!mce_list->count) {
2308 		rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
2309 		goto end;
2310 	}
2311 
2312 	/* Dump the updated list to HW */
2313 	idx = pfvf->bcast_mce_idx;
2314 	last_idx = idx + mce_list->count - 1;
2315 	hlist_for_each_entry(mce, &mce_list->head, node) {
2316 		if (idx > last_idx)
2317 			break;
2318 
2319 		next_idx = idx + 1;
2320 		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					next_idx > last_idx);
2324 		if (err)
2325 			goto end;
2326 		idx++;
2327 	}
2328 
2329 end:
2330 	mutex_unlock(&mcast->mce_lock);
2331 	return err;
2332 }
2333 
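/* For each CGX mapped PF attached to this NIX block, reserve a contiguous
 * range of MCEs big enough for the PF and all its VFs and fill it with
 * dummy entries, so that later updates always use AQ WRITE ops.
 */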
2334 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2335 {
2336 	struct nix_mcast *mcast = &nix_hw->mcast;
2337 	int err, pf, numvfs, idx;
2338 	struct rvu_pfvf *pfvf;
2339 	u16 pcifunc;
2340 	u64 cfg;
2341 
2342 	/* Skip PF0 (i.e AF) */
2343 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2344 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2345 		/* If PF is not enabled, nothing to do */
2346 		if (!((cfg >> 20) & 0x01))
2347 			continue;
2348 		/* Get numVFs attached to this PF */
2349 		numvfs = (cfg >> 12) & 0xFF;
2350 
2351 		pfvf = &rvu->pf[pf];
2352 
		/* Is this NIX0/1 block mapped to this PF? */
2354 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2355 			continue;
2356 
2357 		/* Save the start MCE */
2358 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2359 
2360 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2361 
2362 		for (idx = 0; idx < (numvfs + 1); idx++) {
2363 			/* idx-0 is for PF, followed by VFs */
2364 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2365 			pcifunc |= idx;
			/* Add dummy entries now, so that we don't have to
			 * check whether AQ_OP should be INIT/WRITE later on.
			 * They will be updated when a NIXLF is attached to or
			 * detached from these PF/VFs.
			 */
2371 			err = nix_blk_setup_mce(rvu, nix_hw,
2372 						pfvf->bcast_mce_idx + idx,
2373 						NIX_AQ_INSTOP_INIT,
2374 						pcifunc, 0, true);
2375 			if (err)
2376 				return err;
2377 		}
2378 	}
2379 	return 0;
2380 }
2381 
2382 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2383 {
2384 	struct nix_mcast *mcast = &nix_hw->mcast;
2385 	struct rvu_hwinfo *hw = rvu->hw;
2386 	int err, size;
2387 
2388 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2389 	size = (1ULL << size);
2390 
2391 	/* Alloc memory for multicast/mirror replication entries */
2392 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2393 			 (256UL << MC_TBL_SIZE), size);
2394 	if (err)
2395 		return -ENOMEM;
2396 
2397 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2398 		    (u64)mcast->mce_ctx->iova);
2399 
	/* Set max list length equal to max no of VFs per PF + PF itself */
2401 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2402 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2403 
2404 	/* Alloc memory for multicast replication buffers */
2405 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2406 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2407 			 (8UL << MC_BUF_CNT), size);
2408 	if (err)
2409 		return -ENOMEM;
2410 
2411 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2412 		    (u64)mcast->mcast_buf->iova);
2413 
2414 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2415 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2416 
2417 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2418 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2419 		    BIT_ULL(20) | MC_BUF_CNT);
2420 
2421 	mutex_init(&mcast->mce_lock);
2422 
2423 	return nix_setup_bcast_tables(rvu, nix_hw);
2424 }
2425 
2426 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2427 {
2428 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2429 	int err;
2430 
	/* Allocate resource bitmap for tx vtag def registers */
2432 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2433 	err = rvu_alloc_bitmap(&vlan->rsrc);
2434 	if (err)
2435 		return -ENOMEM;
2436 
	/* Alloc memory for saving the entry to RVU PFFUNC allocation mapping */
2438 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2439 					    sizeof(u16), GFP_KERNEL);
2440 	if (!vlan->entry2pfvf_map)
2441 		goto free_mem;
2442 
2443 	mutex_init(&vlan->rsrc_lock);
2444 	return 0;
2445 
2446 free_mem:
2447 	kfree(vlan->rsrc.bmap);
2448 	return -ENOMEM;
2449 }
2450 
2451 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2452 {
2453 	struct nix_txsch *txsch;
2454 	int err, lvl, schq;
2455 	u64 cfg, reg;
2456 
	/* Get the scheduler queue count of each type and alloc a
	 * bitmap per type for alloc/free/attach operations.
	 */
2460 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2461 		txsch = &nix_hw->txsch[lvl];
2462 		txsch->lvl = lvl;
2463 		switch (lvl) {
2464 		case NIX_TXSCH_LVL_SMQ:
2465 			reg = NIX_AF_MDQ_CONST;
2466 			break;
2467 		case NIX_TXSCH_LVL_TL4:
2468 			reg = NIX_AF_TL4_CONST;
2469 			break;
2470 		case NIX_TXSCH_LVL_TL3:
2471 			reg = NIX_AF_TL3_CONST;
2472 			break;
2473 		case NIX_TXSCH_LVL_TL2:
2474 			reg = NIX_AF_TL2_CONST;
2475 			break;
2476 		case NIX_TXSCH_LVL_TL1:
2477 			reg = NIX_AF_TL1_CONST;
2478 			break;
2479 		}
2480 		cfg = rvu_read64(rvu, blkaddr, reg);
2481 		txsch->schq.max = cfg & 0xFFFF;
2482 		err = rvu_alloc_bitmap(&txsch->schq);
2483 		if (err)
2484 			return err;
2485 
2486 		/* Allocate memory for scheduler queues to
2487 		 * PF/VF pcifunc mapping info.
2488 		 */
2489 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2490 					       sizeof(u32), GFP_KERNEL);
2491 		if (!txsch->pfvf_map)
2492 			return -ENOMEM;
2493 		for (schq = 0; schq < txsch->schq.max; schq++)
2494 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2495 	}
2496 	return 0;
2497 }
2498 
2499 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2500 				int blkaddr, u32 cfg)
2501 {
2502 	int fmt_idx;
2503 
2504 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2505 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2506 			return fmt_idx;
2507 	}
2508 	if (fmt_idx >= nix_hw->mark_format.total)
2509 		return -ERANGE;
2510 
2511 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2512 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2513 	nix_hw->mark_format.in_use++;
2514 	return fmt_idx;
2515 }
2516 
2517 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2518 				    int blkaddr)
2519 {
2520 	u64 cfgs[] = {
2521 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2522 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2523 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2524 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2525 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2526 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2527 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2528 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2529 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2530 	};
2531 	int i, rc;
2532 	u64 total;
2533 
2534 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2535 	nix_hw->mark_format.total = (u8)total;
2536 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2537 					       GFP_KERNEL);
2538 	if (!nix_hw->mark_format.cfg)
2539 		return -ENOMEM;
2540 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2541 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2542 		if (rc < 0)
			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
				rc, i);
2545 	}
2546 
2547 	return 0;
2548 }
2549 
2550 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
2551 {
2552 	/* CN10K supports LBK FIFO size 72 KB */
2553 	if (rvu->hw->lbk_bufsize == 0x12000)
2554 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
2555 	else
2556 		*max_mtu = NIC_HW_MAX_FRS;
2557 }
2558 
2559 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2560 {
2561 	/* RPM supports FIFO len 128 KB */
2562 	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2563 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2564 	else
2565 		*max_mtu = NIC_HW_MAX_FRS;
2566 }
2567 
2568 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2569 				     struct nix_hw_info *rsp)
2570 {
2571 	u16 pcifunc = req->hdr.pcifunc;
2572 	int blkaddr;
2573 
2574 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2575 	if (blkaddr < 0)
2576 		return NIX_AF_ERR_AF_LF_INVALID;
2577 
2578 	if (is_afvf(pcifunc))
2579 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2580 	else
2581 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2582 
2583 	rsp->min_mtu = NIC_HW_MIN_FRS;
2584 	return 0;
2585 }
2586 
2587 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2588 				   struct msg_rsp *rsp)
2589 {
2590 	u16 pcifunc = req->hdr.pcifunc;
2591 	int i, nixlf, blkaddr, err;
2592 	u64 stats;
2593 
2594 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2595 	if (err)
2596 		return err;
2597 
2598 	/* Get stats count supported by HW */
2599 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2600 
2601 	/* Reset tx stats */
2602 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2603 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2604 
2605 	/* Reset rx stats */
2606 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2607 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2608 
2609 	return 0;
2610 }
2611 
2612 /* Returns the ALG index to be set into NPC_RX_ACTION */
2613 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2614 {
2615 	int i;
2616 
	/* Scan over existing algo entries to find a match */
2618 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2619 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2620 			return i;
2621 
2622 	return -ERANGE;
2623 }
2624 
2625 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2626 {
2627 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2628 	int max_key_off, max_bit_pos, group_member;
2629 	struct nix_rx_flowkey_alg *field;
2630 	struct nix_rx_flowkey_alg tmp;
2631 	u32 key_type, valid_key;
2632 	int l4_key_offset = 0;
2633 
2634 	if (!alg)
2635 		return -EINVAL;
2636 
2637 #define FIELDS_PER_ALG  5
2638 #define MAX_KEY_OFF	40
2639 	/* Clear all fields */
2640 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2641 
	/* Each of the 32 possible flow key algorithm definitions should
	 * fall into the above incremental config (except ALG0). Otherwise a
	 * single NPC MCAM entry is not sufficient for supporting RSS.
	 *
	 * If a different definition or combination is needed then the NPC
	 * MCAM has to be programmed to filter such pkts and its action should
	 * point to this definition to calculate flowtag or hash.
	 *
	 * The `for loop` goes over _all_ protocol fields and the following
	 * variables depict the state machine's forward progress logic.
2652 	 *
2653 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2654 	 * in field->key_offset update.
2655 	 * field_marker - Enabled when a new field needs to be selected.
2656 	 * group_member - Enabled when protocol is part of a group.
2657 	 */
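	/* For example, flow_cfg = IPV4 | IPV6 | TCP yields three fields:
	 * IPv4 SIP+DIP (8 bytes) and IPv6 SIP+DIP (32 bytes) both start at
	 * key offset 0 (a pkt is either v4 or v6), and the L4 sport+dport
	 * pair lands at key offset 32.
	 */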
2658 
2659 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2660 	nr_field = 0; key_off = 0; field_marker = 1;
2661 	field = &tmp; max_bit_pos = fls(flow_cfg);
2662 	for (idx = 0;
2663 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2664 	     key_off < MAX_KEY_OFF; idx++) {
2665 		key_type = BIT(idx);
2666 		valid_key = flow_cfg & key_type;
2667 		/* Found a field marker, reset the field values */
2668 		if (field_marker)
2669 			memset(&tmp, 0, sizeof(tmp));
2670 
2671 		field_marker = true;
2672 		keyoff_marker = true;
2673 		switch (key_type) {
2674 		case NIX_FLOW_KEY_TYPE_PORT:
2675 			field->sel_chan = true;
2676 			/* This should be set to 1, when SEL_CHAN is set */
2677 			field->bytesm1 = 1;
2678 			break;
2679 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2680 			field->lid = NPC_LID_LC;
2681 			field->hdr_offset = 9; /* offset */
2682 			field->bytesm1 = 0; /* 1 byte */
2683 			field->ltype_match = NPC_LT_LC_IP;
2684 			field->ltype_mask = 0xF;
2685 			break;
2686 		case NIX_FLOW_KEY_TYPE_IPV4:
2687 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2688 			field->lid = NPC_LID_LC;
2689 			field->ltype_match = NPC_LT_LC_IP;
2690 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2691 				field->lid = NPC_LID_LG;
2692 				field->ltype_match = NPC_LT_LG_TU_IP;
2693 			}
2694 			field->hdr_offset = 12; /* SIP offset */
2695 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2696 			field->ltype_mask = 0xF; /* Match only IPv4 */
2697 			keyoff_marker = false;
2698 			break;
2699 		case NIX_FLOW_KEY_TYPE_IPV6:
2700 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2701 			field->lid = NPC_LID_LC;
2702 			field->ltype_match = NPC_LT_LC_IP6;
2703 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2704 				field->lid = NPC_LID_LG;
2705 				field->ltype_match = NPC_LT_LG_TU_IP6;
2706 			}
2707 			field->hdr_offset = 8; /* SIP offset */
2708 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2709 			field->ltype_mask = 0xF; /* Match only IPv6 */
2710 			break;
2711 		case NIX_FLOW_KEY_TYPE_TCP:
2712 		case NIX_FLOW_KEY_TYPE_UDP:
2713 		case NIX_FLOW_KEY_TYPE_SCTP:
2714 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2715 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2716 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2717 			field->lid = NPC_LID_LD;
2718 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2719 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2720 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2721 				field->lid = NPC_LID_LH;
2722 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2723 
2724 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2725 			 * so no need to change the ltype_match, just change
2726 			 * the lid for inner protocols
2727 			 */
2728 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2729 				     (int)NPC_LT_LH_TU_TCP);
2730 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2731 				     (int)NPC_LT_LH_TU_UDP);
2732 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2733 				     (int)NPC_LT_LH_TU_SCTP);
2734 
2735 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2736 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2737 			    valid_key) {
2738 				field->ltype_match |= NPC_LT_LD_TCP;
2739 				group_member = true;
2740 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2741 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2742 				   valid_key) {
2743 				field->ltype_match |= NPC_LT_LD_UDP;
2744 				group_member = true;
2745 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2746 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2747 				   valid_key) {
2748 				field->ltype_match |= NPC_LT_LD_SCTP;
2749 				group_member = true;
2750 			}
2751 			field->ltype_mask = ~field->ltype_match;
2752 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2753 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where any of the group items
				 * is enabled in the group but not the final one
				 */
2757 				if (group_member) {
2758 					valid_key = true;
2759 					group_member = false;
2760 				}
2761 			} else {
2762 				field_marker = false;
2763 				keyoff_marker = false;
2764 			}
2765 
			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
			 * remember the TCP key offset within the 40 byte hash
			 * key.
			 */
2769 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2770 				l4_key_offset = key_off;
2771 			break;
2772 		case NIX_FLOW_KEY_TYPE_NVGRE:
2773 			field->lid = NPC_LID_LD;
2774 			field->hdr_offset = 4; /* VSID offset */
2775 			field->bytesm1 = 2;
2776 			field->ltype_match = NPC_LT_LD_NVGRE;
2777 			field->ltype_mask = 0xF;
2778 			break;
2779 		case NIX_FLOW_KEY_TYPE_VXLAN:
2780 		case NIX_FLOW_KEY_TYPE_GENEVE:
2781 			field->lid = NPC_LID_LE;
2782 			field->bytesm1 = 2;
2783 			field->hdr_offset = 4;
2784 			field->ltype_mask = 0xF;
2785 			field_marker = false;
2786 			keyoff_marker = false;
2787 
2788 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2789 				field->ltype_match |= NPC_LT_LE_VXLAN;
2790 				group_member = true;
2791 			}
2792 
2793 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2794 				field->ltype_match |= NPC_LT_LE_GENEVE;
2795 				group_member = true;
2796 			}
2797 
2798 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2799 				if (group_member) {
2800 					field->ltype_mask = ~field->ltype_match;
2801 					field_marker = true;
2802 					keyoff_marker = true;
2803 					valid_key = true;
2804 					group_member = false;
2805 				}
2806 			}
2807 			break;
2808 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2809 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2810 			field->lid = NPC_LID_LA;
2811 			field->ltype_match = NPC_LT_LA_ETHER;
2812 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2813 				field->lid = NPC_LID_LF;
2814 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2815 			}
2816 			field->hdr_offset = 0;
2817 			field->bytesm1 = 5; /* DMAC 6 Byte */
2818 			field->ltype_mask = 0xF;
2819 			break;
2820 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2821 			field->lid = NPC_LID_LC;
2822 			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 Byte ext hdr */
2824 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2825 			field->ltype_mask = 0xF;
2826 			break;
2827 		case NIX_FLOW_KEY_TYPE_GTPU:
2828 			field->lid = NPC_LID_LE;
2829 			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
2831 			field->ltype_match = NPC_LT_LE_GTPU;
2832 			field->ltype_mask = 0xF;
2833 			break;
2834 		case NIX_FLOW_KEY_TYPE_VLAN:
2835 			field->lid = NPC_LID_LB;
2836 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2837 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2838 			field->ltype_match = NPC_LT_LB_CTAG;
2839 			field->ltype_mask = 0xF;
2840 			field->fn_mask = 1; /* Mask out the first nibble */
2841 			break;
2842 		case NIX_FLOW_KEY_TYPE_AH:
2843 		case NIX_FLOW_KEY_TYPE_ESP:
2844 			field->hdr_offset = 0;
2845 			field->bytesm1 = 7; /* SPI + sequence number */
2846 			field->ltype_mask = 0xF;
2847 			field->lid = NPC_LID_LE;
2848 			field->ltype_match = NPC_LT_LE_ESP;
2849 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2850 				field->lid = NPC_LID_LD;
2851 				field->ltype_match = NPC_LT_LD_AH;
2852 				field->hdr_offset = 4;
2853 				keyoff_marker = false;
2854 			}
2855 			break;
2856 		}
2857 		field->ena = 1;
2858 
2859 		/* Found a valid flow key type */
2860 		if (valid_key) {
2861 			/* Use the key offset of TCP/UDP/SCTP fields
2862 			 * for ESP/AH fields.
2863 			 */
2864 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2865 			    key_type == NIX_FLOW_KEY_TYPE_AH)
2866 				key_off = l4_key_offset;
2867 			field->key_offset = key_off;
2868 			memcpy(&alg[nr_field], field, sizeof(*field));
2869 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2870 
2871 			/* Found a field marker, get the next field */
2872 			if (field_marker)
2873 				nr_field++;
2874 		}
2875 
2876 		/* Found a keyoff marker, update the new key_off */
2877 		if (keyoff_marker) {
2878 			key_off += max_key_off;
2879 			max_key_off = 0;
2880 		}
2881 	}
2882 	/* Processed all the flow key types */
2883 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2884 		return 0;
2885 	else
2886 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
2887 }
2888 
2889 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2890 {
2891 	u64 field[FIELDS_PER_ALG];
2892 	struct nix_hw *hw;
2893 	int fid, rc;
2894 
2895 	hw = get_nix_hw(rvu->hw, blkaddr);
2896 	if (!hw)
2897 		return -EINVAL;
2898 
	/* No room to add a new flow hash algorithm */
2900 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
2901 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
2902 
2903 	/* Generate algo fields for the given flow_cfg */
2904 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
2905 	if (rc)
2906 		return rc;
2907 
2908 	/* Update ALGX_FIELDX register with generated fields */
2909 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2910 		rvu_write64(rvu, blkaddr,
2911 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
2912 							   fid), field[fid]);
2913 
	/* Store the flow_cfg for further lookup */
2915 	rc = hw->flowkey.in_use;
2916 	hw->flowkey.flowkey[rc] = flow_cfg;
2917 	hw->flowkey.in_use++;
2918 
2919 	return rc;
2920 }
2921 
2922 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
2923 					 struct nix_rss_flowkey_cfg *req,
2924 					 struct nix_rss_flowkey_cfg_rsp *rsp)
2925 {
2926 	u16 pcifunc = req->hdr.pcifunc;
2927 	int alg_idx, nixlf, blkaddr;
2928 	struct nix_hw *nix_hw;
2929 	int err;
2930 
2931 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2932 	if (err)
2933 		return err;
2934 
2935 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2936 	if (!nix_hw)
2937 		return -EINVAL;
2938 
2939 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get algo index from the existing list, reserve a new one */
2941 	if (alg_idx < 0) {
2942 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
2943 						  req->flowkey_cfg);
2944 		if (alg_idx < 0)
2945 			return alg_idx;
2946 	}
2947 	rsp->alg_idx = alg_idx;
2948 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
2949 				       alg_idx, req->mcam_index);
2950 	return 0;
2951 }
2952 
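/* Reserve a default set of RSS flow key algorithms at NIX block init:
 * IPv4/IPv6 2-tuple plus every TCP/UDP/SCTP 4-tuple combination, so that
 * common configs requested later via mbox can reuse an existing algo index.
 */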
2953 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
2954 {
2955 	u32 flowkey_cfg, minkey_cfg;
2956 	int alg, fid, rc;
2957 
2958 	/* Disable all flow key algx fieldx */
2959 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
2960 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
2961 			rvu_write64(rvu, blkaddr,
2962 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
2963 				    0);
2964 	}
2965 
2966 	/* IPv4/IPv6 SIP/DIPs */
2967 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
2968 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2969 	if (rc < 0)
2970 		return rc;
2971 
2972 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2973 	minkey_cfg = flowkey_cfg;
2974 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
2975 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2976 	if (rc < 0)
2977 		return rc;
2978 
2979 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2980 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
2981 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2982 	if (rc < 0)
2983 		return rc;
2984 
2985 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
2986 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
2987 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2988 	if (rc < 0)
2989 		return rc;
2990 
2991 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
2992 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
2993 			NIX_FLOW_KEY_TYPE_UDP;
2994 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
2995 	if (rc < 0)
2996 		return rc;
2997 
2998 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
2999 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3000 			NIX_FLOW_KEY_TYPE_SCTP;
3001 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3002 	if (rc < 0)
3003 		return rc;
3004 
3005 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3006 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3007 			NIX_FLOW_KEY_TYPE_SCTP;
3008 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3009 	if (rc < 0)
3010 		return rc;
3011 
3012 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3013 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3014 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3015 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3016 	if (rc < 0)
3017 		return rc;
3018 
3019 	return 0;
3020 }
3021 
3022 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3023 				      struct nix_set_mac_addr *req,
3024 				      struct msg_rsp *rsp)
3025 {
3026 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3027 	u16 pcifunc = req->hdr.pcifunc;
3028 	int blkaddr, nixlf, err;
3029 	struct rvu_pfvf *pfvf;
3030 
3031 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3032 	if (err)
3033 		return err;
3034 
3035 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3036 
3037 	/* VF can't overwrite admin(PF) changes */
3038 	if (from_vf && pfvf->pf_set_vf_cfg)
3039 		return -EPERM;
3040 
3041 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3042 
3043 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3044 				    pfvf->rx_chan_base, req->mac_addr);
3045 
3046 	return 0;
3047 }
3048 
3049 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3050 				      struct msg_req *req,
3051 				      struct nix_get_mac_addr_rsp *rsp)
3052 {
3053 	u16 pcifunc = req->hdr.pcifunc;
3054 	struct rvu_pfvf *pfvf;
3055 
3056 	if (!is_nixlf_attached(rvu, pcifunc))
3057 		return NIX_AF_ERR_AF_LF_INVALID;
3058 
3059 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3060 
3061 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3062 
3063 	return 0;
3064 }
3065 
3066 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3067 				     struct msg_rsp *rsp)
3068 {
3069 	bool allmulti = false, disable_promisc = false;
3070 	u16 pcifunc = req->hdr.pcifunc;
3071 	int blkaddr, nixlf, err;
3072 	struct rvu_pfvf *pfvf;
3073 
3074 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3075 	if (err)
3076 		return err;
3077 
3078 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3079 
3080 	if (req->mode & NIX_RX_MODE_PROMISC)
3081 		allmulti = false;
3082 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
3083 		allmulti = true;
3084 	else
3085 		disable_promisc = true;
3086 
3087 	if (disable_promisc)
3088 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
3089 	else
3090 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3091 					      pfvf->rx_chan_base, allmulti);
3092 	return 0;
3093 }
3094 
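/* An RX link is shared by a PF and all its VFs. Update the requester's
 * max/min lengths, then widen the request to the largest maxlen and the
 * smallest non-zero minlen found across the PF and its VFs, so the link
 * config accommodates everyone sharing it.
 */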
3095 static void nix_find_link_frs(struct rvu *rvu,
3096 			      struct nix_frs_cfg *req, u16 pcifunc)
3097 {
3098 	int pf = rvu_get_pf(pcifunc);
3099 	struct rvu_pfvf *pfvf;
3100 	int maxlen, minlen;
3101 	int numvfs, hwvf;
3102 	int vf;
3103 
3104 	/* Update with requester's min/max lengths */
3105 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3106 	pfvf->maxlen = req->maxlen;
3107 	if (req->update_minlen)
3108 		pfvf->minlen = req->minlen;
3109 
3110 	maxlen = req->maxlen;
3111 	minlen = req->update_minlen ? req->minlen : 0;
3112 
3113 	/* Get this PF's numVFs and starting hwvf */
3114 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3115 
3116 	/* For each VF, compare requested max/minlen */
3117 	for (vf = 0; vf < numvfs; vf++) {
3118 		pfvf =  &rvu->hwvf[hwvf + vf];
3119 		if (pfvf->maxlen > maxlen)
3120 			maxlen = pfvf->maxlen;
3121 		if (req->update_minlen &&
3122 		    pfvf->minlen && pfvf->minlen < minlen)
3123 			minlen = pfvf->minlen;
3124 	}
3125 
3126 	/* Compare requested max/minlen with PF's max/minlen */
3127 	pfvf = &rvu->pf[pf];
3128 	if (pfvf->maxlen > maxlen)
3129 		maxlen = pfvf->maxlen;
3130 	if (req->update_minlen &&
3131 	    pfvf->minlen && pfvf->minlen < minlen)
3132 		minlen = pfvf->minlen;
3133 
	/* Update the request with the max/min across the PF and its VFs */
3135 	req->maxlen = maxlen;
3136 	if (req->update_minlen)
3137 		req->minlen = minlen;
3138 }
3139 
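/* Mbox handler: update max/min frame sizes. Optionally updates all SMQs
 * owned by the requester, then programs the RX link (CGX, LBK or SDP) the
 * requester maps to and re-computes TX credits for CGX links.
 */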
3140 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3141 				    struct msg_rsp *rsp)
3142 {
3143 	struct rvu_hwinfo *hw = rvu->hw;
3144 	u16 pcifunc = req->hdr.pcifunc;
3145 	int pf = rvu_get_pf(pcifunc);
3146 	int blkaddr, schq, link = -1;
3147 	struct nix_txsch *txsch;
3148 	u64 cfg, lmac_fifo_len;
3149 	struct nix_hw *nix_hw;
3150 	u8 cgx = 0, lmac = 0;
3151 	u16 max_mtu;
3152 
3153 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3154 	if (blkaddr < 0)
3155 		return NIX_AF_ERR_AF_LF_INVALID;
3156 
3157 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3158 	if (!nix_hw)
3159 		return -EINVAL;
3160 
3161 	if (is_afvf(pcifunc))
3162 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3163 	else
3164 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3165 
3166 	if (!req->sdp_link && req->maxlen > max_mtu)
3167 		return NIX_AF_ERR_FRS_INVALID;
3168 
3169 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3170 		return NIX_AF_ERR_FRS_INVALID;
3171 
	/* Check if requester wants to update SMQs */
3173 	if (!req->update_smq)
3174 		goto rx_frscfg;
3175 
	/* Update min/maxlen in each of the SMQs attached to this PF/VF */
3177 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3178 	mutex_lock(&rvu->rsrc_lock);
3179 	for (schq = 0; schq < txsch->schq.max; schq++) {
3180 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3181 			continue;
3182 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3183 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3184 		if (req->update_minlen)
3185 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3186 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3187 	}
3188 	mutex_unlock(&rvu->rsrc_lock);
3189 
3190 rx_frscfg:
3191 	/* Check if config is for SDP link */
3192 	if (req->sdp_link) {
3193 		if (!hw->sdp_links)
3194 			return NIX_AF_ERR_RX_LINK_INVALID;
3195 		link = hw->cgx_links + hw->lbk_links;
3196 		goto linkcfg;
3197 	}
3198 
3199 	/* Check if the request is from CGX mapped RVU PF */
3200 	if (is_pf_cgxmapped(rvu, pf)) {
3201 		/* Get CGX and LMAC to which this PF is mapped and find link */
3202 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3203 		link = (cgx * hw->lmac_per_cgx) + lmac;
3204 	} else if (pf == 0) {
3205 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
3206 		link = hw->cgx_links;
3207 	}
3208 
3209 	if (link < 0)
3210 		return NIX_AF_ERR_RX_LINK_INVALID;
3211 
3212 	nix_find_link_frs(rvu, req, pcifunc);
3213 
3214 linkcfg:
3215 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3216 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3217 	if (req->update_minlen)
3218 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
3219 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3220 
3221 	if (req->sdp_link || pf == 0)
3222 		return 0;
3223 
3224 	/* Update transmit credits for CGX links */
3225 	lmac_fifo_len =
3226 		rvu_cgx_get_fifolen(rvu) /
3227 		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3228 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3229 	cfg &= ~(0xFFFFFULL << 12);
3230 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
3231 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3232 	return 0;
3233 }
3234 
3235 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3236 				    struct msg_rsp *rsp)
3237 {
3238 	int nixlf, blkaddr, err;
3239 	u64 cfg;
3240 
3241 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3242 	if (err)
3243 		return err;
3244 
3245 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3246 	/* Set the interface configuration */
3247 	if (req->len_verify & BIT(0))
3248 		cfg |= BIT_ULL(41);
3249 	else
3250 		cfg &= ~BIT_ULL(41);
3251 
3252 	if (req->len_verify & BIT(1))
3253 		cfg |= BIT_ULL(40);
3254 	else
3255 		cfg &= ~BIT_ULL(40);
3256 
3257 	if (req->csum_verify & BIT(0))
3258 		cfg |= BIT_ULL(37);
3259 	else
3260 		cfg &= ~BIT_ULL(37);
3261 
3262 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3263 
3264 	return 0;
3265 }
3266 
3267 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3268 {
	/* CN10K supports a 72KB FIFO size and a max packet size of 64KB */
3270 	if (rvu->hw->lbk_bufsize == 0x12000)
3271 		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3272 
3273 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3274 }
3275 
3276 static void nix_link_config(struct rvu *rvu, int blkaddr)
3277 {
3278 	struct rvu_hwinfo *hw = rvu->hw;
3279 	int cgx, lmac_cnt, slink, link;
3280 	u16 lbk_max_frs, lmac_max_frs;
3281 	u64 tx_credits;
3282 
3283 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3284 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3285 
3286 	/* Set default min/max packet lengths allowed on NIX Rx links.
3287 	 *
	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3289 	 * as undersize and report them to SW as error pkts, hence
3290 	 * setting it to 40 bytes.
3291 	 */
3292 	for (link = 0; link < hw->cgx_links; link++) {
3293 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3294 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3295 	}
3296 
	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
3298 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3299 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3300 	}
3301 	if (hw->sdp_links) {
3302 		link = hw->cgx_links + hw->lbk_links;
3303 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3304 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3305 	}
3306 
3307 	/* Set credits for Tx links assuming max packet length allowed.
3308 	 * This will be reconfigured based on MTU set for PF/VF.
3309 	 */
3310 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3311 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3312 		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3313 			       lmac_max_frs) / 16;
3314 		/* Enable credits and set credit pkt count to max allowed */
3315 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3316 		slink = cgx * hw->lmac_per_cgx;
3317 		for (link = slink; link < (slink + lmac_cnt); link++) {
3318 			rvu_write64(rvu, blkaddr,
3319 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3320 				    tx_credits);
3321 		}
3322 	}
3323 
3324 	/* Set Tx credits for LBK link */
3325 	slink = hw->cgx_links;
3326 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3327 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3328 		/* Enable credits and set credit pkt count to max allowed */
3329 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3330 		rvu_write64(rvu, blkaddr,
3331 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3332 	}
3333 }
3334 
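/* Run X2P bus calibration and verify that every available CGX port and the
 * LBK block responded; failures are reported and the 'calibrate_x2p' bit is
 * always cleared afterwards.
 */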
3335 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3336 {
3337 	int idx, err;
3338 	u64 status;
3339 
3340 	/* Start X2P bus calibration */
3341 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3342 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3343 	/* Wait for calibration to complete */
3344 	err = rvu_poll_reg(rvu, blkaddr,
3345 			   NIX_AF_STATUS, BIT_ULL(10), false);
3346 	if (err) {
3347 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3348 		return err;
3349 	}
3350 
3351 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3352 	/* Check if CGX devices are ready */
3353 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3354 		/* Skip when cgx port is not available */
3355 		if (!rvu_cgx_pdata(idx, rvu) ||
3356 		    (status & (BIT_ULL(16 + idx))))
3357 			continue;
3358 		dev_err(rvu->dev,
3359 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3360 		err = -EBUSY;
3361 	}
3362 
3363 	/* Check if LBK is ready */
3364 	if (!(status & BIT_ULL(19))) {
3365 		dev_err(rvu->dev,
3366 			"LBK didn't respond to NIX X2P calibration\n");
3367 		err = -EBUSY;
3368 	}
3369 
3370 	/* Clear 'calibrate_x2p' bit */
3371 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3372 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3373 	if (err || (status & 0x3FFULL))
3374 		dev_err(rvu->dev,
3375 			"NIX X2P calibration failed, status 0x%llx\n", status);
3376 	if (err)
3377 		return err;
3378 	return 0;
3379 }
3380 
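/* One-time NIX admin queue setup: select AQ endianness, set the NDC caching
 * policy and allocate instruction/result queue memory large enough for a
 * result followed by a context and a write mask.
 */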
3381 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3382 {
3383 	u64 cfg;
3384 	int err;
3385 
3386 	/* Set admin queue endianness */
3387 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3388 #ifdef __BIG_ENDIAN
3389 	cfg |= BIT_ULL(8);
3390 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3391 #else
3392 	cfg &= ~BIT_ULL(8);
3393 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3394 #endif
3395 
3396 	/* Do not bypass NDC cache */
3397 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3398 	cfg &= ~0x3FFEULL;
3399 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3400 	/* Disable caching of SQB aka SQEs */
3401 	cfg |= 0x04ULL;
3402 #endif
3403 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3404 
3405 	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3407 	 * operation type. Alloc sufficient result memory for all operations.
3408 	 */
3409 	err = rvu_aq_alloc(rvu, &block->aq,
3410 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3411 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3412 	if (err)
3413 		return err;
3414 
3415 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3416 	rvu_write64(rvu, block->addr,
3417 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3418 	return 0;
3419 }
3420 
3421 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3422 {
3423 	const struct npc_lt_def_cfg *ltdefs;
3424 	struct rvu_hwinfo *hw = rvu->hw;
3425 	int blkaddr = nix_hw->blkaddr;
3426 	struct rvu_block *block;
3427 	int err;
3428 	u64 cfg;
3429 
3430 	block = &hw->block[blkaddr];
3431 
3432 	if (is_rvu_96xx_B0(rvu)) {
		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3434 		 * internal state when conditional clocks are turned off.
3435 		 * Hence enable them.
3436 		 */
3437 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3438 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3439 
3440 		/* Set chan/link to backpressure TL3 instead of TL2 */
3441 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3442 
		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
		 * This sticky mode is known to cause SQ stalls when multiple
		 * SQs mapped to the same SMQ transmit pkts at the same time.
3446 		 */
3447 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3448 		cfg &= ~BIT_ULL(15);
3449 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3450 	}
3451 
3452 	ltdefs = rvu->kpu.lt_def;
3453 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3454 	err = nix_calibrate_x2p(rvu, blkaddr);
3455 	if (err)
3456 		return err;
3457 
3458 	/* Initialize admin queue */
3459 	err = nix_aq_init(rvu, block);
3460 	if (err)
3461 		return err;
3462 
3463 	/* Restore CINT timer delay to HW reset values */
3464 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3465 
3466 	if (is_block_implemented(hw, blkaddr)) {
3467 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3468 		if (err)
3469 			return err;
3470 
3471 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3472 		if (err)
3473 			return err;
3474 
3475 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3476 		if (err)
3477 			return err;
3478 
3479 		err = nix_setup_txvlan(rvu, nix_hw);
3480 		if (err)
3481 			return err;
3482 
3483 		/* Configure segmentation offload formats */
3484 		nix_setup_lso(rvu, nix_hw, blkaddr);
3485 
3486 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
		 * This helps the HW protocol checker to identify headers
		 * and validate their lengths and checksums.
3489 		 */
3490 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3491 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3492 			    ltdefs->rx_ol2.ltype_mask);
3493 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3494 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3495 			    ltdefs->rx_oip4.ltype_mask);
3496 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3497 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3498 			    ltdefs->rx_iip4.ltype_mask);
3499 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3500 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3501 			    ltdefs->rx_oip6.ltype_mask);
3502 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3503 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3504 			    ltdefs->rx_iip6.ltype_mask);
3505 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3506 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3507 			    ltdefs->rx_otcp.ltype_mask);
3508 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3509 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3510 			    ltdefs->rx_itcp.ltype_mask);
3511 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3512 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3513 			    ltdefs->rx_oudp.ltype_mask);
3514 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3515 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3516 			    ltdefs->rx_iudp.ltype_mask);
3517 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3518 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3519 			    ltdefs->rx_osctp.ltype_mask);
3520 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3521 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3522 			    ltdefs->rx_isctp.ltype_mask);
3523 
3524 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3525 		if (err)
3526 			return err;
3527 
3528 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3529 		nix_link_config(rvu, blkaddr);
3530 
3531 		/* Enable Channel backpressure */
3532 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3533 	}
3534 	return 0;
3535 }
3536 
int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct nix_hw *nix_hw;
	int blkaddr = 0, err;
	int i = 0;

	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
			       GFP_KERNEL);
	if (!hw->nix)
		return -ENOMEM;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		nix_hw = &hw->nix[i];
		nix_hw->rvu = rvu;
		nix_hw->blkaddr = blkaddr;
		err = rvu_nix_block_init(rvu, nix_hw);
		if (err)
			return err;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
		i++;
	}

	return 0;
}

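/* Free the AQ and, if the block is implemented, the Tx scheduler bitmaps,
 * Tx VLAN resources and multicast context memory owned by this NIX block.
 */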
static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
				  struct rvu_block *block)
{
	struct nix_txsch *txsch;
	struct nix_mcast *mcast;
	struct nix_txvlan *vlan;
	struct nix_hw *nix_hw;
	int lvl;

	rvu_aq_free(rvu, block->aq);

	if (is_block_implemented(rvu->hw, blkaddr)) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}

		vlan = &nix_hw->txvlan;
		kfree(vlan->rsrc.bmap);
		mutex_destroy(&vlan->rsrc_lock);
		devm_kfree(rvu->dev, vlan->entry2pfvf_map);

		mcast = &nix_hw->mcast;
		qmem_free(rvu->dev, mcast->mce_ctx);
		qmem_free(rvu->dev, mcast->mcast_buf);
		mutex_destroy(&mcast->mce_lock);
	}
}

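/* Walk all NIX blocks and release the memory allocated for each of them. */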
void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr = 0;

	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &hw->block[blkaddr];
		rvu_nix_block_freemem(rvu, blkaddr, block);
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}
}

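/* Mbox handler: the PF/VF's NIX LF is ready to receive, so re-enable its
 * default NPC MCAM entries and installed flows, then start CGX Rx/Tx IO.
 */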
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
				     struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_enable_flows(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}

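/* Mbox handler: quiesce the PF/VF's NIX LF by disabling its default NPC
 * MCAM entries and installed flows, then stop CGX Rx/Tx IO.
 */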
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int nixlf, err;

	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
	if (err)
		return err;

	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);

	npc_mcam_disable_flows(rvu, pcifunc);

	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}

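/* Release everything a NIX LF holds when its PF/VF is detached or reset:
 * NPC MCAM entries, Tx scheduler queues and the SQ/RQ/CQ hardware contexts.
 */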
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	struct hwctx_disable_req ctx_req;
	int err;

	ctx_req.hdr.pcifunc = pcifunc;

	/* Clean up NPC MCAM entries and free any Tx scheduler queues in use */
	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
	nix_interface_deinit(rvu, pcifunc, nixlf);
	nix_rx_sync(rvu, blkaddr);
	nix_txschq_free(rvu, pcifunc);

	rvu_cgx_start_stop_io(rvu, pcifunc, false);

	if (pfvf->sq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "SQ ctx disable failed\n");
	}

	if (pfvf->rq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "RQ ctx disable failed\n");
	}

	if (pfvf->cq_ctx) {
		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
		err = nix_lf_hwctx_disable(rvu, &ctx_req);
		if (err)
			dev_err(rvu->dev, "CQ ctx disable failed\n");
	}

	nix_ctx_free(rvu, pfvf);
}

#define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)

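/* Set or clear the PTP timestamping enable bit (bit 32 of
 * NIX_AF_LFX_TX_CFG) for the requesting PF/VF's NIX LF. This is a
 * no-op when the underlying MAC does not advertise PTP support.
 */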
static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, pf;
	int nixlf;
	u64 cfg;

	pf = rvu_get_pf(pcifunc);
	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));

	if (enable)
		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
	else
		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);

	return 0;
}

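/* Mbox handlers through which a PF/VF requests PTP Tx timestamping to be
 * enabled or disabled on its NIX LF. Requester-side sketch, illustrative
 * only and assuming the PF driver's generated mbox allocator helpers:
 *
 *	msg = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pf->mbox);
 *	if (msg)
 *		err = otx2_sync_mbox_msg(&pf->mbox);
 */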
int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
}

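/* Mbox handler: find or allocate an LSO format for the requester. The
 * request's fields[] are compared, under field_mask, against the formats
 * already programmed in NIX_AF_LSO_FORMATX_FIELDX(); a matching format is
 * reused, otherwise the next free format index is programmed and returned
 * in rsp->lso_format_idx.
 *
 * Illustrative requester-side sketch only (not part of this driver),
 * assuming the PF driver's generated mbox allocator for NIX_LSO_FORMAT_CFG:
 *
 *	struct nix_lso_format_cfg *req;
 *
 *	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pf->mbox);
 *	if (!req)
 *		return -ENOMEM;
 *	req->field_mask = GENMASK_ULL(18, 0);
 *	req->fields[0] = ...;	// per-field LSO alteration descriptors
 *	err = otx2_sync_mbox_msg(&pf->mbox);
 */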
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
					struct nix_lso_format_cfg *req,
					struct nix_lso_format_cfg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	struct nix_hw *nix_hw;
	struct rvu_pfvf *pfvf;
	int blkaddr, idx, f;
	u64 reg;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return -EINVAL;

	/* Find existing matching LSO format, if any */
	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
			reg = rvu_read64(rvu, blkaddr,
					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
			if (req->fields[f] != (reg & req->field_mask))
				break;
		}

		if (f == NIX_LSO_FIELD_MAX)
			break;
	}

	if (idx < nix_hw->lso.in_use) {
		/* Match found */
		rsp->lso_format_idx = idx;
		return 0;
	}

	if (nix_hw->lso.in_use == nix_hw->lso.total)
		return NIX_AF_ERR_LSO_CFG_FAIL;

	rsp->lso_format_idx = nix_hw->lso.in_use++;

	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
			    req->fields[f]);

	return 0;
}

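/* If the request came from a VF, restore its MAC address to the stored
 * default (default_mac).
 */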
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);

	/* Overwrite the VF MAC address with default_mac */
	if (from_vf)
		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}