1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 			    int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
25 			       int type, bool add);
26 
27 enum mc_tbl_sz {
28 	MC_TBL_SZ_256,
29 	MC_TBL_SZ_512,
30 	MC_TBL_SZ_1K,
31 	MC_TBL_SZ_2K,
32 	MC_TBL_SZ_4K,
33 	MC_TBL_SZ_8K,
34 	MC_TBL_SZ_16K,
35 	MC_TBL_SZ_32K,
36 	MC_TBL_SZ_64K,
37 };
38 
39 enum mc_buf_cnt {
40 	MC_BUF_CNT_8,
41 	MC_BUF_CNT_16,
42 	MC_BUF_CNT_32,
43 	MC_BUF_CNT_64,
44 	MC_BUF_CNT_128,
45 	MC_BUF_CNT_256,
46 	MC_BUF_CNT_512,
47 	MC_BUF_CNT_1024,
48 	MC_BUF_CNT_2048,
49 };
50 
51 enum nix_makr_fmt_indexes {
52 	NIX_MARK_CFG_IP_DSCP_RED,
53 	NIX_MARK_CFG_IP_DSCP_YELLOW,
54 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
55 	NIX_MARK_CFG_IP_ECN_RED,
56 	NIX_MARK_CFG_IP_ECN_YELLOW,
57 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
58 	NIX_MARK_CFG_VLAN_DEI_RED,
59 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
60 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
61 	NIX_MARK_CFG_MAX,
62 };
63 
64 /* For now only the MC resources needed for broadcast
65  * pkt replication are considered, i.e. 256 HWVFs + 12 PFs.
66  */
67 #define MC_TBL_SIZE	MC_TBL_SZ_512
68 #define MC_BUF_CNT	MC_BUF_CNT_128
69 
70 struct mce {
71 	struct hlist_node	node;
72 	u16			pcifunc;
73 };
74 
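/* rvu_get_next_nix_blkaddr() - walk the NIX block addresses discovered at
 * probe time. Passing blkaddr == 0 returns the first NIX block address,
 * passing a valid NIX block address returns the next one, and 0 is
 * returned once the list is exhausted. Typical iteration, as used by
 * rvu_get_nixlf_count() below:
 *
 *	blkaddr = 0;
 *	while ((blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr)) != 0)
 *		... operate on rvu->hw->block[blkaddr] ...
 */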
75 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
76 {
77 	int i = 0;
78 
79 	/* If blkaddr is 0, return the first NIX block address */
80 	if (blkaddr == 0)
81 		return rvu->nix_blkaddr[blkaddr];
82 
83 	while (i + 1 < MAX_NIX_BLKS) {
84 		if (rvu->nix_blkaddr[i] == blkaddr)
85 			return rvu->nix_blkaddr[i + 1];
86 		i++;
87 	}
88 
89 	return 0;
90 }
91 
92 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
93 {
94 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
95 	int blkaddr;
96 
97 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
98 	if (!pfvf->nixlf || blkaddr < 0)
99 		return false;
100 	return true;
101 }
102 
103 int rvu_get_nixlf_count(struct rvu *rvu)
104 {
105 	int blkaddr = 0, max = 0;
106 	struct rvu_block *block;
107 
108 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
109 	while (blkaddr) {
110 		block = &rvu->hw->block[blkaddr];
111 		max += block->lf.max;
112 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
113 	}
114 	return max;
115 }
116 
117 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
118 {
119 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
120 	struct rvu_hwinfo *hw = rvu->hw;
121 	int blkaddr;
122 
123 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
124 	if (!pfvf->nixlf || blkaddr < 0)
125 		return NIX_AF_ERR_AF_LF_INVALID;
126 
127 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
128 	if (*nixlf < 0)
129 		return NIX_AF_ERR_AF_LF_INVALID;
130 
131 	if (nix_blkaddr)
132 		*nix_blkaddr = blkaddr;
133 
134 	return 0;
135 }
136 
137 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
138 			struct nix_hw **nix_hw, int *blkaddr)
139 {
140 	struct rvu_pfvf *pfvf;
141 
142 	pfvf = rvu_get_pfvf(rvu, pcifunc);
143 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
144 	if (!pfvf->nixlf || *blkaddr < 0)
145 		return NIX_AF_ERR_AF_LF_INVALID;
146 
147 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
148 	if (!*nix_hw)
149 		return NIX_AF_ERR_INVALID_NIXBLK;
150 	return 0;
151 }
152 
153 static void nix_mce_list_init(struct nix_mce_list *list, int max)
154 {
155 	INIT_HLIST_HEAD(&list->head);
156 	list->count = 0;
157 	list->max = max;
158 }
159 
160 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
161 {
162 	int idx;
163 
164 	if (!mcast)
165 		return 0;
166 
167 	idx = mcast->next_free_mce;
168 	mcast->next_free_mce += count;
169 	return idx;
170 }
171 
172 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
173 {
174 	int nix_blkaddr = 0, i = 0;
175 	struct rvu *rvu = hw->rvu;
176 
177 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
178 	while (nix_blkaddr) {
179 		if (blkaddr == nix_blkaddr && hw->nix)
180 			return &hw->nix[i];
181 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
182 		i++;
183 	}
184 	return NULL;
185 }
186 
187 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
188 {
189 	int err;
190 
191 	/* Sync all in-flight RX packets to LLC/DRAM */
192 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
193 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
194 	if (err)
195 		dev_err(rvu->dev, "NIX RX software sync failed\n");
196 }
197 
198 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
199 			    int lvl, u16 pcifunc, u16 schq)
200 {
201 	struct rvu_hwinfo *hw = rvu->hw;
202 	struct nix_txsch *txsch;
203 	struct nix_hw *nix_hw;
204 	u16 map_func;
205 
206 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
207 	if (!nix_hw)
208 		return false;
209 
210 	txsch = &nix_hw->txsch[lvl];
211 	/* Check out of bounds */
212 	if (schq >= txsch->schq.max)
213 		return false;
214 
215 	mutex_lock(&rvu->rsrc_lock);
216 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
217 	mutex_unlock(&rvu->rsrc_lock);
218 
219 	/* TLs aggregating traffic are shared across a PF and its VFs */
220 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
221 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
222 			return false;
223 		else
224 			return true;
225 	}
226 
227 	if (map_func != pcifunc)
228 		return false;
229 
230 	return true;
231 }
232 
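/* One-time interface setup done when a NIX LF is attached. For CGX mapped
 * PFs the Rx/Tx channel base is derived from the cgx/lmac pair and the
 * parser pkind is programmed; AF VFs instead get loopback (LBK) channels,
 * assigned in pairs so that consecutive VFs can talk to each other. In
 * both cases unicast and broadcast MCAM entries are installed and the
 * PF_FUNC is added to the broadcast replication list.
 */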
233 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
234 {
235 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
236 	struct mac_ops *mac_ops;
237 	int pkind, pf, vf, lbkid;
238 	u8 cgx_id, lmac_id;
239 	int err;
240 
241 	pf = rvu_get_pf(pcifunc);
242 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
243 		return 0;
244 
245 	switch (type) {
246 	case NIX_INTF_TYPE_CGX:
247 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
248 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
249 
250 		pkind = rvu_npc_get_pkind(rvu, pf);
251 		if (pkind < 0) {
252 			dev_err(rvu->dev,
253 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
254 			return -EINVAL;
255 		}
256 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
257 		pfvf->tx_chan_base = pfvf->rx_chan_base;
258 		pfvf->rx_chan_cnt = 1;
259 		pfvf->tx_chan_cnt = 1;
260 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
261 		rvu_npc_set_pkind(rvu, pkind, pfvf);
262 
263 		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
264 		/* By default we enable pause frames */
265 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
266 			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
267 								    rvu),
268 						      lmac_id, true, true);
269 		break;
270 	case NIX_INTF_TYPE_LBK:
271 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
272 
273 		/* If the NIX1 block is present on the silicon then NIXes are
274 		 * assigned alternately to LBK interfaces. NIX0 should
275 		 * send packets on LBK link 1 channels and NIX1 should send
276 		 * on LBK link 0 channels for communication between
277 		 * NIX0 and NIX1.
278 		 */
279 		lbkid = 0;
280 		if (rvu->hw->lbk_links > 1)
281 			lbkid = vf & 0x1 ? 0 : 1;
282 
283 		/* Note that AF's VFs work in pairs and talk over consecutive
284 		 * loopback channels. Therefore, if an odd number of AF VFs is
285 		 * enabled, the last VF is left without a pair.
286 		 */
287 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
288 		pfvf->tx_chan_base = vf & 0x1 ?
289 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
290 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
291 		pfvf->rx_chan_cnt = 1;
292 		pfvf->tx_chan_cnt = 1;
293 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
294 					      pfvf->rx_chan_base,
295 					      pfvf->rx_chan_cnt);
296 		break;
297 	}
298 
299 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
300 	 * RVU PF/VF's MAC address.
301 	 */
302 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
303 				    pfvf->rx_chan_base, pfvf->mac_addr);
304 
305 	/* Add this PF_FUNC to bcast pkt replication list */
306 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
307 	if (err) {
308 		dev_err(rvu->dev,
309 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
310 			pcifunc);
311 		return err;
312 	}
313 	/* Install MCAM rule matching Ethernet broadcast mac address */
314 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
315 					  nixlf, pfvf->rx_chan_base);
316 
317 	pfvf->maxlen = NIC_HW_MIN_FRS;
318 	pfvf->minlen = NIC_HW_MIN_FRS;
319 
320 	return 0;
321 }
322 
323 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
324 {
325 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
326 	int err;
327 
328 	pfvf->maxlen = 0;
329 	pfvf->minlen = 0;
330 
331 	/* Remove this PF_FUNC from bcast pkt replication list */
332 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
333 	if (err) {
334 		dev_err(rvu->dev,
335 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
336 			pcifunc);
337 	}
338 
339 	/* Free and disable any MCAM entries used by this NIX LF */
340 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
341 }
342 
343 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
344 				    struct nix_bp_cfg_req *req,
345 				    struct msg_rsp *rsp)
346 {
347 	u16 pcifunc = req->hdr.pcifunc;
348 	struct rvu_pfvf *pfvf;
349 	int blkaddr, pf, type;
350 	u16 chan_base, chan;
351 	u64 cfg;
352 
353 	pf = rvu_get_pf(pcifunc);
354 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
355 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
356 		return 0;
357 
358 	pfvf = rvu_get_pfvf(rvu, pcifunc);
359 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
360 
361 	chan_base = pfvf->rx_chan_base + req->chan_base;
362 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
363 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
364 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
365 			    cfg & ~BIT_ULL(16));
366 	}
367 	return 0;
368 }
369 
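/* Compute the backpressure ID (BPID) for one channel of the requester.
 * For CGX interfaces the BPID is a linear function of the cgx/lmac/channel
 * ids, e.g. assuming 16 channels per LMAC (lmac_chan_cnt read from
 * NIX_AF_CONST):
 *
 *	bpid = cgx_id * lmac_per_cgx * 16 + lmac_id * 16 + channel
 *
 * LBK channels take BPIDs immediately after the CGX range. A negative
 * value is returned if the request falls outside the supported ranges.
 */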
370 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
371 			    int type, int chan_id)
372 {
373 	int bpid, blkaddr, lmac_chan_cnt;
374 	struct rvu_hwinfo *hw = rvu->hw;
375 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
376 	struct rvu_pfvf *pfvf;
377 	u8 cgx_id, lmac_id;
378 	u64 cfg;
379 
380 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
381 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
382 	lmac_chan_cnt = cfg & 0xFF;
383 
384 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
385 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
386 
387 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
388 
389 	/* Backpressure ID range division:
390 	 * CGX channels are mapped to BPIDs (0 - 191)
391 	 * LBK channels are mapped to BPIDs (192 - 255)
392 	 * SDP channels are mapped to BPIDs (256 - 511)
393 	 *
394 	 * LMAC channels and BPIDs are mapped as follows:
395 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
396 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
397 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
398 	 */
399 	switch (type) {
400 	case NIX_INTF_TYPE_CGX:
401 		if ((req->chan_base + req->chan_cnt) > 15)
402 			return -EINVAL;
403 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
404 		/* Assign bpid based on cgx, lmac and chan id */
405 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
406 			(lmac_id * lmac_chan_cnt) + req->chan_base;
407 
408 		if (req->bpid_per_chan)
409 			bpid += chan_id;
410 		if (bpid > cgx_bpid_cnt)
411 			return -EINVAL;
412 		break;
413 
414 	case NIX_INTF_TYPE_LBK:
415 		if ((req->chan_base + req->chan_cnt) > 63)
416 			return -EINVAL;
417 		bpid = cgx_bpid_cnt + req->chan_base;
418 		if (req->bpid_per_chan)
419 			bpid += chan_id;
420 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
421 			return -EINVAL;
422 		break;
423 	default:
424 		return -EINVAL;
425 	}
426 	return bpid;
427 }
428 
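/* Mailbox handler to enable backpressure on the requested Rx channels.
 * For each channel, BIT(16) is set in NIX_AF_RX_CHANX_CFG along with the
 * assigned BPID. The response packs the channel and its BPID together:
 *
 *	rsp->chan_bpid[i] = ((chan_base + i) & 0x7F) << 10 | (bpid & 0x3FF);
 */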
429 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
430 				   struct nix_bp_cfg_req *req,
431 				   struct nix_bp_cfg_rsp *rsp)
432 {
433 	int blkaddr, pf, type, chan_id = 0;
434 	u16 pcifunc = req->hdr.pcifunc;
435 	struct rvu_pfvf *pfvf;
436 	u16 chan_base, chan;
437 	s16 bpid, bpid_base;
438 	u64 cfg;
439 
440 	pf = rvu_get_pf(pcifunc);
441 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
442 
443 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
444 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
445 		return 0;
446 
447 	pfvf = rvu_get_pfvf(rvu, pcifunc);
448 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
449 
450 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
451 	chan_base = pfvf->rx_chan_base + req->chan_base;
452 	bpid = bpid_base;
453 
454 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
455 		if (bpid < 0) {
456 			dev_warn(rvu->dev, "Failed to enable backpressure\n");
457 			return -EINVAL;
458 		}
459 
460 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
461 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
462 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
463 		chan_id++;
464 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
465 	}
466 
467 	for (chan = 0; chan < req->chan_cnt; chan++) {
468 		/* Map each channel to the BPID assigned to it */
469 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
470 					(bpid_base & 0x3FF);
471 		if (req->bpid_per_chan)
472 			bpid_base++;
473 	}
474 	rsp->chan_cnt = req->chan_cnt;
475 
476 	return 0;
477 }
478 
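/* Helpers to program the NIX LSO format fields used for TSO. Each LSO
 * format consists of up to eight nix_lso_format entries; an entry points
 * at a byte range in the outer L3/L4 header and selects an algorithm
 * (e.g. NIX_LSOALG_ADD_PAYLEN, NIX_LSOALG_ADD_SEGNUM) that HW applies to
 * that range for every segment it emits. The two helpers below cover the
 * IP length/ID fields and the TCP sequence number/flags respectively.
 */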
479 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
480 				 u64 format, bool v4, u64 *fidx)
481 {
482 	struct nix_lso_format field = {0};
483 
484 	/* IP's Length field */
485 	field.layer = NIX_TXLAYER_OL3;
486 	/* In IPv4 the length field is at byte offset 2, in IPv6 at offset 4 */
487 	field.offset = v4 ? 2 : 4;
488 	field.sizem1 = 1; /* i.e. 2 bytes */
489 	field.alg = NIX_LSOALG_ADD_PAYLEN;
490 	rvu_write64(rvu, blkaddr,
491 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
492 		    *(u64 *)&field);
493 
494 	/* No ID field in IPv6 header */
495 	if (!v4)
496 		return;
497 
498 	/* IP's ID field */
499 	field.layer = NIX_TXLAYER_OL3;
500 	field.offset = 4;
501 	field.sizem1 = 1; /* i.e. 2 bytes */
502 	field.alg = NIX_LSOALG_ADD_SEGNUM;
503 	rvu_write64(rvu, blkaddr,
504 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
505 		    *(u64 *)&field);
506 }
507 
508 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
509 				 u64 format, u64 *fidx)
510 {
511 	struct nix_lso_format field = {0};
512 
513 	/* TCP's sequence number field */
514 	field.layer = NIX_TXLAYER_OL4;
515 	field.offset = 4;
516 	field.sizem1 = 3; /* i.e. 4 bytes */
517 	field.alg = NIX_LSOALG_ADD_OFFSET;
518 	rvu_write64(rvu, blkaddr,
519 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
520 		    *(u64 *)&field);
521 
522 	/* TCP's flags field */
523 	field.layer = NIX_TXLAYER_OL4;
524 	field.offset = 12;
525 	field.sizem1 = 1; /* 2 bytes */
526 	field.alg = NIX_LSOALG_TCP_FLAGS;
527 	rvu_write64(rvu, blkaddr,
528 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
529 		    *(u64 *)&field);
530 }
531 
532 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
533 {
534 	u64 cfg, idx, fidx = 0;
535 
536 	/* Get max HW supported format indices */
537 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
538 	nix_hw->lso.total = cfg;
539 
540 	/* Enable LSO */
541 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
542 	/* For TSO, set first and middle segment flags to
543 	 * mask out PSH, RST & FIN flags in TCP packet
544 	 */
545 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
546 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
547 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
548 
549 	/* Setup default static LSO formats
550 	 *
551 	 * Configure format fields for TCPv4 segmentation offload
552 	 */
553 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
554 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
555 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
556 
557 	/* Set rest of the fields to NOP */
558 	for (; fidx < 8; fidx++) {
559 		rvu_write64(rvu, blkaddr,
560 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
561 	}
562 	nix_hw->lso.in_use++;
563 
564 	/* Configure format fields for TCPv6 segmentation offload */
565 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
566 	fidx = 0;
567 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
568 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
569 
570 	/* Set rest of the fields to NOP */
571 	for (; fidx < 8; fidx++) {
572 		rvu_write64(rvu, blkaddr,
573 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
574 	}
575 	nix_hw->lso.in_use++;
576 }
577 
578 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
579 {
580 	kfree(pfvf->rq_bmap);
581 	kfree(pfvf->sq_bmap);
582 	kfree(pfvf->cq_bmap);
583 	if (pfvf->rq_ctx)
584 		qmem_free(rvu->dev, pfvf->rq_ctx);
585 	if (pfvf->sq_ctx)
586 		qmem_free(rvu->dev, pfvf->sq_ctx);
587 	if (pfvf->cq_ctx)
588 		qmem_free(rvu->dev, pfvf->cq_ctx);
589 	if (pfvf->rss_ctx)
590 		qmem_free(rvu->dev, pfvf->rss_ctx);
591 	if (pfvf->nix_qints_ctx)
592 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
593 	if (pfvf->cq_ints_ctx)
594 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
595 
596 	pfvf->rq_bmap = NULL;
597 	pfvf->cq_bmap = NULL;
598 	pfvf->sq_bmap = NULL;
599 	pfvf->rq_ctx = NULL;
600 	pfvf->sq_ctx = NULL;
601 	pfvf->cq_ctx = NULL;
602 	pfvf->rss_ctx = NULL;
603 	pfvf->nix_qints_ctx = NULL;
604 	pfvf->cq_ints_ctx = NULL;
605 }
606 
607 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
608 			      struct rvu_pfvf *pfvf, int nixlf,
609 			      int rss_sz, int rss_grps, int hwctx_size,
610 			      u64 way_mask)
611 {
612 	int err, grp, num_indices;
613 
614 	/* RSS is not requested for this NIXLF */
615 	if (!rss_sz)
616 		return 0;
617 	num_indices = rss_sz * rss_grps;
618 
619 	/* Alloc NIX RSS HW context memory and config the base */
620 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
621 	if (err)
622 		return err;
623 
624 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
625 		    (u64)pfvf->rss_ctx->iova);
626 
627 	/* Config full RSS table size, enable RSS and caching */
628 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
629 		    BIT_ULL(36) | BIT_ULL(4) |
630 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
631 		    way_mask << 20);
632 	/* Config RSS group offset and sizes */
633 	for (grp = 0; grp < rss_grps; grp++)
634 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
635 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
636 	return 0;
637 }
638 
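/* Submit a single instruction to the NIX admin queue (AQ) and busy-wait
 * for completion: the instruction is copied into the slot at the current
 * head pointer, the doorbell is rung and the completion code is polled
 * (for roughly a millisecond) in the result memory.
 */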
639 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
640 			       struct nix_aq_inst_s *inst)
641 {
642 	struct admin_queue *aq = block->aq;
643 	struct nix_aq_res_s *result;
644 	int timeout = 1000;
645 	u64 reg, head;
646 
647 	result = (struct nix_aq_res_s *)aq->res->base;
648 
649 	/* Get the current head pointer, where this instruction is appended */
650 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
651 	head = (reg >> 4) & AQ_PTR_MASK;
652 
653 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
654 	       (void *)inst, aq->inst->entry_sz);
655 	memset(result, 0, sizeof(*result));
656 	/* sync into memory */
657 	wmb();
658 
659 	/* Ring the doorbell and wait for result */
660 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
661 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
662 		cpu_relax();
663 		udelay(1);
664 		timeout--;
665 		if (!timeout)
666 			return -EBUSY;
667 	}
668 
669 	if (result->compcode != NIX_AQ_COMP_GOOD)
670 		/* TODO: Replace this with some error code */
671 		return -EBUSY;
672 
673 	return 0;
674 }
675 
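/* Core AQ enqueue routine shared by all context types (RQ/SQ/CQ/RSS/MCE).
 * A single result buffer is reused for every instruction and is laid out
 * as:
 *
 *	aq->res->base + 0	- result (struct nix_aq_res_s)
 *	aq->res->base + 128	- context for INIT/WRITE, or context read back
 *	aq->res->base + 256	- bitmask selecting fields for WRITE ops
 *
 * Hence access is serialized with aq->lock and only one instruction is
 * outstanding at a time.
 */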
676 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
677 				   struct nix_aq_enq_req *req,
678 				   struct nix_aq_enq_rsp *rsp)
679 {
680 	struct rvu_hwinfo *hw = rvu->hw;
681 	u16 pcifunc = req->hdr.pcifunc;
682 	int nixlf, blkaddr, rc = 0;
683 	struct nix_aq_inst_s inst;
684 	struct rvu_block *block;
685 	struct admin_queue *aq;
686 	struct rvu_pfvf *pfvf;
687 	void *ctx, *mask;
688 	bool ena;
689 	u64 cfg;
690 
691 	blkaddr = nix_hw->blkaddr;
692 	block = &hw->block[blkaddr];
693 	aq = block->aq;
694 	if (!aq) {
695 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
696 		return NIX_AF_ERR_AQ_ENQUEUE;
697 	}
698 
699 	pfvf = rvu_get_pfvf(rvu, pcifunc);
700 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
701 
702 	/* Skip NIXLF check for broadcast MCE entry init */
703 	if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
704 		if (!pfvf->nixlf || nixlf < 0)
705 			return NIX_AF_ERR_AF_LF_INVALID;
706 	}
707 
708 	switch (req->ctype) {
709 	case NIX_AQ_CTYPE_RQ:
710 		/* Check if index exceeds max number of queues */
711 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
712 			rc = NIX_AF_ERR_AQ_ENQUEUE;
713 		break;
714 	case NIX_AQ_CTYPE_SQ:
715 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
716 			rc = NIX_AF_ERR_AQ_ENQUEUE;
717 		break;
718 	case NIX_AQ_CTYPE_CQ:
719 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
720 			rc = NIX_AF_ERR_AQ_ENQUEUE;
721 		break;
722 	case NIX_AQ_CTYPE_RSS:
723 		/* Check if RSS is enabled and qidx is within range */
724 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
725 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
726 		    (req->qidx >= (256UL << (cfg & 0xF))))
727 			rc = NIX_AF_ERR_AQ_ENQUEUE;
728 		break;
729 	case NIX_AQ_CTYPE_MCE:
730 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
731 
732 		/* Check if index exceeds MCE list length */
733 		if (!nix_hw->mcast.mce_ctx ||
734 		    (req->qidx >= (256UL << (cfg & 0xF))))
735 			rc = NIX_AF_ERR_AQ_ENQUEUE;
736 
737 		/* Adding multicast lists for requests from PFs/VFs is not
738 		 * yet supported, so reject such requests.
739 		 */
740 		if (rsp)
741 			rc = NIX_AF_ERR_AQ_ENQUEUE;
742 		break;
743 	default:
744 		rc = NIX_AF_ERR_AQ_ENQUEUE;
745 	}
746 
747 	if (rc)
748 		return rc;
749 
750 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
751 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
752 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
753 	     (req->op == NIX_AQ_INSTOP_WRITE &&
754 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
755 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
756 				     pcifunc, req->sq.smq))
757 			return NIX_AF_ERR_AQ_ENQUEUE;
758 	}
759 
760 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
761 	inst.lf = nixlf;
762 	inst.cindex = req->qidx;
763 	inst.ctype = req->ctype;
764 	inst.op = req->op;
765 	/* Enqueuing multiple instructions at once is not supported,
766 	 * so always use the first entry in the result memory.
767 	 */
768 	inst.res_addr = (u64)aq->res->iova;
769 
770 	/* Hardware uses the same aq->res->base for posting the result of
771 	 * the previous instruction, hence serialize access with aq->lock.
772 	 */
773 	spin_lock(&aq->lock);
774 
775 	/* Clean result + context memory */
776 	memset(aq->res->base, 0, aq->res->entry_sz);
777 	/* Context needs to be written at RES_ADDR + 128 */
778 	ctx = aq->res->base + 128;
779 	/* Mask needs to be written at RES_ADDR + 256 */
780 	mask = aq->res->base + 256;
781 
782 	switch (req->op) {
783 	case NIX_AQ_INSTOP_WRITE:
784 		if (req->ctype == NIX_AQ_CTYPE_RQ)
785 			memcpy(mask, &req->rq_mask,
786 			       sizeof(struct nix_rq_ctx_s));
787 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
788 			memcpy(mask, &req->sq_mask,
789 			       sizeof(struct nix_sq_ctx_s));
790 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
791 			memcpy(mask, &req->cq_mask,
792 			       sizeof(struct nix_cq_ctx_s));
793 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
794 			memcpy(mask, &req->rss_mask,
795 			       sizeof(struct nix_rsse_s));
796 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
797 			memcpy(mask, &req->mce_mask,
798 			       sizeof(struct nix_rx_mce_s));
799 		fallthrough;
800 	case NIX_AQ_INSTOP_INIT:
801 		if (req->ctype == NIX_AQ_CTYPE_RQ)
802 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
803 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
804 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
805 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
806 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
807 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
808 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
809 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
810 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
811 		break;
812 	case NIX_AQ_INSTOP_NOP:
813 	case NIX_AQ_INSTOP_READ:
814 	case NIX_AQ_INSTOP_LOCK:
815 	case NIX_AQ_INSTOP_UNLOCK:
816 		break;
817 	default:
818 		rc = NIX_AF_ERR_AQ_ENQUEUE;
819 		spin_unlock(&aq->lock);
820 		return rc;
821 	}
822 
823 	/* Submit the instruction to AQ */
824 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
825 	if (rc) {
826 		spin_unlock(&aq->lock);
827 		return rc;
828 	}
829 
830 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
831 	if (req->op == NIX_AQ_INSTOP_INIT) {
832 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
833 			__set_bit(req->qidx, pfvf->rq_bmap);
834 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
835 			__set_bit(req->qidx, pfvf->sq_bmap);
836 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
837 			__set_bit(req->qidx, pfvf->cq_bmap);
838 	}
839 
840 	if (req->op == NIX_AQ_INSTOP_WRITE) {
841 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
842 			ena = (req->rq.ena & req->rq_mask.ena) |
843 				(test_bit(req->qidx, pfvf->rq_bmap) &
844 				~req->rq_mask.ena);
845 			if (ena)
846 				__set_bit(req->qidx, pfvf->rq_bmap);
847 			else
848 				__clear_bit(req->qidx, pfvf->rq_bmap);
849 		}
850 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
851 			ena = (req->sq.ena & req->sq_mask.ena) |
852 				(test_bit(req->qidx, pfvf->sq_bmap) &
853 				~req->sq_mask.ena);
854 			if (ena)
855 				__set_bit(req->qidx, pfvf->sq_bmap);
856 			else
857 				__clear_bit(req->qidx, pfvf->sq_bmap);
858 		}
859 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
860 			ena = (req->cq.ena & req->cq_mask.ena) |
861 				(test_bit(req->qidx, pfvf->cq_bmap) &
862 				~req->cq_mask.ena);
863 			if (ena)
864 				__set_bit(req->qidx, pfvf->cq_bmap);
865 			else
866 				__clear_bit(req->qidx, pfvf->cq_bmap);
867 		}
868 	}
869 
870 	if (rsp) {
871 		/* Copy read context into mailbox */
872 		if (req->op == NIX_AQ_INSTOP_READ) {
873 			if (req->ctype == NIX_AQ_CTYPE_RQ)
874 				memcpy(&rsp->rq, ctx,
875 				       sizeof(struct nix_rq_ctx_s));
876 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
877 				memcpy(&rsp->sq, ctx,
878 				       sizeof(struct nix_sq_ctx_s));
879 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
880 				memcpy(&rsp->cq, ctx,
881 				       sizeof(struct nix_cq_ctx_s));
882 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
883 				memcpy(&rsp->rss, ctx,
884 				       sizeof(struct nix_rsse_s));
885 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
886 				memcpy(&rsp->mce, ctx,
887 				       sizeof(struct nix_rx_mce_s));
888 		}
889 	}
890 
891 	spin_unlock(&aq->lock);
892 	return 0;
893 }
894 
895 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
896 			       struct nix_aq_enq_rsp *rsp)
897 {
898 	struct nix_hw *nix_hw;
899 	int blkaddr;
900 
901 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
902 	if (blkaddr < 0)
903 		return NIX_AF_ERR_AF_LF_INVALID;
904 
905 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
906 	if (!nix_hw)
907 		return -EINVAL;
908 
909 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
910 }
911 
912 static const char *nix_get_ctx_name(int ctype)
913 {
914 	switch (ctype) {
915 	case NIX_AQ_CTYPE_CQ:
916 		return "CQ";
917 	case NIX_AQ_CTYPE_SQ:
918 		return "SQ";
919 	case NIX_AQ_CTYPE_RQ:
920 		return "RQ";
921 	case NIX_AQ_CTYPE_RSS:
922 		return "RSS";
923 	}
924 	return "";
925 }
926 
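/* Disable all enabled RQ/SQ/CQ contexts of an LF by issuing AQ WRITE
 * instructions that clear the 'ena' bit (and, for CQs, the backpressure
 * enable bit) for every queue set in the corresponding bitmap.
 */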
927 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
928 {
929 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
930 	struct nix_aq_enq_req aq_req;
931 	unsigned long *bmap;
932 	int qidx, q_cnt = 0;
933 	int err = 0, rc;
934 
935 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
936 		return NIX_AF_ERR_AQ_ENQUEUE;
937 
938 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
939 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
940 
941 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
942 		aq_req.cq.ena = 0;
943 		aq_req.cq_mask.ena = 1;
944 		aq_req.cq.bp_ena = 0;
945 		aq_req.cq_mask.bp_ena = 1;
946 		q_cnt = pfvf->cq_ctx->qsize;
947 		bmap = pfvf->cq_bmap;
948 	}
949 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
950 		aq_req.sq.ena = 0;
951 		aq_req.sq_mask.ena = 1;
952 		q_cnt = pfvf->sq_ctx->qsize;
953 		bmap = pfvf->sq_bmap;
954 	}
955 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
956 		aq_req.rq.ena = 0;
957 		aq_req.rq_mask.ena = 1;
958 		q_cnt = pfvf->rq_ctx->qsize;
959 		bmap = pfvf->rq_bmap;
960 	}
961 
962 	aq_req.ctype = req->ctype;
963 	aq_req.op = NIX_AQ_INSTOP_WRITE;
964 
965 	for (qidx = 0; qidx < q_cnt; qidx++) {
966 		if (!test_bit(qidx, bmap))
967 			continue;
968 		aq_req.qidx = qidx;
969 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
970 		if (rc) {
971 			err = rc;
972 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
973 				nix_get_ctx_name(req->ctype), qidx);
974 		}
975 	}
976 
977 	return err;
978 }
979 
980 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
981 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
982 {
983 	struct nix_aq_enq_req lock_ctx_req;
984 	int err;
985 
986 	if (req->op != NIX_AQ_INSTOP_INIT)
987 		return 0;
988 
989 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
990 	    req->ctype == NIX_AQ_CTYPE_DYNO)
991 		return 0;
992 
993 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
994 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
995 	lock_ctx_req.ctype = req->ctype;
996 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
997 	lock_ctx_req.qidx = req->qidx;
998 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
999 	if (err)
1000 		dev_err(rvu->dev,
1001 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1002 			req->hdr.pcifunc,
1003 			nix_get_ctx_name(req->ctype), req->qidx);
1004 	return err;
1005 }
1006 
1007 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1008 				struct nix_aq_enq_req *req,
1009 				struct nix_aq_enq_rsp *rsp)
1010 {
1011 	int err;
1012 
1013 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1014 	if (!err)
1015 		err = nix_lf_hwctx_lockdown(rvu, req);
1016 	return err;
1017 }
1018 #else
1019 
1020 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1021 				struct nix_aq_enq_req *req,
1022 				struct nix_aq_enq_rsp *rsp)
1023 {
1024 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1025 }
1026 #endif
1027 /* CN10K mbox handler */
1028 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1029 				      struct nix_cn10k_aq_enq_req *req,
1030 				      struct nix_cn10k_aq_enq_rsp *rsp)
1031 {
1032 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1033 				  (struct nix_aq_enq_rsp *)rsp);
1034 }
1035 
1036 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1037 				       struct hwctx_disable_req *req,
1038 				       struct msg_rsp *rsp)
1039 {
1040 	return nix_lf_hwctx_disable(rvu, req);
1041 }
1042 
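/* Mailbox handler for NIX LF allocation. It validates the requested
 * RQ/SQ/CQ counts and RSS config, resets the LF, allocates HW context
 * memory for the queues, RSS, QINTs and CINTs, programs the per-LF
 * base/config CSRs and finally sets up the interface (channels and MCAM
 * entries). On an allocation failure everything is freed and -ENOMEM is
 * returned, but the response is still filled with capabilities read from
 * the NIX constant CSRs.
 */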
1043 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1044 				  struct nix_lf_alloc_req *req,
1045 				  struct nix_lf_alloc_rsp *rsp)
1046 {
1047 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1048 	struct rvu_hwinfo *hw = rvu->hw;
1049 	u16 pcifunc = req->hdr.pcifunc;
1050 	struct rvu_block *block;
1051 	struct rvu_pfvf *pfvf;
1052 	u64 cfg, ctx_cfg;
1053 	int blkaddr;
1054 
1055 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1056 		return NIX_AF_ERR_PARAM;
1057 
1058 	if (req->way_mask)
1059 		req->way_mask &= 0xFFFF;
1060 
1061 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1062 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1063 	if (!pfvf->nixlf || blkaddr < 0)
1064 		return NIX_AF_ERR_AF_LF_INVALID;
1065 
1066 	block = &hw->block[blkaddr];
1067 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1068 	if (nixlf < 0)
1069 		return NIX_AF_ERR_AF_LF_INVALID;
1070 
1071 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1072 	if (req->npa_func) {
1073 		/* If default, use 'this' NIXLF's PFFUNC */
1074 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1075 			req->npa_func = pcifunc;
1076 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1077 			return NIX_AF_INVAL_NPA_PF_FUNC;
1078 	}
1079 
1080 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1081 	if (req->sso_func) {
1082 		/* If default, use 'this' NIXLF's PFFUNC */
1083 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1084 			req->sso_func = pcifunc;
1085 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1086 			return NIX_AF_INVAL_SSO_PF_FUNC;
1087 	}
1088 
1089 	/* If RSS is being enabled, check if requested config is valid.
1090 	 * RSS table size should be a power of two, otherwise
1091 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1092 	 * the entire table might not be usable.
1093 	 */
1094 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1095 			    !is_power_of_2(req->rss_sz)))
1096 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1097 
1098 	if (req->rss_sz &&
1099 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1100 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1101 
1102 	/* Reset this NIX LF */
1103 	err = rvu_lf_reset(rvu, block, nixlf);
1104 	if (err) {
1105 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1106 			block->addr - BLKADDR_NIX0, nixlf);
1107 		return NIX_AF_ERR_LF_RESET;
1108 	}
1109 
1110 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1111 
1112 	/* Alloc NIX RQ HW context memory and config the base */
1113 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1114 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1115 	if (err)
1116 		goto free_mem;
1117 
1118 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1119 	if (!pfvf->rq_bmap)
1120 		goto free_mem;
1121 
1122 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1123 		    (u64)pfvf->rq_ctx->iova);
1124 
1125 	/* Set caching and queue count in HW */
1126 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1127 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1128 
1129 	/* Alloc NIX SQ HW context memory and config the base */
1130 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1131 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1132 	if (err)
1133 		goto free_mem;
1134 
1135 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1136 	if (!pfvf->sq_bmap)
1137 		goto free_mem;
1138 
1139 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1140 		    (u64)pfvf->sq_ctx->iova);
1141 
1142 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1143 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1144 
1145 	/* Alloc NIX CQ HW context memory and config the base */
1146 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1147 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1148 	if (err)
1149 		goto free_mem;
1150 
1151 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1152 	if (!pfvf->cq_bmap)
1153 		goto free_mem;
1154 
1155 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1156 		    (u64)pfvf->cq_ctx->iova);
1157 
1158 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1159 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1160 
1161 	/* Initialize receive side scaling (RSS) */
1162 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1163 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1164 				 req->rss_grps, hwctx_size, req->way_mask);
1165 	if (err)
1166 		goto free_mem;
1167 
1168 	/* Alloc memory for CQINT's HW contexts */
1169 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1170 	qints = (cfg >> 24) & 0xFFF;
1171 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1172 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1173 	if (err)
1174 		goto free_mem;
1175 
1176 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1177 		    (u64)pfvf->cq_ints_ctx->iova);
1178 
1179 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1180 		    BIT_ULL(36) | req->way_mask << 20);
1181 
1182 	/* Alloc memory for QINT's HW contexts */
1183 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1184 	qints = (cfg >> 12) & 0xFFF;
1185 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1186 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1187 	if (err)
1188 		goto free_mem;
1189 
1190 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1191 		    (u64)pfvf->nix_qints_ctx->iova);
1192 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1193 		    BIT_ULL(36) | req->way_mask << 20);
1194 
1195 	/* Setup VLANX TPIDs.
1196 	 * Use VLAN1 for 802.1Q
1197 	 * and VLAN0 for 802.1AD.
1198 	 */
1199 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1200 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1201 
1202 	/* Enable LMTST for this NIX LF */
1203 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1204 
1205 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1206 	if (req->npa_func)
1207 		cfg = req->npa_func;
1208 	if (req->sso_func)
1209 		cfg |= (u64)req->sso_func << 16;
1210 
1211 	cfg |= (u64)req->xqe_sz << 33;
1212 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1213 
1214 	/* Config Rx pkt length, csum checks and apad enable/disable */
1215 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1216 
1217 	/* Configure pkind for TX parse config */
1218 	cfg = NPC_TX_DEF_PKIND;
1219 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1220 
1221 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1222 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1223 	if (err)
1224 		goto free_mem;
1225 
1226 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1227 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1228 
1229 	/* Configure RX VTAG type 7 (strip) for VF VLAN */
1230 	rvu_write64(rvu, blkaddr,
1231 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1232 		    VTAGSIZE_T4 | VTAG_STRIP);
1233 
1234 	goto exit;
1235 
1236 free_mem:
1237 	nix_ctx_free(rvu, pfvf);
1238 	rc = -ENOMEM;
1239 
1240 exit:
1241 	/* Set macaddr of this PF/VF */
1242 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1243 
1244 	/* set SQB size info */
1245 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1246 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1247 	rsp->rx_chan_base = pfvf->rx_chan_base;
1248 	rsp->tx_chan_base = pfvf->tx_chan_base;
1249 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1250 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1251 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1252 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1253 	/* Get HW supported stat count */
1254 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1255 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1256 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1257 	/* Get count of CQ IRQs and error IRQs supported per LF */
1258 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1259 	rsp->qints = ((cfg >> 12) & 0xFFF);
1260 	rsp->cints = ((cfg >> 24) & 0xFFF);
1261 	rsp->cgx_links = hw->cgx_links;
1262 	rsp->lbk_links = hw->lbk_links;
1263 	rsp->sdp_links = hw->sdp_links;
1264 
1265 	return rc;
1266 }
1267 
1268 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1269 				 struct msg_rsp *rsp)
1270 {
1271 	struct rvu_hwinfo *hw = rvu->hw;
1272 	u16 pcifunc = req->hdr.pcifunc;
1273 	struct rvu_block *block;
1274 	int blkaddr, nixlf, err;
1275 	struct rvu_pfvf *pfvf;
1276 
1277 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1278 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1279 	if (!pfvf->nixlf || blkaddr < 0)
1280 		return NIX_AF_ERR_AF_LF_INVALID;
1281 
1282 	block = &hw->block[blkaddr];
1283 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1284 	if (nixlf < 0)
1285 		return NIX_AF_ERR_AF_LF_INVALID;
1286 
1287 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1288 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1289 	else
1290 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1291 
1292 	/* Free any tx vtag def entries used by this NIX LF */
1293 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1294 		nix_free_tx_vtag_entries(rvu, pcifunc);
1295 
1296 	nix_interface_deinit(rvu, pcifunc, nixlf);
1297 
1298 	/* Reset this NIX LF */
1299 	err = rvu_lf_reset(rvu, block, nixlf);
1300 	if (err) {
1301 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1302 			block->addr - BLKADDR_NIX0, nixlf);
1303 		return NIX_AF_ERR_LF_RESET;
1304 	}
1305 
1306 	nix_ctx_free(rvu, pfvf);
1307 
1308 	return 0;
1309 }
1310 
1311 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1312 					 struct nix_mark_format_cfg  *req,
1313 					 struct nix_mark_format_cfg_rsp *rsp)
1314 {
1315 	u16 pcifunc = req->hdr.pcifunc;
1316 	struct nix_hw *nix_hw;
1317 	struct rvu_pfvf *pfvf;
1318 	int blkaddr, rc;
1319 	u32 cfg;
1320 
1321 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1322 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1323 	if (!pfvf->nixlf || blkaddr < 0)
1324 		return NIX_AF_ERR_AF_LF_INVALID;
1325 
1326 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1327 	if (!nix_hw)
1328 		return -EINVAL;
1329 
1330 	cfg = (((u32)req->offset & 0x7) << 16) |
1331 	      (((u32)req->y_mask & 0xF) << 12) |
1332 	      (((u32)req->y_val & 0xF) << 8) |
1333 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1334 
1335 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1336 	if (rc < 0) {
1337 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1338 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1339 		return NIX_AF_ERR_MARK_CFG_FAIL;
1340 	}
1341 
1342 	rsp->mark_format_idx = rc;
1343 	return 0;
1344 }
1345 
1346 /* Disable shaping of pkts by a scheduler queue
1347  * at a given scheduler level.
1348  */
1349 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1350 				 int lvl, int schq)
1351 {
1352 	u64  cir_reg = 0, pir_reg = 0;
1353 	u64  cfg;
1354 
1355 	switch (lvl) {
1356 	case NIX_TXSCH_LVL_TL1:
1357 		cir_reg = NIX_AF_TL1X_CIR(schq);
1358 		pir_reg = 0; /* PIR not available at TL1 */
1359 		break;
1360 	case NIX_TXSCH_LVL_TL2:
1361 		cir_reg = NIX_AF_TL2X_CIR(schq);
1362 		pir_reg = NIX_AF_TL2X_PIR(schq);
1363 		break;
1364 	case NIX_TXSCH_LVL_TL3:
1365 		cir_reg = NIX_AF_TL3X_CIR(schq);
1366 		pir_reg = NIX_AF_TL3X_PIR(schq);
1367 		break;
1368 	case NIX_TXSCH_LVL_TL4:
1369 		cir_reg = NIX_AF_TL4X_CIR(schq);
1370 		pir_reg = NIX_AF_TL4X_PIR(schq);
1371 		break;
1372 	}
1373 
1374 	if (!cir_reg)
1375 		return;
1376 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1377 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1378 
1379 	if (!pir_reg)
1380 		return;
1381 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1382 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1383 }
1384 
1385 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1386 				 int lvl, int schq)
1387 {
1388 	struct rvu_hwinfo *hw = rvu->hw;
1389 	int link;
1390 
1391 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1392 		return;
1393 
1394 	/* Reset TL4's SDP link config */
1395 	if (lvl == NIX_TXSCH_LVL_TL4)
1396 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1397 
1398 	if (lvl != NIX_TXSCH_LVL_TL2)
1399 		return;
1400 
1401 	/* Reset TL2's CGX or LBK link config */
1402 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1403 		rvu_write64(rvu, blkaddr,
1404 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1405 }
1406 
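/* Return the NIX transmit link used by a PF_FUNC. Links are numbered with
 * CGX links first, then LBK, then SDP:
 *
 *	CGX: link = cgx_id * lmac_per_cgx + lmac_id
 *	LBK: link = hw->cgx_links
 *	SDP: link = hw->cgx_links + hw->lbk_links
 */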
1407 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1408 {
1409 	struct rvu_hwinfo *hw = rvu->hw;
1410 	int pf = rvu_get_pf(pcifunc);
1411 	u8 cgx_id = 0, lmac_id = 0;
1412 
1413 	if (is_afvf(pcifunc)) { /* LBK links */
1414 		return hw->cgx_links;
1415 	} else if (is_pf_cgxmapped(rvu, pf)) {
1416 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1417 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1418 	}
1419 
1420 	/* SDP link */
1421 	return hw->cgx_links + hw->lbk_links;
1422 }
1423 
1424 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1425 				 int link, int *start, int *end)
1426 {
1427 	struct rvu_hwinfo *hw = rvu->hw;
1428 	int pf = rvu_get_pf(pcifunc);
1429 
1430 	if (is_afvf(pcifunc)) { /* LBK links */
1431 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1432 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1433 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1434 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1435 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1436 	} else { /* SDP link */
1437 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1438 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1439 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1440 	}
1441 }
1442 
1443 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1444 				      struct nix_hw *nix_hw,
1445 				      struct nix_txsch_alloc_req *req)
1446 {
1447 	struct rvu_hwinfo *hw = rvu->hw;
1448 	int schq, req_schq, free_cnt;
1449 	struct nix_txsch *txsch;
1450 	int link, start, end;
1451 
1452 	txsch = &nix_hw->txsch[lvl];
1453 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1454 
1455 	if (!req_schq)
1456 		return 0;
1457 
1458 	link = nix_get_tx_link(rvu, pcifunc);
1459 
1460 	/* For traffic aggregating scheduler level, one queue is enough */
1461 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1462 		if (req_schq != 1)
1463 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1464 		return 0;
1465 	}
1466 
1467 	/* Get free SCHQ count and check if request can be accommodated */
1468 	if (hw->cap.nix_fixed_txschq_mapping) {
1469 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1470 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1471 		if (end <= txsch->schq.max && schq < end &&
1472 		    !test_bit(schq, txsch->schq.bmap))
1473 			free_cnt = 1;
1474 		else
1475 			free_cnt = 0;
1476 	} else {
1477 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1478 	}
1479 
1480 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1481 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1482 
1483 	/* If contiguous queues are needed, check for availability */
1484 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1485 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1486 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1487 
1488 	return 0;
1489 }
1490 
1491 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1492 			    struct nix_txsch_alloc_rsp *rsp,
1493 			    int lvl, int start, int end)
1494 {
1495 	struct rvu_hwinfo *hw = rvu->hw;
1496 	u16 pcifunc = rsp->hdr.pcifunc;
1497 	int idx, schq;
1498 
1499 	/* For traffic aggregating levels, queue alloc is based
1500 	 * on the transmit link to which the PF_FUNC is mapped.
1501 	 */
1502 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1503 		/* A single TL queue is allocated */
1504 		if (rsp->schq_contig[lvl]) {
1505 			rsp->schq_contig[lvl] = 1;
1506 			rsp->schq_contig_list[lvl][0] = start;
1507 		}
1508 
1509 		/* Both contig and non-contig reqs don't make sense here */
1510 		if (rsp->schq_contig[lvl])
1511 			rsp->schq[lvl] = 0;
1512 
1513 		if (rsp->schq[lvl]) {
1514 			rsp->schq[lvl] = 1;
1515 			rsp->schq_list[lvl][0] = start;
1516 		}
1517 		return;
1518 	}
1519 
1520 	/* Adjust the queue request count if HW supports
1521 	 * only a single fixed queue per level for each PF_FUNC.
1522 	 */
1523 	if (hw->cap.nix_fixed_txschq_mapping) {
1524 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1525 		schq = start + idx;
1526 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1527 			rsp->schq_contig[lvl] = 0;
1528 			rsp->schq[lvl] = 0;
1529 			return;
1530 		}
1531 
1532 		if (rsp->schq_contig[lvl]) {
1533 			rsp->schq_contig[lvl] = 1;
1534 			set_bit(schq, txsch->schq.bmap);
1535 			rsp->schq_contig_list[lvl][0] = schq;
1536 			rsp->schq[lvl] = 0;
1537 		} else if (rsp->schq[lvl]) {
1538 			rsp->schq[lvl] = 1;
1539 			set_bit(schq, txsch->schq.bmap);
1540 			rsp->schq_list[lvl][0] = schq;
1541 		}
1542 		return;
1543 	}
1544 
1545 	/* Allocate the requested contiguous queue indices first */
1546 	if (rsp->schq_contig[lvl]) {
1547 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1548 						  txsch->schq.max, start,
1549 						  rsp->schq_contig[lvl], 0);
1550 		if (schq >= end)
1551 			rsp->schq_contig[lvl] = 0;
1552 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1553 			set_bit(schq, txsch->schq.bmap);
1554 			rsp->schq_contig_list[lvl][idx] = schq;
1555 			schq++;
1556 		}
1557 	}
1558 
1559 	/* Allocate non-contiguous queue indices */
1560 	if (rsp->schq[lvl]) {
1561 		idx = 0;
1562 		for (schq = start; schq < end; schq++) {
1563 			if (!test_bit(schq, txsch->schq.bmap)) {
1564 				set_bit(schq, txsch->schq.bmap);
1565 				rsp->schq_list[lvl][idx++] = schq;
1566 			}
1567 			if (idx == rsp->schq[lvl])
1568 				break;
1569 		}
1570 		/* Update how many were allocated */
1571 		rsp->schq[lvl] = idx;
1572 	}
1573 }
1574 
1575 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1576 				     struct nix_txsch_alloc_req *req,
1577 				     struct nix_txsch_alloc_rsp *rsp)
1578 {
1579 	struct rvu_hwinfo *hw = rvu->hw;
1580 	u16 pcifunc = req->hdr.pcifunc;
1581 	int link, blkaddr, rc = 0;
1582 	int lvl, idx, start, end;
1583 	struct nix_txsch *txsch;
1584 	struct rvu_pfvf *pfvf;
1585 	struct nix_hw *nix_hw;
1586 	u32 *pfvf_map;
1587 	u16 schq;
1588 
1589 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1590 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1591 	if (!pfvf->nixlf || blkaddr < 0)
1592 		return NIX_AF_ERR_AF_LF_INVALID;
1593 
1594 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1595 	if (!nix_hw)
1596 		return -EINVAL;
1597 
1598 	mutex_lock(&rvu->rsrc_lock);
1599 
1600 	/* Check if request is valid as per HW capabilities
1601 	 * and can be accommodated.
1602 	 */
1603 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1604 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1605 		if (rc)
1606 			goto err;
1607 	}
1608 
1609 	/* Allocate requested Tx scheduler queues */
1610 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1611 		txsch = &nix_hw->txsch[lvl];
1612 		pfvf_map = txsch->pfvf_map;
1613 
1614 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1615 			continue;
1616 
1617 		rsp->schq[lvl] = req->schq[lvl];
1618 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1619 
1620 		link = nix_get_tx_link(rvu, pcifunc);
1621 
1622 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1623 			start = link;
1624 			end = link;
1625 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1626 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1627 		} else {
1628 			start = 0;
1629 			end = txsch->schq.max;
1630 		}
1631 
1632 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1633 
1634 		/* Reset queue config */
1635 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1636 			schq = rsp->schq_contig_list[lvl][idx];
1637 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1638 			    NIX_TXSCHQ_CFG_DONE))
1639 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1640 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1641 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1642 		}
1643 
1644 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1645 			schq = rsp->schq_list[lvl][idx];
1646 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1647 			    NIX_TXSCHQ_CFG_DONE))
1648 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1649 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1650 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1651 		}
1652 	}
1653 
1654 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1655 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1656 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1657 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1658 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1659 	goto exit;
1660 err:
1661 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1662 exit:
1663 	mutex_unlock(&rvu->rsrc_lock);
1664 	return rc;
1665 }
1666 
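/* Flush an SMQ before it is freed. CGX Tx is temporarily enabled and link
 * backpressure disabled so that queued packets can drain, the flush and
 * enqueue-xoff bits are set in NIX_AF_SMQX_CFG, and the flush bit is
 * polled until HW clears it, after which the original CGX Tx state is
 * restored.
 */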
1667 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1668 			  int smq, u16 pcifunc, int nixlf)
1669 {
1670 	int pf = rvu_get_pf(pcifunc);
1671 	u8 cgx_id = 0, lmac_id = 0;
1672 	int err, restore_tx_en = 0;
1673 	u64 cfg;
1674 
1675 	/* enable cgx tx if disabled */
1676 	if (is_pf_cgxmapped(rvu, pf)) {
1677 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1678 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1679 						    lmac_id, true);
1680 	}
1681 
1682 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1683 	/* Do SMQ flush and set enqueue xoff */
1684 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1685 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1686 
1687 	/* Disable backpressure from physical link,
1688 	 * otherwise SMQ flush may stall.
1689 	 */
1690 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1691 
1692 	/* Wait for flush to complete */
1693 	err = rvu_poll_reg(rvu, blkaddr,
1694 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1695 	if (err)
1696 		dev_err(rvu->dev,
1697 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1698 
1699 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1700 	/* restore cgx tx state */
1701 	if (restore_tx_en)
1702 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1703 }
1704 
1705 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1706 {
1707 	int blkaddr, nixlf, lvl, schq, err;
1708 	struct rvu_hwinfo *hw = rvu->hw;
1709 	struct nix_txsch *txsch;
1710 	struct nix_hw *nix_hw;
1711 
1712 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1713 	if (blkaddr < 0)
1714 		return NIX_AF_ERR_AF_LF_INVALID;
1715 
1716 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1717 	if (!nix_hw)
1718 		return -EINVAL;
1719 
1720 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1721 	if (nixlf < 0)
1722 		return NIX_AF_ERR_AF_LF_INVALID;
1723 
1724 	/* Disable TL2/3 queue links before SMQ flush */
1725 	mutex_lock(&rvu->rsrc_lock);
1726 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1727 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1728 			continue;
1729 
1730 		txsch = &nix_hw->txsch[lvl];
1731 		for (schq = 0; schq < txsch->schq.max; schq++) {
1732 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1733 				continue;
1734 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1735 		}
1736 	}
1737 
1738 	/* Flush SMQs */
1739 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1740 	for (schq = 0; schq < txsch->schq.max; schq++) {
1741 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1742 			continue;
1743 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1744 	}
1745 
1746 	/* Now free scheduler queues to free pool */
1747 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1748 		 /* TLs above the aggregation level are shared across a PF
1749 		  * and its VFs, hence skip freeing them.
1750 		  */
1751 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1752 			continue;
1753 
1754 		txsch = &nix_hw->txsch[lvl];
1755 		for (schq = 0; schq < txsch->schq.max; schq++) {
1756 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1757 				continue;
1758 			rvu_free_rsrc(&txsch->schq, schq);
1759 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1760 		}
1761 	}
1762 	mutex_unlock(&rvu->rsrc_lock);
1763 
1764 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1765 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1766 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1767 	if (err)
1768 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1769 
1770 	return 0;
1771 }
1772 
1773 static int nix_txschq_free_one(struct rvu *rvu,
1774 			       struct nix_txsch_free_req *req)
1775 {
1776 	struct rvu_hwinfo *hw = rvu->hw;
1777 	u16 pcifunc = req->hdr.pcifunc;
1778 	int lvl, schq, nixlf, blkaddr;
1779 	struct nix_txsch *txsch;
1780 	struct nix_hw *nix_hw;
1781 	u32 *pfvf_map;
1782 
1783 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1784 	if (blkaddr < 0)
1785 		return NIX_AF_ERR_AF_LF_INVALID;
1786 
1787 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1788 	if (!nix_hw)
1789 		return -EINVAL;
1790 
1791 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1792 	if (nixlf < 0)
1793 		return NIX_AF_ERR_AF_LF_INVALID;
1794 
1795 	lvl = req->schq_lvl;
1796 	schq = req->schq;
1797 	txsch = &nix_hw->txsch[lvl];
1798 
1799 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1800 		return 0;
1801 
1802 	pfvf_map = txsch->pfvf_map;
1803 	mutex_lock(&rvu->rsrc_lock);
1804 
1805 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1806 		mutex_unlock(&rvu->rsrc_lock);
1807 		goto err;
1808 	}
1809 
	/* Flush if it is an SMQ. The onus of disabling
	 * TL2/3 queue links before the SMQ flush is on the user.
1812 	 */
1813 	if (lvl == NIX_TXSCH_LVL_SMQ)
1814 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1815 
1816 	/* Free the resource */
1817 	rvu_free_rsrc(&txsch->schq, schq);
1818 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1819 	mutex_unlock(&rvu->rsrc_lock);
1820 	return 0;
1821 err:
1822 	return NIX_AF_ERR_TLX_INVALID;
1823 }
1824 
1825 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1826 				    struct nix_txsch_free_req *req,
1827 				    struct msg_rsp *rsp)
1828 {
1829 	if (req->flags & TXSCHQ_FREE_ALL)
1830 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1831 	else
1832 		return nix_txschq_free_one(rvu, req);
1833 }
1834 
1835 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1836 				      int lvl, u64 reg, u64 regval)
1837 {
1838 	u64 regbase = reg & 0xFFFF;
1839 	u16 schq, parent;
1840 
1841 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1842 		return false;
1843 
1844 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1845 	/* Check if this schq belongs to this PF/VF or not */
1846 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1847 		return false;
1848 
1849 	parent = (regval >> 16) & 0x1FF;
1850 	/* Validate MDQ's TL4 parent */
1851 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1852 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1853 		return false;
1854 
1855 	/* Validate TL4's TL3 parent */
1856 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1857 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1858 		return false;
1859 
1860 	/* Validate TL3's TL2 parent */
1861 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1862 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1863 		return false;
1864 
1865 	/* Validate TL2's TL1 parent */
1866 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1867 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1868 		return false;
1869 
1870 	return true;
1871 }
1872 
1873 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1874 {
1875 	u64 regbase;
1876 
1877 	if (hw->cap.nix_shaping)
1878 		return true;
1879 
	/* If shaping and coloring are not supported, then the
1881 	 * *_CIR and *_PIR registers should not be configured.
1882 	 */
1883 	regbase = reg & 0xFFFF;
1884 
1885 	switch (lvl) {
1886 	case NIX_TXSCH_LVL_TL1:
1887 		if (regbase == NIX_AF_TL1X_CIR(0))
1888 			return false;
1889 		break;
1890 	case NIX_TXSCH_LVL_TL2:
1891 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1892 		    regbase == NIX_AF_TL2X_PIR(0))
1893 			return false;
1894 		break;
1895 	case NIX_TXSCH_LVL_TL3:
1896 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1897 		    regbase == NIX_AF_TL3X_PIR(0))
1898 			return false;
1899 		break;
1900 	case NIX_TXSCH_LVL_TL4:
1901 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1902 		    regbase == NIX_AF_TL4X_PIR(0))
1903 			return false;
1904 		break;
1905 	}
1906 	return true;
1907 }
1908 
1909 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1910 				u16 pcifunc, int blkaddr)
1911 {
1912 	u32 *pfvf_map;
1913 	int schq;
1914 
1915 	schq = nix_get_tx_link(rvu, pcifunc);
1916 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1917 	/* Skip if PF has already done the config */
1918 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1919 		return;
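	/* Program the default TL1 config: round-robin priority and quantum,
	 * and no shaping (CIR = 0).
	 */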
1920 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1921 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1922 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1923 		    TXSCH_TL1_DFLT_RR_QTM);
1924 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1925 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1926 }
1927 
1928 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1929 				    struct nix_txschq_config *req,
1930 				    struct msg_rsp *rsp)
1931 {
1932 	struct rvu_hwinfo *hw = rvu->hw;
1933 	u16 pcifunc = req->hdr.pcifunc;
1934 	u64 reg, regval, schq_regbase;
1935 	struct nix_txsch *txsch;
1936 	struct nix_hw *nix_hw;
1937 	int blkaddr, idx, err;
1938 	int nixlf, schq;
1939 	u32 *pfvf_map;
1940 
1941 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1942 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1943 		return NIX_AF_INVAL_TXSCHQ_CFG;
1944 
1945 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1946 	if (err)
1947 		return err;
1948 
1949 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1950 	if (!nix_hw)
1951 		return -EINVAL;
1952 
1953 	txsch = &nix_hw->txsch[req->lvl];
1954 	pfvf_map = txsch->pfvf_map;
1955 
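	/* TLs at or above the aggregation level are shared by a PF and its VFs.
	 * For such a request from a VF, only program the TL1 defaults and return.
	 */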
1956 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1957 	    pcifunc & RVU_PFVF_FUNC_MASK) {
1958 		mutex_lock(&rvu->rsrc_lock);
1959 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1960 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1961 		mutex_unlock(&rvu->rsrc_lock);
1962 		return 0;
1963 	}
1964 
1965 	for (idx = 0; idx < req->num_regs; idx++) {
1966 		reg = req->reg[idx];
1967 		regval = req->regval[idx];
1968 		schq_regbase = reg & 0xFFFF;
1969 
1970 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1971 					       txsch->lvl, reg, regval))
1972 			return NIX_AF_INVAL_TXSCHQ_CFG;
1973 
1974 		/* Check if shaping and coloring is supported */
1975 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
1976 			continue;
1977 
1978 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1979 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1980 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1981 					   pcifunc, 0);
1982 			regval &= ~(0x7FULL << 24);
1983 			regval |= ((u64)nixlf << 24);
1984 		}
1985 
1986 		/* Clear 'BP_ENA' config, if it's not allowed */
1987 		if (!hw->cap.nix_tx_link_bp) {
1988 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
1989 			    (schq_regbase & 0xFF00) ==
1990 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
1991 				regval &= ~BIT_ULL(13);
1992 		}
1993 
1994 		/* Mark config as done for TL1 by PF */
1995 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
1996 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
1997 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1998 			mutex_lock(&rvu->rsrc_lock);
1999 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2000 							NIX_TXSCHQ_CFG_DONE);
2001 			mutex_unlock(&rvu->rsrc_lock);
2002 		}
2003 
		/* SMQ flush is special, hence split the register write:
		 * trigger the flush first and write the remaining bits later.
2006 		 */
2007 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2008 		    (regval & BIT_ULL(49))) {
2009 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2010 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2011 			regval &= ~BIT_ULL(49);
2012 		}
2013 		rvu_write64(rvu, blkaddr, reg, regval);
2014 	}
2015 
2016 	return 0;
2017 }
2018 
2019 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2020 			   struct nix_vtag_config *req)
2021 {
2022 	u64 regval = req->vtag_size;
2023 
2024 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2025 	    req->vtag_size > VTAGSIZE_T8)
2026 		return -EINVAL;
2027 
	/* RX VTAG type 7 is reserved for VF VLAN */
2029 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2030 		return NIX_AF_ERR_RX_VTAG_INUSE;
2031 
2032 	if (req->rx.capture_vtag)
2033 		regval |= BIT_ULL(5);
2034 	if (req->rx.strip_vtag)
2035 		regval |= BIT_ULL(4);
2036 
2037 	rvu_write64(rvu, blkaddr,
2038 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2039 	return 0;
2040 }
2041 
2042 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2043 			    u16 pcifunc, int index)
2044 {
2045 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2046 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2047 
2048 	if (vlan->entry2pfvf_map[index] != pcifunc)
2049 		return NIX_AF_ERR_PARAM;
2050 
2051 	rvu_write64(rvu, blkaddr,
2052 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2053 	rvu_write64(rvu, blkaddr,
2054 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2055 
2056 	vlan->entry2pfvf_map[index] = 0;
2057 	rvu_free_rsrc(&vlan->rsrc, index);
2058 
2059 	return 0;
2060 }
2061 
2062 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2063 {
2064 	struct nix_txvlan *vlan;
2065 	struct nix_hw *nix_hw;
2066 	int index, blkaddr;
2067 
2068 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2069 	if (blkaddr < 0)
2070 		return;
2071 
2072 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2073 	vlan = &nix_hw->txvlan;
2074 
2075 	mutex_lock(&vlan->rsrc_lock);
2076 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2077 	for (index = 0; index < vlan->rsrc.max; index++) {
2078 		if (vlan->entry2pfvf_map[index] == pcifunc)
2079 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2080 	}
2081 	mutex_unlock(&vlan->rsrc_lock);
2082 }
2083 
2084 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2085 			     u64 vtag, u8 size)
2086 {
2087 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2088 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2089 	u64 regval;
2090 	int index;
2091 
2092 	mutex_lock(&vlan->rsrc_lock);
2093 
2094 	index = rvu_alloc_rsrc(&vlan->rsrc);
2095 	if (index < 0) {
2096 		mutex_unlock(&vlan->rsrc_lock);
2097 		return index;
2098 	}
2099 
2100 	mutex_unlock(&vlan->rsrc_lock);
2101 
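	/* When 'size' is zero (the 4-byte vtag case) the tag is placed in the
	 * upper 32 bits of the DATA register; otherwise the full 64 bits are used.
	 */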
2102 	regval = size ? vtag : vtag << 32;
2103 
2104 	rvu_write64(rvu, blkaddr,
2105 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2106 	rvu_write64(rvu, blkaddr,
2107 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2108 
2109 	return index;
2110 }
2111 
2112 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2113 			     struct nix_vtag_config *req)
2114 {
2115 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2116 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2117 	u16 pcifunc = req->hdr.pcifunc;
2118 	int idx0 = req->tx.vtag0_idx;
2119 	int idx1 = req->tx.vtag1_idx;
2120 	int err = 0;
2121 
2122 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2123 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2124 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2125 			return NIX_AF_ERR_PARAM;
2126 
2127 	mutex_lock(&vlan->rsrc_lock);
2128 
2129 	if (req->tx.free_vtag0) {
2130 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2131 		if (err)
2132 			goto exit;
2133 	}
2134 
2135 	if (req->tx.free_vtag1)
2136 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2137 
2138 exit:
2139 	mutex_unlock(&vlan->rsrc_lock);
2140 	return err;
2141 }
2142 
2143 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2144 			   struct nix_vtag_config *req,
2145 			   struct nix_vtag_config_rsp *rsp)
2146 {
2147 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2148 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2149 	u16 pcifunc = req->hdr.pcifunc;
2150 
2151 	if (req->tx.cfg_vtag0) {
2152 		rsp->vtag0_idx =
2153 			nix_tx_vtag_alloc(rvu, blkaddr,
2154 					  req->tx.vtag0, req->vtag_size);
2155 
2156 		if (rsp->vtag0_idx < 0)
2157 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2158 
2159 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2160 	}
2161 
2162 	if (req->tx.cfg_vtag1) {
2163 		rsp->vtag1_idx =
2164 			nix_tx_vtag_alloc(rvu, blkaddr,
2165 					  req->tx.vtag1, req->vtag_size);
2166 
2167 		if (rsp->vtag1_idx < 0)
2168 			goto err_free;
2169 
2170 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2171 	}
2172 
2173 	return 0;
2174 
2175 err_free:
2176 	if (req->tx.cfg_vtag0)
2177 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2178 
2179 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2180 }
2181 
2182 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2183 				  struct nix_vtag_config *req,
2184 				  struct nix_vtag_config_rsp *rsp)
2185 {
2186 	u16 pcifunc = req->hdr.pcifunc;
2187 	int blkaddr, nixlf, err;
2188 
2189 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2190 	if (err)
2191 		return err;
2192 
2193 	if (req->cfg_type) {
2194 		/* rx vtag configuration */
2195 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2196 		if (err)
2197 			return NIX_AF_ERR_PARAM;
2198 	} else {
2199 		/* tx vtag configuration */
2200 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2201 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2202 			return NIX_AF_ERR_PARAM;
2203 
2204 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2205 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2206 
2207 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2208 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2209 	}
2210 
2211 	return 0;
2212 }
2213 
2214 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2215 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2216 {
2217 	struct nix_aq_enq_req aq_req;
2218 	int err;
2219 
2220 	aq_req.hdr.pcifunc = 0;
2221 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2222 	aq_req.op = op;
2223 	aq_req.qidx = mce;
2224 
2225 	/* Use RSS with RSS index 0 */
2226 	aq_req.mce.op = 1;
2227 	aq_req.mce.index = 0;
2228 	aq_req.mce.eol = eol;
2229 	aq_req.mce.pf_func = pcifunc;
2230 	aq_req.mce.next = next;
2231 
2232 	/* All fields valid */
2233 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2234 
2235 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2236 	if (err) {
2237 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2238 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2239 		return err;
2240 	}
2241 	return 0;
2242 }
2243 
2244 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2245 				     u16 pcifunc, bool add)
2246 {
2247 	struct mce *mce, *tail = NULL;
2248 	bool delete = false;
2249 
2250 	/* Scan through the current list */
2251 	hlist_for_each_entry(mce, &mce_list->head, node) {
2252 		/* If already exists, then delete */
2253 		if (mce->pcifunc == pcifunc && !add) {
2254 			delete = true;
2255 			break;
2256 		} else if (mce->pcifunc == pcifunc && add) {
2257 			/* entry already exists */
2258 			return 0;
2259 		}
2260 		tail = mce;
2261 	}
2262 
2263 	if (delete) {
2264 		hlist_del(&mce->node);
2265 		kfree(mce);
2266 		mce_list->count--;
2267 		return 0;
2268 	}
2269 
2270 	if (!add)
2271 		return 0;
2272 
2273 	/* Add a new one to the list, at the tail */
2274 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2275 	if (!mce)
2276 		return -ENOMEM;
2277 	mce->pcifunc = pcifunc;
2278 	if (!tail)
2279 		hlist_add_head(&mce->node, &mce_list->head);
2280 	else
2281 		hlist_add_behind(&mce->node, &tail->node);
2282 	mce_list->count++;
2283 	return 0;
2284 }
2285 
2286 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2287 			struct nix_mce_list *mce_list,
2288 			int mce_idx, int mcam_index, bool add)
2289 {
2290 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2291 	struct npc_mcam *mcam = &rvu->hw->mcam;
2292 	struct nix_mcast *mcast;
2293 	struct nix_hw *nix_hw;
2294 	struct mce *mce;
2295 
2296 	if (!mce_list)
2297 		return -EINVAL;
2298 
2299 	/* Get this PF/VF func's MCE index */
2300 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2301 
2302 	if (idx > (mce_idx + mce_list->max)) {
2303 		dev_err(rvu->dev,
2304 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2305 			__func__, idx, mce_list->max,
2306 			pcifunc >> RVU_PFVF_PF_SHIFT);
2307 		return -EINVAL;
2308 	}
2309 
2310 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2311 	if (err)
2312 		return err;
2313 
2314 	mcast = &nix_hw->mcast;
2315 	mutex_lock(&mcast->mce_lock);
2316 
2317 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2318 	if (err)
2319 		goto end;
2320 
2321 	/* Disable MCAM entry in NPC */
2322 	if (!mce_list->count) {
2323 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2324 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2325 		goto end;
2326 	}
2327 
2328 	/* Dump the updated list to HW */
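	/* MCE entries form a HW linked list: each entry points to the next
	 * index and the last entry is marked EOL to terminate replication.
	 */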
2329 	idx = mce_idx;
2330 	last_idx = idx + mce_list->count - 1;
2331 	hlist_for_each_entry(mce, &mce_list->head, node) {
2332 		if (idx > last_idx)
2333 			break;
2334 
2335 		next_idx = idx + 1;
2336 		/* EOL should be set in last MCE */
		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
					mce->pcifunc, next_idx,
					next_idx > last_idx);
2340 		if (err)
2341 			goto end;
2342 		idx++;
2343 	}
2344 
2345 end:
2346 	mutex_unlock(&mcast->mce_lock);
2347 	return err;
2348 }
2349 
2350 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2351 		      struct nix_mce_list **mce_list, int *mce_idx)
2352 {
2353 	struct rvu_hwinfo *hw = rvu->hw;
2354 	struct rvu_pfvf *pfvf;
2355 
2356 	if (!hw->cap.nix_rx_multicast ||
2357 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2358 		*mce_list = NULL;
2359 		*mce_idx = 0;
2360 		return;
2361 	}
2362 
2363 	/* Get this PF/VF func's MCE index */
2364 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2365 
2366 	if (type == NIXLF_BCAST_ENTRY) {
2367 		*mce_list = &pfvf->bcast_mce_list;
2368 		*mce_idx = pfvf->bcast_mce_idx;
2369 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
2370 		*mce_list = &pfvf->mcast_mce_list;
2371 		*mce_idx = pfvf->mcast_mce_idx;
2372 	} else if (type == NIXLF_PROMISC_ENTRY) {
2373 		*mce_list = &pfvf->promisc_mce_list;
2374 		*mce_idx = pfvf->promisc_mce_idx;
2375 	}  else {
2376 		*mce_list = NULL;
2377 		*mce_idx = 0;
2378 	}
2379 }
2380 
2381 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2382 			       int type, bool add)
2383 {
2384 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2385 	struct npc_mcam *mcam = &rvu->hw->mcam;
2386 	struct rvu_hwinfo *hw = rvu->hw;
2387 	struct nix_mce_list *mce_list;
2388 
2389 	/* skip multicast pkt replication for AF's VFs */
2390 	if (is_afvf(pcifunc))
2391 		return 0;
2392 
2393 	if (!hw->cap.nix_rx_multicast)
2394 		return 0;
2395 
2396 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2397 	if (blkaddr < 0)
2398 		return -EINVAL;
2399 
2400 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2401 	if (nixlf < 0)
2402 		return -EINVAL;
2403 
2404 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2405 
2406 	mcam_index = npc_get_nixlf_mcam_index(mcam,
2407 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
2408 					      nixlf, type);
2409 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
2410 				  mce_idx, mcam_index, add);
2411 	return err;
2412 }
2413 
2414 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2415 {
2416 	struct nix_mcast *mcast = &nix_hw->mcast;
2417 	int err, pf, numvfs, idx;
2418 	struct rvu_pfvf *pfvf;
2419 	u16 pcifunc;
2420 	u64 cfg;
2421 
2422 	/* Skip PF0 (i.e AF) */
2423 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2424 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2425 		/* If PF is not enabled, nothing to do */
2426 		if (!((cfg >> 20) & 0x01))
2427 			continue;
2428 		/* Get numVFs attached to this PF */
2429 		numvfs = (cfg >> 12) & 0xFF;
2430 
2431 		pfvf = &rvu->pf[pf];
2432 
		/* Is this NIX0/1 block mapped to this PF? */
2434 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2435 			continue;
2436 
2437 		/* save start idx of broadcast mce list */
2438 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2439 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2440 
2441 		/* save start idx of multicast mce list */
2442 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2443 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2444 
2445 		/* save the start idx of promisc mce list */
2446 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2447 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2448 
2449 		for (idx = 0; idx < (numvfs + 1); idx++) {
2450 			/* idx-0 is for PF, followed by VFs */
2451 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2452 			pcifunc |= idx;
2453 			/* Add dummy entries now, so that we don't have to check
2454 			 * for whether AQ_OP should be INIT/WRITE later on.
2455 			 * Will be updated when a NIXLF is attached/detached to
2456 			 * these PF/VFs.
2457 			 */
2458 			err = nix_blk_setup_mce(rvu, nix_hw,
2459 						pfvf->bcast_mce_idx + idx,
2460 						NIX_AQ_INSTOP_INIT,
2461 						pcifunc, 0, true);
2462 			if (err)
2463 				return err;
2464 
2465 			/* add dummy entries to multicast mce list */
2466 			err = nix_blk_setup_mce(rvu, nix_hw,
2467 						pfvf->mcast_mce_idx + idx,
2468 						NIX_AQ_INSTOP_INIT,
2469 						pcifunc, 0, true);
2470 			if (err)
2471 				return err;
2472 
2473 			/* add dummy entries to promisc mce list */
2474 			err = nix_blk_setup_mce(rvu, nix_hw,
2475 						pfvf->promisc_mce_idx + idx,
2476 						NIX_AQ_INSTOP_INIT,
2477 						pcifunc, 0, true);
2478 			if (err)
2479 				return err;
2480 		}
2481 	}
2482 	return 0;
2483 }
2484 
2485 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2486 {
2487 	struct nix_mcast *mcast = &nix_hw->mcast;
2488 	struct rvu_hwinfo *hw = rvu->hw;
2489 	int err, size;
2490 
2491 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2492 	size = (1ULL << size);
2493 
2494 	/* Alloc memory for multicast/mirror replication entries */
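	/* The MCE table holds 256 << MC_TBL_SIZE entries, each of the size
	 * reported by NIX_AF_CONST3.
	 */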
2495 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2496 			 (256UL << MC_TBL_SIZE), size);
2497 	if (err)
2498 		return -ENOMEM;
2499 
2500 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2501 		    (u64)mcast->mce_ctx->iova);
2502 
	/* Set max list length equal to max no of VFs per PF + PF itself */
2504 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2505 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2506 
2507 	/* Alloc memory for multicast replication buffers */
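	/* Buffer count is encoded the same way: 8 << MC_BUF_CNT buffers of the
	 * size reported by NIX_AF_MC_MIRROR_CONST.
	 */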
2508 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2509 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2510 			 (8UL << MC_BUF_CNT), size);
2511 	if (err)
2512 		return -ENOMEM;
2513 
2514 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2515 		    (u64)mcast->mcast_buf->iova);
2516 
2517 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2518 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2519 
2520 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2521 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2522 		    BIT_ULL(20) | MC_BUF_CNT);
2523 
2524 	mutex_init(&mcast->mce_lock);
2525 
2526 	return nix_setup_mce_tables(rvu, nix_hw);
2527 }
2528 
2529 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2530 {
2531 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2532 	int err;
2533 
	/* Allocate resource bitmap for tx vtag def registers */
2535 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2536 	err = rvu_alloc_bitmap(&vlan->rsrc);
2537 	if (err)
2538 		return -ENOMEM;
2539 
2540 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2541 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2542 					    sizeof(u16), GFP_KERNEL);
2543 	if (!vlan->entry2pfvf_map)
2544 		goto free_mem;
2545 
2546 	mutex_init(&vlan->rsrc_lock);
2547 	return 0;
2548 
2549 free_mem:
2550 	kfree(vlan->rsrc.bmap);
2551 	return -ENOMEM;
2552 }
2553 
2554 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2555 {
2556 	struct nix_txsch *txsch;
2557 	int err, lvl, schq;
2558 	u64 cfg, reg;
2559 
	/* Get the scheduler queue count of each type and allocate
	 * a bitmap for each, for alloc/free/attach operations.
2562 	 */
2563 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2564 		txsch = &nix_hw->txsch[lvl];
2565 		txsch->lvl = lvl;
2566 		switch (lvl) {
2567 		case NIX_TXSCH_LVL_SMQ:
2568 			reg = NIX_AF_MDQ_CONST;
2569 			break;
2570 		case NIX_TXSCH_LVL_TL4:
2571 			reg = NIX_AF_TL4_CONST;
2572 			break;
2573 		case NIX_TXSCH_LVL_TL3:
2574 			reg = NIX_AF_TL3_CONST;
2575 			break;
2576 		case NIX_TXSCH_LVL_TL2:
2577 			reg = NIX_AF_TL2_CONST;
2578 			break;
2579 		case NIX_TXSCH_LVL_TL1:
2580 			reg = NIX_AF_TL1_CONST;
2581 			break;
2582 		}
2583 		cfg = rvu_read64(rvu, blkaddr, reg);
2584 		txsch->schq.max = cfg & 0xFFFF;
2585 		err = rvu_alloc_bitmap(&txsch->schq);
2586 		if (err)
2587 			return err;
2588 
2589 		/* Allocate memory for scheduler queues to
2590 		 * PF/VF pcifunc mapping info.
2591 		 */
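		/* Each pfvf_map entry packs the owning pcifunc and the schq
		 * state flags (see TXSCH_MAP_FUNC()/TXSCH_MAP_FLAGS()).
		 */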
2592 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2593 					       sizeof(u32), GFP_KERNEL);
2594 		if (!txsch->pfvf_map)
2595 			return -ENOMEM;
2596 		for (schq = 0; schq < txsch->schq.max; schq++)
2597 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2598 	}
2599 	return 0;
2600 }
2601 
2602 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2603 				int blkaddr, u32 cfg)
2604 {
2605 	int fmt_idx;
2606 
2607 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2608 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2609 			return fmt_idx;
2610 	}
2611 	if (fmt_idx >= nix_hw->mark_format.total)
2612 		return -ERANGE;
2613 
2614 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2615 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2616 	nix_hw->mark_format.in_use++;
2617 	return fmt_idx;
2618 }
2619 
2620 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2621 				    int blkaddr)
2622 {
2623 	u64 cfgs[] = {
2624 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2625 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2626 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2627 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2628 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2629 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2630 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2631 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2632 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2633 	};
2634 	int i, rc;
2635 	u64 total;
2636 
2637 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2638 	nix_hw->mark_format.total = (u8)total;
2639 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2640 					       GFP_KERNEL);
2641 	if (!nix_hw->mark_format.cfg)
2642 		return -ENOMEM;
2643 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2644 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2645 		if (rc < 0)
2646 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2647 				i, rc);
2648 	}
2649 
2650 	return 0;
2651 }
2652 
2653 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
2654 {
2655 	/* CN10K supports LBK FIFO size 72 KB */
2656 	if (rvu->hw->lbk_bufsize == 0x12000)
2657 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
2658 	else
2659 		*max_mtu = NIC_HW_MAX_FRS;
2660 }
2661 
2662 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2663 {
2664 	/* RPM supports FIFO len 128 KB */
2665 	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2666 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2667 	else
2668 		*max_mtu = NIC_HW_MAX_FRS;
2669 }
2670 
2671 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2672 				     struct nix_hw_info *rsp)
2673 {
2674 	u16 pcifunc = req->hdr.pcifunc;
2675 	int blkaddr;
2676 
2677 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2678 	if (blkaddr < 0)
2679 		return NIX_AF_ERR_AF_LF_INVALID;
2680 
2681 	if (is_afvf(pcifunc))
2682 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2683 	else
2684 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2685 
2686 	rsp->min_mtu = NIC_HW_MIN_FRS;
2687 	return 0;
2688 }
2689 
2690 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2691 				   struct msg_rsp *rsp)
2692 {
2693 	u16 pcifunc = req->hdr.pcifunc;
2694 	int i, nixlf, blkaddr, err;
2695 	u64 stats;
2696 
2697 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2698 	if (err)
2699 		return err;
2700 
2701 	/* Get stats count supported by HW */
2702 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2703 
2704 	/* Reset tx stats */
2705 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2706 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2707 
2708 	/* Reset rx stats */
2709 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2710 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2711 
2712 	return 0;
2713 }
2714 
2715 /* Returns the ALG index to be set into NPC_RX_ACTION */
2716 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2717 {
2718 	int i;
2719 
	/* Scan over existing algo entries to find a match */
2721 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2722 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2723 			return i;
2724 
2725 	return -ERANGE;
2726 }
2727 
2728 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2729 {
2730 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2731 	int max_key_off, max_bit_pos, group_member;
2732 	struct nix_rx_flowkey_alg *field;
2733 	struct nix_rx_flowkey_alg tmp;
2734 	u32 key_type, valid_key;
2735 	int l4_key_offset = 0;
2736 
2737 	if (!alg)
2738 		return -EINVAL;
2739 
2740 #define FIELDS_PER_ALG  5
2741 #define MAX_KEY_OFF	40
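	/* An algorithm can extract at most FIELDS_PER_ALG fields and the
	 * extracted bytes must fit within the 40-byte RSS hash key.
	 */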
2742 	/* Clear all fields */
2743 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2744 
2745 	/* Each of the 32 possible flow key algorithm definitions should
2746 	 * fall into above incremental config (except ALG0). Otherwise a
2747 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2748 	 *
	 * If a different definition or combination is needed then the NPC MCAM
	 * has to be programmed to filter such pkts and its action should
	 * point to this definition to calculate the flowtag or hash.
2752 	 *
	 * The 'for' loop below goes over _all_ protocol fields and the
	 * following variables track the state machine's forward progress.
2755 	 *
2756 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2757 	 * in field->key_offset update.
2758 	 * field_marker - Enabled when a new field needs to be selected.
2759 	 * group_member - Enabled when protocol is part of a group.
2760 	 */
2761 
2762 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2763 	nr_field = 0; key_off = 0; field_marker = 1;
2764 	field = &tmp; max_bit_pos = fls(flow_cfg);
2765 	for (idx = 0;
2766 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2767 	     key_off < MAX_KEY_OFF; idx++) {
2768 		key_type = BIT(idx);
2769 		valid_key = flow_cfg & key_type;
2770 		/* Found a field marker, reset the field values */
2771 		if (field_marker)
2772 			memset(&tmp, 0, sizeof(tmp));
2773 
2774 		field_marker = true;
2775 		keyoff_marker = true;
2776 		switch (key_type) {
2777 		case NIX_FLOW_KEY_TYPE_PORT:
2778 			field->sel_chan = true;
2779 			/* This should be set to 1, when SEL_CHAN is set */
2780 			field->bytesm1 = 1;
2781 			break;
2782 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2783 			field->lid = NPC_LID_LC;
2784 			field->hdr_offset = 9; /* offset */
2785 			field->bytesm1 = 0; /* 1 byte */
2786 			field->ltype_match = NPC_LT_LC_IP;
2787 			field->ltype_mask = 0xF;
2788 			break;
2789 		case NIX_FLOW_KEY_TYPE_IPV4:
2790 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2791 			field->lid = NPC_LID_LC;
2792 			field->ltype_match = NPC_LT_LC_IP;
2793 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2794 				field->lid = NPC_LID_LG;
2795 				field->ltype_match = NPC_LT_LG_TU_IP;
2796 			}
2797 			field->hdr_offset = 12; /* SIP offset */
2798 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2799 			field->ltype_mask = 0xF; /* Match only IPv4 */
2800 			keyoff_marker = false;
2801 			break;
2802 		case NIX_FLOW_KEY_TYPE_IPV6:
2803 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2804 			field->lid = NPC_LID_LC;
2805 			field->ltype_match = NPC_LT_LC_IP6;
2806 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2807 				field->lid = NPC_LID_LG;
2808 				field->ltype_match = NPC_LT_LG_TU_IP6;
2809 			}
2810 			field->hdr_offset = 8; /* SIP offset */
2811 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2812 			field->ltype_mask = 0xF; /* Match only IPv6 */
2813 			break;
2814 		case NIX_FLOW_KEY_TYPE_TCP:
2815 		case NIX_FLOW_KEY_TYPE_UDP:
2816 		case NIX_FLOW_KEY_TYPE_SCTP:
2817 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2818 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2819 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2820 			field->lid = NPC_LID_LD;
2821 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2822 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2823 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2824 				field->lid = NPC_LID_LH;
2825 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2826 
2827 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2828 			 * so no need to change the ltype_match, just change
2829 			 * the lid for inner protocols
2830 			 */
2831 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2832 				     (int)NPC_LT_LH_TU_TCP);
2833 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2834 				     (int)NPC_LT_LH_TU_UDP);
2835 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2836 				     (int)NPC_LT_LH_TU_SCTP);
2837 
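			/* TCP/UDP/SCTP share a single port-pair key field;
			 * accumulate ltype matches across the group and emit
			 * the field only when the last member (SCTP) is seen.
			 */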
2838 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2839 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2840 			    valid_key) {
2841 				field->ltype_match |= NPC_LT_LD_TCP;
2842 				group_member = true;
2843 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2844 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2845 				   valid_key) {
2846 				field->ltype_match |= NPC_LT_LD_UDP;
2847 				group_member = true;
2848 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2849 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2850 				   valid_key) {
2851 				field->ltype_match |= NPC_LT_LD_SCTP;
2852 				group_member = true;
2853 			}
2854 			field->ltype_mask = ~field->ltype_match;
2855 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2856 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
				/* Handle the case where some member of the group
				 * is enabled but not the final one
				 */
2860 				if (group_member) {
2861 					valid_key = true;
2862 					group_member = false;
2863 				}
2864 			} else {
2865 				field_marker = false;
2866 				keyoff_marker = false;
2867 			}
2868 
			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
			 * remember the TCP key offset within the 40-byte hash key.
2871 			 */
2872 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2873 				l4_key_offset = key_off;
2874 			break;
2875 		case NIX_FLOW_KEY_TYPE_NVGRE:
2876 			field->lid = NPC_LID_LD;
2877 			field->hdr_offset = 4; /* VSID offset */
2878 			field->bytesm1 = 2;
2879 			field->ltype_match = NPC_LT_LD_NVGRE;
2880 			field->ltype_mask = 0xF;
2881 			break;
2882 		case NIX_FLOW_KEY_TYPE_VXLAN:
2883 		case NIX_FLOW_KEY_TYPE_GENEVE:
2884 			field->lid = NPC_LID_LE;
2885 			field->bytesm1 = 2;
2886 			field->hdr_offset = 4;
2887 			field->ltype_mask = 0xF;
2888 			field_marker = false;
2889 			keyoff_marker = false;
2890 
2891 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2892 				field->ltype_match |= NPC_LT_LE_VXLAN;
2893 				group_member = true;
2894 			}
2895 
2896 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2897 				field->ltype_match |= NPC_LT_LE_GENEVE;
2898 				group_member = true;
2899 			}
2900 
2901 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2902 				if (group_member) {
2903 					field->ltype_mask = ~field->ltype_match;
2904 					field_marker = true;
2905 					keyoff_marker = true;
2906 					valid_key = true;
2907 					group_member = false;
2908 				}
2909 			}
2910 			break;
2911 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2912 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2913 			field->lid = NPC_LID_LA;
2914 			field->ltype_match = NPC_LT_LA_ETHER;
2915 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2916 				field->lid = NPC_LID_LF;
2917 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2918 			}
2919 			field->hdr_offset = 0;
2920 			field->bytesm1 = 5; /* DMAC 6 Byte */
2921 			field->ltype_mask = 0xF;
2922 			break;
2923 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2924 			field->lid = NPC_LID_LC;
2925 			field->hdr_offset = 40; /* IPV6 hdr */
			field->bytesm1 = 0; /* 1 Byte ext hdr */
2927 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2928 			field->ltype_mask = 0xF;
2929 			break;
2930 		case NIX_FLOW_KEY_TYPE_GTPU:
2931 			field->lid = NPC_LID_LE;
2932 			field->hdr_offset = 4;
			field->bytesm1 = 3; /* 4 bytes TID */
2934 			field->ltype_match = NPC_LT_LE_GTPU;
2935 			field->ltype_mask = 0xF;
2936 			break;
2937 		case NIX_FLOW_KEY_TYPE_VLAN:
2938 			field->lid = NPC_LID_LB;
2939 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2940 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2941 			field->ltype_match = NPC_LT_LB_CTAG;
2942 			field->ltype_mask = 0xF;
2943 			field->fn_mask = 1; /* Mask out the first nibble */
2944 			break;
2945 		case NIX_FLOW_KEY_TYPE_AH:
2946 		case NIX_FLOW_KEY_TYPE_ESP:
2947 			field->hdr_offset = 0;
2948 			field->bytesm1 = 7; /* SPI + sequence number */
2949 			field->ltype_mask = 0xF;
2950 			field->lid = NPC_LID_LE;
2951 			field->ltype_match = NPC_LT_LE_ESP;
2952 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2953 				field->lid = NPC_LID_LD;
2954 				field->ltype_match = NPC_LT_LD_AH;
2955 				field->hdr_offset = 4;
2956 				keyoff_marker = false;
2957 			}
2958 			break;
2959 		}
2960 		field->ena = 1;
2961 
2962 		/* Found a valid flow key type */
2963 		if (valid_key) {
2964 			/* Use the key offset of TCP/UDP/SCTP fields
2965 			 * for ESP/AH fields.
2966 			 */
2967 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2968 			    key_type == NIX_FLOW_KEY_TYPE_AH)
2969 				key_off = l4_key_offset;
2970 			field->key_offset = key_off;
2971 			memcpy(&alg[nr_field], field, sizeof(*field));
2972 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2973 
2974 			/* Found a field marker, get the next field */
2975 			if (field_marker)
2976 				nr_field++;
2977 		}
2978 
2979 		/* Found a keyoff marker, update the new key_off */
2980 		if (keyoff_marker) {
2981 			key_off += max_key_off;
2982 			max_key_off = 0;
2983 		}
2984 	}
2985 	/* Processed all the flow key types */
2986 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
2987 		return 0;
2988 	else
2989 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
2990 }
2991 
2992 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
2993 {
2994 	u64 field[FIELDS_PER_ALG];
2995 	struct nix_hw *hw;
2996 	int fid, rc;
2997 
2998 	hw = get_nix_hw(rvu->hw, blkaddr);
2999 	if (!hw)
3000 		return -EINVAL;
3001 
	/* No room to add a new flow hash algorithm */
3003 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3004 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
3005 
3006 	/* Generate algo fields for the given flow_cfg */
3007 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3008 	if (rc)
3009 		return rc;
3010 
3011 	/* Update ALGX_FIELDX register with generated fields */
3012 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3013 		rvu_write64(rvu, blkaddr,
3014 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3015 							   fid), field[fid]);
3016 
	/* Store the flow_cfg for further lookup */
3018 	rc = hw->flowkey.in_use;
3019 	hw->flowkey.flowkey[rc] = flow_cfg;
3020 	hw->flowkey.in_use++;
3021 
3022 	return rc;
3023 }
3024 
3025 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3026 					 struct nix_rss_flowkey_cfg *req,
3027 					 struct nix_rss_flowkey_cfg_rsp *rsp)
3028 {
3029 	u16 pcifunc = req->hdr.pcifunc;
3030 	int alg_idx, nixlf, blkaddr;
3031 	struct nix_hw *nix_hw;
3032 	int err;
3033 
3034 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3035 	if (err)
3036 		return err;
3037 
3038 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3039 	if (!nix_hw)
3040 		return -EINVAL;
3041 
3042 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
	/* Failed to get an algo index from the existing list, reserve a new one */
3044 	if (alg_idx < 0) {
3045 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3046 						  req->flowkey_cfg);
3047 		if (alg_idx < 0)
3048 			return alg_idx;
3049 	}
3050 	rsp->alg_idx = alg_idx;
3051 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3052 				       alg_idx, req->mcam_index);
3053 	return 0;
3054 }
3055 
3056 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3057 {
3058 	u32 flowkey_cfg, minkey_cfg;
3059 	int alg, fid, rc;
3060 
3061 	/* Disable all flow key algx fieldx */
3062 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3063 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3064 			rvu_write64(rvu, blkaddr,
3065 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3066 				    0);
3067 	}
3068 
3069 	/* IPv4/IPv6 SIP/DIPs */
3070 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3071 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3072 	if (rc < 0)
3073 		return rc;
3074 
3075 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3076 	minkey_cfg = flowkey_cfg;
3077 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3078 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3079 	if (rc < 0)
3080 		return rc;
3081 
3082 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3083 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3084 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3085 	if (rc < 0)
3086 		return rc;
3087 
3088 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3089 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3090 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3091 	if (rc < 0)
3092 		return rc;
3093 
3094 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3095 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3096 			NIX_FLOW_KEY_TYPE_UDP;
3097 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3098 	if (rc < 0)
3099 		return rc;
3100 
3101 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3102 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3103 			NIX_FLOW_KEY_TYPE_SCTP;
3104 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3105 	if (rc < 0)
3106 		return rc;
3107 
3108 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3109 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3110 			NIX_FLOW_KEY_TYPE_SCTP;
3111 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3112 	if (rc < 0)
3113 		return rc;
3114 
3115 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3116 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3117 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3118 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3119 	if (rc < 0)
3120 		return rc;
3121 
3122 	return 0;
3123 }
3124 
3125 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3126 				      struct nix_set_mac_addr *req,
3127 				      struct msg_rsp *rsp)
3128 {
3129 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3130 	u16 pcifunc = req->hdr.pcifunc;
3131 	int blkaddr, nixlf, err;
3132 	struct rvu_pfvf *pfvf;
3133 
3134 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3135 	if (err)
3136 		return err;
3137 
3138 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3139 
3140 	/* untrusted VF can't overwrite admin(PF) changes */
3141 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3142 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3143 		dev_warn(rvu->dev,
3144 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3145 		return -EPERM;
3146 	}
3147 
3148 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3149 
3150 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3151 				    pfvf->rx_chan_base, req->mac_addr);
3152 
3153 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3154 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
3155 
3156 	return 0;
3157 }
3158 
3159 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3160 				      struct msg_req *req,
3161 				      struct nix_get_mac_addr_rsp *rsp)
3162 {
3163 	u16 pcifunc = req->hdr.pcifunc;
3164 	struct rvu_pfvf *pfvf;
3165 
3166 	if (!is_nixlf_attached(rvu, pcifunc))
3167 		return NIX_AF_ERR_AF_LF_INVALID;
3168 
3169 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3170 
3171 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3172 
3173 	return 0;
3174 }
3175 
3176 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3177 				     struct msg_rsp *rsp)
3178 {
3179 	bool allmulti, promisc, nix_rx_multicast;
3180 	u16 pcifunc = req->hdr.pcifunc;
3181 	struct rvu_pfvf *pfvf;
3182 	int nixlf, err;
3183 
3184 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3185 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3186 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3187 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3188 
3189 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3190 
3191 	if (is_vf(pcifunc) && !nix_rx_multicast &&
3192 	    (promisc || allmulti)) {
3193 		dev_warn_ratelimited(rvu->dev,
3194 				     "VF promisc/multicast not supported\n");
3195 		return 0;
3196 	}
3197 
3198 	/* untrusted VF can't configure promisc/allmulti */
3199 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3200 	    (promisc || allmulti))
3201 		return 0;
3202 
3203 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3204 	if (err)
3205 		return err;
3206 
3207 	if (nix_rx_multicast) {
3208 		/* add/del this PF_FUNC to/from mcast pkt replication list */
3209 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3210 					  allmulti);
3211 		if (err) {
3212 			dev_err(rvu->dev,
3213 				"Failed to update pcifunc 0x%x to multicast list\n",
3214 				pcifunc);
3215 			return err;
3216 		}
3217 
3218 		/* add/del this PF_FUNC to/from promisc pkt replication list */
3219 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3220 					  promisc);
3221 		if (err) {
3222 			dev_err(rvu->dev,
3223 				"Failed to update pcifunc 0x%x to promisc list\n",
3224 				pcifunc);
3225 			return err;
3226 		}
3227 	}
3228 
3229 	/* install/uninstall allmulti entry */
3230 	if (allmulti) {
3231 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3232 					       pfvf->rx_chan_base);
3233 	} else {
3234 		if (!nix_rx_multicast)
3235 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3236 	}
3237 
3238 	/* install/uninstall promisc entry */
3239 	if (promisc) {
3240 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3241 					      pfvf->rx_chan_base,
3242 					      pfvf->rx_chan_cnt);
3243 	} else {
3244 		if (!nix_rx_multicast)
3245 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3246 	}
3247 
3248 	return 0;
3249 }
3250 
3251 static void nix_find_link_frs(struct rvu *rvu,
3252 			      struct nix_frs_cfg *req, u16 pcifunc)
3253 {
3254 	int pf = rvu_get_pf(pcifunc);
3255 	struct rvu_pfvf *pfvf;
3256 	int maxlen, minlen;
3257 	int numvfs, hwvf;
3258 	int vf;
3259 
3260 	/* Update with requester's min/max lengths */
3261 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3262 	pfvf->maxlen = req->maxlen;
3263 	if (req->update_minlen)
3264 		pfvf->minlen = req->minlen;
3265 
3266 	maxlen = req->maxlen;
3267 	minlen = req->update_minlen ? req->minlen : 0;
3268 
3269 	/* Get this PF's numVFs and starting hwvf */
3270 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3271 
3272 	/* For each VF, compare requested max/minlen */
3273 	for (vf = 0; vf < numvfs; vf++) {
3274 		pfvf =  &rvu->hwvf[hwvf + vf];
3275 		if (pfvf->maxlen > maxlen)
3276 			maxlen = pfvf->maxlen;
3277 		if (req->update_minlen &&
3278 		    pfvf->minlen && pfvf->minlen < minlen)
3279 			minlen = pfvf->minlen;
3280 	}
3281 
3282 	/* Compare requested max/minlen with PF's max/minlen */
3283 	pfvf = &rvu->pf[pf];
3284 	if (pfvf->maxlen > maxlen)
3285 		maxlen = pfvf->maxlen;
3286 	if (req->update_minlen &&
3287 	    pfvf->minlen && pfvf->minlen < minlen)
3288 		minlen = pfvf->minlen;
3289 
	/* Update the request with the max/min of the PF and its VFs */
3291 	req->maxlen = maxlen;
3292 	if (req->update_minlen)
3293 		req->minlen = minlen;
3294 }
3295 
3296 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3297 				    struct msg_rsp *rsp)
3298 {
3299 	struct rvu_hwinfo *hw = rvu->hw;
3300 	u16 pcifunc = req->hdr.pcifunc;
3301 	int pf = rvu_get_pf(pcifunc);
3302 	int blkaddr, schq, link = -1;
3303 	struct nix_txsch *txsch;
3304 	u64 cfg, lmac_fifo_len;
3305 	struct nix_hw *nix_hw;
3306 	u8 cgx = 0, lmac = 0;
3307 	u16 max_mtu;
3308 
3309 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3310 	if (blkaddr < 0)
3311 		return NIX_AF_ERR_AF_LF_INVALID;
3312 
3313 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3314 	if (!nix_hw)
3315 		return -EINVAL;
3316 
3317 	if (is_afvf(pcifunc))
3318 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3319 	else
3320 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3321 
3322 	if (!req->sdp_link && req->maxlen > max_mtu)
3323 		return NIX_AF_ERR_FRS_INVALID;
3324 
3325 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3326 		return NIX_AF_ERR_FRS_INVALID;
3327 
	/* Check if the requester wants to update SMQs */
3329 	if (!req->update_smq)
3330 		goto rx_frscfg;
3331 
3332 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
3333 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3334 	mutex_lock(&rvu->rsrc_lock);
3335 	for (schq = 0; schq < txsch->schq.max; schq++) {
3336 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3337 			continue;
3338 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3339 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3340 		if (req->update_minlen)
3341 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3342 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3343 	}
3344 	mutex_unlock(&rvu->rsrc_lock);
3345 
3346 rx_frscfg:
3347 	/* Check if config is for SDP link */
3348 	if (req->sdp_link) {
3349 		if (!hw->sdp_links)
3350 			return NIX_AF_ERR_RX_LINK_INVALID;
3351 		link = hw->cgx_links + hw->lbk_links;
3352 		goto linkcfg;
3353 	}
3354 
3355 	/* Check if the request is from CGX mapped RVU PF */
3356 	if (is_pf_cgxmapped(rvu, pf)) {
3357 		/* Get CGX and LMAC to which this PF is mapped and find link */
3358 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3359 		link = (cgx * hw->lmac_per_cgx) + lmac;
3360 	} else if (pf == 0) {
		/* For VFs of PF0, ingress is the LBK port, so configure the LBK link */
3362 		link = hw->cgx_links;
3363 	}
3364 
3365 	if (link < 0)
3366 		return NIX_AF_ERR_RX_LINK_INVALID;
3367 
3368 	nix_find_link_frs(rvu, req, pcifunc);
3369 
3370 linkcfg:
3371 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3372 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3373 	if (req->update_minlen)
3374 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
3375 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3376 
3377 	if (req->sdp_link || pf == 0)
3378 		return 0;
3379 
3380 	/* Update transmit credits for CGX links */
3381 	lmac_fifo_len =
3382 		rvu_cgx_get_fifolen(rvu) /
3383 		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
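	/* Credits are in units of 16 bytes of LMAC FIFO space left after
	 * reserving room for one max-sized frame.
	 */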
3384 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3385 	cfg &= ~(0xFFFFFULL << 12);
3386 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
3387 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3388 	return 0;
3389 }
3390 
3391 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3392 				    struct msg_rsp *rsp)
3393 {
3394 	int nixlf, blkaddr, err;
3395 	u64 cfg;
3396 
3397 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3398 	if (err)
3399 		return err;
3400 
3401 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3402 	/* Set the interface configuration */
3403 	if (req->len_verify & BIT(0))
3404 		cfg |= BIT_ULL(41);
3405 	else
3406 		cfg &= ~BIT_ULL(41);
3407 
3408 	if (req->len_verify & BIT(1))
3409 		cfg |= BIT_ULL(40);
3410 	else
3411 		cfg &= ~BIT_ULL(40);
3412 
3413 	if (req->csum_verify & BIT(0))
3414 		cfg |= BIT_ULL(37);
3415 	else
3416 		cfg &= ~BIT_ULL(37);
3417 
3418 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3419 
3420 	return 0;
3421 }
3422 
3423 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3424 {
3425 	/* CN10k supports 72KB FIFO size and max packet size of 64k */
3426 	if (rvu->hw->lbk_bufsize == 0x12000)
3427 		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3428 
3429 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3430 }
3431 
3432 static void nix_link_config(struct rvu *rvu, int blkaddr)
3433 {
3434 	struct rvu_hwinfo *hw = rvu->hw;
3435 	int cgx, lmac_cnt, slink, link;
3436 	u16 lbk_max_frs, lmac_max_frs;
3437 	u64 tx_credits;
3438 
3439 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3440 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3441 
3442 	/* Set default min/max packet lengths allowed on NIX Rx links.
3443 	 *
	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
	 * as undersized and report them to SW as error pkts, hence it is
	 * set to 40 bytes here.
3447 	 */
3448 	for (link = 0; link < hw->cgx_links; link++) {
3449 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3450 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3451 	}
3452 
	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
3454 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3455 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3456 	}
3457 	if (hw->sdp_links) {
3458 		link = hw->cgx_links + hw->lbk_links;
3459 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3460 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3461 	}
3462 
3463 	/* Set credits for Tx links assuming max packet length allowed.
3464 	 * This will be reconfigured based on MTU set for PF/VF.
3465 	 */
3466 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3467 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
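		/* One credit covers 16 bytes of the per-LMAC FIFO, minus room
		 * for one max-sized frame.
		 */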
3468 		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3469 			       lmac_max_frs) / 16;
3470 		/* Enable credits and set credit pkt count to max allowed */
3471 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3472 		slink = cgx * hw->lmac_per_cgx;
3473 		for (link = slink; link < (slink + lmac_cnt); link++) {
3474 			rvu_write64(rvu, blkaddr,
3475 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3476 				    tx_credits);
3477 		}
3478 	}
3479 
3480 	/* Set Tx credits for LBK link */
3481 	slink = hw->cgx_links;
3482 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3483 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3484 		/* Enable credits and set credit pkt count to max allowed */
3485 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3486 		rvu_write64(rvu, blkaddr,
3487 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3488 	}
3489 }
3490 
3491 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3492 {
3493 	int idx, err;
3494 	u64 status;
3495 
3496 	/* Start X2P bus calibration */
3497 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3498 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3499 	/* Wait for calibration to complete */
3500 	err = rvu_poll_reg(rvu, blkaddr,
3501 			   NIX_AF_STATUS, BIT_ULL(10), false);
3502 	if (err) {
3503 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3504 		return err;
3505 	}
3506 
3507 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3508 	/* Check if CGX devices are ready */
3509 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3510 		/* Skip when cgx port is not available */
3511 		if (!rvu_cgx_pdata(idx, rvu) ||
3512 		    (status & (BIT_ULL(16 + idx))))
3513 			continue;
3514 		dev_err(rvu->dev,
3515 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3516 		err = -EBUSY;
3517 	}
3518 
3519 	/* Check if LBK is ready */
3520 	if (!(status & BIT_ULL(19))) {
3521 		dev_err(rvu->dev,
3522 			"LBK didn't respond to NIX X2P calibration\n");
3523 		err = -EBUSY;
3524 	}
3525 
3526 	/* Clear 'calibrate_x2p' bit */
3527 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3528 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3529 	if (err || (status & 0x3FFULL))
3530 		dev_err(rvu->dev,
3531 			"NIX X2P calibration failed, status 0x%llx\n", status);
3532 	if (err)
3533 		return err;
3534 	return 0;
3535 }
3536 
3537 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3538 {
3539 	u64 cfg;
3540 	int err;
3541 
3542 	/* Set admin queue endianness */
3543 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3544 #ifdef __BIG_ENDIAN
3545 	cfg |= BIT_ULL(8);
3546 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3547 #else
3548 	cfg &= ~BIT_ULL(8);
3549 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3550 #endif
3551 
3552 	/* Do not bypass NDC cache */
3553 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3554 	cfg &= ~0x3FFEULL;
3555 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3556 	/* Disable caching of SQB aka SQEs */
3557 	cfg |= 0x04ULL;
3558 #endif
3559 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3560 
3561 	/* Result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3563 	 * operation type. Alloc sufficient result memory for all operations.
3564 	 */
3565 	err = rvu_aq_alloc(rvu, &block->aq,
3566 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3567 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3568 	if (err)
3569 		return err;
3570 
3571 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3572 	rvu_write64(rvu, block->addr,
3573 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3574 	return 0;
3575 }
3576 
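/* One-time initialization of a NIX block: apply silicon errata
 * workarounds, calibrate the X2P bus, set up the admin queue, Tx
 * schedulers, mark formats, multicast/Tx VLAN resources, LSO and RSS
 * flowkey algorithms, NPC layer definitions and link credits.
 */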
3577 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3578 {
3579 	const struct npc_lt_def_cfg *ltdefs;
3580 	struct rvu_hwinfo *hw = rvu->hw;
3581 	int blkaddr = nix_hw->blkaddr;
3582 	struct rvu_block *block;
3583 	int err;
3584 	u64 cfg;
3585 
3586 	block = &hw->block[blkaddr];
3587 
3588 	if (is_rvu_96xx_B0(rvu)) {
3589 		/* As per a HW erratum in 96xx A0/B0 silicon, NIX may corrupt
3590 		 * internal state when conditional clocks are turned off.
3591 		 * Hence keep them enabled.
3592 		 */
3593 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3594 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3595 
3596 		/* Set chan/link to backpressure TL3 instead of TL2 */
3597 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3598 
3599 		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
3600 		 * This sticky mode is known to cause SQ stalls when multiple SQs
3601 		 * mapped to the same SMQ transmit packets at the same time.
3602 		 */
3603 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3604 		cfg &= ~BIT_ULL(15);
3605 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3606 	}
3607 
3608 	ltdefs = rvu->kpu.lt_def;
3609 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3610 	err = nix_calibrate_x2p(rvu, blkaddr);
3611 	if (err)
3612 		return err;
3613 
3614 	/* Initialize admin queue */
3615 	err = nix_aq_init(rvu, block);
3616 	if (err)
3617 		return err;
3618 
3619 	/* Restore CINT timer delay to HW reset values */
3620 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3621 
3622 	if (is_block_implemented(hw, blkaddr)) {
3623 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3624 		if (err)
3625 			return err;
3626 
3627 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3628 		if (err)
3629 			return err;
3630 
3631 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3632 		if (err)
3633 			return err;
3634 
3635 		err = nix_setup_txvlan(rvu, nix_hw);
3636 		if (err)
3637 			return err;
3638 
3639 		/* Configure segmentation offload formats */
3640 		nix_setup_lso(rvu, nix_hw, blkaddr);
3641 
3642 		/* Configure Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3643 		 * This helps the HW protocol checker identify headers
3644 		 * and validate lengths and checksums.
3645 		 */
3646 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3647 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3648 			    ltdefs->rx_ol2.ltype_mask);
3649 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3650 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3651 			    ltdefs->rx_oip4.ltype_mask);
3652 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3653 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3654 			    ltdefs->rx_iip4.ltype_mask);
3655 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3656 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3657 			    ltdefs->rx_oip6.ltype_mask);
3658 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3659 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3660 			    ltdefs->rx_iip6.ltype_mask);
3661 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3662 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3663 			    ltdefs->rx_otcp.ltype_mask);
3664 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3665 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3666 			    ltdefs->rx_itcp.ltype_mask);
3667 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3668 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3669 			    ltdefs->rx_oudp.ltype_mask);
3670 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3671 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3672 			    ltdefs->rx_iudp.ltype_mask);
3673 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3674 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3675 			    ltdefs->rx_osctp.ltype_mask);
3676 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3677 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3678 			    ltdefs->rx_isctp.ltype_mask);
3679 
3680 		if (!is_rvu_otx2(rvu)) {
3681 			/* Enable APAD calculation for other protocols that match
3682 			 * the APAD0 and APAD1 layer type definition registers.
3683 			 */
3684 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3685 				    (ltdefs->rx_apad0.valid << 11) |
3686 				    (ltdefs->rx_apad0.lid << 8) |
3687 				    (ltdefs->rx_apad0.ltype_match << 4) |
3688 				    ltdefs->rx_apad0.ltype_mask);
3689 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3690 				    (ltdefs->rx_apad1.valid << 11) |
3691 				    (ltdefs->rx_apad1.lid << 8) |
3692 				    (ltdefs->rx_apad1.ltype_match << 4) |
3693 				    ltdefs->rx_apad1.ltype_mask);
3694 
3695 			/* The receive ethertype definition register defines layer
3696 			 * information in NPC_RESULT_S to identify the Ethertype
3697 			 * location in the L2 header. Used for Ethertype overwriting
3698 			 * in the inline IPsec flow.
3699 			 */
3700 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3701 				    (ltdefs->rx_et[0].offset << 12) |
3702 				    (ltdefs->rx_et[0].valid << 11) |
3703 				    (ltdefs->rx_et[0].lid << 8) |
3704 				    (ltdefs->rx_et[0].ltype_match << 4) |
3705 				    ltdefs->rx_et[0].ltype_mask);
3706 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3707 				    (ltdefs->rx_et[1].offset << 12) |
3708 				    (ltdefs->rx_et[1].valid << 11) |
3709 				    (ltdefs->rx_et[1].lid << 8) |
3710 				    (ltdefs->rx_et[1].ltype_match << 4) |
3711 				    ltdefs->rx_et[1].ltype_mask);
3712 		}
3713 
3714 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3715 		if (err)
3716 			return err;
3717 
3718 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3719 		nix_link_config(rvu, blkaddr);
3720 
3721 		/* Enable Channel backpressure */
3722 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3723 	}
3724 	return 0;
3725 }
3726 
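/* Allocate per-block NIX state and initialize every NIX block
 * present on this silicon.
 */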
3727 int rvu_nix_init(struct rvu *rvu)
3728 {
3729 	struct rvu_hwinfo *hw = rvu->hw;
3730 	struct nix_hw *nix_hw;
3731 	int blkaddr = 0, err;
3732 	int i = 0;
3733 
3734 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3735 			       GFP_KERNEL);
3736 	if (!hw->nix)
3737 		return -ENOMEM;
3738 
3739 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3740 	while (blkaddr) {
3741 		nix_hw = &hw->nix[i];
3742 		nix_hw->rvu = rvu;
3743 		nix_hw->blkaddr = blkaddr;
3744 		err = rvu_nix_block_init(rvu, nix_hw);
3745 		if (err)
3746 			return err;
3747 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3748 		i++;
3749 	}
3750 
3751 	return 0;
3752 }
3753 
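/* Free resources allocated for a NIX block: admin queue memory,
 * Tx scheduler bitmaps, Tx VLAN and multicast contexts.
 */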
3754 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3755 				  struct rvu_block *block)
3756 {
3757 	struct nix_txsch *txsch;
3758 	struct nix_mcast *mcast;
3759 	struct nix_txvlan *vlan;
3760 	struct nix_hw *nix_hw;
3761 	int lvl;
3762 
3763 	rvu_aq_free(rvu, block->aq);
3764 
3765 	if (is_block_implemented(rvu->hw, blkaddr)) {
3766 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3767 		if (!nix_hw)
3768 			return;
3769 
3770 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3771 			txsch = &nix_hw->txsch[lvl];
3772 			kfree(txsch->schq.bmap);
3773 		}
3774 
3775 		vlan = &nix_hw->txvlan;
3776 		kfree(vlan->rsrc.bmap);
3777 		mutex_destroy(&vlan->rsrc_lock);
3778 		devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3779 
3780 		mcast = &nix_hw->mcast;
3781 		qmem_free(rvu->dev, mcast->mce_ctx);
3782 		qmem_free(rvu->dev, mcast->mcast_buf);
3783 		mutex_destroy(&mcast->mce_lock);
3784 	}
3785 }
3786 
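/* Free memory of all NIX blocks */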
3787 void rvu_nix_freemem(struct rvu *rvu)
3788 {
3789 	struct rvu_hwinfo *hw = rvu->hw;
3790 	struct rvu_block *block;
3791 	int blkaddr = 0;
3792 
3793 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3794 	while (blkaddr) {
3795 		block = &hw->block[blkaddr];
3796 		rvu_nix_block_freemem(rvu, blkaddr, block);
3797 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3798 	}
3799 }
3800 
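/* Mbox handler to start packet reception on a NIX LF: enable its
 * default and installed NPC MCAM entries and resume CGX Rx/Tx.
 */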
3801 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3802 				     struct msg_rsp *rsp)
3803 {
3804 	u16 pcifunc = req->hdr.pcifunc;
3805 	struct rvu_pfvf *pfvf;
3806 	int nixlf, err;
3807 
3808 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3809 	if (err)
3810 		return err;
3811 
3812 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3813 
3814 	npc_mcam_enable_flows(rvu, pcifunc);
3815 
3816 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3817 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3818 
3819 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3820 }
3821 
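/* Mbox handler to stop packet reception on a NIX LF: disable its
 * NPC MCAM entries and stop CGX Rx/Tx.
 */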
3822 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3823 				    struct msg_rsp *rsp)
3824 {
3825 	u16 pcifunc = req->hdr.pcifunc;
3826 	struct rvu_pfvf *pfvf;
3827 	int nixlf, err;
3828 
3829 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3830 	if (err)
3831 		return err;
3832 
3833 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3834 
3835 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3836 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3837 
3838 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3839 }
3840 
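/* Tear down a NIX LF: disable and free its NPC MCAM entries, free its
 * Tx scheduler queues, disable any enabled RQ/SQ/CQ contexts and free
 * the HW context memory.
 */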
3841 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3842 {
3843 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3844 	struct hwctx_disable_req ctx_req;
3845 	int err;
3846 
3847 	ctx_req.hdr.pcifunc = pcifunc;
3848 
3849 	/* Clean up NPC MCAM entries and free the Tx scheduler queues in use */
3850 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3851 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3852 	nix_interface_deinit(rvu, pcifunc, nixlf);
3853 	nix_rx_sync(rvu, blkaddr);
3854 	nix_txschq_free(rvu, pcifunc);
3855 
3856 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3857 
3858 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3859 
3860 	if (pfvf->sq_ctx) {
3861 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3862 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3863 		if (err)
3864 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3865 	}
3866 
3867 	if (pfvf->rq_ctx) {
3868 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3869 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3870 		if (err)
3871 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3872 	}
3873 
3874 	if (pfvf->cq_ctx) {
3875 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3876 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3877 		if (err)
3878 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3879 	}
3880 
3881 	nix_ctx_free(rvu, pfvf);
3882 }
3883 
3884 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
3885 
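/* Enable or disable PTP timestamping on a NIX LF's Tx path, provided
 * the underlying MAC supports PTP.
 */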
3886 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3887 {
3888 	struct rvu_hwinfo *hw = rvu->hw;
3889 	struct rvu_block *block;
3890 	int blkaddr, pf;
3891 	int nixlf;
3892 	u64 cfg;
3893 
3894 	pf = rvu_get_pf(pcifunc);
3895 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3896 		return 0;
3897 
3898 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3899 	if (blkaddr < 0)
3900 		return NIX_AF_ERR_AF_LF_INVALID;
3901 
3902 	block = &hw->block[blkaddr];
3903 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3904 	if (nixlf < 0)
3905 		return NIX_AF_ERR_AF_LF_INVALID;
3906 
3907 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3908 
3909 	if (enable)
3910 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3911 	else
3912 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3913 
3914 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3915 
3916 	return 0;
3917 }
3918 
3919 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3920 					  struct msg_rsp *rsp)
3921 {
3922 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3923 }
3924 
3925 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3926 					   struct msg_rsp *rsp)
3927 {
3928 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3929 }
3930 
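/* Mbox handler to configure an LSO format: reuse an existing format
 * whose fields match the request, otherwise program a new one and
 * return its index.
 */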
3931 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3932 					struct nix_lso_format_cfg *req,
3933 					struct nix_lso_format_cfg_rsp *rsp)
3934 {
3935 	u16 pcifunc = req->hdr.pcifunc;
3936 	struct nix_hw *nix_hw;
3937 	struct rvu_pfvf *pfvf;
3938 	int blkaddr, idx, f;
3939 	u64 reg;
3940 
3941 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3942 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3943 	if (!pfvf->nixlf || blkaddr < 0)
3944 		return NIX_AF_ERR_AF_LF_INVALID;
3945 
3946 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3947 	if (!nix_hw)
3948 		return -EINVAL;
3949 
3950 	/* Find existing matching LSO format, if any */
3951 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3952 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3953 			reg = rvu_read64(rvu, blkaddr,
3954 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3955 			if (req->fields[f] != (reg & req->field_mask))
3956 				break;
3957 		}
3958 
3959 		if (f == NIX_LSO_FIELD_MAX)
3960 			break;
3961 	}
3962 
3963 	if (idx < nix_hw->lso.in_use) {
3964 		/* Match found */
3965 		rsp->lso_format_idx = idx;
3966 		return 0;
3967 	}
3968 
3969 	if (nix_hw->lso.in_use == nix_hw->lso.total)
3970 		return NIX_AF_ERR_LSO_CFG_FAIL;
3971 
3972 	rsp->lso_format_idx = nix_hw->lso.in_use++;
3973 
3974 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
3975 		rvu_write64(rvu, blkaddr,
3976 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
3977 			    req->fields[f]);
3978 
3979 	return 0;
3980 }
3981 
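/* When called for a VF, restore its MAC address to the default MAC */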
3982 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
3983 {
3984 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
3985 
3986 	/* Overwrite the VF's MAC address with the default MAC */
3987 	if (from_vf)
3988 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
3989 }
3990