1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 			    int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
25 			       int type, bool add);
26 static int nix_setup_ipolicers(struct rvu *rvu,
27 			       struct nix_hw *nix_hw, int blkaddr);
28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
30 			       struct nix_hw *nix_hw, u16 pcifunc);
31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
33 				     u32 leaf_prof);
34 
35 enum mc_tbl_sz {
36 	MC_TBL_SZ_256,
37 	MC_TBL_SZ_512,
38 	MC_TBL_SZ_1K,
39 	MC_TBL_SZ_2K,
40 	MC_TBL_SZ_4K,
41 	MC_TBL_SZ_8K,
42 	MC_TBL_SZ_16K,
43 	MC_TBL_SZ_32K,
44 	MC_TBL_SZ_64K,
45 };
46 
47 enum mc_buf_cnt {
48 	MC_BUF_CNT_8,
49 	MC_BUF_CNT_16,
50 	MC_BUF_CNT_32,
51 	MC_BUF_CNT_64,
52 	MC_BUF_CNT_128,
53 	MC_BUF_CNT_256,
54 	MC_BUF_CNT_512,
55 	MC_BUF_CNT_1024,
56 	MC_BUF_CNT_2048,
57 };
58 
59 enum nix_makr_fmt_indexes {
60 	NIX_MARK_CFG_IP_DSCP_RED,
61 	NIX_MARK_CFG_IP_DSCP_YELLOW,
62 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 	NIX_MARK_CFG_IP_ECN_RED,
64 	NIX_MARK_CFG_IP_ECN_YELLOW,
65 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 	NIX_MARK_CFG_VLAN_DEI_RED,
67 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
69 	NIX_MARK_CFG_MAX,
70 };
71 
72 /* For now considering MC resources needed for broadcast
73  * pkt replication only. i.e 256 HWVFs + 12 PFs.
74  */
75 #define MC_TBL_SIZE	MC_TBL_SZ_512
76 #define MC_BUF_CNT	MC_BUF_CNT_128
77 
78 struct mce {
79 	struct hlist_node	node;
80 	u16			pcifunc;
81 };
82 
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
84 {
85 	int i = 0;
86 
87 	/*If blkaddr is 0, return the first nix block address*/
88 	if (blkaddr == 0)
89 		return rvu->nix_blkaddr[blkaddr];
90 
91 	while (i + 1 < MAX_NIX_BLKS) {
92 		if (rvu->nix_blkaddr[i] == blkaddr)
93 			return rvu->nix_blkaddr[i + 1];
94 		i++;
95 	}
96 
97 	return 0;
98 }
99 
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
101 {
102 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
103 	int blkaddr;
104 
105 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 	if (!pfvf->nixlf || blkaddr < 0)
107 		return false;
108 	return true;
109 }
110 
111 int rvu_get_nixlf_count(struct rvu *rvu)
112 {
113 	int blkaddr = 0, max = 0;
114 	struct rvu_block *block;
115 
116 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
117 	while (blkaddr) {
118 		block = &rvu->hw->block[blkaddr];
119 		max += block->lf.max;
120 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
121 	}
122 	return max;
123 }
124 
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
126 {
127 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 	struct rvu_hwinfo *hw = rvu->hw;
129 	int blkaddr;
130 
131 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 	if (!pfvf->nixlf || blkaddr < 0)
133 		return NIX_AF_ERR_AF_LF_INVALID;
134 
135 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
136 	if (*nixlf < 0)
137 		return NIX_AF_ERR_AF_LF_INVALID;
138 
139 	if (nix_blkaddr)
140 		*nix_blkaddr = blkaddr;
141 
142 	return 0;
143 }
144 
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 			struct nix_hw **nix_hw, int *blkaddr)
147 {
148 	struct rvu_pfvf *pfvf;
149 
150 	pfvf = rvu_get_pfvf(rvu, pcifunc);
151 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 	if (!pfvf->nixlf || *blkaddr < 0)
153 		return NIX_AF_ERR_AF_LF_INVALID;
154 
155 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
156 	if (!*nix_hw)
157 		return NIX_AF_ERR_INVALID_NIXBLK;
158 	return 0;
159 }
160 
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
162 {
163 	INIT_HLIST_HEAD(&list->head);
164 	list->count = 0;
165 	list->max = max;
166 }
167 
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
169 {
170 	int idx;
171 
172 	if (!mcast)
173 		return 0;
174 
175 	idx = mcast->next_free_mce;
176 	mcast->next_free_mce += count;
177 	return idx;
178 }
179 
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
181 {
182 	int nix_blkaddr = 0, i = 0;
183 	struct rvu *rvu = hw->rvu;
184 
185 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 	while (nix_blkaddr) {
187 		if (blkaddr == nix_blkaddr && hw->nix)
188 			return &hw->nix[i];
189 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
190 		i++;
191 	}
192 	return NULL;
193 }
194 
195 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
196 {
197 	int err;
198 
199 	/*Sync all in flight RX packets to LLC/DRAM */
200 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
201 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
202 	if (err)
203 		dev_err(rvu->dev, "NIX RX software sync failed\n");
204 }
205 
206 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
207 			    int lvl, u16 pcifunc, u16 schq)
208 {
209 	struct rvu_hwinfo *hw = rvu->hw;
210 	struct nix_txsch *txsch;
211 	struct nix_hw *nix_hw;
212 	u16 map_func;
213 
214 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
215 	if (!nix_hw)
216 		return false;
217 
218 	txsch = &nix_hw->txsch[lvl];
219 	/* Check out of bounds */
220 	if (schq >= txsch->schq.max)
221 		return false;
222 
223 	mutex_lock(&rvu->rsrc_lock);
224 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
225 	mutex_unlock(&rvu->rsrc_lock);
226 
227 	/* TLs aggegating traffic are shared across PF and VFs */
228 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
229 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
230 			return false;
231 		else
232 			return true;
233 	}
234 
235 	if (map_func != pcifunc)
236 		return false;
237 
238 	return true;
239 }
240 
241 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
242 {
243 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
244 	struct mac_ops *mac_ops;
245 	int pkind, pf, vf, lbkid;
246 	u8 cgx_id, lmac_id;
247 	int err;
248 
249 	pf = rvu_get_pf(pcifunc);
250 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
251 		return 0;
252 
253 	switch (type) {
254 	case NIX_INTF_TYPE_CGX:
255 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
256 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
257 
258 		pkind = rvu_npc_get_pkind(rvu, pf);
259 		if (pkind < 0) {
260 			dev_err(rvu->dev,
261 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
262 			return -EINVAL;
263 		}
264 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
265 		pfvf->tx_chan_base = pfvf->rx_chan_base;
266 		pfvf->rx_chan_cnt = 1;
267 		pfvf->tx_chan_cnt = 1;
268 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
269 		rvu_npc_set_pkind(rvu, pkind, pfvf);
270 
271 		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
272 		/* By default we enable pause frames */
273 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
274 			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
275 								    rvu),
276 						      lmac_id, true, true);
277 		break;
278 	case NIX_INTF_TYPE_LBK:
279 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
280 
281 		/* If NIX1 block is present on the silicon then NIXes are
282 		 * assigned alternatively for lbk interfaces. NIX0 should
283 		 * send packets on lbk link 1 channels and NIX1 should send
284 		 * on lbk link 0 channels for the communication between
285 		 * NIX0 and NIX1.
286 		 */
287 		lbkid = 0;
288 		if (rvu->hw->lbk_links > 1)
289 			lbkid = vf & 0x1 ? 0 : 1;
290 
291 		/* Note that AF's VFs work in pairs and talk over consecutive
292 		 * loopback channels.Therefore if odd number of AF VFs are
293 		 * enabled then the last VF remains with no pair.
294 		 */
295 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
296 		pfvf->tx_chan_base = vf & 0x1 ?
297 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
298 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
299 		pfvf->rx_chan_cnt = 1;
300 		pfvf->tx_chan_cnt = 1;
301 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
302 					      pfvf->rx_chan_base,
303 					      pfvf->rx_chan_cnt);
304 		break;
305 	}
306 
307 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
308 	 * RVU PF/VF's MAC address.
309 	 */
310 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
311 				    pfvf->rx_chan_base, pfvf->mac_addr);
312 
313 	/* Add this PF_FUNC to bcast pkt replication list */
314 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
315 	if (err) {
316 		dev_err(rvu->dev,
317 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
318 			pcifunc);
319 		return err;
320 	}
321 	/* Install MCAM rule matching Ethernet broadcast mac address */
322 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
323 					  nixlf, pfvf->rx_chan_base);
324 
325 	pfvf->maxlen = NIC_HW_MIN_FRS;
326 	pfvf->minlen = NIC_HW_MIN_FRS;
327 
328 	return 0;
329 }
330 
331 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
332 {
333 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
334 	int err;
335 
336 	pfvf->maxlen = 0;
337 	pfvf->minlen = 0;
338 
339 	/* Remove this PF_FUNC from bcast pkt replication list */
340 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
341 	if (err) {
342 		dev_err(rvu->dev,
343 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
344 			pcifunc);
345 	}
346 
347 	/* Free and disable any MCAM entries used by this NIX LF */
348 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
349 }
350 
351 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
352 				    struct nix_bp_cfg_req *req,
353 				    struct msg_rsp *rsp)
354 {
355 	u16 pcifunc = req->hdr.pcifunc;
356 	struct rvu_pfvf *pfvf;
357 	int blkaddr, pf, type;
358 	u16 chan_base, chan;
359 	u64 cfg;
360 
361 	pf = rvu_get_pf(pcifunc);
362 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
363 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
364 		return 0;
365 
366 	pfvf = rvu_get_pfvf(rvu, pcifunc);
367 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
368 
369 	chan_base = pfvf->rx_chan_base + req->chan_base;
370 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
371 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
372 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
373 			    cfg & ~BIT_ULL(16));
374 	}
375 	return 0;
376 }
377 
378 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
379 			    int type, int chan_id)
380 {
381 	int bpid, blkaddr, lmac_chan_cnt;
382 	struct rvu_hwinfo *hw = rvu->hw;
383 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
384 	struct rvu_pfvf *pfvf;
385 	u8 cgx_id, lmac_id;
386 	u64 cfg;
387 
388 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
389 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
390 	lmac_chan_cnt = cfg & 0xFF;
391 
392 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
393 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
394 
395 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
396 
397 	/* Backpressure IDs range division
398 	 * CGX channles are mapped to (0 - 191) BPIDs
399 	 * LBK channles are mapped to (192 - 255) BPIDs
400 	 * SDP channles are mapped to (256 - 511) BPIDs
401 	 *
402 	 * Lmac channles and bpids mapped as follows
403 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
404 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
405 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
406 	 */
407 	switch (type) {
408 	case NIX_INTF_TYPE_CGX:
409 		if ((req->chan_base + req->chan_cnt) > 15)
410 			return -EINVAL;
411 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
412 		/* Assign bpid based on cgx, lmac and chan id */
413 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
414 			(lmac_id * lmac_chan_cnt) + req->chan_base;
415 
416 		if (req->bpid_per_chan)
417 			bpid += chan_id;
418 		if (bpid > cgx_bpid_cnt)
419 			return -EINVAL;
420 		break;
421 
422 	case NIX_INTF_TYPE_LBK:
423 		if ((req->chan_base + req->chan_cnt) > 63)
424 			return -EINVAL;
425 		bpid = cgx_bpid_cnt + req->chan_base;
426 		if (req->bpid_per_chan)
427 			bpid += chan_id;
428 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
429 			return -EINVAL;
430 		break;
431 	default:
432 		return -EINVAL;
433 	}
434 	return bpid;
435 }
436 
437 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
438 				   struct nix_bp_cfg_req *req,
439 				   struct nix_bp_cfg_rsp *rsp)
440 {
441 	int blkaddr, pf, type, chan_id = 0;
442 	u16 pcifunc = req->hdr.pcifunc;
443 	struct rvu_pfvf *pfvf;
444 	u16 chan_base, chan;
445 	s16 bpid, bpid_base;
446 	u64 cfg;
447 
448 	pf = rvu_get_pf(pcifunc);
449 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
450 
451 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
452 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
453 		return 0;
454 
455 	pfvf = rvu_get_pfvf(rvu, pcifunc);
456 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
457 
458 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
459 	chan_base = pfvf->rx_chan_base + req->chan_base;
460 	bpid = bpid_base;
461 
462 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
463 		if (bpid < 0) {
464 			dev_warn(rvu->dev, "Fail to enable backpressure\n");
465 			return -EINVAL;
466 		}
467 
468 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
469 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
470 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
471 		chan_id++;
472 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
473 	}
474 
475 	for (chan = 0; chan < req->chan_cnt; chan++) {
476 		/* Map channel and bpid assign to it */
477 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
478 					(bpid_base & 0x3FF);
479 		if (req->bpid_per_chan)
480 			bpid_base++;
481 	}
482 	rsp->chan_cnt = req->chan_cnt;
483 
484 	return 0;
485 }
486 
487 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
488 				 u64 format, bool v4, u64 *fidx)
489 {
490 	struct nix_lso_format field = {0};
491 
492 	/* IP's Length field */
493 	field.layer = NIX_TXLAYER_OL3;
494 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
495 	field.offset = v4 ? 2 : 4;
496 	field.sizem1 = 1; /* i.e 2 bytes */
497 	field.alg = NIX_LSOALG_ADD_PAYLEN;
498 	rvu_write64(rvu, blkaddr,
499 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
500 		    *(u64 *)&field);
501 
502 	/* No ID field in IPv6 header */
503 	if (!v4)
504 		return;
505 
506 	/* IP's ID field */
507 	field.layer = NIX_TXLAYER_OL3;
508 	field.offset = 4;
509 	field.sizem1 = 1; /* i.e 2 bytes */
510 	field.alg = NIX_LSOALG_ADD_SEGNUM;
511 	rvu_write64(rvu, blkaddr,
512 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
513 		    *(u64 *)&field);
514 }
515 
516 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
517 				 u64 format, u64 *fidx)
518 {
519 	struct nix_lso_format field = {0};
520 
521 	/* TCP's sequence number field */
522 	field.layer = NIX_TXLAYER_OL4;
523 	field.offset = 4;
524 	field.sizem1 = 3; /* i.e 4 bytes */
525 	field.alg = NIX_LSOALG_ADD_OFFSET;
526 	rvu_write64(rvu, blkaddr,
527 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
528 		    *(u64 *)&field);
529 
530 	/* TCP's flags field */
531 	field.layer = NIX_TXLAYER_OL4;
532 	field.offset = 12;
533 	field.sizem1 = 1; /* 2 bytes */
534 	field.alg = NIX_LSOALG_TCP_FLAGS;
535 	rvu_write64(rvu, blkaddr,
536 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
537 		    *(u64 *)&field);
538 }
539 
540 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
541 {
542 	u64 cfg, idx, fidx = 0;
543 
544 	/* Get max HW supported format indices */
545 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
546 	nix_hw->lso.total = cfg;
547 
548 	/* Enable LSO */
549 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
550 	/* For TSO, set first and middle segment flags to
551 	 * mask out PSH, RST & FIN flags in TCP packet
552 	 */
553 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
554 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
555 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
556 
557 	/* Setup default static LSO formats
558 	 *
559 	 * Configure format fields for TCPv4 segmentation offload
560 	 */
561 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
562 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
563 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
564 
565 	/* Set rest of the fields to NOP */
566 	for (; fidx < 8; fidx++) {
567 		rvu_write64(rvu, blkaddr,
568 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
569 	}
570 	nix_hw->lso.in_use++;
571 
572 	/* Configure format fields for TCPv6 segmentation offload */
573 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
574 	fidx = 0;
575 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
576 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
577 
578 	/* Set rest of the fields to NOP */
579 	for (; fidx < 8; fidx++) {
580 		rvu_write64(rvu, blkaddr,
581 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
582 	}
583 	nix_hw->lso.in_use++;
584 }
585 
586 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
587 {
588 	kfree(pfvf->rq_bmap);
589 	kfree(pfvf->sq_bmap);
590 	kfree(pfvf->cq_bmap);
591 	if (pfvf->rq_ctx)
592 		qmem_free(rvu->dev, pfvf->rq_ctx);
593 	if (pfvf->sq_ctx)
594 		qmem_free(rvu->dev, pfvf->sq_ctx);
595 	if (pfvf->cq_ctx)
596 		qmem_free(rvu->dev, pfvf->cq_ctx);
597 	if (pfvf->rss_ctx)
598 		qmem_free(rvu->dev, pfvf->rss_ctx);
599 	if (pfvf->nix_qints_ctx)
600 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
601 	if (pfvf->cq_ints_ctx)
602 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
603 
604 	pfvf->rq_bmap = NULL;
605 	pfvf->cq_bmap = NULL;
606 	pfvf->sq_bmap = NULL;
607 	pfvf->rq_ctx = NULL;
608 	pfvf->sq_ctx = NULL;
609 	pfvf->cq_ctx = NULL;
610 	pfvf->rss_ctx = NULL;
611 	pfvf->nix_qints_ctx = NULL;
612 	pfvf->cq_ints_ctx = NULL;
613 }
614 
615 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
616 			      struct rvu_pfvf *pfvf, int nixlf,
617 			      int rss_sz, int rss_grps, int hwctx_size,
618 			      u64 way_mask)
619 {
620 	int err, grp, num_indices;
621 
622 	/* RSS is not requested for this NIXLF */
623 	if (!rss_sz)
624 		return 0;
625 	num_indices = rss_sz * rss_grps;
626 
627 	/* Alloc NIX RSS HW context memory and config the base */
628 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
629 	if (err)
630 		return err;
631 
632 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
633 		    (u64)pfvf->rss_ctx->iova);
634 
635 	/* Config full RSS table size, enable RSS and caching */
636 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
637 		    BIT_ULL(36) | BIT_ULL(4) |
638 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
639 		    way_mask << 20);
640 	/* Config RSS group offset and sizes */
641 	for (grp = 0; grp < rss_grps; grp++)
642 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
643 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
644 	return 0;
645 }
646 
647 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
648 			       struct nix_aq_inst_s *inst)
649 {
650 	struct admin_queue *aq = block->aq;
651 	struct nix_aq_res_s *result;
652 	int timeout = 1000;
653 	u64 reg, head;
654 
655 	result = (struct nix_aq_res_s *)aq->res->base;
656 
657 	/* Get current head pointer where to append this instruction */
658 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
659 	head = (reg >> 4) & AQ_PTR_MASK;
660 
661 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
662 	       (void *)inst, aq->inst->entry_sz);
663 	memset(result, 0, sizeof(*result));
664 	/* sync into memory */
665 	wmb();
666 
667 	/* Ring the doorbell and wait for result */
668 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
669 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
670 		cpu_relax();
671 		udelay(1);
672 		timeout--;
673 		if (!timeout)
674 			return -EBUSY;
675 	}
676 
677 	if (result->compcode != NIX_AQ_COMP_GOOD)
678 		/* TODO: Replace this with some error code */
679 		return -EBUSY;
680 
681 	return 0;
682 }
683 
684 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
685 				   struct nix_aq_enq_req *req,
686 				   struct nix_aq_enq_rsp *rsp)
687 {
688 	struct rvu_hwinfo *hw = rvu->hw;
689 	u16 pcifunc = req->hdr.pcifunc;
690 	int nixlf, blkaddr, rc = 0;
691 	struct nix_aq_inst_s inst;
692 	struct rvu_block *block;
693 	struct admin_queue *aq;
694 	struct rvu_pfvf *pfvf;
695 	void *ctx, *mask;
696 	bool ena;
697 	u64 cfg;
698 
699 	blkaddr = nix_hw->blkaddr;
700 	block = &hw->block[blkaddr];
701 	aq = block->aq;
702 	if (!aq) {
703 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
704 		return NIX_AF_ERR_AQ_ENQUEUE;
705 	}
706 
707 	pfvf = rvu_get_pfvf(rvu, pcifunc);
708 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
709 
710 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
711 	 * operations done by AF itself.
712 	 */
713 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
714 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
715 		if (!pfvf->nixlf || nixlf < 0)
716 			return NIX_AF_ERR_AF_LF_INVALID;
717 	}
718 
719 	switch (req->ctype) {
720 	case NIX_AQ_CTYPE_RQ:
721 		/* Check if index exceeds max no of queues */
722 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
723 			rc = NIX_AF_ERR_AQ_ENQUEUE;
724 		break;
725 	case NIX_AQ_CTYPE_SQ:
726 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
727 			rc = NIX_AF_ERR_AQ_ENQUEUE;
728 		break;
729 	case NIX_AQ_CTYPE_CQ:
730 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
731 			rc = NIX_AF_ERR_AQ_ENQUEUE;
732 		break;
733 	case NIX_AQ_CTYPE_RSS:
734 		/* Check if RSS is enabled and qidx is within range */
735 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
736 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
737 		    (req->qidx >= (256UL << (cfg & 0xF))))
738 			rc = NIX_AF_ERR_AQ_ENQUEUE;
739 		break;
740 	case NIX_AQ_CTYPE_MCE:
741 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
742 
743 		/* Check if index exceeds MCE list length */
744 		if (!nix_hw->mcast.mce_ctx ||
745 		    (req->qidx >= (256UL << (cfg & 0xF))))
746 			rc = NIX_AF_ERR_AQ_ENQUEUE;
747 
748 		/* Adding multicast lists for requests from PF/VFs is not
749 		 * yet supported, so ignore this.
750 		 */
751 		if (rsp)
752 			rc = NIX_AF_ERR_AQ_ENQUEUE;
753 		break;
754 	case NIX_AQ_CTYPE_BANDPROF:
755 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
756 					nix_hw, pcifunc))
757 			rc = NIX_AF_ERR_INVALID_BANDPROF;
758 		break;
759 	default:
760 		rc = NIX_AF_ERR_AQ_ENQUEUE;
761 	}
762 
763 	if (rc)
764 		return rc;
765 
766 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
767 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
768 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
769 	     (req->op == NIX_AQ_INSTOP_WRITE &&
770 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
771 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
772 				     pcifunc, req->sq.smq))
773 			return NIX_AF_ERR_AQ_ENQUEUE;
774 	}
775 
776 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
777 	inst.lf = nixlf;
778 	inst.cindex = req->qidx;
779 	inst.ctype = req->ctype;
780 	inst.op = req->op;
781 	/* Currently we are not supporting enqueuing multiple instructions,
782 	 * so always choose first entry in result memory.
783 	 */
784 	inst.res_addr = (u64)aq->res->iova;
785 
786 	/* Hardware uses same aq->res->base for updating result of
787 	 * previous instruction hence wait here till it is done.
788 	 */
789 	spin_lock(&aq->lock);
790 
791 	/* Clean result + context memory */
792 	memset(aq->res->base, 0, aq->res->entry_sz);
793 	/* Context needs to be written at RES_ADDR + 128 */
794 	ctx = aq->res->base + 128;
795 	/* Mask needs to be written at RES_ADDR + 256 */
796 	mask = aq->res->base + 256;
797 
798 	switch (req->op) {
799 	case NIX_AQ_INSTOP_WRITE:
800 		if (req->ctype == NIX_AQ_CTYPE_RQ)
801 			memcpy(mask, &req->rq_mask,
802 			       sizeof(struct nix_rq_ctx_s));
803 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
804 			memcpy(mask, &req->sq_mask,
805 			       sizeof(struct nix_sq_ctx_s));
806 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
807 			memcpy(mask, &req->cq_mask,
808 			       sizeof(struct nix_cq_ctx_s));
809 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
810 			memcpy(mask, &req->rss_mask,
811 			       sizeof(struct nix_rsse_s));
812 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
813 			memcpy(mask, &req->mce_mask,
814 			       sizeof(struct nix_rx_mce_s));
815 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
816 			memcpy(mask, &req->prof_mask,
817 			       sizeof(struct nix_bandprof_s));
818 		fallthrough;
819 	case NIX_AQ_INSTOP_INIT:
820 		if (req->ctype == NIX_AQ_CTYPE_RQ)
821 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
822 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
823 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
824 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
825 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
826 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
827 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
828 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
829 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
830 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
831 			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
832 		break;
833 	case NIX_AQ_INSTOP_NOP:
834 	case NIX_AQ_INSTOP_READ:
835 	case NIX_AQ_INSTOP_LOCK:
836 	case NIX_AQ_INSTOP_UNLOCK:
837 		break;
838 	default:
839 		rc = NIX_AF_ERR_AQ_ENQUEUE;
840 		spin_unlock(&aq->lock);
841 		return rc;
842 	}
843 
844 	/* Submit the instruction to AQ */
845 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
846 	if (rc) {
847 		spin_unlock(&aq->lock);
848 		return rc;
849 	}
850 
851 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
852 	if (req->op == NIX_AQ_INSTOP_INIT) {
853 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
854 			__set_bit(req->qidx, pfvf->rq_bmap);
855 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
856 			__set_bit(req->qidx, pfvf->sq_bmap);
857 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
858 			__set_bit(req->qidx, pfvf->cq_bmap);
859 	}
860 
861 	if (req->op == NIX_AQ_INSTOP_WRITE) {
862 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
863 			ena = (req->rq.ena & req->rq_mask.ena) |
864 				(test_bit(req->qidx, pfvf->rq_bmap) &
865 				~req->rq_mask.ena);
866 			if (ena)
867 				__set_bit(req->qidx, pfvf->rq_bmap);
868 			else
869 				__clear_bit(req->qidx, pfvf->rq_bmap);
870 		}
871 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
872 			ena = (req->rq.ena & req->sq_mask.ena) |
873 				(test_bit(req->qidx, pfvf->sq_bmap) &
874 				~req->sq_mask.ena);
875 			if (ena)
876 				__set_bit(req->qidx, pfvf->sq_bmap);
877 			else
878 				__clear_bit(req->qidx, pfvf->sq_bmap);
879 		}
880 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
881 			ena = (req->rq.ena & req->cq_mask.ena) |
882 				(test_bit(req->qidx, pfvf->cq_bmap) &
883 				~req->cq_mask.ena);
884 			if (ena)
885 				__set_bit(req->qidx, pfvf->cq_bmap);
886 			else
887 				__clear_bit(req->qidx, pfvf->cq_bmap);
888 		}
889 	}
890 
891 	if (rsp) {
892 		/* Copy read context into mailbox */
893 		if (req->op == NIX_AQ_INSTOP_READ) {
894 			if (req->ctype == NIX_AQ_CTYPE_RQ)
895 				memcpy(&rsp->rq, ctx,
896 				       sizeof(struct nix_rq_ctx_s));
897 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
898 				memcpy(&rsp->sq, ctx,
899 				       sizeof(struct nix_sq_ctx_s));
900 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
901 				memcpy(&rsp->cq, ctx,
902 				       sizeof(struct nix_cq_ctx_s));
903 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
904 				memcpy(&rsp->rss, ctx,
905 				       sizeof(struct nix_rsse_s));
906 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
907 				memcpy(&rsp->mce, ctx,
908 				       sizeof(struct nix_rx_mce_s));
909 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
910 				memcpy(&rsp->prof, ctx,
911 				       sizeof(struct nix_bandprof_s));
912 		}
913 	}
914 
915 	spin_unlock(&aq->lock);
916 	return 0;
917 }
918 
919 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
920 			       struct nix_aq_enq_rsp *rsp)
921 {
922 	struct nix_hw *nix_hw;
923 	int blkaddr;
924 
925 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
926 	if (blkaddr < 0)
927 		return NIX_AF_ERR_AF_LF_INVALID;
928 
929 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
930 	if (!nix_hw)
931 		return -EINVAL;
932 
933 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
934 }
935 
936 static const char *nix_get_ctx_name(int ctype)
937 {
938 	switch (ctype) {
939 	case NIX_AQ_CTYPE_CQ:
940 		return "CQ";
941 	case NIX_AQ_CTYPE_SQ:
942 		return "SQ";
943 	case NIX_AQ_CTYPE_RQ:
944 		return "RQ";
945 	case NIX_AQ_CTYPE_RSS:
946 		return "RSS";
947 	}
948 	return "";
949 }
950 
951 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
952 {
953 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
954 	struct nix_aq_enq_req aq_req;
955 	unsigned long *bmap;
956 	int qidx, q_cnt = 0;
957 	int err = 0, rc;
958 
959 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
960 		return NIX_AF_ERR_AQ_ENQUEUE;
961 
962 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
963 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
964 
965 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
966 		aq_req.cq.ena = 0;
967 		aq_req.cq_mask.ena = 1;
968 		aq_req.cq.bp_ena = 0;
969 		aq_req.cq_mask.bp_ena = 1;
970 		q_cnt = pfvf->cq_ctx->qsize;
971 		bmap = pfvf->cq_bmap;
972 	}
973 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
974 		aq_req.sq.ena = 0;
975 		aq_req.sq_mask.ena = 1;
976 		q_cnt = pfvf->sq_ctx->qsize;
977 		bmap = pfvf->sq_bmap;
978 	}
979 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
980 		aq_req.rq.ena = 0;
981 		aq_req.rq_mask.ena = 1;
982 		q_cnt = pfvf->rq_ctx->qsize;
983 		bmap = pfvf->rq_bmap;
984 	}
985 
986 	aq_req.ctype = req->ctype;
987 	aq_req.op = NIX_AQ_INSTOP_WRITE;
988 
989 	for (qidx = 0; qidx < q_cnt; qidx++) {
990 		if (!test_bit(qidx, bmap))
991 			continue;
992 		aq_req.qidx = qidx;
993 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
994 		if (rc) {
995 			err = rc;
996 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
997 				nix_get_ctx_name(req->ctype), qidx);
998 		}
999 	}
1000 
1001 	return err;
1002 }
1003 
1004 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1005 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1006 {
1007 	struct nix_aq_enq_req lock_ctx_req;
1008 	int err;
1009 
1010 	if (req->op != NIX_AQ_INSTOP_INIT)
1011 		return 0;
1012 
1013 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1014 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1015 		return 0;
1016 
1017 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1018 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1019 	lock_ctx_req.ctype = req->ctype;
1020 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1021 	lock_ctx_req.qidx = req->qidx;
1022 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1023 	if (err)
1024 		dev_err(rvu->dev,
1025 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1026 			req->hdr.pcifunc,
1027 			nix_get_ctx_name(req->ctype), req->qidx);
1028 	return err;
1029 }
1030 
1031 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1032 				struct nix_aq_enq_req *req,
1033 				struct nix_aq_enq_rsp *rsp)
1034 {
1035 	int err;
1036 
1037 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1038 	if (!err)
1039 		err = nix_lf_hwctx_lockdown(rvu, req);
1040 	return err;
1041 }
1042 #else
1043 
1044 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1045 				struct nix_aq_enq_req *req,
1046 				struct nix_aq_enq_rsp *rsp)
1047 {
1048 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1049 }
1050 #endif
1051 /* CN10K mbox handler */
1052 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1053 				      struct nix_cn10k_aq_enq_req *req,
1054 				      struct nix_cn10k_aq_enq_rsp *rsp)
1055 {
1056 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1057 				  (struct nix_aq_enq_rsp *)rsp);
1058 }
1059 
1060 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1061 				       struct hwctx_disable_req *req,
1062 				       struct msg_rsp *rsp)
1063 {
1064 	return nix_lf_hwctx_disable(rvu, req);
1065 }
1066 
1067 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1068 				  struct nix_lf_alloc_req *req,
1069 				  struct nix_lf_alloc_rsp *rsp)
1070 {
1071 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1072 	struct rvu_hwinfo *hw = rvu->hw;
1073 	u16 pcifunc = req->hdr.pcifunc;
1074 	struct rvu_block *block;
1075 	struct rvu_pfvf *pfvf;
1076 	u64 cfg, ctx_cfg;
1077 	int blkaddr;
1078 
1079 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1080 		return NIX_AF_ERR_PARAM;
1081 
1082 	if (req->way_mask)
1083 		req->way_mask &= 0xFFFF;
1084 
1085 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1086 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1087 	if (!pfvf->nixlf || blkaddr < 0)
1088 		return NIX_AF_ERR_AF_LF_INVALID;
1089 
1090 	block = &hw->block[blkaddr];
1091 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1092 	if (nixlf < 0)
1093 		return NIX_AF_ERR_AF_LF_INVALID;
1094 
1095 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1096 	if (req->npa_func) {
1097 		/* If default, use 'this' NIXLF's PFFUNC */
1098 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1099 			req->npa_func = pcifunc;
1100 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1101 			return NIX_AF_INVAL_NPA_PF_FUNC;
1102 	}
1103 
1104 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1105 	if (req->sso_func) {
1106 		/* If default, use 'this' NIXLF's PFFUNC */
1107 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1108 			req->sso_func = pcifunc;
1109 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1110 			return NIX_AF_INVAL_SSO_PF_FUNC;
1111 	}
1112 
1113 	/* If RSS is being enabled, check if requested config is valid.
1114 	 * RSS table size should be power of two, otherwise
1115 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1116 	 * won't be able to use entire table.
1117 	 */
1118 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1119 			    !is_power_of_2(req->rss_sz)))
1120 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1121 
1122 	if (req->rss_sz &&
1123 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1124 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1125 
1126 	/* Reset this NIX LF */
1127 	err = rvu_lf_reset(rvu, block, nixlf);
1128 	if (err) {
1129 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1130 			block->addr - BLKADDR_NIX0, nixlf);
1131 		return NIX_AF_ERR_LF_RESET;
1132 	}
1133 
1134 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1135 
1136 	/* Alloc NIX RQ HW context memory and config the base */
1137 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1138 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1139 	if (err)
1140 		goto free_mem;
1141 
1142 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1143 	if (!pfvf->rq_bmap)
1144 		goto free_mem;
1145 
1146 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1147 		    (u64)pfvf->rq_ctx->iova);
1148 
1149 	/* Set caching and queue count in HW */
1150 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1151 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1152 
1153 	/* Alloc NIX SQ HW context memory and config the base */
1154 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1155 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1156 	if (err)
1157 		goto free_mem;
1158 
1159 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1160 	if (!pfvf->sq_bmap)
1161 		goto free_mem;
1162 
1163 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1164 		    (u64)pfvf->sq_ctx->iova);
1165 
1166 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1167 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1168 
1169 	/* Alloc NIX CQ HW context memory and config the base */
1170 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1171 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1172 	if (err)
1173 		goto free_mem;
1174 
1175 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1176 	if (!pfvf->cq_bmap)
1177 		goto free_mem;
1178 
1179 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1180 		    (u64)pfvf->cq_ctx->iova);
1181 
1182 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1183 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1184 
1185 	/* Initialize receive side scaling (RSS) */
1186 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1187 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1188 				 req->rss_grps, hwctx_size, req->way_mask);
1189 	if (err)
1190 		goto free_mem;
1191 
1192 	/* Alloc memory for CQINT's HW contexts */
1193 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1194 	qints = (cfg >> 24) & 0xFFF;
1195 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1196 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1197 	if (err)
1198 		goto free_mem;
1199 
1200 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1201 		    (u64)pfvf->cq_ints_ctx->iova);
1202 
1203 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1204 		    BIT_ULL(36) | req->way_mask << 20);
1205 
1206 	/* Alloc memory for QINT's HW contexts */
1207 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1208 	qints = (cfg >> 12) & 0xFFF;
1209 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1210 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1211 	if (err)
1212 		goto free_mem;
1213 
1214 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1215 		    (u64)pfvf->nix_qints_ctx->iova);
1216 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1217 		    BIT_ULL(36) | req->way_mask << 20);
1218 
1219 	/* Setup VLANX TPID's.
1220 	 * Use VLAN1 for 802.1Q
1221 	 * and VLAN0 for 802.1AD.
1222 	 */
1223 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1224 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1225 
1226 	/* Enable LMTST for this NIX LF */
1227 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1228 
1229 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1230 	if (req->npa_func)
1231 		cfg = req->npa_func;
1232 	if (req->sso_func)
1233 		cfg |= (u64)req->sso_func << 16;
1234 
1235 	cfg |= (u64)req->xqe_sz << 33;
1236 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1237 
1238 	/* Config Rx pkt length, csum checks and apad  enable / disable */
1239 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1240 
1241 	/* Configure pkind for TX parse config */
1242 	cfg = NPC_TX_DEF_PKIND;
1243 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1244 
1245 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1246 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1247 	if (err)
1248 		goto free_mem;
1249 
1250 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1251 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1252 
1253 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1254 	rvu_write64(rvu, blkaddr,
1255 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1256 		    VTAGSIZE_T4 | VTAG_STRIP);
1257 
1258 	goto exit;
1259 
1260 free_mem:
1261 	nix_ctx_free(rvu, pfvf);
1262 	rc = -ENOMEM;
1263 
1264 exit:
1265 	/* Set macaddr of this PF/VF */
1266 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1267 
1268 	/* set SQB size info */
1269 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1270 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1271 	rsp->rx_chan_base = pfvf->rx_chan_base;
1272 	rsp->tx_chan_base = pfvf->tx_chan_base;
1273 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1274 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1275 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1276 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1277 	/* Get HW supported stat count */
1278 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1279 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1280 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1281 	/* Get count of CQ IRQs and error IRQs supported per LF */
1282 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1283 	rsp->qints = ((cfg >> 12) & 0xFFF);
1284 	rsp->cints = ((cfg >> 24) & 0xFFF);
1285 	rsp->cgx_links = hw->cgx_links;
1286 	rsp->lbk_links = hw->lbk_links;
1287 	rsp->sdp_links = hw->sdp_links;
1288 
1289 	return rc;
1290 }
1291 
1292 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1293 				 struct msg_rsp *rsp)
1294 {
1295 	struct rvu_hwinfo *hw = rvu->hw;
1296 	u16 pcifunc = req->hdr.pcifunc;
1297 	struct rvu_block *block;
1298 	int blkaddr, nixlf, err;
1299 	struct rvu_pfvf *pfvf;
1300 
1301 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1302 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1303 	if (!pfvf->nixlf || blkaddr < 0)
1304 		return NIX_AF_ERR_AF_LF_INVALID;
1305 
1306 	block = &hw->block[blkaddr];
1307 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1308 	if (nixlf < 0)
1309 		return NIX_AF_ERR_AF_LF_INVALID;
1310 
1311 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1312 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1313 	else
1314 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1315 
1316 	/* Free any tx vtag def entries used by this NIX LF */
1317 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1318 		nix_free_tx_vtag_entries(rvu, pcifunc);
1319 
1320 	nix_interface_deinit(rvu, pcifunc, nixlf);
1321 
1322 	/* Reset this NIX LF */
1323 	err = rvu_lf_reset(rvu, block, nixlf);
1324 	if (err) {
1325 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1326 			block->addr - BLKADDR_NIX0, nixlf);
1327 		return NIX_AF_ERR_LF_RESET;
1328 	}
1329 
1330 	nix_ctx_free(rvu, pfvf);
1331 
1332 	return 0;
1333 }
1334 
1335 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1336 					 struct nix_mark_format_cfg  *req,
1337 					 struct nix_mark_format_cfg_rsp *rsp)
1338 {
1339 	u16 pcifunc = req->hdr.pcifunc;
1340 	struct nix_hw *nix_hw;
1341 	struct rvu_pfvf *pfvf;
1342 	int blkaddr, rc;
1343 	u32 cfg;
1344 
1345 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1346 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1347 	if (!pfvf->nixlf || blkaddr < 0)
1348 		return NIX_AF_ERR_AF_LF_INVALID;
1349 
1350 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1351 	if (!nix_hw)
1352 		return -EINVAL;
1353 
1354 	cfg = (((u32)req->offset & 0x7) << 16) |
1355 	      (((u32)req->y_mask & 0xF) << 12) |
1356 	      (((u32)req->y_val & 0xF) << 8) |
1357 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1358 
1359 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1360 	if (rc < 0) {
1361 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
1362 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1363 		return NIX_AF_ERR_MARK_CFG_FAIL;
1364 	}
1365 
1366 	rsp->mark_format_idx = rc;
1367 	return 0;
1368 }
1369 
1370 /* Disable shaping of pkts by a scheduler queue
1371  * at a given scheduler level.
1372  */
1373 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1374 				 int lvl, int schq)
1375 {
1376 	u64  cir_reg = 0, pir_reg = 0;
1377 	u64  cfg;
1378 
1379 	switch (lvl) {
1380 	case NIX_TXSCH_LVL_TL1:
1381 		cir_reg = NIX_AF_TL1X_CIR(schq);
1382 		pir_reg = 0; /* PIR not available at TL1 */
1383 		break;
1384 	case NIX_TXSCH_LVL_TL2:
1385 		cir_reg = NIX_AF_TL2X_CIR(schq);
1386 		pir_reg = NIX_AF_TL2X_PIR(schq);
1387 		break;
1388 	case NIX_TXSCH_LVL_TL3:
1389 		cir_reg = NIX_AF_TL3X_CIR(schq);
1390 		pir_reg = NIX_AF_TL3X_PIR(schq);
1391 		break;
1392 	case NIX_TXSCH_LVL_TL4:
1393 		cir_reg = NIX_AF_TL4X_CIR(schq);
1394 		pir_reg = NIX_AF_TL4X_PIR(schq);
1395 		break;
1396 	}
1397 
1398 	if (!cir_reg)
1399 		return;
1400 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1401 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1402 
1403 	if (!pir_reg)
1404 		return;
1405 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1406 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1407 }
1408 
1409 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1410 				 int lvl, int schq)
1411 {
1412 	struct rvu_hwinfo *hw = rvu->hw;
1413 	int link;
1414 
1415 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1416 		return;
1417 
1418 	/* Reset TL4's SDP link config */
1419 	if (lvl == NIX_TXSCH_LVL_TL4)
1420 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1421 
1422 	if (lvl != NIX_TXSCH_LVL_TL2)
1423 		return;
1424 
1425 	/* Reset TL2's CGX or LBK link config */
1426 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1427 		rvu_write64(rvu, blkaddr,
1428 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1429 }
1430 
1431 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1432 {
1433 	struct rvu_hwinfo *hw = rvu->hw;
1434 	int pf = rvu_get_pf(pcifunc);
1435 	u8 cgx_id = 0, lmac_id = 0;
1436 
1437 	if (is_afvf(pcifunc)) {/* LBK links */
1438 		return hw->cgx_links;
1439 	} else if (is_pf_cgxmapped(rvu, pf)) {
1440 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1441 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1442 	}
1443 
1444 	/* SDP link */
1445 	return hw->cgx_links + hw->lbk_links;
1446 }
1447 
1448 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1449 				 int link, int *start, int *end)
1450 {
1451 	struct rvu_hwinfo *hw = rvu->hw;
1452 	int pf = rvu_get_pf(pcifunc);
1453 
1454 	if (is_afvf(pcifunc)) { /* LBK links */
1455 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1456 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1457 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1458 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1459 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1460 	} else { /* SDP link */
1461 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1462 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1463 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1464 	}
1465 }
1466 
1467 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1468 				      struct nix_hw *nix_hw,
1469 				      struct nix_txsch_alloc_req *req)
1470 {
1471 	struct rvu_hwinfo *hw = rvu->hw;
1472 	int schq, req_schq, free_cnt;
1473 	struct nix_txsch *txsch;
1474 	int link, start, end;
1475 
1476 	txsch = &nix_hw->txsch[lvl];
1477 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1478 
1479 	if (!req_schq)
1480 		return 0;
1481 
1482 	link = nix_get_tx_link(rvu, pcifunc);
1483 
1484 	/* For traffic aggregating scheduler level, one queue is enough */
1485 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1486 		if (req_schq != 1)
1487 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1488 		return 0;
1489 	}
1490 
1491 	/* Get free SCHQ count and check if request can be accomodated */
1492 	if (hw->cap.nix_fixed_txschq_mapping) {
1493 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1494 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1495 		if (end <= txsch->schq.max && schq < end &&
1496 		    !test_bit(schq, txsch->schq.bmap))
1497 			free_cnt = 1;
1498 		else
1499 			free_cnt = 0;
1500 	} else {
1501 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1502 	}
1503 
1504 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1505 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1506 
1507 	/* If contiguous queues are needed, check for availability */
1508 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1509 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1510 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1511 
1512 	return 0;
1513 }
1514 
1515 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1516 			    struct nix_txsch_alloc_rsp *rsp,
1517 			    int lvl, int start, int end)
1518 {
1519 	struct rvu_hwinfo *hw = rvu->hw;
1520 	u16 pcifunc = rsp->hdr.pcifunc;
1521 	int idx, schq;
1522 
1523 	/* For traffic aggregating levels, queue alloc is based
1524 	 * on transmit link to which PF_FUNC is mapped to.
1525 	 */
1526 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1527 		/* A single TL queue is allocated */
1528 		if (rsp->schq_contig[lvl]) {
1529 			rsp->schq_contig[lvl] = 1;
1530 			rsp->schq_contig_list[lvl][0] = start;
1531 		}
1532 
1533 		/* Both contig and non-contig reqs doesn't make sense here */
1534 		if (rsp->schq_contig[lvl])
1535 			rsp->schq[lvl] = 0;
1536 
1537 		if (rsp->schq[lvl]) {
1538 			rsp->schq[lvl] = 1;
1539 			rsp->schq_list[lvl][0] = start;
1540 		}
1541 		return;
1542 	}
1543 
1544 	/* Adjust the queue request count if HW supports
1545 	 * only one queue per level configuration.
1546 	 */
1547 	if (hw->cap.nix_fixed_txschq_mapping) {
1548 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1549 		schq = start + idx;
1550 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1551 			rsp->schq_contig[lvl] = 0;
1552 			rsp->schq[lvl] = 0;
1553 			return;
1554 		}
1555 
1556 		if (rsp->schq_contig[lvl]) {
1557 			rsp->schq_contig[lvl] = 1;
1558 			set_bit(schq, txsch->schq.bmap);
1559 			rsp->schq_contig_list[lvl][0] = schq;
1560 			rsp->schq[lvl] = 0;
1561 		} else if (rsp->schq[lvl]) {
1562 			rsp->schq[lvl] = 1;
1563 			set_bit(schq, txsch->schq.bmap);
1564 			rsp->schq_list[lvl][0] = schq;
1565 		}
1566 		return;
1567 	}
1568 
1569 	/* Allocate contiguous queue indices requesty first */
1570 	if (rsp->schq_contig[lvl]) {
1571 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1572 						  txsch->schq.max, start,
1573 						  rsp->schq_contig[lvl], 0);
1574 		if (schq >= end)
1575 			rsp->schq_contig[lvl] = 0;
1576 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1577 			set_bit(schq, txsch->schq.bmap);
1578 			rsp->schq_contig_list[lvl][idx] = schq;
1579 			schq++;
1580 		}
1581 	}
1582 
1583 	/* Allocate non-contiguous queue indices */
1584 	if (rsp->schq[lvl]) {
1585 		idx = 0;
1586 		for (schq = start; schq < end; schq++) {
1587 			if (!test_bit(schq, txsch->schq.bmap)) {
1588 				set_bit(schq, txsch->schq.bmap);
1589 				rsp->schq_list[lvl][idx++] = schq;
1590 			}
1591 			if (idx == rsp->schq[lvl])
1592 				break;
1593 		}
1594 		/* Update how many were allocated */
1595 		rsp->schq[lvl] = idx;
1596 	}
1597 }
1598 
1599 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1600 				     struct nix_txsch_alloc_req *req,
1601 				     struct nix_txsch_alloc_rsp *rsp)
1602 {
1603 	struct rvu_hwinfo *hw = rvu->hw;
1604 	u16 pcifunc = req->hdr.pcifunc;
1605 	int link, blkaddr, rc = 0;
1606 	int lvl, idx, start, end;
1607 	struct nix_txsch *txsch;
1608 	struct rvu_pfvf *pfvf;
1609 	struct nix_hw *nix_hw;
1610 	u32 *pfvf_map;
1611 	u16 schq;
1612 
1613 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1614 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1615 	if (!pfvf->nixlf || blkaddr < 0)
1616 		return NIX_AF_ERR_AF_LF_INVALID;
1617 
1618 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1619 	if (!nix_hw)
1620 		return -EINVAL;
1621 
1622 	mutex_lock(&rvu->rsrc_lock);
1623 
1624 	/* Check if request is valid as per HW capabilities
1625 	 * and can be accomodated.
1626 	 */
1627 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1628 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1629 		if (rc)
1630 			goto err;
1631 	}
1632 
1633 	/* Allocate requested Tx scheduler queues */
1634 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1635 		txsch = &nix_hw->txsch[lvl];
1636 		pfvf_map = txsch->pfvf_map;
1637 
1638 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1639 			continue;
1640 
1641 		rsp->schq[lvl] = req->schq[lvl];
1642 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1643 
1644 		link = nix_get_tx_link(rvu, pcifunc);
1645 
1646 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1647 			start = link;
1648 			end = link;
1649 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1650 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1651 		} else {
1652 			start = 0;
1653 			end = txsch->schq.max;
1654 		}
1655 
1656 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1657 
1658 		/* Reset queue config */
1659 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1660 			schq = rsp->schq_contig_list[lvl][idx];
1661 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1662 			    NIX_TXSCHQ_CFG_DONE))
1663 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1664 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1665 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1666 		}
1667 
1668 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1669 			schq = rsp->schq_list[lvl][idx];
1670 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1671 			    NIX_TXSCHQ_CFG_DONE))
1672 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1673 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1674 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1675 		}
1676 	}
1677 
1678 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1679 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1680 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1681 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1682 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1683 	goto exit;
1684 err:
1685 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1686 exit:
1687 	mutex_unlock(&rvu->rsrc_lock);
1688 	return rc;
1689 }
1690 
1691 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1692 			  int smq, u16 pcifunc, int nixlf)
1693 {
1694 	int pf = rvu_get_pf(pcifunc);
1695 	u8 cgx_id = 0, lmac_id = 0;
1696 	int err, restore_tx_en = 0;
1697 	u64 cfg;
1698 
1699 	/* enable cgx tx if disabled */
1700 	if (is_pf_cgxmapped(rvu, pf)) {
1701 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1702 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1703 						    lmac_id, true);
1704 	}
1705 
1706 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1707 	/* Do SMQ flush and set enqueue xoff */
1708 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1709 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1710 
1711 	/* Disable backpressure from physical link,
1712 	 * otherwise SMQ flush may stall.
1713 	 */
1714 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1715 
1716 	/* Wait for flush to complete */
1717 	err = rvu_poll_reg(rvu, blkaddr,
1718 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1719 	if (err)
1720 		dev_err(rvu->dev,
1721 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1722 
1723 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1724 	/* restore cgx tx state */
1725 	if (restore_tx_en)
1726 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1727 }
1728 
1729 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1730 {
1731 	int blkaddr, nixlf, lvl, schq, err;
1732 	struct rvu_hwinfo *hw = rvu->hw;
1733 	struct nix_txsch *txsch;
1734 	struct nix_hw *nix_hw;
1735 
1736 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1737 	if (blkaddr < 0)
1738 		return NIX_AF_ERR_AF_LF_INVALID;
1739 
1740 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1741 	if (!nix_hw)
1742 		return -EINVAL;
1743 
1744 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1745 	if (nixlf < 0)
1746 		return NIX_AF_ERR_AF_LF_INVALID;
1747 
1748 	/* Disable TL2/3 queue links before SMQ flush*/
1749 	mutex_lock(&rvu->rsrc_lock);
1750 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1751 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1752 			continue;
1753 
1754 		txsch = &nix_hw->txsch[lvl];
1755 		for (schq = 0; schq < txsch->schq.max; schq++) {
1756 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1757 				continue;
1758 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1759 		}
1760 	}
1761 
1762 	/* Flush SMQs */
1763 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1764 	for (schq = 0; schq < txsch->schq.max; schq++) {
1765 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1766 			continue;
1767 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1768 	}
1769 
1770 	/* Now free scheduler queues to free pool */
1771 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1772 		 /* TLs above aggregation level are shared across all PF
1773 		  * and it's VFs, hence skip freeing them.
1774 		  */
1775 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1776 			continue;
1777 
1778 		txsch = &nix_hw->txsch[lvl];
1779 		for (schq = 0; schq < txsch->schq.max; schq++) {
1780 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1781 				continue;
1782 			rvu_free_rsrc(&txsch->schq, schq);
1783 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1784 		}
1785 	}
1786 	mutex_unlock(&rvu->rsrc_lock);
1787 
1788 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1789 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1790 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1791 	if (err)
1792 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1793 
1794 	return 0;
1795 }
1796 
1797 static int nix_txschq_free_one(struct rvu *rvu,
1798 			       struct nix_txsch_free_req *req)
1799 {
1800 	struct rvu_hwinfo *hw = rvu->hw;
1801 	u16 pcifunc = req->hdr.pcifunc;
1802 	int lvl, schq, nixlf, blkaddr;
1803 	struct nix_txsch *txsch;
1804 	struct nix_hw *nix_hw;
1805 	u32 *pfvf_map;
1806 
1807 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1808 	if (blkaddr < 0)
1809 		return NIX_AF_ERR_AF_LF_INVALID;
1810 
1811 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1812 	if (!nix_hw)
1813 		return -EINVAL;
1814 
1815 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1816 	if (nixlf < 0)
1817 		return NIX_AF_ERR_AF_LF_INVALID;
1818 
1819 	lvl = req->schq_lvl;
1820 	schq = req->schq;
1821 	txsch = &nix_hw->txsch[lvl];
1822 
1823 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1824 		return 0;
1825 
1826 	pfvf_map = txsch->pfvf_map;
1827 	mutex_lock(&rvu->rsrc_lock);
1828 
1829 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1830 		mutex_unlock(&rvu->rsrc_lock);
1831 		goto err;
1832 	}
1833 
1834 	/* Flush if it is a SMQ. Onus of disabling
1835 	 * TL2/3 queue links before SMQ flush is on user
1836 	 */
1837 	if (lvl == NIX_TXSCH_LVL_SMQ)
1838 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1839 
1840 	/* Free the resource */
1841 	rvu_free_rsrc(&txsch->schq, schq);
1842 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1843 	mutex_unlock(&rvu->rsrc_lock);
1844 	return 0;
1845 err:
1846 	return NIX_AF_ERR_TLX_INVALID;
1847 }
1848 
1849 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1850 				    struct nix_txsch_free_req *req,
1851 				    struct msg_rsp *rsp)
1852 {
1853 	if (req->flags & TXSCHQ_FREE_ALL)
1854 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1855 	else
1856 		return nix_txschq_free_one(rvu, req);
1857 }
1858 
1859 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1860 				      int lvl, u64 reg, u64 regval)
1861 {
1862 	u64 regbase = reg & 0xFFFF;
1863 	u16 schq, parent;
1864 
1865 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1866 		return false;
1867 
1868 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1869 	/* Check if this schq belongs to this PF/VF or not */
1870 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1871 		return false;
1872 
1873 	parent = (regval >> 16) & 0x1FF;
1874 	/* Validate MDQ's TL4 parent */
1875 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1876 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1877 		return false;
1878 
1879 	/* Validate TL4's TL3 parent */
1880 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1881 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1882 		return false;
1883 
1884 	/* Validate TL3's TL2 parent */
1885 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1886 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1887 		return false;
1888 
1889 	/* Validate TL2's TL1 parent */
1890 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1891 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1892 		return false;
1893 
1894 	return true;
1895 }
1896 
1897 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1898 {
1899 	u64 regbase;
1900 
1901 	if (hw->cap.nix_shaping)
1902 		return true;
1903 
1904 	/* If shaping and coloring is not supported, then
1905 	 * *_CIR and *_PIR registers should not be configured.
1906 	 */
1907 	regbase = reg & 0xFFFF;
1908 
1909 	switch (lvl) {
1910 	case NIX_TXSCH_LVL_TL1:
1911 		if (regbase == NIX_AF_TL1X_CIR(0))
1912 			return false;
1913 		break;
1914 	case NIX_TXSCH_LVL_TL2:
1915 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1916 		    regbase == NIX_AF_TL2X_PIR(0))
1917 			return false;
1918 		break;
1919 	case NIX_TXSCH_LVL_TL3:
1920 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1921 		    regbase == NIX_AF_TL3X_PIR(0))
1922 			return false;
1923 		break;
1924 	case NIX_TXSCH_LVL_TL4:
1925 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1926 		    regbase == NIX_AF_TL4X_PIR(0))
1927 			return false;
1928 		break;
1929 	}
1930 	return true;
1931 }
1932 
1933 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1934 				u16 pcifunc, int blkaddr)
1935 {
1936 	u32 *pfvf_map;
1937 	int schq;
1938 
1939 	schq = nix_get_tx_link(rvu, pcifunc);
1940 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1941 	/* Skip if PF has already done the config */
1942 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1943 		return;
1944 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1945 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1946 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1947 		    TXSCH_TL1_DFLT_RR_QTM);
1948 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1949 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1950 }
1951 
1952 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1953 				    struct nix_txschq_config *req,
1954 				    struct msg_rsp *rsp)
1955 {
1956 	struct rvu_hwinfo *hw = rvu->hw;
1957 	u16 pcifunc = req->hdr.pcifunc;
1958 	u64 reg, regval, schq_regbase;
1959 	struct nix_txsch *txsch;
1960 	struct nix_hw *nix_hw;
1961 	int blkaddr, idx, err;
1962 	int nixlf, schq;
1963 	u32 *pfvf_map;
1964 
1965 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1966 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1967 		return NIX_AF_INVAL_TXSCHQ_CFG;
1968 
1969 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
1970 	if (err)
1971 		return err;
1972 
1973 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1974 	if (!nix_hw)
1975 		return -EINVAL;
1976 
1977 	txsch = &nix_hw->txsch[req->lvl];
1978 	pfvf_map = txsch->pfvf_map;
1979 
1980 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
1981 	    pcifunc & RVU_PFVF_FUNC_MASK) {
1982 		mutex_lock(&rvu->rsrc_lock);
1983 		if (req->lvl == NIX_TXSCH_LVL_TL1)
1984 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
1985 		mutex_unlock(&rvu->rsrc_lock);
1986 		return 0;
1987 	}
1988 
1989 	for (idx = 0; idx < req->num_regs; idx++) {
1990 		reg = req->reg[idx];
1991 		regval = req->regval[idx];
1992 		schq_regbase = reg & 0xFFFF;
1993 
1994 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
1995 					       txsch->lvl, reg, regval))
1996 			return NIX_AF_INVAL_TXSCHQ_CFG;
1997 
1998 		/* Check if shaping and coloring is supported */
1999 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2000 			continue;
2001 
2002 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2003 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2004 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2005 					   pcifunc, 0);
2006 			regval &= ~(0x7FULL << 24);
2007 			regval |= ((u64)nixlf << 24);
2008 		}
2009 
2010 		/* Clear 'BP_ENA' config, if it's not allowed */
2011 		if (!hw->cap.nix_tx_link_bp) {
2012 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2013 			    (schq_regbase & 0xFF00) ==
2014 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2015 				regval &= ~BIT_ULL(13);
2016 		}
2017 
2018 		/* Mark config as done for TL1 by PF */
2019 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2020 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2021 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2022 			mutex_lock(&rvu->rsrc_lock);
2023 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2024 							NIX_TXSCHQ_CFG_DONE);
2025 			mutex_unlock(&rvu->rsrc_lock);
2026 		}
2027 
2028 		/* SMQ flush is special hence split register writes such
2029 		 * that flush first and write rest of the bits later.
2030 		 */
2031 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2032 		    (regval & BIT_ULL(49))) {
2033 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2034 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2035 			regval &= ~BIT_ULL(49);
2036 		}
2037 		rvu_write64(rvu, blkaddr, reg, regval);
2038 	}
2039 
2040 	return 0;
2041 }
2042 
2043 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2044 			   struct nix_vtag_config *req)
2045 {
2046 	u64 regval = req->vtag_size;
2047 
2048 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2049 	    req->vtag_size > VTAGSIZE_T8)
2050 		return -EINVAL;
2051 
2052 	/* RX VTAG Type 7 reserved for vf vlan */
2053 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2054 		return NIX_AF_ERR_RX_VTAG_INUSE;
2055 
2056 	if (req->rx.capture_vtag)
2057 		regval |= BIT_ULL(5);
2058 	if (req->rx.strip_vtag)
2059 		regval |= BIT_ULL(4);
2060 
2061 	rvu_write64(rvu, blkaddr,
2062 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2063 	return 0;
2064 }
2065 
2066 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2067 			    u16 pcifunc, int index)
2068 {
2069 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2070 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2071 
2072 	if (vlan->entry2pfvf_map[index] != pcifunc)
2073 		return NIX_AF_ERR_PARAM;
2074 
2075 	rvu_write64(rvu, blkaddr,
2076 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2077 	rvu_write64(rvu, blkaddr,
2078 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2079 
2080 	vlan->entry2pfvf_map[index] = 0;
2081 	rvu_free_rsrc(&vlan->rsrc, index);
2082 
2083 	return 0;
2084 }
2085 
2086 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2087 {
2088 	struct nix_txvlan *vlan;
2089 	struct nix_hw *nix_hw;
2090 	int index, blkaddr;
2091 
2092 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2093 	if (blkaddr < 0)
2094 		return;
2095 
2096 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2097 	vlan = &nix_hw->txvlan;
2098 
2099 	mutex_lock(&vlan->rsrc_lock);
2100 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2101 	for (index = 0; index < vlan->rsrc.max; index++) {
2102 		if (vlan->entry2pfvf_map[index] == pcifunc)
2103 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2104 	}
2105 	mutex_unlock(&vlan->rsrc_lock);
2106 }
2107 
2108 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2109 			     u64 vtag, u8 size)
2110 {
2111 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2112 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2113 	u64 regval;
2114 	int index;
2115 
2116 	mutex_lock(&vlan->rsrc_lock);
2117 
2118 	index = rvu_alloc_rsrc(&vlan->rsrc);
2119 	if (index < 0) {
2120 		mutex_unlock(&vlan->rsrc_lock);
2121 		return index;
2122 	}
2123 
2124 	mutex_unlock(&vlan->rsrc_lock);
2125 
2126 	regval = size ? vtag : vtag << 32;
2127 
2128 	rvu_write64(rvu, blkaddr,
2129 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2130 	rvu_write64(rvu, blkaddr,
2131 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2132 
2133 	return index;
2134 }
2135 
2136 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2137 			     struct nix_vtag_config *req)
2138 {
2139 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2140 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2141 	u16 pcifunc = req->hdr.pcifunc;
2142 	int idx0 = req->tx.vtag0_idx;
2143 	int idx1 = req->tx.vtag1_idx;
2144 	int err = 0;
2145 
2146 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2147 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2148 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2149 			return NIX_AF_ERR_PARAM;
2150 
2151 	mutex_lock(&vlan->rsrc_lock);
2152 
2153 	if (req->tx.free_vtag0) {
2154 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2155 		if (err)
2156 			goto exit;
2157 	}
2158 
2159 	if (req->tx.free_vtag1)
2160 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2161 
2162 exit:
2163 	mutex_unlock(&vlan->rsrc_lock);
2164 	return err;
2165 }
2166 
2167 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2168 			   struct nix_vtag_config *req,
2169 			   struct nix_vtag_config_rsp *rsp)
2170 {
2171 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2172 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2173 	u16 pcifunc = req->hdr.pcifunc;
2174 
2175 	if (req->tx.cfg_vtag0) {
2176 		rsp->vtag0_idx =
2177 			nix_tx_vtag_alloc(rvu, blkaddr,
2178 					  req->tx.vtag0, req->vtag_size);
2179 
2180 		if (rsp->vtag0_idx < 0)
2181 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2182 
2183 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2184 	}
2185 
2186 	if (req->tx.cfg_vtag1) {
2187 		rsp->vtag1_idx =
2188 			nix_tx_vtag_alloc(rvu, blkaddr,
2189 					  req->tx.vtag1, req->vtag_size);
2190 
2191 		if (rsp->vtag1_idx < 0)
2192 			goto err_free;
2193 
2194 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2195 	}
2196 
2197 	return 0;
2198 
2199 err_free:
2200 	if (req->tx.cfg_vtag0)
2201 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2202 
2203 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2204 }
2205 
2206 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2207 				  struct nix_vtag_config *req,
2208 				  struct nix_vtag_config_rsp *rsp)
2209 {
2210 	u16 pcifunc = req->hdr.pcifunc;
2211 	int blkaddr, nixlf, err;
2212 
2213 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2214 	if (err)
2215 		return err;
2216 
2217 	if (req->cfg_type) {
2218 		/* rx vtag configuration */
2219 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2220 		if (err)
2221 			return NIX_AF_ERR_PARAM;
2222 	} else {
2223 		/* tx vtag configuration */
2224 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2225 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2226 			return NIX_AF_ERR_PARAM;
2227 
2228 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2229 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2230 
2231 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2232 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2233 	}
2234 
2235 	return 0;
2236 }
2237 
2238 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2239 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2240 {
2241 	struct nix_aq_enq_req aq_req;
2242 	int err;
2243 
2244 	aq_req.hdr.pcifunc = 0;
2245 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2246 	aq_req.op = op;
2247 	aq_req.qidx = mce;
2248 
2249 	/* Use RSS with RSS index 0 */
2250 	aq_req.mce.op = 1;
2251 	aq_req.mce.index = 0;
2252 	aq_req.mce.eol = eol;
2253 	aq_req.mce.pf_func = pcifunc;
2254 	aq_req.mce.next = next;
2255 
2256 	/* All fields valid */
2257 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2258 
2259 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2260 	if (err) {
2261 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2262 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2263 		return err;
2264 	}
2265 	return 0;
2266 }
2267 
2268 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2269 				     u16 pcifunc, bool add)
2270 {
2271 	struct mce *mce, *tail = NULL;
2272 	bool delete = false;
2273 
2274 	/* Scan through the current list */
2275 	hlist_for_each_entry(mce, &mce_list->head, node) {
2276 		/* If already exists, then delete */
2277 		if (mce->pcifunc == pcifunc && !add) {
2278 			delete = true;
2279 			break;
2280 		} else if (mce->pcifunc == pcifunc && add) {
2281 			/* entry already exists */
2282 			return 0;
2283 		}
2284 		tail = mce;
2285 	}
2286 
2287 	if (delete) {
2288 		hlist_del(&mce->node);
2289 		kfree(mce);
2290 		mce_list->count--;
2291 		return 0;
2292 	}
2293 
2294 	if (!add)
2295 		return 0;
2296 
2297 	/* Add a new one to the list, at the tail */
2298 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2299 	if (!mce)
2300 		return -ENOMEM;
2301 	mce->pcifunc = pcifunc;
2302 	if (!tail)
2303 		hlist_add_head(&mce->node, &mce_list->head);
2304 	else
2305 		hlist_add_behind(&mce->node, &tail->node);
2306 	mce_list->count++;
2307 	return 0;
2308 }
2309 
2310 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2311 			struct nix_mce_list *mce_list,
2312 			int mce_idx, int mcam_index, bool add)
2313 {
2314 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2315 	struct npc_mcam *mcam = &rvu->hw->mcam;
2316 	struct nix_mcast *mcast;
2317 	struct nix_hw *nix_hw;
2318 	struct mce *mce;
2319 
2320 	if (!mce_list)
2321 		return -EINVAL;
2322 
2323 	/* Get this PF/VF func's MCE index */
2324 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2325 
2326 	if (idx > (mce_idx + mce_list->max)) {
2327 		dev_err(rvu->dev,
2328 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2329 			__func__, idx, mce_list->max,
2330 			pcifunc >> RVU_PFVF_PF_SHIFT);
2331 		return -EINVAL;
2332 	}
2333 
2334 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2335 	if (err)
2336 		return err;
2337 
2338 	mcast = &nix_hw->mcast;
2339 	mutex_lock(&mcast->mce_lock);
2340 
2341 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2342 	if (err)
2343 		goto end;
2344 
2345 	/* Disable MCAM entry in NPC */
2346 	if (!mce_list->count) {
2347 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2348 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2349 		goto end;
2350 	}
2351 
2352 	/* Dump the updated list to HW */
2353 	idx = mce_idx;
2354 	last_idx = idx + mce_list->count - 1;
2355 	hlist_for_each_entry(mce, &mce_list->head, node) {
2356 		if (idx > last_idx)
2357 			break;
2358 
2359 		next_idx = idx + 1;
2360 		/* EOL should be set in last MCE */
2361 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2362 					mce->pcifunc, next_idx,
2363 					(next_idx > last_idx) ? true : false);
2364 		if (err)
2365 			goto end;
2366 		idx++;
2367 	}
2368 
2369 end:
2370 	mutex_unlock(&mcast->mce_lock);
2371 	return err;
2372 }
2373 
2374 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2375 		      struct nix_mce_list **mce_list, int *mce_idx)
2376 {
2377 	struct rvu_hwinfo *hw = rvu->hw;
2378 	struct rvu_pfvf *pfvf;
2379 
2380 	if (!hw->cap.nix_rx_multicast ||
2381 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2382 		*mce_list = NULL;
2383 		*mce_idx = 0;
2384 		return;
2385 	}
2386 
2387 	/* Get this PF/VF func's MCE index */
2388 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2389 
2390 	if (type == NIXLF_BCAST_ENTRY) {
2391 		*mce_list = &pfvf->bcast_mce_list;
2392 		*mce_idx = pfvf->bcast_mce_idx;
2393 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
2394 		*mce_list = &pfvf->mcast_mce_list;
2395 		*mce_idx = pfvf->mcast_mce_idx;
2396 	} else if (type == NIXLF_PROMISC_ENTRY) {
2397 		*mce_list = &pfvf->promisc_mce_list;
2398 		*mce_idx = pfvf->promisc_mce_idx;
2399 	}  else {
2400 		*mce_list = NULL;
2401 		*mce_idx = 0;
2402 	}
2403 }
2404 
2405 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2406 			       int type, bool add)
2407 {
2408 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2409 	struct npc_mcam *mcam = &rvu->hw->mcam;
2410 	struct rvu_hwinfo *hw = rvu->hw;
2411 	struct nix_mce_list *mce_list;
2412 
2413 	/* skip multicast pkt replication for AF's VFs */
2414 	if (is_afvf(pcifunc))
2415 		return 0;
2416 
2417 	if (!hw->cap.nix_rx_multicast)
2418 		return 0;
2419 
2420 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2421 	if (blkaddr < 0)
2422 		return -EINVAL;
2423 
2424 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2425 	if (nixlf < 0)
2426 		return -EINVAL;
2427 
2428 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2429 
2430 	mcam_index = npc_get_nixlf_mcam_index(mcam,
2431 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
2432 					      nixlf, type);
2433 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
2434 				  mce_idx, mcam_index, add);
2435 	return err;
2436 }
2437 
2438 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2439 {
2440 	struct nix_mcast *mcast = &nix_hw->mcast;
2441 	int err, pf, numvfs, idx;
2442 	struct rvu_pfvf *pfvf;
2443 	u16 pcifunc;
2444 	u64 cfg;
2445 
2446 	/* Skip PF0 (i.e AF) */
2447 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2448 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2449 		/* If PF is not enabled, nothing to do */
2450 		if (!((cfg >> 20) & 0x01))
2451 			continue;
2452 		/* Get numVFs attached to this PF */
2453 		numvfs = (cfg >> 12) & 0xFF;
2454 
2455 		pfvf = &rvu->pf[pf];
2456 
2457 		/* This NIX0/1 block mapped to PF ? */
2458 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2459 			continue;
2460 
2461 		/* save start idx of broadcast mce list */
2462 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2463 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2464 
2465 		/* save start idx of multicast mce list */
2466 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2467 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2468 
2469 		/* save the start idx of promisc mce list */
2470 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2471 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2472 
2473 		for (idx = 0; idx < (numvfs + 1); idx++) {
2474 			/* idx-0 is for PF, followed by VFs */
2475 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2476 			pcifunc |= idx;
2477 			/* Add dummy entries now, so that we don't have to check
2478 			 * for whether AQ_OP should be INIT/WRITE later on.
2479 			 * Will be updated when a NIXLF is attached/detached to
2480 			 * these PF/VFs.
2481 			 */
2482 			err = nix_blk_setup_mce(rvu, nix_hw,
2483 						pfvf->bcast_mce_idx + idx,
2484 						NIX_AQ_INSTOP_INIT,
2485 						pcifunc, 0, true);
2486 			if (err)
2487 				return err;
2488 
2489 			/* add dummy entries to multicast mce list */
2490 			err = nix_blk_setup_mce(rvu, nix_hw,
2491 						pfvf->mcast_mce_idx + idx,
2492 						NIX_AQ_INSTOP_INIT,
2493 						pcifunc, 0, true);
2494 			if (err)
2495 				return err;
2496 
2497 			/* add dummy entries to promisc mce list */
2498 			err = nix_blk_setup_mce(rvu, nix_hw,
2499 						pfvf->promisc_mce_idx + idx,
2500 						NIX_AQ_INSTOP_INIT,
2501 						pcifunc, 0, true);
2502 			if (err)
2503 				return err;
2504 		}
2505 	}
2506 	return 0;
2507 }
2508 
2509 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2510 {
2511 	struct nix_mcast *mcast = &nix_hw->mcast;
2512 	struct rvu_hwinfo *hw = rvu->hw;
2513 	int err, size;
2514 
2515 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2516 	size = (1ULL << size);
2517 
2518 	/* Alloc memory for multicast/mirror replication entries */
2519 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2520 			 (256UL << MC_TBL_SIZE), size);
2521 	if (err)
2522 		return -ENOMEM;
2523 
2524 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2525 		    (u64)mcast->mce_ctx->iova);
2526 
2527 	/* Set max list length equal to max no of VFs per PF  + PF itself */
2528 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2529 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2530 
2531 	/* Alloc memory for multicast replication buffers */
2532 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2533 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2534 			 (8UL << MC_BUF_CNT), size);
2535 	if (err)
2536 		return -ENOMEM;
2537 
2538 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2539 		    (u64)mcast->mcast_buf->iova);
2540 
2541 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2542 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2543 
2544 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2545 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2546 		    BIT_ULL(20) | MC_BUF_CNT);
2547 
2548 	mutex_init(&mcast->mce_lock);
2549 
2550 	return nix_setup_mce_tables(rvu, nix_hw);
2551 }
2552 
2553 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2554 {
2555 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2556 	int err;
2557 
2558 	/* Allocate resource bimap for tx vtag def registers*/
2559 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2560 	err = rvu_alloc_bitmap(&vlan->rsrc);
2561 	if (err)
2562 		return -ENOMEM;
2563 
2564 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2565 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2566 					    sizeof(u16), GFP_KERNEL);
2567 	if (!vlan->entry2pfvf_map)
2568 		goto free_mem;
2569 
2570 	mutex_init(&vlan->rsrc_lock);
2571 	return 0;
2572 
2573 free_mem:
2574 	kfree(vlan->rsrc.bmap);
2575 	return -ENOMEM;
2576 }
2577 
2578 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2579 {
2580 	struct nix_txsch *txsch;
2581 	int err, lvl, schq;
2582 	u64 cfg, reg;
2583 
2584 	/* Get scheduler queue count of each type and alloc
2585 	 * bitmap for each for alloc/free/attach operations.
2586 	 */
2587 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2588 		txsch = &nix_hw->txsch[lvl];
2589 		txsch->lvl = lvl;
2590 		switch (lvl) {
2591 		case NIX_TXSCH_LVL_SMQ:
2592 			reg = NIX_AF_MDQ_CONST;
2593 			break;
2594 		case NIX_TXSCH_LVL_TL4:
2595 			reg = NIX_AF_TL4_CONST;
2596 			break;
2597 		case NIX_TXSCH_LVL_TL3:
2598 			reg = NIX_AF_TL3_CONST;
2599 			break;
2600 		case NIX_TXSCH_LVL_TL2:
2601 			reg = NIX_AF_TL2_CONST;
2602 			break;
2603 		case NIX_TXSCH_LVL_TL1:
2604 			reg = NIX_AF_TL1_CONST;
2605 			break;
2606 		}
2607 		cfg = rvu_read64(rvu, blkaddr, reg);
2608 		txsch->schq.max = cfg & 0xFFFF;
2609 		err = rvu_alloc_bitmap(&txsch->schq);
2610 		if (err)
2611 			return err;
2612 
2613 		/* Allocate memory for scheduler queues to
2614 		 * PF/VF pcifunc mapping info.
2615 		 */
2616 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2617 					       sizeof(u32), GFP_KERNEL);
2618 		if (!txsch->pfvf_map)
2619 			return -ENOMEM;
2620 		for (schq = 0; schq < txsch->schq.max; schq++)
2621 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2622 	}
2623 	return 0;
2624 }
2625 
2626 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2627 				int blkaddr, u32 cfg)
2628 {
2629 	int fmt_idx;
2630 
2631 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2632 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2633 			return fmt_idx;
2634 	}
2635 	if (fmt_idx >= nix_hw->mark_format.total)
2636 		return -ERANGE;
2637 
2638 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2639 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2640 	nix_hw->mark_format.in_use++;
2641 	return fmt_idx;
2642 }
2643 
2644 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2645 				    int blkaddr)
2646 {
2647 	u64 cfgs[] = {
2648 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2649 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2650 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2651 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2652 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2653 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2654 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2655 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2656 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2657 	};
2658 	int i, rc;
2659 	u64 total;
2660 
2661 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2662 	nix_hw->mark_format.total = (u8)total;
2663 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2664 					       GFP_KERNEL);
2665 	if (!nix_hw->mark_format.cfg)
2666 		return -ENOMEM;
2667 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2668 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2669 		if (rc < 0)
2670 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2671 				i, rc);
2672 	}
2673 
2674 	return 0;
2675 }
2676 
2677 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
2678 {
2679 	/* CN10K supports LBK FIFO size 72 KB */
2680 	if (rvu->hw->lbk_bufsize == 0x12000)
2681 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
2682 	else
2683 		*max_mtu = NIC_HW_MAX_FRS;
2684 }
2685 
2686 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2687 {
2688 	/* RPM supports FIFO len 128 KB */
2689 	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2690 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2691 	else
2692 		*max_mtu = NIC_HW_MAX_FRS;
2693 }
2694 
2695 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2696 				     struct nix_hw_info *rsp)
2697 {
2698 	u16 pcifunc = req->hdr.pcifunc;
2699 	int blkaddr;
2700 
2701 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2702 	if (blkaddr < 0)
2703 		return NIX_AF_ERR_AF_LF_INVALID;
2704 
2705 	if (is_afvf(pcifunc))
2706 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2707 	else
2708 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2709 
2710 	rsp->min_mtu = NIC_HW_MIN_FRS;
2711 	return 0;
2712 }
2713 
2714 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2715 				   struct msg_rsp *rsp)
2716 {
2717 	u16 pcifunc = req->hdr.pcifunc;
2718 	int i, nixlf, blkaddr, err;
2719 	u64 stats;
2720 
2721 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2722 	if (err)
2723 		return err;
2724 
2725 	/* Get stats count supported by HW */
2726 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2727 
2728 	/* Reset tx stats */
2729 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2730 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2731 
2732 	/* Reset rx stats */
2733 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2734 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2735 
2736 	return 0;
2737 }
2738 
2739 /* Returns the ALG index to be set into NPC_RX_ACTION */
2740 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2741 {
2742 	int i;
2743 
2744 	/* Scan over exiting algo entries to find a match */
2745 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2746 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2747 			return i;
2748 
2749 	return -ERANGE;
2750 }
2751 
2752 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2753 {
2754 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2755 	int max_key_off, max_bit_pos, group_member;
2756 	struct nix_rx_flowkey_alg *field;
2757 	struct nix_rx_flowkey_alg tmp;
2758 	u32 key_type, valid_key;
2759 	int l4_key_offset = 0;
2760 
2761 	if (!alg)
2762 		return -EINVAL;
2763 
2764 #define FIELDS_PER_ALG  5
2765 #define MAX_KEY_OFF	40
2766 	/* Clear all fields */
2767 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2768 
2769 	/* Each of the 32 possible flow key algorithm definitions should
2770 	 * fall into above incremental config (except ALG0). Otherwise a
2771 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2772 	 *
2773 	 * If a different definition or combination needed then NPC MCAM
2774 	 * has to be programmed to filter such pkts and it's action should
2775 	 * point to this definition to calculate flowtag or hash.
2776 	 *
2777 	 * The `for loop` goes over _all_ protocol field and the following
2778 	 * variables depicts the state machine forward progress logic.
2779 	 *
2780 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2781 	 * in field->key_offset update.
2782 	 * field_marker - Enabled when a new field needs to be selected.
2783 	 * group_member - Enabled when protocol is part of a group.
2784 	 */
2785 
2786 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2787 	nr_field = 0; key_off = 0; field_marker = 1;
2788 	field = &tmp; max_bit_pos = fls(flow_cfg);
2789 	for (idx = 0;
2790 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2791 	     key_off < MAX_KEY_OFF; idx++) {
2792 		key_type = BIT(idx);
2793 		valid_key = flow_cfg & key_type;
2794 		/* Found a field marker, reset the field values */
2795 		if (field_marker)
2796 			memset(&tmp, 0, sizeof(tmp));
2797 
2798 		field_marker = true;
2799 		keyoff_marker = true;
2800 		switch (key_type) {
2801 		case NIX_FLOW_KEY_TYPE_PORT:
2802 			field->sel_chan = true;
2803 			/* This should be set to 1, when SEL_CHAN is set */
2804 			field->bytesm1 = 1;
2805 			break;
2806 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2807 			field->lid = NPC_LID_LC;
2808 			field->hdr_offset = 9; /* offset */
2809 			field->bytesm1 = 0; /* 1 byte */
2810 			field->ltype_match = NPC_LT_LC_IP;
2811 			field->ltype_mask = 0xF;
2812 			break;
2813 		case NIX_FLOW_KEY_TYPE_IPV4:
2814 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2815 			field->lid = NPC_LID_LC;
2816 			field->ltype_match = NPC_LT_LC_IP;
2817 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2818 				field->lid = NPC_LID_LG;
2819 				field->ltype_match = NPC_LT_LG_TU_IP;
2820 			}
2821 			field->hdr_offset = 12; /* SIP offset */
2822 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2823 			field->ltype_mask = 0xF; /* Match only IPv4 */
2824 			keyoff_marker = false;
2825 			break;
2826 		case NIX_FLOW_KEY_TYPE_IPV6:
2827 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2828 			field->lid = NPC_LID_LC;
2829 			field->ltype_match = NPC_LT_LC_IP6;
2830 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2831 				field->lid = NPC_LID_LG;
2832 				field->ltype_match = NPC_LT_LG_TU_IP6;
2833 			}
2834 			field->hdr_offset = 8; /* SIP offset */
2835 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2836 			field->ltype_mask = 0xF; /* Match only IPv6 */
2837 			break;
2838 		case NIX_FLOW_KEY_TYPE_TCP:
2839 		case NIX_FLOW_KEY_TYPE_UDP:
2840 		case NIX_FLOW_KEY_TYPE_SCTP:
2841 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2842 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2843 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2844 			field->lid = NPC_LID_LD;
2845 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2846 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2847 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2848 				field->lid = NPC_LID_LH;
2849 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2850 
2851 			/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
2852 			 * so no need to change the ltype_match, just change
2853 			 * the lid for inner protocols
2854 			 */
2855 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2856 				     (int)NPC_LT_LH_TU_TCP);
2857 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2858 				     (int)NPC_LT_LH_TU_UDP);
2859 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2860 				     (int)NPC_LT_LH_TU_SCTP);
2861 
2862 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2863 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2864 			    valid_key) {
2865 				field->ltype_match |= NPC_LT_LD_TCP;
2866 				group_member = true;
2867 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2868 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2869 				   valid_key) {
2870 				field->ltype_match |= NPC_LT_LD_UDP;
2871 				group_member = true;
2872 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2873 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2874 				   valid_key) {
2875 				field->ltype_match |= NPC_LT_LD_SCTP;
2876 				group_member = true;
2877 			}
2878 			field->ltype_mask = ~field->ltype_match;
2879 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2880 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2881 				/* Handle the case where any of the group item
2882 				 * is enabled in the group but not the final one
2883 				 */
2884 				if (group_member) {
2885 					valid_key = true;
2886 					group_member = false;
2887 				}
2888 			} else {
2889 				field_marker = false;
2890 				keyoff_marker = false;
2891 			}
2892 
2893 			/* TCP/UDP/SCTP and ESP/AH falls at same offset so
2894 			 * remember the TCP key offset of 40 byte hash key.
2895 			 */
2896 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2897 				l4_key_offset = key_off;
2898 			break;
2899 		case NIX_FLOW_KEY_TYPE_NVGRE:
2900 			field->lid = NPC_LID_LD;
2901 			field->hdr_offset = 4; /* VSID offset */
2902 			field->bytesm1 = 2;
2903 			field->ltype_match = NPC_LT_LD_NVGRE;
2904 			field->ltype_mask = 0xF;
2905 			break;
2906 		case NIX_FLOW_KEY_TYPE_VXLAN:
2907 		case NIX_FLOW_KEY_TYPE_GENEVE:
2908 			field->lid = NPC_LID_LE;
2909 			field->bytesm1 = 2;
2910 			field->hdr_offset = 4;
2911 			field->ltype_mask = 0xF;
2912 			field_marker = false;
2913 			keyoff_marker = false;
2914 
2915 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2916 				field->ltype_match |= NPC_LT_LE_VXLAN;
2917 				group_member = true;
2918 			}
2919 
2920 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2921 				field->ltype_match |= NPC_LT_LE_GENEVE;
2922 				group_member = true;
2923 			}
2924 
2925 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2926 				if (group_member) {
2927 					field->ltype_mask = ~field->ltype_match;
2928 					field_marker = true;
2929 					keyoff_marker = true;
2930 					valid_key = true;
2931 					group_member = false;
2932 				}
2933 			}
2934 			break;
2935 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2936 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2937 			field->lid = NPC_LID_LA;
2938 			field->ltype_match = NPC_LT_LA_ETHER;
2939 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2940 				field->lid = NPC_LID_LF;
2941 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2942 			}
2943 			field->hdr_offset = 0;
2944 			field->bytesm1 = 5; /* DMAC 6 Byte */
2945 			field->ltype_mask = 0xF;
2946 			break;
2947 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2948 			field->lid = NPC_LID_LC;
2949 			field->hdr_offset = 40; /* IPV6 hdr */
2950 			field->bytesm1 = 0; /* 1 Byte ext hdr*/
2951 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2952 			field->ltype_mask = 0xF;
2953 			break;
2954 		case NIX_FLOW_KEY_TYPE_GTPU:
2955 			field->lid = NPC_LID_LE;
2956 			field->hdr_offset = 4;
2957 			field->bytesm1 = 3; /* 4 bytes TID*/
2958 			field->ltype_match = NPC_LT_LE_GTPU;
2959 			field->ltype_mask = 0xF;
2960 			break;
2961 		case NIX_FLOW_KEY_TYPE_VLAN:
2962 			field->lid = NPC_LID_LB;
2963 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2964 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
2965 			field->ltype_match = NPC_LT_LB_CTAG;
2966 			field->ltype_mask = 0xF;
2967 			field->fn_mask = 1; /* Mask out the first nibble */
2968 			break;
2969 		case NIX_FLOW_KEY_TYPE_AH:
2970 		case NIX_FLOW_KEY_TYPE_ESP:
2971 			field->hdr_offset = 0;
2972 			field->bytesm1 = 7; /* SPI + sequence number */
2973 			field->ltype_mask = 0xF;
2974 			field->lid = NPC_LID_LE;
2975 			field->ltype_match = NPC_LT_LE_ESP;
2976 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
2977 				field->lid = NPC_LID_LD;
2978 				field->ltype_match = NPC_LT_LD_AH;
2979 				field->hdr_offset = 4;
2980 				keyoff_marker = false;
2981 			}
2982 			break;
2983 		}
2984 		field->ena = 1;
2985 
2986 		/* Found a valid flow key type */
2987 		if (valid_key) {
2988 			/* Use the key offset of TCP/UDP/SCTP fields
2989 			 * for ESP/AH fields.
2990 			 */
2991 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
2992 			    key_type == NIX_FLOW_KEY_TYPE_AH)
2993 				key_off = l4_key_offset;
2994 			field->key_offset = key_off;
2995 			memcpy(&alg[nr_field], field, sizeof(*field));
2996 			max_key_off = max(max_key_off, field->bytesm1 + 1);
2997 
2998 			/* Found a field marker, get the next field */
2999 			if (field_marker)
3000 				nr_field++;
3001 		}
3002 
3003 		/* Found a keyoff marker, update the new key_off */
3004 		if (keyoff_marker) {
3005 			key_off += max_key_off;
3006 			max_key_off = 0;
3007 		}
3008 	}
3009 	/* Processed all the flow key types */
3010 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3011 		return 0;
3012 	else
3013 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
3014 }
3015 
3016 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3017 {
3018 	u64 field[FIELDS_PER_ALG];
3019 	struct nix_hw *hw;
3020 	int fid, rc;
3021 
3022 	hw = get_nix_hw(rvu->hw, blkaddr);
3023 	if (!hw)
3024 		return -EINVAL;
3025 
3026 	/* No room to add new flow hash algoritham */
3027 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3028 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
3029 
3030 	/* Generate algo fields for the given flow_cfg */
3031 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3032 	if (rc)
3033 		return rc;
3034 
3035 	/* Update ALGX_FIELDX register with generated fields */
3036 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3037 		rvu_write64(rvu, blkaddr,
3038 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3039 							   fid), field[fid]);
3040 
3041 	/* Store the flow_cfg for futher lookup */
3042 	rc = hw->flowkey.in_use;
3043 	hw->flowkey.flowkey[rc] = flow_cfg;
3044 	hw->flowkey.in_use++;
3045 
3046 	return rc;
3047 }
3048 
3049 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3050 					 struct nix_rss_flowkey_cfg *req,
3051 					 struct nix_rss_flowkey_cfg_rsp *rsp)
3052 {
3053 	u16 pcifunc = req->hdr.pcifunc;
3054 	int alg_idx, nixlf, blkaddr;
3055 	struct nix_hw *nix_hw;
3056 	int err;
3057 
3058 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3059 	if (err)
3060 		return err;
3061 
3062 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3063 	if (!nix_hw)
3064 		return -EINVAL;
3065 
3066 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3067 	/* Failed to get algo index from the exiting list, reserve new  */
3068 	if (alg_idx < 0) {
3069 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3070 						  req->flowkey_cfg);
3071 		if (alg_idx < 0)
3072 			return alg_idx;
3073 	}
3074 	rsp->alg_idx = alg_idx;
3075 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3076 				       alg_idx, req->mcam_index);
3077 	return 0;
3078 }
3079 
3080 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3081 {
3082 	u32 flowkey_cfg, minkey_cfg;
3083 	int alg, fid, rc;
3084 
3085 	/* Disable all flow key algx fieldx */
3086 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3087 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3088 			rvu_write64(rvu, blkaddr,
3089 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3090 				    0);
3091 	}
3092 
3093 	/* IPv4/IPv6 SIP/DIPs */
3094 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3095 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3096 	if (rc < 0)
3097 		return rc;
3098 
3099 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3100 	minkey_cfg = flowkey_cfg;
3101 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3102 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3103 	if (rc < 0)
3104 		return rc;
3105 
3106 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3107 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3108 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3109 	if (rc < 0)
3110 		return rc;
3111 
3112 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3113 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3114 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3115 	if (rc < 0)
3116 		return rc;
3117 
3118 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3119 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3120 			NIX_FLOW_KEY_TYPE_UDP;
3121 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3122 	if (rc < 0)
3123 		return rc;
3124 
3125 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3126 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3127 			NIX_FLOW_KEY_TYPE_SCTP;
3128 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3129 	if (rc < 0)
3130 		return rc;
3131 
3132 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3133 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3134 			NIX_FLOW_KEY_TYPE_SCTP;
3135 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3136 	if (rc < 0)
3137 		return rc;
3138 
3139 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3140 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3141 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3142 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3143 	if (rc < 0)
3144 		return rc;
3145 
3146 	return 0;
3147 }
3148 
3149 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3150 				      struct nix_set_mac_addr *req,
3151 				      struct msg_rsp *rsp)
3152 {
3153 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3154 	u16 pcifunc = req->hdr.pcifunc;
3155 	int blkaddr, nixlf, err;
3156 	struct rvu_pfvf *pfvf;
3157 
3158 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3159 	if (err)
3160 		return err;
3161 
3162 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3163 
3164 	/* untrusted VF can't overwrite admin(PF) changes */
3165 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3166 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3167 		dev_warn(rvu->dev,
3168 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3169 		return -EPERM;
3170 	}
3171 
3172 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3173 
3174 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3175 				    pfvf->rx_chan_base, req->mac_addr);
3176 
3177 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3178 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
3179 
3180 	return 0;
3181 }
3182 
3183 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3184 				      struct msg_req *req,
3185 				      struct nix_get_mac_addr_rsp *rsp)
3186 {
3187 	u16 pcifunc = req->hdr.pcifunc;
3188 	struct rvu_pfvf *pfvf;
3189 
3190 	if (!is_nixlf_attached(rvu, pcifunc))
3191 		return NIX_AF_ERR_AF_LF_INVALID;
3192 
3193 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3194 
3195 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3196 
3197 	return 0;
3198 }
3199 
3200 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3201 				     struct msg_rsp *rsp)
3202 {
3203 	bool allmulti, promisc, nix_rx_multicast;
3204 	u16 pcifunc = req->hdr.pcifunc;
3205 	struct rvu_pfvf *pfvf;
3206 	int nixlf, err;
3207 
3208 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3209 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3210 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3211 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3212 
3213 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3214 
3215 	if (is_vf(pcifunc) && !nix_rx_multicast &&
3216 	    (promisc || allmulti)) {
3217 		dev_warn_ratelimited(rvu->dev,
3218 				     "VF promisc/multicast not supported\n");
3219 		return 0;
3220 	}
3221 
3222 	/* untrusted VF can't configure promisc/allmulti */
3223 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3224 	    (promisc || allmulti))
3225 		return 0;
3226 
3227 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3228 	if (err)
3229 		return err;
3230 
3231 	if (nix_rx_multicast) {
3232 		/* add/del this PF_FUNC to/from mcast pkt replication list */
3233 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3234 					  allmulti);
3235 		if (err) {
3236 			dev_err(rvu->dev,
3237 				"Failed to update pcifunc 0x%x to multicast list\n",
3238 				pcifunc);
3239 			return err;
3240 		}
3241 
3242 		/* add/del this PF_FUNC to/from promisc pkt replication list */
3243 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3244 					  promisc);
3245 		if (err) {
3246 			dev_err(rvu->dev,
3247 				"Failed to update pcifunc 0x%x to promisc list\n",
3248 				pcifunc);
3249 			return err;
3250 		}
3251 	}
3252 
3253 	/* install/uninstall allmulti entry */
3254 	if (allmulti) {
3255 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3256 					       pfvf->rx_chan_base);
3257 	} else {
3258 		if (!nix_rx_multicast)
3259 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3260 	}
3261 
3262 	/* install/uninstall promisc entry */
3263 	if (promisc) {
3264 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3265 					      pfvf->rx_chan_base,
3266 					      pfvf->rx_chan_cnt);
3267 	} else {
3268 		if (!nix_rx_multicast)
3269 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3270 	}
3271 
3272 	return 0;
3273 }
3274 
3275 static void nix_find_link_frs(struct rvu *rvu,
3276 			      struct nix_frs_cfg *req, u16 pcifunc)
3277 {
3278 	int pf = rvu_get_pf(pcifunc);
3279 	struct rvu_pfvf *pfvf;
3280 	int maxlen, minlen;
3281 	int numvfs, hwvf;
3282 	int vf;
3283 
3284 	/* Update with requester's min/max lengths */
3285 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3286 	pfvf->maxlen = req->maxlen;
3287 	if (req->update_minlen)
3288 		pfvf->minlen = req->minlen;
3289 
3290 	maxlen = req->maxlen;
3291 	minlen = req->update_minlen ? req->minlen : 0;
3292 
3293 	/* Get this PF's numVFs and starting hwvf */
3294 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3295 
3296 	/* For each VF, compare requested max/minlen */
3297 	for (vf = 0; vf < numvfs; vf++) {
3298 		pfvf =  &rvu->hwvf[hwvf + vf];
3299 		if (pfvf->maxlen > maxlen)
3300 			maxlen = pfvf->maxlen;
3301 		if (req->update_minlen &&
3302 		    pfvf->minlen && pfvf->minlen < minlen)
3303 			minlen = pfvf->minlen;
3304 	}
3305 
3306 	/* Compare requested max/minlen with PF's max/minlen */
3307 	pfvf = &rvu->pf[pf];
3308 	if (pfvf->maxlen > maxlen)
3309 		maxlen = pfvf->maxlen;
3310 	if (req->update_minlen &&
3311 	    pfvf->minlen && pfvf->minlen < minlen)
3312 		minlen = pfvf->minlen;
3313 
3314 	/* Update the request with max/min PF's and it's VF's max/min */
3315 	req->maxlen = maxlen;
3316 	if (req->update_minlen)
3317 		req->minlen = minlen;
3318 }
3319 
3320 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3321 				    struct msg_rsp *rsp)
3322 {
3323 	struct rvu_hwinfo *hw = rvu->hw;
3324 	u16 pcifunc = req->hdr.pcifunc;
3325 	int pf = rvu_get_pf(pcifunc);
3326 	int blkaddr, schq, link = -1;
3327 	struct nix_txsch *txsch;
3328 	u64 cfg, lmac_fifo_len;
3329 	struct nix_hw *nix_hw;
3330 	u8 cgx = 0, lmac = 0;
3331 	u16 max_mtu;
3332 
3333 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3334 	if (blkaddr < 0)
3335 		return NIX_AF_ERR_AF_LF_INVALID;
3336 
3337 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3338 	if (!nix_hw)
3339 		return -EINVAL;
3340 
3341 	if (is_afvf(pcifunc))
3342 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3343 	else
3344 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3345 
3346 	if (!req->sdp_link && req->maxlen > max_mtu)
3347 		return NIX_AF_ERR_FRS_INVALID;
3348 
3349 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3350 		return NIX_AF_ERR_FRS_INVALID;
3351 
3352 	/* Check if requester wants to update SMQ's */
3353 	if (!req->update_smq)
3354 		goto rx_frscfg;
3355 
3356 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
3357 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3358 	mutex_lock(&rvu->rsrc_lock);
3359 	for (schq = 0; schq < txsch->schq.max; schq++) {
3360 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3361 			continue;
3362 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3363 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3364 		if (req->update_minlen)
3365 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3366 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3367 	}
3368 	mutex_unlock(&rvu->rsrc_lock);
3369 
3370 rx_frscfg:
3371 	/* Check if config is for SDP link */
3372 	if (req->sdp_link) {
3373 		if (!hw->sdp_links)
3374 			return NIX_AF_ERR_RX_LINK_INVALID;
3375 		link = hw->cgx_links + hw->lbk_links;
3376 		goto linkcfg;
3377 	}
3378 
3379 	/* Check if the request is from CGX mapped RVU PF */
3380 	if (is_pf_cgxmapped(rvu, pf)) {
3381 		/* Get CGX and LMAC to which this PF is mapped and find link */
3382 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3383 		link = (cgx * hw->lmac_per_cgx) + lmac;
3384 	} else if (pf == 0) {
3385 		/* For VFs of PF0 ingress is LBK port, so config LBK link */
3386 		link = hw->cgx_links;
3387 	}
3388 
3389 	if (link < 0)
3390 		return NIX_AF_ERR_RX_LINK_INVALID;
3391 
3392 	nix_find_link_frs(rvu, req, pcifunc);
3393 
3394 linkcfg:
3395 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3396 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3397 	if (req->update_minlen)
3398 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
3399 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3400 
3401 	if (req->sdp_link || pf == 0)
3402 		return 0;
3403 
3404 	/* Update transmit credits for CGX links */
3405 	lmac_fifo_len =
3406 		rvu_cgx_get_fifolen(rvu) /
3407 		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3408 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3409 	cfg &= ~(0xFFFFFULL << 12);
3410 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
3411 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3412 	return 0;
3413 }
3414 
3415 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3416 				    struct msg_rsp *rsp)
3417 {
3418 	int nixlf, blkaddr, err;
3419 	u64 cfg;
3420 
3421 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3422 	if (err)
3423 		return err;
3424 
3425 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3426 	/* Set the interface configuration */
3427 	if (req->len_verify & BIT(0))
3428 		cfg |= BIT_ULL(41);
3429 	else
3430 		cfg &= ~BIT_ULL(41);
3431 
3432 	if (req->len_verify & BIT(1))
3433 		cfg |= BIT_ULL(40);
3434 	else
3435 		cfg &= ~BIT_ULL(40);
3436 
3437 	if (req->csum_verify & BIT(0))
3438 		cfg |= BIT_ULL(37);
3439 	else
3440 		cfg &= ~BIT_ULL(37);
3441 
3442 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3443 
3444 	return 0;
3445 }
3446 
3447 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3448 {
3449 	/* CN10k supports 72KB FIFO size and max packet size of 64k */
3450 	if (rvu->hw->lbk_bufsize == 0x12000)
3451 		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3452 
3453 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3454 }
3455 
3456 static void nix_link_config(struct rvu *rvu, int blkaddr)
3457 {
3458 	struct rvu_hwinfo *hw = rvu->hw;
3459 	int cgx, lmac_cnt, slink, link;
3460 	u16 lbk_max_frs, lmac_max_frs;
3461 	u64 tx_credits;
3462 
3463 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3464 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3465 
3466 	/* Set default min/max packet lengths allowed on NIX Rx links.
3467 	 *
3468 	 * With HW reset minlen value of 60byte, HW will treat ARP pkts
3469 	 * as undersize and report them to SW as error pkts, hence
3470 	 * setting it to 40 bytes.
3471 	 */
3472 	for (link = 0; link < hw->cgx_links; link++) {
3473 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3474 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3475 	}
3476 
3477 	for (link = hw->cgx_links; link < hw->lbk_links; link++) {
3478 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3479 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3480 	}
3481 	if (hw->sdp_links) {
3482 		link = hw->cgx_links + hw->lbk_links;
3483 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3484 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3485 	}
3486 
3487 	/* Set credits for Tx links assuming max packet length allowed.
3488 	 * This will be reconfigured based on MTU set for PF/VF.
3489 	 */
3490 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3491 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3492 		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3493 			       lmac_max_frs) / 16;
3494 		/* Enable credits and set credit pkt count to max allowed */
3495 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3496 		slink = cgx * hw->lmac_per_cgx;
3497 		for (link = slink; link < (slink + lmac_cnt); link++) {
3498 			rvu_write64(rvu, blkaddr,
3499 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3500 				    tx_credits);
3501 		}
3502 	}
3503 
3504 	/* Set Tx credits for LBK link */
3505 	slink = hw->cgx_links;
3506 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3507 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3508 		/* Enable credits and set credit pkt count to max allowed */
3509 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3510 		rvu_write64(rvu, blkaddr,
3511 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3512 	}
3513 }
3514 
3515 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3516 {
3517 	int idx, err;
3518 	u64 status;
3519 
3520 	/* Start X2P bus calibration */
3521 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3522 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3523 	/* Wait for calibration to complete */
3524 	err = rvu_poll_reg(rvu, blkaddr,
3525 			   NIX_AF_STATUS, BIT_ULL(10), false);
3526 	if (err) {
3527 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3528 		return err;
3529 	}
3530 
3531 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3532 	/* Check if CGX devices are ready */
3533 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3534 		/* Skip when cgx port is not available */
3535 		if (!rvu_cgx_pdata(idx, rvu) ||
3536 		    (status & (BIT_ULL(16 + idx))))
3537 			continue;
3538 		dev_err(rvu->dev,
3539 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3540 		err = -EBUSY;
3541 	}
3542 
3543 	/* Check if LBK is ready */
3544 	if (!(status & BIT_ULL(19))) {
3545 		dev_err(rvu->dev,
3546 			"LBK didn't respond to NIX X2P calibration\n");
3547 		err = -EBUSY;
3548 	}
3549 
3550 	/* Clear 'calibrate_x2p' bit */
3551 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3552 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3553 	if (err || (status & 0x3FFULL))
3554 		dev_err(rvu->dev,
3555 			"NIX X2P calibration failed, status 0x%llx\n", status);
3556 	if (err)
3557 		return err;
3558 	return 0;
3559 }
3560 
3561 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3562 {
3563 	u64 cfg;
3564 	int err;
3565 
3566 	/* Set admin queue endianness */
3567 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3568 #ifdef __BIG_ENDIAN
3569 	cfg |= BIT_ULL(8);
3570 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3571 #else
3572 	cfg &= ~BIT_ULL(8);
3573 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3574 #endif
3575 
3576 	/* Do not bypass NDC cache */
3577 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3578 	cfg &= ~0x3FFEULL;
3579 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3580 	/* Disable caching of SQB aka SQEs */
3581 	cfg |= 0x04ULL;
3582 #endif
3583 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3584 
3585 	/* Result structure can be followed by RQ/SQ/CQ context at
3586 	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
3587 	 * operation type. Alloc sufficient result memory for all operations.
3588 	 */
3589 	err = rvu_aq_alloc(rvu, &block->aq,
3590 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3591 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3592 	if (err)
3593 		return err;
3594 
3595 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3596 	rvu_write64(rvu, block->addr,
3597 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3598 	return 0;
3599 }
3600 
3601 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3602 {
3603 	const struct npc_lt_def_cfg *ltdefs;
3604 	struct rvu_hwinfo *hw = rvu->hw;
3605 	int blkaddr = nix_hw->blkaddr;
3606 	struct rvu_block *block;
3607 	int err;
3608 	u64 cfg;
3609 
3610 	block = &hw->block[blkaddr];
3611 
3612 	if (is_rvu_96xx_B0(rvu)) {
3613 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3614 		 * internal state when conditional clocks are turned off.
3615 		 * Hence enable them.
3616 		 */
3617 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3618 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3619 
3620 		/* Set chan/link to backpressure TL3 instead of TL2 */
3621 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3622 
3623 		/* Disable SQ manager's sticky mode operation (set TM6 = 0)
3624 		 * This sticky mode is known to cause SQ stalls when multiple
3625 		 * SQs are mapped to same SMQ and transmitting pkts at a time.
3626 		 */
3627 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3628 		cfg &= ~BIT_ULL(15);
3629 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3630 	}
3631 
3632 	ltdefs = rvu->kpu.lt_def;
3633 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3634 	err = nix_calibrate_x2p(rvu, blkaddr);
3635 	if (err)
3636 		return err;
3637 
3638 	/* Initialize admin queue */
3639 	err = nix_aq_init(rvu, block);
3640 	if (err)
3641 		return err;
3642 
3643 	/* Restore CINT timer delay to HW reset values */
3644 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3645 
3646 	if (is_block_implemented(hw, blkaddr)) {
3647 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3648 		if (err)
3649 			return err;
3650 
3651 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3652 		if (err)
3653 			return err;
3654 
3655 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3656 		if (err)
3657 			return err;
3658 
3659 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3660 		if (err)
3661 			return err;
3662 
3663 		err = nix_setup_txvlan(rvu, nix_hw);
3664 		if (err)
3665 			return err;
3666 
3667 		/* Configure segmentation offload formats */
3668 		nix_setup_lso(rvu, nix_hw, blkaddr);
3669 
3670 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3671 		 * This helps HW protocol checker to identify headers
3672 		 * and validate length and checksums.
3673 		 */
3674 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3675 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3676 			    ltdefs->rx_ol2.ltype_mask);
3677 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3678 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3679 			    ltdefs->rx_oip4.ltype_mask);
3680 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3681 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3682 			    ltdefs->rx_iip4.ltype_mask);
3683 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3684 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3685 			    ltdefs->rx_oip6.ltype_mask);
3686 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3687 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3688 			    ltdefs->rx_iip6.ltype_mask);
3689 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3690 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3691 			    ltdefs->rx_otcp.ltype_mask);
3692 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3693 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3694 			    ltdefs->rx_itcp.ltype_mask);
3695 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3696 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3697 			    ltdefs->rx_oudp.ltype_mask);
3698 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3699 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3700 			    ltdefs->rx_iudp.ltype_mask);
3701 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3702 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3703 			    ltdefs->rx_osctp.ltype_mask);
3704 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3705 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3706 			    ltdefs->rx_isctp.ltype_mask);
3707 
3708 		if (!is_rvu_otx2(rvu)) {
3709 			/* Enable APAD calculation for other protocols
3710 			 * matching APAD0 and APAD1 lt def registers.
3711 			 */
3712 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3713 				    (ltdefs->rx_apad0.valid << 11) |
3714 				    (ltdefs->rx_apad0.lid << 8) |
3715 				    (ltdefs->rx_apad0.ltype_match << 4) |
3716 				    ltdefs->rx_apad0.ltype_mask);
3717 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3718 				    (ltdefs->rx_apad1.valid << 11) |
3719 				    (ltdefs->rx_apad1.lid << 8) |
3720 				    (ltdefs->rx_apad1.ltype_match << 4) |
3721 				    ltdefs->rx_apad1.ltype_mask);
3722 
3723 			/* Receive ethertype defination register defines layer
3724 			 * information in NPC_RESULT_S to identify the Ethertype
3725 			 * location in L2 header. Used for Ethertype overwriting
3726 			 * in inline IPsec flow.
3727 			 */
3728 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3729 				    (ltdefs->rx_et[0].offset << 12) |
3730 				    (ltdefs->rx_et[0].valid << 11) |
3731 				    (ltdefs->rx_et[0].lid << 8) |
3732 				    (ltdefs->rx_et[0].ltype_match << 4) |
3733 				    ltdefs->rx_et[0].ltype_mask);
3734 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3735 				    (ltdefs->rx_et[1].offset << 12) |
3736 				    (ltdefs->rx_et[1].valid << 11) |
3737 				    (ltdefs->rx_et[1].lid << 8) |
3738 				    (ltdefs->rx_et[1].ltype_match << 4) |
3739 				    ltdefs->rx_et[1].ltype_mask);
3740 		}
3741 
3742 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3743 		if (err)
3744 			return err;
3745 
3746 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3747 		nix_link_config(rvu, blkaddr);
3748 
3749 		/* Enable Channel backpressure */
3750 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3751 	}
3752 	return 0;
3753 }
3754 
3755 int rvu_nix_init(struct rvu *rvu)
3756 {
3757 	struct rvu_hwinfo *hw = rvu->hw;
3758 	struct nix_hw *nix_hw;
3759 	int blkaddr = 0, err;
3760 	int i = 0;
3761 
3762 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3763 			       GFP_KERNEL);
3764 	if (!hw->nix)
3765 		return -ENOMEM;
3766 
3767 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3768 	while (blkaddr) {
3769 		nix_hw = &hw->nix[i];
3770 		nix_hw->rvu = rvu;
3771 		nix_hw->blkaddr = blkaddr;
3772 		err = rvu_nix_block_init(rvu, nix_hw);
3773 		if (err)
3774 			return err;
3775 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3776 		i++;
3777 	}
3778 
3779 	return 0;
3780 }
3781 
3782 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3783 				  struct rvu_block *block)
3784 {
3785 	struct nix_txsch *txsch;
3786 	struct nix_mcast *mcast;
3787 	struct nix_txvlan *vlan;
3788 	struct nix_hw *nix_hw;
3789 	int lvl;
3790 
3791 	rvu_aq_free(rvu, block->aq);
3792 
3793 	if (is_block_implemented(rvu->hw, blkaddr)) {
3794 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3795 		if (!nix_hw)
3796 			return;
3797 
3798 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3799 			txsch = &nix_hw->txsch[lvl];
3800 			kfree(txsch->schq.bmap);
3801 		}
3802 
3803 		nix_ipolicer_freemem(nix_hw);
3804 
3805 		vlan = &nix_hw->txvlan;
3806 		kfree(vlan->rsrc.bmap);
3807 		mutex_destroy(&vlan->rsrc_lock);
3808 		devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3809 
3810 		mcast = &nix_hw->mcast;
3811 		qmem_free(rvu->dev, mcast->mce_ctx);
3812 		qmem_free(rvu->dev, mcast->mcast_buf);
3813 		mutex_destroy(&mcast->mce_lock);
3814 	}
3815 }
3816 
3817 void rvu_nix_freemem(struct rvu *rvu)
3818 {
3819 	struct rvu_hwinfo *hw = rvu->hw;
3820 	struct rvu_block *block;
3821 	int blkaddr = 0;
3822 
3823 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3824 	while (blkaddr) {
3825 		block = &hw->block[blkaddr];
3826 		rvu_nix_block_freemem(rvu, blkaddr, block);
3827 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3828 	}
3829 }
3830 
3831 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3832 				     struct msg_rsp *rsp)
3833 {
3834 	u16 pcifunc = req->hdr.pcifunc;
3835 	struct rvu_pfvf *pfvf;
3836 	int nixlf, err;
3837 
3838 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3839 	if (err)
3840 		return err;
3841 
3842 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3843 
3844 	npc_mcam_enable_flows(rvu, pcifunc);
3845 
3846 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3847 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3848 
3849 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3850 }
3851 
3852 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3853 				    struct msg_rsp *rsp)
3854 {
3855 	u16 pcifunc = req->hdr.pcifunc;
3856 	struct rvu_pfvf *pfvf;
3857 	int nixlf, err;
3858 
3859 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3860 	if (err)
3861 		return err;
3862 
3863 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3864 
3865 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3866 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3867 
3868 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3869 }
3870 
3871 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3872 {
3873 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3874 	struct hwctx_disable_req ctx_req;
3875 	int err;
3876 
3877 	ctx_req.hdr.pcifunc = pcifunc;
3878 
3879 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3880 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3881 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3882 	nix_interface_deinit(rvu, pcifunc, nixlf);
3883 	nix_rx_sync(rvu, blkaddr);
3884 	nix_txschq_free(rvu, pcifunc);
3885 
3886 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3887 
3888 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3889 
3890 	if (pfvf->sq_ctx) {
3891 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3892 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3893 		if (err)
3894 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3895 	}
3896 
3897 	if (pfvf->rq_ctx) {
3898 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3899 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3900 		if (err)
3901 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3902 	}
3903 
3904 	if (pfvf->cq_ctx) {
3905 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3906 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3907 		if (err)
3908 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3909 	}
3910 
3911 	nix_ctx_free(rvu, pfvf);
3912 
3913 	nix_free_all_bandprof(rvu, pcifunc);
3914 }
3915 
3916 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
3917 
3918 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3919 {
3920 	struct rvu_hwinfo *hw = rvu->hw;
3921 	struct rvu_block *block;
3922 	int blkaddr, pf;
3923 	int nixlf;
3924 	u64 cfg;
3925 
3926 	pf = rvu_get_pf(pcifunc);
3927 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3928 		return 0;
3929 
3930 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3931 	if (blkaddr < 0)
3932 		return NIX_AF_ERR_AF_LF_INVALID;
3933 
3934 	block = &hw->block[blkaddr];
3935 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3936 	if (nixlf < 0)
3937 		return NIX_AF_ERR_AF_LF_INVALID;
3938 
3939 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3940 
3941 	if (enable)
3942 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3943 	else
3944 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3945 
3946 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3947 
3948 	return 0;
3949 }
3950 
3951 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3952 					  struct msg_rsp *rsp)
3953 {
3954 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3955 }
3956 
3957 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3958 					   struct msg_rsp *rsp)
3959 {
3960 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3961 }
3962 
3963 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3964 					struct nix_lso_format_cfg *req,
3965 					struct nix_lso_format_cfg_rsp *rsp)
3966 {
3967 	u16 pcifunc = req->hdr.pcifunc;
3968 	struct nix_hw *nix_hw;
3969 	struct rvu_pfvf *pfvf;
3970 	int blkaddr, idx, f;
3971 	u64 reg;
3972 
3973 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3974 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3975 	if (!pfvf->nixlf || blkaddr < 0)
3976 		return NIX_AF_ERR_AF_LF_INVALID;
3977 
3978 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3979 	if (!nix_hw)
3980 		return -EINVAL;
3981 
3982 	/* Find existing matching LSO format, if any */
3983 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
3984 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
3985 			reg = rvu_read64(rvu, blkaddr,
3986 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
3987 			if (req->fields[f] != (reg & req->field_mask))
3988 				break;
3989 		}
3990 
3991 		if (f == NIX_LSO_FIELD_MAX)
3992 			break;
3993 	}
3994 
3995 	if (idx < nix_hw->lso.in_use) {
3996 		/* Match found */
3997 		rsp->lso_format_idx = idx;
3998 		return 0;
3999 	}
4000 
4001 	if (nix_hw->lso.in_use == nix_hw->lso.total)
4002 		return NIX_AF_ERR_LSO_CFG_FAIL;
4003 
4004 	rsp->lso_format_idx = nix_hw->lso.in_use++;
4005 
4006 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4007 		rvu_write64(rvu, blkaddr,
4008 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4009 			    req->fields[f]);
4010 
4011 	return 0;
4012 }
4013 
4014 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4015 {
4016 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4017 
4018 	/* overwrite vf mac address with default_mac */
4019 	if (from_vf)
4020 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4021 }
4022 
4023 /* NIX ingress policers or bandwidth profiles APIs */
4024 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4025 {
4026 	struct npc_lt_def_cfg defs, *ltdefs;
4027 
4028 	ltdefs = &defs;
4029 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4030 
4031 	/* Extract PCP and DEI fields from outer VLAN from byte offset
4032 	 * 2 from the start of LB_PTR (ie TAG).
4033 	 * VLAN0 is Outer VLAN and VLAN1 is Inner VLAN. Inner VLAN
4034 	 * fields are considered when 'Tunnel enable' is set in profile.
4035 	 */
4036 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4037 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4038 		    (ltdefs->ovlan.ltype_match << 4) |
4039 		    ltdefs->ovlan.ltype_mask);
4040 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4041 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4042 		    (ltdefs->ivlan.ltype_match << 4) |
4043 		    ltdefs->ivlan.ltype_mask);
4044 
4045 	/* DSCP field in outer and tunneled IPv4 packets */
4046 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4047 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4048 		    (ltdefs->rx_oip4.ltype_match << 4) |
4049 		    ltdefs->rx_oip4.ltype_mask);
4050 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4051 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4052 		    (ltdefs->rx_iip4.ltype_match << 4) |
4053 		    ltdefs->rx_iip4.ltype_mask);
4054 
4055 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4056 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4057 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4058 		    (ltdefs->rx_oip6.ltype_match << 4) |
4059 		    ltdefs->rx_oip6.ltype_mask);
4060 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4061 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4062 		    (ltdefs->rx_iip6.ltype_match << 4) |
4063 		    ltdefs->rx_iip6.ltype_mask);
4064 }
4065 
4066 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4067 				    int layer, int prof_idx)
4068 {
4069 	struct nix_cn10k_aq_enq_req aq_req;
4070 	int rc;
4071 
4072 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4073 
4074 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4075 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4076 	aq_req.op = NIX_AQ_INSTOP_INIT;
4077 
4078 	/* Context is all zeros, submit to AQ */
4079 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4080 				     (struct nix_aq_enq_req *)&aq_req, NULL);
4081 	if (rc)
4082 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4083 			layer, prof_idx);
4084 	return rc;
4085 }
4086 
4087 static int nix_setup_ipolicers(struct rvu *rvu,
4088 			       struct nix_hw *nix_hw, int blkaddr)
4089 {
4090 	struct rvu_hwinfo *hw = rvu->hw;
4091 	struct nix_ipolicer *ipolicer;
4092 	int err, layer, prof_idx;
4093 	u64 cfg;
4094 
4095 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4096 	if (!(cfg & BIT_ULL(61))) {
4097 		hw->cap.ipolicer = false;
4098 		return 0;
4099 	}
4100 
4101 	hw->cap.ipolicer = true;
4102 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4103 					sizeof(*ipolicer), GFP_KERNEL);
4104 	if (!nix_hw->ipolicer)
4105 		return -ENOMEM;
4106 
4107 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4108 
4109 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4110 		ipolicer = &nix_hw->ipolicer[layer];
4111 		switch (layer) {
4112 		case BAND_PROF_LEAF_LAYER:
4113 			ipolicer->band_prof.max = cfg & 0XFFFF;
4114 			break;
4115 		case BAND_PROF_MID_LAYER:
4116 			ipolicer->band_prof.max = (cfg >> 16) & 0XFFFF;
4117 			break;
4118 		case BAND_PROF_TOP_LAYER:
4119 			ipolicer->band_prof.max = (cfg >> 32) & 0XFFFF;
4120 			break;
4121 		}
4122 
4123 		if (!ipolicer->band_prof.max)
4124 			continue;
4125 
4126 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
4127 		if (err)
4128 			return err;
4129 
4130 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4131 						  ipolicer->band_prof.max,
4132 						  sizeof(u16), GFP_KERNEL);
4133 		if (!ipolicer->pfvf_map)
4134 			return -ENOMEM;
4135 
4136 		ipolicer->match_id = devm_kcalloc(rvu->dev,
4137 						  ipolicer->band_prof.max,
4138 						  sizeof(u16), GFP_KERNEL);
4139 		if (!ipolicer->match_id)
4140 			return -ENOMEM;
4141 
4142 		for (prof_idx = 0;
4143 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
4144 			/* Set AF as current owner for INIT ops to succeed */
4145 			ipolicer->pfvf_map[prof_idx] = 0x00;
4146 
4147 			/* There is no enable bit in the profile context,
4148 			 * so no context disable. So let's INIT them here
4149 			 * so that PF/VF later on have to just do WRITE to
4150 			 * setup policer rates and config.
4151 			 */
4152 			err = nix_init_policer_context(rvu, nix_hw,
4153 						       layer, prof_idx);
4154 			if (err)
4155 				return err;
4156 		}
4157 
4158 		/* Allocate memory for maintaining ref_counts for MID level
4159 		 * profiles, this will be needed for leaf layer profiles'
4160 		 * aggregation.
4161 		 */
4162 		if (layer != BAND_PROF_MID_LAYER)
4163 			continue;
4164 
4165 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
4166 						   ipolicer->band_prof.max,
4167 						   sizeof(u16), GFP_KERNEL);
4168 	}
4169 
4170 	/* Set policer timeunit to 2us ie  (19 + 1) * 100 nsec = 2us */
4171 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4172 
4173 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4174 
4175 	return 0;
4176 }
4177 
4178 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4179 {
4180 	struct nix_ipolicer *ipolicer;
4181 	int layer;
4182 
4183 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4184 		ipolicer = &nix_hw->ipolicer[layer];
4185 
4186 		if (!ipolicer->band_prof.max)
4187 			continue;
4188 
4189 		kfree(ipolicer->band_prof.bmap);
4190 	}
4191 }
4192 
4193 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4194 			       struct nix_hw *nix_hw, u16 pcifunc)
4195 {
4196 	struct nix_ipolicer *ipolicer;
4197 	int layer, hi_layer, prof_idx;
4198 
4199 	/* Bits [15:14] in profile index represent layer */
4200 	layer = (req->qidx >> 14) & 0x03;
4201 	prof_idx = req->qidx & 0x3FFF;
4202 
4203 	ipolicer = &nix_hw->ipolicer[layer];
4204 	if (prof_idx >= ipolicer->band_prof.max)
4205 		return -EINVAL;
4206 
4207 	/* Check if the profile is allocated to the requesting PCIFUNC or not
4208 	 * with the exception of AF. AF is allowed to read and update contexts.
4209 	 */
4210 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4211 		return -EINVAL;
4212 
4213 	/* If this profile is linked to higher layer profile then check
4214 	 * if that profile is also allocated to the requesting PCIFUNC
4215 	 * or not.
4216 	 */
4217 	if (!req->prof.hl_en)
4218 		return 0;
4219 
4220 	/* Leaf layer profile can link only to mid layer and
4221 	 * mid layer to top layer.
4222 	 */
4223 	if (layer == BAND_PROF_LEAF_LAYER)
4224 		hi_layer = BAND_PROF_MID_LAYER;
4225 	else if (layer == BAND_PROF_MID_LAYER)
4226 		hi_layer = BAND_PROF_TOP_LAYER;
4227 	else
4228 		return -EINVAL;
4229 
4230 	ipolicer = &nix_hw->ipolicer[hi_layer];
4231 	prof_idx = req->prof.band_prof_id;
4232 	if (prof_idx >= ipolicer->band_prof.max ||
4233 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
4234 		return -EINVAL;
4235 
4236 	return 0;
4237 }
4238 
4239 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4240 					struct nix_bandprof_alloc_req *req,
4241 					struct nix_bandprof_alloc_rsp *rsp)
4242 {
4243 	int blkaddr, layer, prof, idx, err;
4244 	u16 pcifunc = req->hdr.pcifunc;
4245 	struct nix_ipolicer *ipolicer;
4246 	struct nix_hw *nix_hw;
4247 
4248 	if (!rvu->hw->cap.ipolicer)
4249 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4250 
4251 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4252 	if (err)
4253 		return err;
4254 
4255 	mutex_lock(&rvu->rsrc_lock);
4256 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4257 		if (layer == BAND_PROF_INVAL_LAYER)
4258 			continue;
4259 		if (!req->prof_count[layer])
4260 			continue;
4261 
4262 		ipolicer = &nix_hw->ipolicer[layer];
4263 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
4264 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4265 			if (idx == MAX_BANDPROF_PER_PFFUNC)
4266 				break;
4267 
4268 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4269 			if (prof < 0)
4270 				break;
4271 			rsp->prof_count[layer]++;
4272 			rsp->prof_idx[layer][idx] = prof;
4273 			ipolicer->pfvf_map[prof] = pcifunc;
4274 		}
4275 	}
4276 	mutex_unlock(&rvu->rsrc_lock);
4277 	return 0;
4278 }
4279 
4280 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4281 {
4282 	int blkaddr, layer, prof_idx, err;
4283 	struct nix_ipolicer *ipolicer;
4284 	struct nix_hw *nix_hw;
4285 
4286 	if (!rvu->hw->cap.ipolicer)
4287 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4288 
4289 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4290 	if (err)
4291 		return err;
4292 
4293 	mutex_lock(&rvu->rsrc_lock);
4294 	/* Free all the profiles allocated to the PCIFUNC */
4295 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4296 		if (layer == BAND_PROF_INVAL_LAYER)
4297 			continue;
4298 		ipolicer = &nix_hw->ipolicer[layer];
4299 
4300 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4301 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4302 				continue;
4303 
4304 			/* Clear ratelimit aggregation, if any */
4305 			if (layer == BAND_PROF_LEAF_LAYER &&
4306 			    ipolicer->match_id[prof_idx])
4307 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4308 
4309 			ipolicer->pfvf_map[prof_idx] = 0x00;
4310 			ipolicer->match_id[prof_idx] = 0;
4311 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4312 		}
4313 	}
4314 	mutex_unlock(&rvu->rsrc_lock);
4315 	return 0;
4316 }
4317 
4318 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4319 				       struct nix_bandprof_free_req *req,
4320 				       struct msg_rsp *rsp)
4321 {
4322 	int blkaddr, layer, prof_idx, idx, err;
4323 	u16 pcifunc = req->hdr.pcifunc;
4324 	struct nix_ipolicer *ipolicer;
4325 	struct nix_hw *nix_hw;
4326 
4327 	if (req->free_all)
4328 		return nix_free_all_bandprof(rvu, pcifunc);
4329 
4330 	if (!rvu->hw->cap.ipolicer)
4331 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4332 
4333 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4334 	if (err)
4335 		return err;
4336 
4337 	mutex_lock(&rvu->rsrc_lock);
4338 	/* Free the requested profile indices */
4339 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4340 		if (layer == BAND_PROF_INVAL_LAYER)
4341 			continue;
4342 		if (!req->prof_count[layer])
4343 			continue;
4344 
4345 		ipolicer = &nix_hw->ipolicer[layer];
4346 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
4347 			prof_idx = req->prof_idx[layer][idx];
4348 			if (prof_idx >= ipolicer->band_prof.max ||
4349 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
4350 				continue;
4351 
4352 			/* Clear ratelimit aggregation, if any */
4353 			if (layer == BAND_PROF_LEAF_LAYER &&
4354 			    ipolicer->match_id[prof_idx])
4355 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4356 
4357 			ipolicer->pfvf_map[prof_idx] = 0x00;
4358 			ipolicer->match_id[prof_idx] = 0;
4359 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4360 			if (idx == MAX_BANDPROF_PER_PFFUNC)
4361 				break;
4362 		}
4363 	}
4364 	mutex_unlock(&rvu->rsrc_lock);
4365 	return 0;
4366 }
4367 
4368 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4369 			struct nix_cn10k_aq_enq_req *aq_req,
4370 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
4371 			u16 pcifunc, u8 ctype, u32 qidx)
4372 {
4373 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4374 	aq_req->hdr.pcifunc = pcifunc;
4375 	aq_req->ctype = ctype;
4376 	aq_req->op = NIX_AQ_INSTOP_READ;
4377 	aq_req->qidx = qidx;
4378 
4379 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4380 				       (struct nix_aq_enq_req *)aq_req,
4381 				       (struct nix_aq_enq_rsp *)aq_rsp);
4382 }
4383 
4384 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4385 					  struct nix_hw *nix_hw,
4386 					  struct nix_cn10k_aq_enq_req *aq_req,
4387 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
4388 					  u32 leaf_prof, u16 mid_prof)
4389 {
4390 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4391 	aq_req->hdr.pcifunc = 0x00;
4392 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4393 	aq_req->op = NIX_AQ_INSTOP_WRITE;
4394 	aq_req->qidx = leaf_prof;
4395 
4396 	aq_req->prof.band_prof_id = mid_prof;
4397 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4398 	aq_req->prof.hl_en = 1;
4399 	aq_req->prof_mask.hl_en = 1;
4400 
4401 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4402 				       (struct nix_aq_enq_req *)aq_req,
4403 				       (struct nix_aq_enq_rsp *)aq_rsp);
4404 }
4405 
4406 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
4407 				 u16 rq_idx, u16 match_id)
4408 {
4409 	int leaf_prof, mid_prof, leaf_match;
4410 	struct nix_cn10k_aq_enq_req aq_req;
4411 	struct nix_cn10k_aq_enq_rsp aq_rsp;
4412 	struct nix_ipolicer *ipolicer;
4413 	struct nix_hw *nix_hw;
4414 	int blkaddr, idx, rc;
4415 
4416 	if (!rvu->hw->cap.ipolicer)
4417 		return 0;
4418 
4419 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4420 	if (rc)
4421 		return rc;
4422 
4423 	/* Fetch the RQ's context to see if policing is enabled */
4424 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
4425 				 NIX_AQ_CTYPE_RQ, rq_idx);
4426 	if (rc) {
4427 		dev_err(rvu->dev,
4428 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
4429 			__func__, rq_idx, pcifunc);
4430 		return rc;
4431 	}
4432 
4433 	if (!aq_rsp.rq.policer_ena)
4434 		return 0;
4435 
4436 	/* Get the bandwidth profile ID mapped to this RQ */
4437 	leaf_prof = aq_rsp.rq.band_prof_id;
4438 
4439 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
4440 	ipolicer->match_id[leaf_prof] = match_id;
4441 
4442 	/* Check if any other leaf profile is marked with same match_id */
4443 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
4444 		if (idx == leaf_prof)
4445 			continue;
4446 		if (ipolicer->match_id[idx] != match_id)
4447 			continue;
4448 
4449 		leaf_match = idx;
4450 		break;
4451 	}
4452 
4453 	if (idx == ipolicer->band_prof.max)
4454 		return 0;
4455 
4456 	/* Fetch the matching profile's context to check if it's already
4457 	 * mapped to a mid level profile.
4458 	 */
4459 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4460 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
4461 	if (rc) {
4462 		dev_err(rvu->dev,
4463 			"%s: Failed to fetch context of leaf profile %d\n",
4464 			__func__, leaf_match);
4465 		return rc;
4466 	}
4467 
4468 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
4469 	if (aq_rsp.prof.hl_en) {
4470 		/* Get Mid layer prof index and map leaf_prof index
4471 		 * also such that flows that are being steered
4472 		 * to different RQs and marked with same match_id
4473 		 * are rate limited in a aggregate fashion
4474 		 */
4475 		mid_prof = aq_rsp.prof.band_prof_id;
4476 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4477 						    &aq_req, &aq_rsp,
4478 						    leaf_prof, mid_prof);
4479 		if (rc) {
4480 			dev_err(rvu->dev,
4481 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4482 				__func__, leaf_prof, mid_prof);
4483 			goto exit;
4484 		}
4485 
4486 		mutex_lock(&rvu->rsrc_lock);
4487 		ipolicer->ref_count[mid_prof]++;
4488 		mutex_unlock(&rvu->rsrc_lock);
4489 		goto exit;
4490 	}
4491 
4492 	/* Allocate a mid layer profile and
4493 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
4494 	 */
4495 	mutex_lock(&rvu->rsrc_lock);
4496 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4497 	if (mid_prof < 0) {
4498 		dev_err(rvu->dev,
4499 			"%s: Unable to allocate mid layer profile\n", __func__);
4500 		mutex_unlock(&rvu->rsrc_lock);
4501 		goto exit;
4502 	}
4503 	mutex_unlock(&rvu->rsrc_lock);
4504 	ipolicer->pfvf_map[mid_prof] = 0x00;
4505 	ipolicer->ref_count[mid_prof] = 0;
4506 
4507 	/* Initialize mid layer profile same as 'leaf_prof' */
4508 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4509 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
4510 	if (rc) {
4511 		dev_err(rvu->dev,
4512 			"%s: Failed to fetch context of leaf profile %d\n",
4513 			__func__, leaf_prof);
4514 		goto exit;
4515 	}
4516 
4517 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4518 	aq_req.hdr.pcifunc = 0x00;
4519 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
4520 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4521 	aq_req.op = NIX_AQ_INSTOP_WRITE;
4522 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
4523 	/* Clear higher layer enable bit in the mid profile, just in case */
4524 	aq_req.prof.hl_en = 0;
4525 	aq_req.prof_mask.hl_en = 1;
4526 
4527 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4528 				     (struct nix_aq_enq_req *)&aq_req, NULL);
4529 	if (rc) {
4530 		dev_err(rvu->dev,
4531 			"%s: Failed to INIT context of mid layer profile %d\n",
4532 			__func__, mid_prof);
4533 		goto exit;
4534 	}
4535 
4536 	/* Map both leaf profiles to this mid layer profile */
4537 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4538 					    &aq_req, &aq_rsp,
4539 					    leaf_prof, mid_prof);
4540 	if (rc) {
4541 		dev_err(rvu->dev,
4542 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4543 			__func__, leaf_prof, mid_prof);
4544 		goto exit;
4545 	}
4546 
4547 	mutex_lock(&rvu->rsrc_lock);
4548 	ipolicer->ref_count[mid_prof]++;
4549 	mutex_unlock(&rvu->rsrc_lock);
4550 
4551 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4552 					    &aq_req, &aq_rsp,
4553 					    leaf_match, mid_prof);
4554 	if (rc) {
4555 		dev_err(rvu->dev,
4556 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4557 			__func__, leaf_match, mid_prof);
4558 		ipolicer->ref_count[mid_prof]--;
4559 		goto exit;
4560 	}
4561 
4562 	mutex_lock(&rvu->rsrc_lock);
4563 	ipolicer->ref_count[mid_prof]++;
4564 	mutex_unlock(&rvu->rsrc_lock);
4565 
4566 exit:
4567 	return rc;
4568 }
4569 
4570 /* Called with mutex rsrc_lock */
4571 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
4572 				     u32 leaf_prof)
4573 {
4574 	struct nix_cn10k_aq_enq_req aq_req;
4575 	struct nix_cn10k_aq_enq_rsp aq_rsp;
4576 	struct nix_ipolicer *ipolicer;
4577 	u16 mid_prof;
4578 	int rc;
4579 
4580 	mutex_unlock(&rvu->rsrc_lock);
4581 
4582 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4583 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
4584 
4585 	mutex_lock(&rvu->rsrc_lock);
4586 	if (rc) {
4587 		dev_err(rvu->dev,
4588 			"%s: Failed to fetch context of leaf profile %d\n",
4589 			__func__, leaf_prof);
4590 		return;
4591 	}
4592 
4593 	if (!aq_rsp.prof.hl_en)
4594 		return;
4595 
4596 	mid_prof = aq_rsp.prof.band_prof_id;
4597 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
4598 	ipolicer->ref_count[mid_prof]--;
4599 	/* If ref_count is zero, free mid layer profile */
4600 	if (!ipolicer->ref_count[mid_prof]) {
4601 		ipolicer->pfvf_map[mid_prof] = 0x00;
4602 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
4603 	}
4604 }
4605