1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 #include "lmac_common.h"
20 
21 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
22 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
23 			    int type, int chan_id);
24 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
25 			       int type, bool add);
26 static int nix_setup_ipolicers(struct rvu *rvu,
27 			       struct nix_hw *nix_hw, int blkaddr);
28 static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
29 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
30 			       struct nix_hw *nix_hw, u16 pcifunc);
31 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
32 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
33 				     u32 leaf_prof);
34 
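/* Encodings for the multicast/MCE table size; a value of N appears to
 * correspond to (256 << N) table entries, matching the "256UL << size"
 * bound used when validating MCE indices in rvu_nix_blk_aq_enq_inst().
 */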
35 enum mc_tbl_sz {
36 	MC_TBL_SZ_256,
37 	MC_TBL_SZ_512,
38 	MC_TBL_SZ_1K,
39 	MC_TBL_SZ_2K,
40 	MC_TBL_SZ_4K,
41 	MC_TBL_SZ_8K,
42 	MC_TBL_SZ_16K,
43 	MC_TBL_SZ_32K,
44 	MC_TBL_SZ_64K,
45 };
46 
47 enum mc_buf_cnt {
48 	MC_BUF_CNT_8,
49 	MC_BUF_CNT_16,
50 	MC_BUF_CNT_32,
51 	MC_BUF_CNT_64,
52 	MC_BUF_CNT_128,
53 	MC_BUF_CNT_256,
54 	MC_BUF_CNT_512,
55 	MC_BUF_CNT_1024,
56 	MC_BUF_CNT_2048,
57 };
58 
59 enum nix_makr_fmt_indexes {
60 	NIX_MARK_CFG_IP_DSCP_RED,
61 	NIX_MARK_CFG_IP_DSCP_YELLOW,
62 	NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
63 	NIX_MARK_CFG_IP_ECN_RED,
64 	NIX_MARK_CFG_IP_ECN_YELLOW,
65 	NIX_MARK_CFG_IP_ECN_YELLOW_RED,
66 	NIX_MARK_CFG_VLAN_DEI_RED,
67 	NIX_MARK_CFG_VLAN_DEI_YELLOW,
68 	NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
69 	NIX_MARK_CFG_MAX,
70 };
71 
72 /* For now, only the MC resources needed for broadcast
73  * pkt replication are considered, i.e. 256 HWVFs + 12 PFs.
74  */
75 #define MC_TBL_SIZE	MC_TBL_SZ_512
76 #define MC_BUF_CNT	MC_BUF_CNT_128
77 
78 struct mce {
79 	struct hlist_node	node;
80 	u16			pcifunc;
81 };
82 
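/* Walk the list of NIX block addresses present on this silicon: with
 * blkaddr == 0 the first NIX block address is returned, otherwise the
 * address following 'blkaddr' is returned, or 0 once the list ends.
 */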
83 int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
84 {
85 	int i = 0;
86 
87 	/* If blkaddr is 0, return the first NIX block address */
88 	if (blkaddr == 0)
89 		return rvu->nix_blkaddr[blkaddr];
90 
91 	while (i + 1 < MAX_NIX_BLKS) {
92 		if (rvu->nix_blkaddr[i] == blkaddr)
93 			return rvu->nix_blkaddr[i + 1];
94 		i++;
95 	}
96 
97 	return 0;
98 }
99 
100 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
101 {
102 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
103 	int blkaddr;
104 
105 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
106 	if (!pfvf->nixlf || blkaddr < 0)
107 		return false;
108 	return true;
109 }
110 
111 int rvu_get_nixlf_count(struct rvu *rvu)
112 {
113 	int blkaddr = 0, max = 0;
114 	struct rvu_block *block;
115 
116 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
117 	while (blkaddr) {
118 		block = &rvu->hw->block[blkaddr];
119 		max += block->lf.max;
120 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
121 	}
122 	return max;
123 }
124 
125 int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
126 {
127 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
128 	struct rvu_hwinfo *hw = rvu->hw;
129 	int blkaddr;
130 
131 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
132 	if (!pfvf->nixlf || blkaddr < 0)
133 		return NIX_AF_ERR_AF_LF_INVALID;
134 
135 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
136 	if (*nixlf < 0)
137 		return NIX_AF_ERR_AF_LF_INVALID;
138 
139 	if (nix_blkaddr)
140 		*nix_blkaddr = blkaddr;
141 
142 	return 0;
143 }
144 
145 int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
146 			struct nix_hw **nix_hw, int *blkaddr)
147 {
148 	struct rvu_pfvf *pfvf;
149 
150 	pfvf = rvu_get_pfvf(rvu, pcifunc);
151 	*blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
152 	if (!pfvf->nixlf || *blkaddr < 0)
153 		return NIX_AF_ERR_AF_LF_INVALID;
154 
155 	*nix_hw = get_nix_hw(rvu->hw, *blkaddr);
156 	if (!*nix_hw)
157 		return NIX_AF_ERR_INVALID_NIXBLK;
158 	return 0;
159 }
160 
161 static void nix_mce_list_init(struct nix_mce_list *list, int max)
162 {
163 	INIT_HLIST_HEAD(&list->head);
164 	list->count = 0;
165 	list->max = max;
166 }
167 
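/* Simple bump allocator for MCE entries: return the current free index and
 * advance next_free_mce by 'count'. This helper does not free entries.
 */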
168 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
169 {
170 	int idx;
171 
172 	if (!mcast)
173 		return 0;
174 
175 	idx = mcast->next_free_mce;
176 	mcast->next_free_mce += count;
177 	return idx;
178 }
179 
180 struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
181 {
182 	int nix_blkaddr = 0, i = 0;
183 	struct rvu *rvu = hw->rvu;
184 
185 	nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
186 	while (nix_blkaddr) {
187 		if (blkaddr == nix_blkaddr && hw->nix)
188 			return &hw->nix[i];
189 		nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
190 		i++;
191 	}
192 	return NULL;
193 }
194 
195 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
196 {
197 	int err;
198 
199 	/* Sync all in-flight RX packets to LLC/DRAM */
200 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
201 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
202 	if (err)
203 		dev_err(rvu->dev, "NIX RX software sync failed\n");
204 }
205 
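/* Check that a transmit scheduler queue index is within range and owned by
 * the requesting PF_FUNC. Queues at or above the traffic aggregation level
 * are shared by a PF and its VFs, so only the PF part needs to match there.
 */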
206 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
207 			    int lvl, u16 pcifunc, u16 schq)
208 {
209 	struct rvu_hwinfo *hw = rvu->hw;
210 	struct nix_txsch *txsch;
211 	struct nix_hw *nix_hw;
212 	u16 map_func;
213 
214 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
215 	if (!nix_hw)
216 		return false;
217 
218 	txsch = &nix_hw->txsch[lvl];
219 	/* Check out of bounds */
220 	if (schq >= txsch->schq.max)
221 		return false;
222 
223 	mutex_lock(&rvu->rsrc_lock);
224 	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
225 	mutex_unlock(&rvu->rsrc_lock);
226 
227 	/* TLs aggregating traffic are shared across PF and VFs */
228 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
229 		if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
230 			return false;
231 		else
232 			return true;
233 	}
234 
235 	if (map_func != pcifunc)
236 		return false;
237 
238 	return true;
239 }
240 
241 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
242 {
243 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
244 	struct mac_ops *mac_ops;
245 	int pkind, pf, vf, lbkid;
246 	u8 cgx_id, lmac_id;
247 	int err;
248 
249 	pf = rvu_get_pf(pcifunc);
250 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
251 		return 0;
252 
253 	switch (type) {
254 	case NIX_INTF_TYPE_CGX:
255 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
256 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
257 
258 		pkind = rvu_npc_get_pkind(rvu, pf);
259 		if (pkind < 0) {
260 			dev_err(rvu->dev,
261 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
262 			return -EINVAL;
263 		}
264 		pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
265 		pfvf->tx_chan_base = pfvf->rx_chan_base;
266 		pfvf->rx_chan_cnt = 1;
267 		pfvf->tx_chan_cnt = 1;
268 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
269 		rvu_npc_set_pkind(rvu, pkind, pfvf);
270 
271 		mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
272 		/* By default we enable pause frames */
273 		if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
274 			mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
275 								    rvu),
276 						      lmac_id, true, true);
277 		break;
278 	case NIX_INTF_TYPE_LBK:
279 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
280 
281 		/* If the NIX1 block is present on the silicon then NIXes are
282 		 * assigned alternately for lbk interfaces. NIX0 should
283 		 * send packets on lbk link 1 channels and NIX1 should send
284 		 * on lbk link 0 channels for the communication between
285 		 * NIX0 and NIX1.
286 		 */
287 		lbkid = 0;
288 		if (rvu->hw->lbk_links > 1)
289 			lbkid = vf & 0x1 ? 0 : 1;
290 
291 		/* Note that AF's VFs work in pairs and talk over consecutive
292 		 * loopback channels. Therefore, if an odd number of AF VFs are
293 		 * enabled, the last VF remains without a pair.
294 		 */
295 		pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
296 		pfvf->tx_chan_base = vf & 0x1 ?
297 					rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
298 					rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
299 		pfvf->rx_chan_cnt = 1;
300 		pfvf->tx_chan_cnt = 1;
301 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
302 					      pfvf->rx_chan_base,
303 					      pfvf->rx_chan_cnt);
304 		break;
305 	}
306 
307 	/* Add a UCAST forwarding rule in MCAM matching this NIXLF's
308 	 * attached RVU PF/VF MAC address.
309 	 */
310 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
311 				    pfvf->rx_chan_base, pfvf->mac_addr);
312 
313 	/* Add this PF_FUNC to bcast pkt replication list */
314 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
315 	if (err) {
316 		dev_err(rvu->dev,
317 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
318 			pcifunc);
319 		return err;
320 	}
321 	/* Install MCAM rule matching Ethernet broadcast mac address */
322 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
323 					  nixlf, pfvf->rx_chan_base);
324 
325 	pfvf->maxlen = NIC_HW_MIN_FRS;
326 	pfvf->minlen = NIC_HW_MIN_FRS;
327 
328 	return 0;
329 }
330 
331 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
332 {
333 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
334 	int err;
335 
336 	pfvf->maxlen = 0;
337 	pfvf->minlen = 0;
338 
339 	/* Remove this PF_FUNC from bcast pkt replication list */
340 	err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
341 	if (err) {
342 		dev_err(rvu->dev,
343 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
344 			pcifunc);
345 	}
346 
347 	/* Free and disable any MCAM entries used by this NIX LF */
348 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
349 
350 	/* Disable DMAC filters used */
351 	rvu_cgx_disable_dmac_entries(rvu, pcifunc);
352 }
353 
354 int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
355 				    struct nix_bp_cfg_req *req,
356 				    struct msg_rsp *rsp)
357 {
358 	u16 pcifunc = req->hdr.pcifunc;
359 	struct rvu_pfvf *pfvf;
360 	int blkaddr, pf, type;
361 	u16 chan_base, chan;
362 	u64 cfg;
363 
364 	pf = rvu_get_pf(pcifunc);
365 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
366 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
367 		return 0;
368 
369 	pfvf = rvu_get_pfvf(rvu, pcifunc);
370 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
371 
372 	chan_base = pfvf->rx_chan_base + req->chan_base;
373 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
374 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
375 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
376 			    cfg & ~BIT_ULL(16));
377 	}
378 	return 0;
379 }
380 
381 static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
382 			    int type, int chan_id)
383 {
384 	int bpid, blkaddr, lmac_chan_cnt;
385 	struct rvu_hwinfo *hw = rvu->hw;
386 	u16 cgx_bpid_cnt, lbk_bpid_cnt;
387 	struct rvu_pfvf *pfvf;
388 	u8 cgx_id, lmac_id;
389 	u64 cfg;
390 
391 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
392 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
393 	lmac_chan_cnt = cfg & 0xFF;
394 
395 	cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
396 	lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
397 
398 	pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
399 
400 	/* Backpressure IDs range division
401 	 * CGX channels are mapped to (0 - 191) BPIDs
402 	 * LBK channels are mapped to (192 - 255) BPIDs
403 	 * SDP channels are mapped to (256 - 511) BPIDs
404 	 *
405 	 * LMAC channels and BPIDs are mapped as follows
406 	 * cgx(0)_lmac(0)_chan(0 - 15) = bpid(0 - 15)
407 	 * cgx(0)_lmac(1)_chan(0 - 15) = bpid(16 - 31) ....
408 	 * cgx(1)_lmac(0)_chan(0 - 15) = bpid(64 - 79) ....
409 	 */
410 	switch (type) {
411 	case NIX_INTF_TYPE_CGX:
412 		if ((req->chan_base + req->chan_cnt) > 15)
413 			return -EINVAL;
414 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
415 		/* Assign bpid based on cgx, lmac and chan id */
416 		bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) +
417 			(lmac_id * lmac_chan_cnt) + req->chan_base;
418 
419 		if (req->bpid_per_chan)
420 			bpid += chan_id;
421 		if (bpid > cgx_bpid_cnt)
422 			return -EINVAL;
423 		break;
424 
425 	case NIX_INTF_TYPE_LBK:
426 		if ((req->chan_base + req->chan_cnt) > 63)
427 			return -EINVAL;
428 		bpid = cgx_bpid_cnt + req->chan_base;
429 		if (req->bpid_per_chan)
430 			bpid += chan_id;
431 		if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
432 			return -EINVAL;
433 		break;
434 	default:
435 		return -EINVAL;
436 	}
437 	return bpid;
438 }
439 
440 int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
441 				   struct nix_bp_cfg_req *req,
442 				   struct nix_bp_cfg_rsp *rsp)
443 {
444 	int blkaddr, pf, type, chan_id = 0;
445 	u16 pcifunc = req->hdr.pcifunc;
446 	struct rvu_pfvf *pfvf;
447 	u16 chan_base, chan;
448 	s16 bpid, bpid_base;
449 	u64 cfg;
450 
451 	pf = rvu_get_pf(pcifunc);
452 	type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
453 
454 	/* Enable backpressure only for CGX mapped PFs and LBK interface */
455 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
456 		return 0;
457 
458 	pfvf = rvu_get_pfvf(rvu, pcifunc);
459 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
460 
461 	bpid_base = rvu_nix_get_bpid(rvu, req, type, chan_id);
462 	chan_base = pfvf->rx_chan_base + req->chan_base;
463 	bpid = bpid_base;
464 
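	/* For each RX channel, program the assigned BPID into the low bits of
	 * NIX_AF_RX_CHANX_CFG and set bit 16 to enable backpressure on that
	 * channel (nix_bp_disable() clears the same bit).
	 */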
465 	for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
466 		if (bpid < 0) {
467 			dev_warn(rvu->dev, "Failed to enable backpressure\n");
468 			return -EINVAL;
469 		}
470 
471 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
472 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
473 			    cfg | (bpid & 0xFF) | BIT_ULL(16));
474 		chan_id++;
475 		bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
476 	}
477 
478 	for (chan = 0; chan < req->chan_cnt; chan++) {
479 		/* Map channel and the bpid assigned to it */
480 		rsp->chan_bpid[chan] = ((req->chan_base + chan) & 0x7F) << 10 |
481 					(bpid_base & 0x3FF);
482 		if (req->bpid_per_chan)
483 			bpid_base++;
484 	}
485 	rsp->chan_cnt = req->chan_cnt;
486 
487 	return 0;
488 }
489 
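/* Each NIX_AF_LSO_FORMATX_FIELDX entry describes one header field that the
 * hardware rewrites per LSO segment: ADD_PAYLEN patches the IP length,
 * ADD_SEGNUM bumps the IPv4 ID, ADD_OFFSET advances the TCP sequence number
 * and TCP_FLAGS applies the per-segment flag masks set in NIX_AF_LSO_CFG.
 */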
490 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
491 				 u64 format, bool v4, u64 *fidx)
492 {
493 	struct nix_lso_format field = {0};
494 
495 	/* IP's Length field */
496 	field.layer = NIX_TXLAYER_OL3;
497 	/* In IPv4, the length field is at byte offset 2; for IPv6 it's at 4 */
498 	field.offset = v4 ? 2 : 4;
499 	field.sizem1 = 1; /* i.e. 2 bytes */
500 	field.alg = NIX_LSOALG_ADD_PAYLEN;
501 	rvu_write64(rvu, blkaddr,
502 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
503 		    *(u64 *)&field);
504 
505 	/* No ID field in IPv6 header */
506 	if (!v4)
507 		return;
508 
509 	/* IP's ID field */
510 	field.layer = NIX_TXLAYER_OL3;
511 	field.offset = 4;
512 	field.sizem1 = 1; /* i.e. 2 bytes */
513 	field.alg = NIX_LSOALG_ADD_SEGNUM;
514 	rvu_write64(rvu, blkaddr,
515 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
516 		    *(u64 *)&field);
517 }
518 
519 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
520 				 u64 format, u64 *fidx)
521 {
522 	struct nix_lso_format field = {0};
523 
524 	/* TCP's sequence number field */
525 	field.layer = NIX_TXLAYER_OL4;
526 	field.offset = 4;
527 	field.sizem1 = 3; /* i.e. 4 bytes */
528 	field.alg = NIX_LSOALG_ADD_OFFSET;
529 	rvu_write64(rvu, blkaddr,
530 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
531 		    *(u64 *)&field);
532 
533 	/* TCP's flags field */
534 	field.layer = NIX_TXLAYER_OL4;
535 	field.offset = 12;
536 	field.sizem1 = 1; /* 2 bytes */
537 	field.alg = NIX_LSOALG_TCP_FLAGS;
538 	rvu_write64(rvu, blkaddr,
539 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
540 		    *(u64 *)&field);
541 }
542 
543 static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
544 {
545 	u64 cfg, idx, fidx = 0;
546 
547 	/* Get max HW supported format indices */
548 	cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
549 	nix_hw->lso.total = cfg;
550 
551 	/* Enable LSO */
552 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
553 	/* For TSO, set first and middle segment flags to
554 	 * mask out PSH, RST & FIN flags in TCP packet
555 	 */
556 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
557 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
558 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
559 
560 	/* Setup default static LSO formats
561 	 *
562 	 * Configure format fields for TCPv4 segmentation offload
563 	 */
564 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
565 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
566 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
567 
568 	/* Set rest of the fields to NOP */
569 	for (; fidx < 8; fidx++) {
570 		rvu_write64(rvu, blkaddr,
571 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
572 	}
573 	nix_hw->lso.in_use++;
574 
575 	/* Configure format fields for TCPv6 segmentation offload */
576 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
577 	fidx = 0;
578 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
579 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
580 
581 	/* Set rest of the fields to NOP */
582 	for (; fidx < 8; fidx++) {
583 		rvu_write64(rvu, blkaddr,
584 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
585 	}
586 	nix_hw->lso.in_use++;
587 }
588 
589 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
590 {
591 	kfree(pfvf->rq_bmap);
592 	kfree(pfvf->sq_bmap);
593 	kfree(pfvf->cq_bmap);
594 	if (pfvf->rq_ctx)
595 		qmem_free(rvu->dev, pfvf->rq_ctx);
596 	if (pfvf->sq_ctx)
597 		qmem_free(rvu->dev, pfvf->sq_ctx);
598 	if (pfvf->cq_ctx)
599 		qmem_free(rvu->dev, pfvf->cq_ctx);
600 	if (pfvf->rss_ctx)
601 		qmem_free(rvu->dev, pfvf->rss_ctx);
602 	if (pfvf->nix_qints_ctx)
603 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
604 	if (pfvf->cq_ints_ctx)
605 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
606 
607 	pfvf->rq_bmap = NULL;
608 	pfvf->cq_bmap = NULL;
609 	pfvf->sq_bmap = NULL;
610 	pfvf->rq_ctx = NULL;
611 	pfvf->sq_ctx = NULL;
612 	pfvf->cq_ctx = NULL;
613 	pfvf->rss_ctx = NULL;
614 	pfvf->nix_qints_ctx = NULL;
615 	pfvf->cq_ints_ctx = NULL;
616 }
617 
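/* Allocate and program the RSS indirection table for a NIX LF. The table
 * holds (rss_sz * rss_grps) entries; each group 'grp' starts at offset
 * (rss_sz * grp) and its size is programmed as log2(rss_sz) - 1.
 */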
618 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
619 			      struct rvu_pfvf *pfvf, int nixlf,
620 			      int rss_sz, int rss_grps, int hwctx_size,
621 			      u64 way_mask)
622 {
623 	int err, grp, num_indices;
624 
625 	/* RSS is not requested for this NIXLF */
626 	if (!rss_sz)
627 		return 0;
628 	num_indices = rss_sz * rss_grps;
629 
630 	/* Alloc NIX RSS HW context memory and config the base */
631 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
632 	if (err)
633 		return err;
634 
635 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
636 		    (u64)pfvf->rss_ctx->iova);
637 
638 	/* Config full RSS table size, enable RSS and caching */
639 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
640 		    BIT_ULL(36) | BIT_ULL(4) |
641 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
642 		    way_mask << 20);
643 	/* Config RSS group offset and sizes */
644 	for (grp = 0; grp < rss_grps; grp++)
645 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
646 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
647 	return 0;
648 }
649 
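/* Copy one instruction to the AQ head, ring the doorbell and busy-wait
 * (up to ~1000 x 1us) for the completion code in the result memory.
 * Callers serialize on aq->lock since a single result buffer is shared.
 */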
650 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
651 			       struct nix_aq_inst_s *inst)
652 {
653 	struct admin_queue *aq = block->aq;
654 	struct nix_aq_res_s *result;
655 	int timeout = 1000;
656 	u64 reg, head;
657 
658 	result = (struct nix_aq_res_s *)aq->res->base;
659 
660 	/* Get the current head pointer at which to append this instruction */
661 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
662 	head = (reg >> 4) & AQ_PTR_MASK;
663 
664 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
665 	       (void *)inst, aq->inst->entry_sz);
666 	memset(result, 0, sizeof(*result));
667 	/* sync into memory */
668 	wmb();
669 
670 	/* Ring the doorbell and wait for result */
671 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
672 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
673 		cpu_relax();
674 		udelay(1);
675 		timeout--;
676 		if (!timeout)
677 			return -EBUSY;
678 	}
679 
680 	if (result->compcode != NIX_AQ_COMP_GOOD)
681 		/* TODO: Replace this with some error code */
682 		return -EBUSY;
683 
684 	return 0;
685 }
686 
687 static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
688 				   struct nix_aq_enq_req *req,
689 				   struct nix_aq_enq_rsp *rsp)
690 {
691 	struct rvu_hwinfo *hw = rvu->hw;
692 	u16 pcifunc = req->hdr.pcifunc;
693 	int nixlf, blkaddr, rc = 0;
694 	struct nix_aq_inst_s inst;
695 	struct rvu_block *block;
696 	struct admin_queue *aq;
697 	struct rvu_pfvf *pfvf;
698 	void *ctx, *mask;
699 	bool ena;
700 	u64 cfg;
701 
702 	blkaddr = nix_hw->blkaddr;
703 	block = &hw->block[blkaddr];
704 	aq = block->aq;
705 	if (!aq) {
706 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
707 		return NIX_AF_ERR_AQ_ENQUEUE;
708 	}
709 
710 	pfvf = rvu_get_pfvf(rvu, pcifunc);
711 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
712 
713 	/* Skip NIXLF check for broadcast MCE entry and bandwidth profile
714 	 * operations done by AF itself.
715 	 */
716 	if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
717 	      (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
718 		if (!pfvf->nixlf || nixlf < 0)
719 			return NIX_AF_ERR_AF_LF_INVALID;
720 	}
721 
722 	switch (req->ctype) {
723 	case NIX_AQ_CTYPE_RQ:
724 		/* Check if index exceeds max no of queues */
725 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
726 			rc = NIX_AF_ERR_AQ_ENQUEUE;
727 		break;
728 	case NIX_AQ_CTYPE_SQ:
729 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
730 			rc = NIX_AF_ERR_AQ_ENQUEUE;
731 		break;
732 	case NIX_AQ_CTYPE_CQ:
733 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
734 			rc = NIX_AF_ERR_AQ_ENQUEUE;
735 		break;
736 	case NIX_AQ_CTYPE_RSS:
737 		/* Check if RSS is enabled and qidx is within range */
738 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
739 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
740 		    (req->qidx >= (256UL << (cfg & 0xF))))
741 			rc = NIX_AF_ERR_AQ_ENQUEUE;
742 		break;
743 	case NIX_AQ_CTYPE_MCE:
744 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
745 
746 		/* Check if index exceeds MCE list length */
747 		if (!nix_hw->mcast.mce_ctx ||
748 		    (req->qidx >= (256UL << (cfg & 0xF))))
749 			rc = NIX_AF_ERR_AQ_ENQUEUE;
750 
751 		/* Adding multicast lists for requests from PF/VFs is not
752 		 * yet supported, so ignore this.
753 		 */
754 		if (rsp)
755 			rc = NIX_AF_ERR_AQ_ENQUEUE;
756 		break;
757 	case NIX_AQ_CTYPE_BANDPROF:
758 		if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
759 					nix_hw, pcifunc))
760 			rc = NIX_AF_ERR_INVALID_BANDPROF;
761 		break;
762 	default:
763 		rc = NIX_AF_ERR_AQ_ENQUEUE;
764 	}
765 
766 	if (rc)
767 		return rc;
768 
769 	/* Check if the SMQ pointed to by the SQ belongs to this PF/VF or not */
770 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
771 	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
772 	     (req->op == NIX_AQ_INSTOP_WRITE &&
773 	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
774 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
775 				     pcifunc, req->sq.smq))
776 			return NIX_AF_ERR_AQ_ENQUEUE;
777 	}
778 
779 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
780 	inst.lf = nixlf;
781 	inst.cindex = req->qidx;
782 	inst.ctype = req->ctype;
783 	inst.op = req->op;
784 	/* Enqueuing multiple instructions at once is not yet supported,
785 	 * so always use the first entry in result memory.
786 	 */
787 	inst.res_addr = (u64)aq->res->iova;
788 
789 	/* Hardware uses the same aq->res->base for updating the result of the
790 	 * previous instruction, hence serialize requests here until it is done.
791 	 */
792 	spin_lock(&aq->lock);
793 
794 	/* Clean result + context memory */
795 	memset(aq->res->base, 0, aq->res->entry_sz);
796 	/* Context needs to be written at RES_ADDR + 128 */
797 	ctx = aq->res->base + 128;
798 	/* Mask needs to be written at RES_ADDR + 256 */
799 	mask = aq->res->base + 256;
800 
801 	switch (req->op) {
802 	case NIX_AQ_INSTOP_WRITE:
803 		if (req->ctype == NIX_AQ_CTYPE_RQ)
804 			memcpy(mask, &req->rq_mask,
805 			       sizeof(struct nix_rq_ctx_s));
806 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
807 			memcpy(mask, &req->sq_mask,
808 			       sizeof(struct nix_sq_ctx_s));
809 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
810 			memcpy(mask, &req->cq_mask,
811 			       sizeof(struct nix_cq_ctx_s));
812 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
813 			memcpy(mask, &req->rss_mask,
814 			       sizeof(struct nix_rsse_s));
815 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
816 			memcpy(mask, &req->mce_mask,
817 			       sizeof(struct nix_rx_mce_s));
818 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
819 			memcpy(mask, &req->prof_mask,
820 			       sizeof(struct nix_bandprof_s));
821 		fallthrough;
822 	case NIX_AQ_INSTOP_INIT:
823 		if (req->ctype == NIX_AQ_CTYPE_RQ)
824 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
825 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
826 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
827 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
828 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
829 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
830 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
831 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
832 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
833 		else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
834 			memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
835 		break;
836 	case NIX_AQ_INSTOP_NOP:
837 	case NIX_AQ_INSTOP_READ:
838 	case NIX_AQ_INSTOP_LOCK:
839 	case NIX_AQ_INSTOP_UNLOCK:
840 		break;
841 	default:
842 		rc = NIX_AF_ERR_AQ_ENQUEUE;
843 		spin_unlock(&aq->lock);
844 		return rc;
845 	}
846 
847 	/* Submit the instruction to AQ */
848 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
849 	if (rc) {
850 		spin_unlock(&aq->lock);
851 		return rc;
852 	}
853 
854 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
855 	if (req->op == NIX_AQ_INSTOP_INIT) {
856 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
857 			__set_bit(req->qidx, pfvf->rq_bmap);
858 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
859 			__set_bit(req->qidx, pfvf->sq_bmap);
860 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
861 			__set_bit(req->qidx, pfvf->cq_bmap);
862 	}
863 
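	/* For WRITE ops the ENA bit is updated only if its mask bit is set.
	 * Mirror that read-modify-write here so the local bitmaps track the
	 * resulting enable state:
	 *   new_ena = (req_ena & mask_ena) | (old_bitmap_bit & ~mask_ena)
	 */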
864 	if (req->op == NIX_AQ_INSTOP_WRITE) {
865 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
866 			ena = (req->rq.ena & req->rq_mask.ena) |
867 				(test_bit(req->qidx, pfvf->rq_bmap) &
868 				~req->rq_mask.ena);
869 			if (ena)
870 				__set_bit(req->qidx, pfvf->rq_bmap);
871 			else
872 				__clear_bit(req->qidx, pfvf->rq_bmap);
873 		}
874 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
875 			ena = (req->sq.ena & req->sq_mask.ena) |
876 				(test_bit(req->qidx, pfvf->sq_bmap) &
877 				~req->sq_mask.ena);
878 			if (ena)
879 				__set_bit(req->qidx, pfvf->sq_bmap);
880 			else
881 				__clear_bit(req->qidx, pfvf->sq_bmap);
882 		}
883 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
884 			ena = (req->cq.ena & req->cq_mask.ena) |
885 				(test_bit(req->qidx, pfvf->cq_bmap) &
886 				~req->cq_mask.ena);
887 			if (ena)
888 				__set_bit(req->qidx, pfvf->cq_bmap);
889 			else
890 				__clear_bit(req->qidx, pfvf->cq_bmap);
891 		}
892 	}
893 
894 	if (rsp) {
895 		/* Copy read context into mailbox */
896 		if (req->op == NIX_AQ_INSTOP_READ) {
897 			if (req->ctype == NIX_AQ_CTYPE_RQ)
898 				memcpy(&rsp->rq, ctx,
899 				       sizeof(struct nix_rq_ctx_s));
900 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
901 				memcpy(&rsp->sq, ctx,
902 				       sizeof(struct nix_sq_ctx_s));
903 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
904 				memcpy(&rsp->cq, ctx,
905 				       sizeof(struct nix_cq_ctx_s));
906 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
907 				memcpy(&rsp->rss, ctx,
908 				       sizeof(struct nix_rsse_s));
909 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
910 				memcpy(&rsp->mce, ctx,
911 				       sizeof(struct nix_rx_mce_s));
912 			else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
913 				memcpy(&rsp->prof, ctx,
914 				       sizeof(struct nix_bandprof_s));
915 		}
916 	}
917 
918 	spin_unlock(&aq->lock);
919 	return 0;
920 }
921 
922 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
923 			       struct nix_aq_enq_rsp *rsp)
924 {
925 	struct nix_hw *nix_hw;
926 	int blkaddr;
927 
928 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
929 	if (blkaddr < 0)
930 		return NIX_AF_ERR_AF_LF_INVALID;
931 
932 	nix_hw =  get_nix_hw(rvu->hw, blkaddr);
933 	if (!nix_hw)
934 		return -EINVAL;
935 
936 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
937 }
938 
939 static const char *nix_get_ctx_name(int ctype)
940 {
941 	switch (ctype) {
942 	case NIX_AQ_CTYPE_CQ:
943 		return "CQ";
944 	case NIX_AQ_CTYPE_SQ:
945 		return "SQ";
946 	case NIX_AQ_CTYPE_RQ:
947 		return "RQ";
948 	case NIX_AQ_CTYPE_RSS:
949 		return "RSS";
950 	}
951 	return "";
952 }
953 
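/* Disable all enabled queues of the given context type for a NIX LF by
 * issuing AQ WRITE ops that clear ENA (and BP_ENA for CQs), walking the
 * LF's RQ/SQ/CQ enable bitmaps.
 */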
954 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
955 {
956 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
957 	struct nix_aq_enq_req aq_req;
958 	unsigned long *bmap;
959 	int qidx, q_cnt = 0;
960 	int err = 0, rc;
961 
962 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
963 		return NIX_AF_ERR_AQ_ENQUEUE;
964 
965 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
966 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
967 
968 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
969 		aq_req.cq.ena = 0;
970 		aq_req.cq_mask.ena = 1;
971 		aq_req.cq.bp_ena = 0;
972 		aq_req.cq_mask.bp_ena = 1;
973 		q_cnt = pfvf->cq_ctx->qsize;
974 		bmap = pfvf->cq_bmap;
975 	}
976 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
977 		aq_req.sq.ena = 0;
978 		aq_req.sq_mask.ena = 1;
979 		q_cnt = pfvf->sq_ctx->qsize;
980 		bmap = pfvf->sq_bmap;
981 	}
982 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
983 		aq_req.rq.ena = 0;
984 		aq_req.rq_mask.ena = 1;
985 		q_cnt = pfvf->rq_ctx->qsize;
986 		bmap = pfvf->rq_bmap;
987 	}
988 
989 	aq_req.ctype = req->ctype;
990 	aq_req.op = NIX_AQ_INSTOP_WRITE;
991 
992 	for (qidx = 0; qidx < q_cnt; qidx++) {
993 		if (!test_bit(qidx, bmap))
994 			continue;
995 		aq_req.qidx = qidx;
996 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
997 		if (rc) {
998 			err = rc;
999 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
1000 				nix_get_ctx_name(req->ctype), qidx);
1001 		}
1002 	}
1003 
1004 	return err;
1005 }
1006 
1007 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
1008 static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
1009 {
1010 	struct nix_aq_enq_req lock_ctx_req;
1011 	int err;
1012 
1013 	if (req->op != NIX_AQ_INSTOP_INIT)
1014 		return 0;
1015 
1016 	if (req->ctype == NIX_AQ_CTYPE_MCE ||
1017 	    req->ctype == NIX_AQ_CTYPE_DYNO)
1018 		return 0;
1019 
1020 	memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
1021 	lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
1022 	lock_ctx_req.ctype = req->ctype;
1023 	lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
1024 	lock_ctx_req.qidx = req->qidx;
1025 	err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
1026 	if (err)
1027 		dev_err(rvu->dev,
1028 			"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
1029 			req->hdr.pcifunc,
1030 			nix_get_ctx_name(req->ctype), req->qidx);
1031 	return err;
1032 }
1033 
1034 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1035 				struct nix_aq_enq_req *req,
1036 				struct nix_aq_enq_rsp *rsp)
1037 {
1038 	int err;
1039 
1040 	err = rvu_nix_aq_enq_inst(rvu, req, rsp);
1041 	if (!err)
1042 		err = nix_lf_hwctx_lockdown(rvu, req);
1043 	return err;
1044 }
1045 #else
1046 
1047 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
1048 				struct nix_aq_enq_req *req,
1049 				struct nix_aq_enq_rsp *rsp)
1050 {
1051 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
1052 }
1053 #endif
1054 /* CN10K mbox handler */
1055 int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
1056 				      struct nix_cn10k_aq_enq_req *req,
1057 				      struct nix_cn10k_aq_enq_rsp *rsp)
1058 {
1059 	return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
1060 				  (struct nix_aq_enq_rsp *)rsp);
1061 }
1062 
1063 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
1064 				       struct hwctx_disable_req *req,
1065 				       struct msg_rsp *rsp)
1066 {
1067 	return nix_lf_hwctx_disable(rvu, req);
1068 }
1069 
1070 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
1071 				  struct nix_lf_alloc_req *req,
1072 				  struct nix_lf_alloc_rsp *rsp)
1073 {
1074 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
1075 	struct rvu_hwinfo *hw = rvu->hw;
1076 	u16 pcifunc = req->hdr.pcifunc;
1077 	struct rvu_block *block;
1078 	struct rvu_pfvf *pfvf;
1079 	u64 cfg, ctx_cfg;
1080 	int blkaddr;
1081 
1082 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
1083 		return NIX_AF_ERR_PARAM;
1084 
1085 	if (req->way_mask)
1086 		req->way_mask &= 0xFFFF;
1087 
1088 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1089 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1090 	if (!pfvf->nixlf || blkaddr < 0)
1091 		return NIX_AF_ERR_AF_LF_INVALID;
1092 
1093 	block = &hw->block[blkaddr];
1094 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1095 	if (nixlf < 0)
1096 		return NIX_AF_ERR_AF_LF_INVALID;
1097 
1098 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
1099 	if (req->npa_func) {
1100 		/* If default, use 'this' NIXLF's PFFUNC */
1101 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
1102 			req->npa_func = pcifunc;
1103 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
1104 			return NIX_AF_INVAL_NPA_PF_FUNC;
1105 	}
1106 
1107 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
1108 	if (req->sso_func) {
1109 		/* If default, use 'this' NIXLF's PFFUNC */
1110 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
1111 			req->sso_func = pcifunc;
1112 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
1113 			return NIX_AF_INVAL_SSO_PF_FUNC;
1114 	}
1115 
1116 	/* If RSS is being enabled, check if requested config is valid.
1117 	 * RSS table size should be a power of two, otherwise
1118 	 * RSS_GRP::OFFSET + adder might go beyond that group or
1119 	 * the entire table won't be usable.
1120 	 */
1121 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
1122 			    !is_power_of_2(req->rss_sz)))
1123 		return NIX_AF_ERR_RSS_SIZE_INVALID;
1124 
1125 	if (req->rss_sz &&
1126 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
1127 		return NIX_AF_ERR_RSS_GRPS_INVALID;
1128 
1129 	/* Reset this NIX LF */
1130 	err = rvu_lf_reset(rvu, block, nixlf);
1131 	if (err) {
1132 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1133 			block->addr - BLKADDR_NIX0, nixlf);
1134 		return NIX_AF_ERR_LF_RESET;
1135 	}
1136 
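	/* NIX_AF_CONST3 advertises per-context-type HW structure sizes as
	 * log2 values; each context below is sized as (1 << field).
	 */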
1137 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
1138 
1139 	/* Alloc NIX RQ HW context memory and config the base */
1140 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
1141 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
1142 	if (err)
1143 		goto free_mem;
1144 
1145 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
1146 	if (!pfvf->rq_bmap)
1147 		goto free_mem;
1148 
1149 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
1150 		    (u64)pfvf->rq_ctx->iova);
1151 
1152 	/* Set caching and queue count in HW */
1153 	cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
1154 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
1155 
1156 	/* Alloc NIX SQ HW context memory and config the base */
1157 	hwctx_size = 1UL << (ctx_cfg & 0xF);
1158 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
1159 	if (err)
1160 		goto free_mem;
1161 
1162 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
1163 	if (!pfvf->sq_bmap)
1164 		goto free_mem;
1165 
1166 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
1167 		    (u64)pfvf->sq_ctx->iova);
1168 
1169 	cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
1170 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
1171 
1172 	/* Alloc NIX CQ HW context memory and config the base */
1173 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
1174 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
1175 	if (err)
1176 		goto free_mem;
1177 
1178 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
1179 	if (!pfvf->cq_bmap)
1180 		goto free_mem;
1181 
1182 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
1183 		    (u64)pfvf->cq_ctx->iova);
1184 
1185 	cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
1186 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
1187 
1188 	/* Initialize receive side scaling (RSS) */
1189 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
1190 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
1191 				 req->rss_grps, hwctx_size, req->way_mask);
1192 	if (err)
1193 		goto free_mem;
1194 
1195 	/* Alloc memory for CQINT's HW contexts */
1196 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1197 	qints = (cfg >> 24) & 0xFFF;
1198 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
1199 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
1200 	if (err)
1201 		goto free_mem;
1202 
1203 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
1204 		    (u64)pfvf->cq_ints_ctx->iova);
1205 
1206 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
1207 		    BIT_ULL(36) | req->way_mask << 20);
1208 
1209 	/* Alloc memory for QINT's HW contexts */
1210 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1211 	qints = (cfg >> 12) & 0xFFF;
1212 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
1213 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
1214 	if (err)
1215 		goto free_mem;
1216 
1217 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
1218 		    (u64)pfvf->nix_qints_ctx->iova);
1219 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
1220 		    BIT_ULL(36) | req->way_mask << 20);
1221 
1222 	/* Set up VLANX TPIDs.
1223 	 * Use VLAN1 for 802.1Q
1224 	 * and VLAN0 for 802.1AD.
1225 	 */
1226 	cfg = (0x8100ULL << 16) | 0x88A8ULL;
1227 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
1228 
1229 	/* Enable LMTST for this NIX LF */
1230 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
1231 
1232 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
1233 	if (req->npa_func)
1234 		cfg = req->npa_func;
1235 	if (req->sso_func)
1236 		cfg |= (u64)req->sso_func << 16;
1237 
1238 	cfg |= (u64)req->xqe_sz << 33;
1239 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
1240 
1241 	/* Config Rx pkt length, csum checks and apad enable/disable */
1242 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
1243 
1244 	/* Configure pkind for TX parse config */
1245 	cfg = NPC_TX_DEF_PKIND;
1246 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
1247 
1248 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
1249 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
1250 	if (err)
1251 		goto free_mem;
1252 
1253 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
1254 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
1255 
1256 	/* Configure RX VTAG Type 7 (strip) for vf vlan */
1257 	rvu_write64(rvu, blkaddr,
1258 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
1259 		    VTAGSIZE_T4 | VTAG_STRIP);
1260 
1261 	goto exit;
1262 
1263 free_mem:
1264 	nix_ctx_free(rvu, pfvf);
1265 	rc = -ENOMEM;
1266 
1267 exit:
1268 	/* Set macaddr of this PF/VF */
1269 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
1270 
1271 	/* set SQB size info */
1272 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
1273 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
1274 	rsp->rx_chan_base = pfvf->rx_chan_base;
1275 	rsp->tx_chan_base = pfvf->tx_chan_base;
1276 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
1277 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
1278 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
1279 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
1280 	/* Get HW supported stat count */
1281 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1282 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
1283 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
1284 	/* Get count of CQ IRQs and error IRQs supported per LF */
1285 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
1286 	rsp->qints = ((cfg >> 12) & 0xFFF);
1287 	rsp->cints = ((cfg >> 24) & 0xFFF);
1288 	rsp->cgx_links = hw->cgx_links;
1289 	rsp->lbk_links = hw->lbk_links;
1290 	rsp->sdp_links = hw->sdp_links;
1291 
1292 	return rc;
1293 }
1294 
1295 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
1296 				 struct msg_rsp *rsp)
1297 {
1298 	struct rvu_hwinfo *hw = rvu->hw;
1299 	u16 pcifunc = req->hdr.pcifunc;
1300 	struct rvu_block *block;
1301 	int blkaddr, nixlf, err;
1302 	struct rvu_pfvf *pfvf;
1303 
1304 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1305 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1306 	if (!pfvf->nixlf || blkaddr < 0)
1307 		return NIX_AF_ERR_AF_LF_INVALID;
1308 
1309 	block = &hw->block[blkaddr];
1310 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
1311 	if (nixlf < 0)
1312 		return NIX_AF_ERR_AF_LF_INVALID;
1313 
1314 	if (req->flags & NIX_LF_DISABLE_FLOWS)
1315 		rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
1316 	else
1317 		rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
1318 
1319 	/* Free any tx vtag def entries used by this NIX LF */
1320 	if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
1321 		nix_free_tx_vtag_entries(rvu, pcifunc);
1322 
1323 	nix_interface_deinit(rvu, pcifunc, nixlf);
1324 
1325 	/* Reset this NIX LF */
1326 	err = rvu_lf_reset(rvu, block, nixlf);
1327 	if (err) {
1328 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
1329 			block->addr - BLKADDR_NIX0, nixlf);
1330 		return NIX_AF_ERR_LF_RESET;
1331 	}
1332 
1333 	nix_ctx_free(rvu, pfvf);
1334 
1335 	return 0;
1336 }
1337 
1338 int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
1339 					 struct nix_mark_format_cfg  *req,
1340 					 struct nix_mark_format_cfg_rsp *rsp)
1341 {
1342 	u16 pcifunc = req->hdr.pcifunc;
1343 	struct nix_hw *nix_hw;
1344 	struct rvu_pfvf *pfvf;
1345 	int blkaddr, rc;
1346 	u32 cfg;
1347 
1348 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1349 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1350 	if (!pfvf->nixlf || blkaddr < 0)
1351 		return NIX_AF_ERR_AF_LF_INVALID;
1352 
1353 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1354 	if (!nix_hw)
1355 		return -EINVAL;
1356 
1357 	cfg = (((u32)req->offset & 0x7) << 16) |
1358 	      (((u32)req->y_mask & 0xF) << 12) |
1359 	      (((u32)req->y_val & 0xF) << 8) |
1360 	      (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
1361 
1362 	rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
1363 	if (rc < 0) {
1364 		dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)\n",
1365 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1366 		return NIX_AF_ERR_MARK_CFG_FAIL;
1367 	}
1368 
1369 	rsp->mark_format_idx = rc;
1370 	return 0;
1371 }
1372 
1373 /* Disable shaping of pkts by a scheduler queue
1374  * at a given scheduler level.
1375  */
1376 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
1377 				 int lvl, int schq)
1378 {
1379 	u64  cir_reg = 0, pir_reg = 0;
1380 	u64  cfg;
1381 
1382 	switch (lvl) {
1383 	case NIX_TXSCH_LVL_TL1:
1384 		cir_reg = NIX_AF_TL1X_CIR(schq);
1385 		pir_reg = 0; /* PIR not available at TL1 */
1386 		break;
1387 	case NIX_TXSCH_LVL_TL2:
1388 		cir_reg = NIX_AF_TL2X_CIR(schq);
1389 		pir_reg = NIX_AF_TL2X_PIR(schq);
1390 		break;
1391 	case NIX_TXSCH_LVL_TL3:
1392 		cir_reg = NIX_AF_TL3X_CIR(schq);
1393 		pir_reg = NIX_AF_TL3X_PIR(schq);
1394 		break;
1395 	case NIX_TXSCH_LVL_TL4:
1396 		cir_reg = NIX_AF_TL4X_CIR(schq);
1397 		pir_reg = NIX_AF_TL4X_PIR(schq);
1398 		break;
1399 	}
1400 
1401 	if (!cir_reg)
1402 		return;
1403 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
1404 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
1405 
1406 	if (!pir_reg)
1407 		return;
1408 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
1409 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
1410 }
1411 
1412 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
1413 				 int lvl, int schq)
1414 {
1415 	struct rvu_hwinfo *hw = rvu->hw;
1416 	int link;
1417 
1418 	if (lvl >= hw->cap.nix_tx_aggr_lvl)
1419 		return;
1420 
1421 	/* Reset TL4's SDP link config */
1422 	if (lvl == NIX_TXSCH_LVL_TL4)
1423 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
1424 
1425 	if (lvl != NIX_TXSCH_LVL_TL2)
1426 		return;
1427 
1428 	/* Reset TL2's CGX or LBK link config */
1429 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
1430 		rvu_write64(rvu, blkaddr,
1431 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
1432 }
1433 
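/* Map a PF_FUNC to its NIX transmit link number: CGX-mapped PFs use
 * (cgx_id * lmac_per_cgx + lmac_id), AF's VFs use the LBK links that
 * follow the CGX links, and everything else falls back to the SDP link.
 */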
1434 static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
1435 {
1436 	struct rvu_hwinfo *hw = rvu->hw;
1437 	int pf = rvu_get_pf(pcifunc);
1438 	u8 cgx_id = 0, lmac_id = 0;
1439 
1440 	if (is_afvf(pcifunc)) {/* LBK links */
1441 		return hw->cgx_links;
1442 	} else if (is_pf_cgxmapped(rvu, pf)) {
1443 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1444 		return (cgx_id * hw->lmac_per_cgx) + lmac_id;
1445 	}
1446 
1447 	/* SDP link */
1448 	return hw->cgx_links + hw->lbk_links;
1449 }
1450 
1451 static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
1452 				 int link, int *start, int *end)
1453 {
1454 	struct rvu_hwinfo *hw = rvu->hw;
1455 	int pf = rvu_get_pf(pcifunc);
1456 
1457 	if (is_afvf(pcifunc)) { /* LBK links */
1458 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1459 		*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
1460 	} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
1461 		*start = hw->cap.nix_txsch_per_cgx_lmac * link;
1462 		*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
1463 	} else { /* SDP link */
1464 		*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
1465 			(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
1466 		*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
1467 	}
1468 }
1469 
1470 static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
1471 				      struct nix_hw *nix_hw,
1472 				      struct nix_txsch_alloc_req *req)
1473 {
1474 	struct rvu_hwinfo *hw = rvu->hw;
1475 	int schq, req_schq, free_cnt;
1476 	struct nix_txsch *txsch;
1477 	int link, start, end;
1478 
1479 	txsch = &nix_hw->txsch[lvl];
1480 	req_schq = req->schq_contig[lvl] + req->schq[lvl];
1481 
1482 	if (!req_schq)
1483 		return 0;
1484 
1485 	link = nix_get_tx_link(rvu, pcifunc);
1486 
1487 	/* For traffic aggregating scheduler level, one queue is enough */
1488 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1489 		if (req_schq != 1)
1490 			return NIX_AF_ERR_TLX_ALLOC_FAIL;
1491 		return 0;
1492 	}
1493 
1494 	/* Get free SCHQ count and check if request can be accommodated */
1495 	if (hw->cap.nix_fixed_txschq_mapping) {
1496 		nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1497 		schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
1498 		if (end <= txsch->schq.max && schq < end &&
1499 		    !test_bit(schq, txsch->schq.bmap))
1500 			free_cnt = 1;
1501 		else
1502 			free_cnt = 0;
1503 	} else {
1504 		free_cnt = rvu_rsrc_free_count(&txsch->schq);
1505 	}
1506 
1507 	if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1508 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1509 
1510 	/* If contiguous queues are needed, check for availability */
1511 	if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
1512 	    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1513 		return NIX_AF_ERR_TLX_ALLOC_FAIL;
1514 
1515 	return 0;
1516 }
1517 
1518 static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
1519 			    struct nix_txsch_alloc_rsp *rsp,
1520 			    int lvl, int start, int end)
1521 {
1522 	struct rvu_hwinfo *hw = rvu->hw;
1523 	u16 pcifunc = rsp->hdr.pcifunc;
1524 	int idx, schq;
1525 
1526 	/* For traffic aggregating levels, queue alloc is based
1527 	 * on the transmit link to which the PF_FUNC is mapped.
1528 	 */
1529 	if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1530 		/* A single TL queue is allocated */
1531 		if (rsp->schq_contig[lvl]) {
1532 			rsp->schq_contig[lvl] = 1;
1533 			rsp->schq_contig_list[lvl][0] = start;
1534 		}
1535 
1536 		/* Both contig and non-contig reqs don't make sense here */
1537 		if (rsp->schq_contig[lvl])
1538 			rsp->schq[lvl] = 0;
1539 
1540 		if (rsp->schq[lvl]) {
1541 			rsp->schq[lvl] = 1;
1542 			rsp->schq_list[lvl][0] = start;
1543 		}
1544 		return;
1545 	}
1546 
1547 	/* Adjust the queue request count if HW supports
1548 	 * only one queue per level (fixed txschq mapping).
1549 	 */
1550 	if (hw->cap.nix_fixed_txschq_mapping) {
1551 		idx = pcifunc & RVU_PFVF_FUNC_MASK;
1552 		schq = start + idx;
1553 		if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
1554 			rsp->schq_contig[lvl] = 0;
1555 			rsp->schq[lvl] = 0;
1556 			return;
1557 		}
1558 
1559 		if (rsp->schq_contig[lvl]) {
1560 			rsp->schq_contig[lvl] = 1;
1561 			set_bit(schq, txsch->schq.bmap);
1562 			rsp->schq_contig_list[lvl][0] = schq;
1563 			rsp->schq[lvl] = 0;
1564 		} else if (rsp->schq[lvl]) {
1565 			rsp->schq[lvl] = 1;
1566 			set_bit(schq, txsch->schq.bmap);
1567 			rsp->schq_list[lvl][0] = schq;
1568 		}
1569 		return;
1570 	}
1571 
1572 	/* Allocate the requested contiguous queue indices first */
1573 	if (rsp->schq_contig[lvl]) {
1574 		schq = bitmap_find_next_zero_area(txsch->schq.bmap,
1575 						  txsch->schq.max, start,
1576 						  rsp->schq_contig[lvl], 0);
1577 		if (schq >= end)
1578 			rsp->schq_contig[lvl] = 0;
1579 		for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
1580 			set_bit(schq, txsch->schq.bmap);
1581 			rsp->schq_contig_list[lvl][idx] = schq;
1582 			schq++;
1583 		}
1584 	}
1585 
1586 	/* Allocate non-contiguous queue indices */
1587 	if (rsp->schq[lvl]) {
1588 		idx = 0;
1589 		for (schq = start; schq < end; schq++) {
1590 			if (!test_bit(schq, txsch->schq.bmap)) {
1591 				set_bit(schq, txsch->schq.bmap);
1592 				rsp->schq_list[lvl][idx++] = schq;
1593 			}
1594 			if (idx == rsp->schq[lvl])
1595 				break;
1596 		}
1597 		/* Update how many were allocated */
1598 		rsp->schq[lvl] = idx;
1599 	}
1600 }
1601 
1602 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
1603 				     struct nix_txsch_alloc_req *req,
1604 				     struct nix_txsch_alloc_rsp *rsp)
1605 {
1606 	struct rvu_hwinfo *hw = rvu->hw;
1607 	u16 pcifunc = req->hdr.pcifunc;
1608 	int link, blkaddr, rc = 0;
1609 	int lvl, idx, start, end;
1610 	struct nix_txsch *txsch;
1611 	struct rvu_pfvf *pfvf;
1612 	struct nix_hw *nix_hw;
1613 	u32 *pfvf_map;
1614 	u16 schq;
1615 
1616 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1617 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1618 	if (!pfvf->nixlf || blkaddr < 0)
1619 		return NIX_AF_ERR_AF_LF_INVALID;
1620 
1621 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1622 	if (!nix_hw)
1623 		return -EINVAL;
1624 
1625 	mutex_lock(&rvu->rsrc_lock);
1626 
1627 	/* Check if request is valid as per HW capabilities
1628 	 * and can be accommodated.
1629 	 */
1630 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1631 		rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
1632 		if (rc)
1633 			goto err;
1634 	}
1635 
1636 	/* Allocate requested Tx scheduler queues */
1637 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1638 		txsch = &nix_hw->txsch[lvl];
1639 		pfvf_map = txsch->pfvf_map;
1640 
1641 		if (!req->schq[lvl] && !req->schq_contig[lvl])
1642 			continue;
1643 
1644 		rsp->schq[lvl] = req->schq[lvl];
1645 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1646 
1647 		link = nix_get_tx_link(rvu, pcifunc);
1648 
1649 		if (lvl >= hw->cap.nix_tx_aggr_lvl) {
1650 			start = link;
1651 			end = link;
1652 		} else if (hw->cap.nix_fixed_txschq_mapping) {
1653 			nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
1654 		} else {
1655 			start = 0;
1656 			end = txsch->schq.max;
1657 		}
1658 
1659 		nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
1660 
1661 		/* Reset queue config */
1662 		for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1663 			schq = rsp->schq_contig_list[lvl][idx];
1664 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1665 			    NIX_TXSCHQ_CFG_DONE))
1666 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1667 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1668 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1669 		}
1670 
1671 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1672 			schq = rsp->schq_list[lvl][idx];
1673 			if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
1674 			    NIX_TXSCHQ_CFG_DONE))
1675 				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
1676 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1677 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1678 		}
1679 	}
1680 
1681 	rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
1682 	rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
1683 	rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
1684 				       NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
1685 				       NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
1686 	goto exit;
1687 err:
1688 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1689 exit:
1690 	mutex_unlock(&rvu->rsrc_lock);
1691 	return rc;
1692 }
1693 
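/* Flush an SMQ before it is freed: temporarily enable CGX transmit if it
 * was off, set the flush and enqueue-xoff bits in NIX_AF_SMQX_CFG, drop
 * link backpressure so the flush can drain, poll for completion, then
 * restore the backpressure and CGX transmit state.
 */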
1694 static void nix_smq_flush(struct rvu *rvu, int blkaddr,
1695 			  int smq, u16 pcifunc, int nixlf)
1696 {
1697 	int pf = rvu_get_pf(pcifunc);
1698 	u8 cgx_id = 0, lmac_id = 0;
1699 	int err, restore_tx_en = 0;
1700 	u64 cfg;
1701 
1702 	/* enable cgx tx if disabled */
1703 	if (is_pf_cgxmapped(rvu, pf)) {
1704 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1705 		restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
1706 						    lmac_id, true);
1707 	}
1708 
1709 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
1710 	/* Do SMQ flush and set enqueue xoff */
1711 	cfg |= BIT_ULL(50) | BIT_ULL(49);
1712 	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
1713 
1714 	/* Disable backpressure from physical link,
1715 	 * otherwise SMQ flush may stall.
1716 	 */
1717 	rvu_cgx_enadis_rx_bp(rvu, pf, false);
1718 
1719 	/* Wait for flush to complete */
1720 	err = rvu_poll_reg(rvu, blkaddr,
1721 			   NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
1722 	if (err)
1723 		dev_err(rvu->dev,
1724 			"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
1725 
1726 	rvu_cgx_enadis_rx_bp(rvu, pf, true);
1727 	/* restore cgx tx state */
1728 	if (restore_tx_en)
1729 		cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
1730 }
1731 
1732 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1733 {
1734 	int blkaddr, nixlf, lvl, schq, err;
1735 	struct rvu_hwinfo *hw = rvu->hw;
1736 	struct nix_txsch *txsch;
1737 	struct nix_hw *nix_hw;
1738 
1739 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1740 	if (blkaddr < 0)
1741 		return NIX_AF_ERR_AF_LF_INVALID;
1742 
1743 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1744 	if (!nix_hw)
1745 		return -EINVAL;
1746 
1747 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1748 	if (nixlf < 0)
1749 		return NIX_AF_ERR_AF_LF_INVALID;
1750 
1751 	/* Disable TL2/3 queue links before SMQ flush */
1752 	mutex_lock(&rvu->rsrc_lock);
1753 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1754 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1755 			continue;
1756 
1757 		txsch = &nix_hw->txsch[lvl];
1758 		for (schq = 0; schq < txsch->schq.max; schq++) {
1759 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1760 				continue;
1761 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1762 		}
1763 	}
1764 
1765 	/* Flush SMQs */
1766 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1767 	for (schq = 0; schq < txsch->schq.max; schq++) {
1768 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1769 			continue;
1770 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1771 	}
1772 
1773 	/* Now free scheduler queues to free pool */
1774 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1775 		 /* TLs above the aggregation level are shared across a PF
1776 		  * and its VFs, hence skip freeing them.
1777 		  */
1778 		if (lvl >= hw->cap.nix_tx_aggr_lvl)
1779 			continue;
1780 
1781 		txsch = &nix_hw->txsch[lvl];
1782 		for (schq = 0; schq < txsch->schq.max; schq++) {
1783 			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1784 				continue;
1785 			rvu_free_rsrc(&txsch->schq, schq);
1786 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1787 		}
1788 	}
1789 	mutex_unlock(&rvu->rsrc_lock);
1790 
1791 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1792 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1793 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1794 	if (err)
1795 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1796 
1797 	return 0;
1798 }
1799 
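/* Free the single TX scheduler queue given in the request, after verifying
 * that the requesting PF/VF owns it. An SMQ is flushed before being freed.
 */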
1800 static int nix_txschq_free_one(struct rvu *rvu,
1801 			       struct nix_txsch_free_req *req)
1802 {
1803 	struct rvu_hwinfo *hw = rvu->hw;
1804 	u16 pcifunc = req->hdr.pcifunc;
1805 	int lvl, schq, nixlf, blkaddr;
1806 	struct nix_txsch *txsch;
1807 	struct nix_hw *nix_hw;
1808 	u32 *pfvf_map;
1809 
1810 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1811 	if (blkaddr < 0)
1812 		return NIX_AF_ERR_AF_LF_INVALID;
1813 
1814 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1815 	if (!nix_hw)
1816 		return -EINVAL;
1817 
1818 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1819 	if (nixlf < 0)
1820 		return NIX_AF_ERR_AF_LF_INVALID;
1821 
1822 	lvl = req->schq_lvl;
1823 	schq = req->schq;
1824 	txsch = &nix_hw->txsch[lvl];
1825 
1826 	if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
1827 		return 0;
1828 
1829 	pfvf_map = txsch->pfvf_map;
1830 	mutex_lock(&rvu->rsrc_lock);
1831 
1832 	if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
1833 		mutex_unlock(&rvu->rsrc_lock);
1834 		goto err;
1835 	}
1836 
1837 	/* Flush if it is an SMQ. The onus of disabling
1838 	 * TL2/3 queue links before the SMQ flush is on the user.
1839 	 */
1840 	if (lvl == NIX_TXSCH_LVL_SMQ)
1841 		nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
1842 
1843 	/* Free the resource */
1844 	rvu_free_rsrc(&txsch->schq, schq);
1845 	txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
1846 	mutex_unlock(&rvu->rsrc_lock);
1847 	return 0;
1848 err:
1849 	return NIX_AF_ERR_TLX_INVALID;
1850 }
1851 
1852 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1853 				    struct nix_txsch_free_req *req,
1854 				    struct msg_rsp *rsp)
1855 {
1856 	if (req->flags & TXSCHQ_FREE_ALL)
1857 		return nix_txschq_free(rvu, req->hdr.pcifunc);
1858 	else
1859 		return nix_txschq_free_one(rvu, req);
1860 }
1861 
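/* Validate a TXSCHQ register write from a PF/VF: the register must be valid
 * for the given scheduler level, the target queue must belong to the
 * requester and, for *_PARENT registers, the parent queue must too.
 */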
1862 static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1863 				      int lvl, u64 reg, u64 regval)
1864 {
1865 	u64 regbase = reg & 0xFFFF;
1866 	u16 schq, parent;
1867 
1868 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1869 		return false;
1870 
1871 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1872 	/* Check if this schq belongs to this PF/VF or not */
1873 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1874 		return false;
1875 
1876 	parent = (regval >> 16) & 0x1FF;
1877 	/* Validate MDQ's TL4 parent */
1878 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1879 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1880 		return false;
1881 
1882 	/* Validate TL4's TL3 parent */
1883 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1884 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1885 		return false;
1886 
1887 	/* Validate TL3's TL2 parent */
1888 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1889 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1890 		return false;
1891 
1892 	/* Validate TL2's TL1 parent */
1893 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1894 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1895 		return false;
1896 
1897 	return true;
1898 }
1899 
1900 static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
1901 {
1902 	u64 regbase;
1903 
1904 	if (hw->cap.nix_shaping)
1905 		return true;
1906 
1907 	/* If shaping and coloring are not supported, then
1908 	 * *_CIR and *_PIR registers should not be configured.
1909 	 */
1910 	regbase = reg & 0xFFFF;
1911 
1912 	switch (lvl) {
1913 	case NIX_TXSCH_LVL_TL1:
1914 		if (regbase == NIX_AF_TL1X_CIR(0))
1915 			return false;
1916 		break;
1917 	case NIX_TXSCH_LVL_TL2:
1918 		if (regbase == NIX_AF_TL2X_CIR(0) ||
1919 		    regbase == NIX_AF_TL2X_PIR(0))
1920 			return false;
1921 		break;
1922 	case NIX_TXSCH_LVL_TL3:
1923 		if (regbase == NIX_AF_TL3X_CIR(0) ||
1924 		    regbase == NIX_AF_TL3X_PIR(0))
1925 			return false;
1926 		break;
1927 	case NIX_TXSCH_LVL_TL4:
1928 		if (regbase == NIX_AF_TL4X_CIR(0) ||
1929 		    regbase == NIX_AF_TL4X_PIR(0))
1930 			return false;
1931 		break;
1932 	}
1933 	return true;
1934 }
1935 
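/* Program default topology, round-robin schedule and CIR settings for the
 * TL1 queue mapped to this PF's transmit link, unless the PF has already
 * configured it.
 */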
1936 static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
1937 				u16 pcifunc, int blkaddr)
1938 {
1939 	u32 *pfvf_map;
1940 	int schq;
1941 
1942 	schq = nix_get_tx_link(rvu, pcifunc);
1943 	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
1944 	/* Skip if PF has already done the config */
1945 	if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
1946 		return;
1947 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
1948 		    (TXSCH_TL1_DFLT_RR_PRIO << 1));
1949 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
1950 		    TXSCH_TL1_DFLT_RR_QTM);
1951 	rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
1952 	pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
1953 }
1954 
1955 static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr,
1956 			       u16 pcifunc, struct nix_txsch *txsch)
1957 {
1958 	struct rvu_hwinfo *hw = rvu->hw;
1959 	int lbk_link_start, lbk_links;
1960 	u8 pf = rvu_get_pf(pcifunc);
1961 	int schq;
1962 
1963 	if (!is_pf_cgxmapped(rvu, pf))
1964 		return;
1965 
1966 	lbk_link_start = hw->cgx_links;
1967 
1968 	for (schq = 0; schq < txsch->schq.max; schq++) {
1969 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
1970 			continue;
1971 		/* Enable all LBK links with channel 63 by default so that
1972 		 * packets can be sent to LBK with an NPC TX MCAM rule
1973 		 */
1974 		lbk_links = hw->lbk_links;
1975 		while (lbk_links--)
1976 			rvu_write64(rvu, blkaddr,
1977 				    NIX_AF_TL3_TL2X_LINKX_CFG(schq,
1978 							      lbk_link_start +
1979 							      lbk_links),
1980 				    BIT_ULL(12) | RVU_SWITCH_LBK_CHAN);
1981 	}
1982 }
1983 
1984 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1985 				    struct nix_txschq_config *req,
1986 				    struct msg_rsp *rsp)
1987 {
1988 	struct rvu_hwinfo *hw = rvu->hw;
1989 	u16 pcifunc = req->hdr.pcifunc;
1990 	u64 reg, regval, schq_regbase;
1991 	struct nix_txsch *txsch;
1992 	struct nix_hw *nix_hw;
1993 	int blkaddr, idx, err;
1994 	int nixlf, schq;
1995 	u32 *pfvf_map;
1996 
1997 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1998 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1999 		return NIX_AF_INVAL_TXSCHQ_CFG;
2000 
2001 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2002 	if (err)
2003 		return err;
2004 
2005 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
2006 	if (!nix_hw)
2007 		return -EINVAL;
2008 
2009 	txsch = &nix_hw->txsch[req->lvl];
2010 	pfvf_map = txsch->pfvf_map;
2011 
2012 	if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
2013 	    pcifunc & RVU_PFVF_FUNC_MASK) {
2014 		mutex_lock(&rvu->rsrc_lock);
2015 		if (req->lvl == NIX_TXSCH_LVL_TL1)
2016 			nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
2017 		mutex_unlock(&rvu->rsrc_lock);
2018 		return 0;
2019 	}
2020 
2021 	for (idx = 0; idx < req->num_regs; idx++) {
2022 		reg = req->reg[idx];
2023 		regval = req->regval[idx];
2024 		schq_regbase = reg & 0xFFFF;
2025 
2026 		if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
2027 					       txsch->lvl, reg, regval))
2028 			return NIX_AF_INVAL_TXSCHQ_CFG;
2029 
2030 		/* Check if shaping and coloring is supported */
2031 		if (!is_txschq_shaping_valid(hw, req->lvl, reg))
2032 			continue;
2033 
2034 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
2035 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
2036 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
2037 					   pcifunc, 0);
2038 			regval &= ~(0x7FULL << 24);
2039 			regval |= ((u64)nixlf << 24);
2040 		}
2041 
2042 		/* Clear 'BP_ENA' config, if it's not allowed */
2043 		if (!hw->cap.nix_tx_link_bp) {
2044 			if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
2045 			    (schq_regbase & 0xFF00) ==
2046 			    NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
2047 				regval &= ~BIT_ULL(13);
2048 		}
2049 
2050 		/* Mark config as done for TL1 by PF */
2051 		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
2052 		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
2053 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2054 			mutex_lock(&rvu->rsrc_lock);
2055 			pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
2056 							NIX_TXSCHQ_CFG_DONE);
2057 			mutex_unlock(&rvu->rsrc_lock);
2058 		}
2059 
2060 		/* SMQ flush is special, hence split the register write: do
2061 		 * the flush first and write the rest of the bits later.
2062 		 */
2063 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
2064 		    (regval & BIT_ULL(49))) {
2065 			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
2066 			nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
2067 			regval &= ~BIT_ULL(49);
2068 		}
2069 		rvu_write64(rvu, blkaddr, reg, regval);
2070 	}
2071 
2072 	rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc,
2073 			   &nix_hw->txsch[NIX_TXSCH_LVL_TL2]);
2074 
2075 	return 0;
2076 }
2077 
2078 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
2079 			   struct nix_vtag_config *req)
2080 {
2081 	u64 regval = req->vtag_size;
2082 
2083 	if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
2084 	    req->vtag_size > VTAGSIZE_T8)
2085 		return -EINVAL;
2086 
2087 	/* RX VTAG type 7 is reserved for VF VLAN */
2088 	if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
2089 		return NIX_AF_ERR_RX_VTAG_INUSE;
2090 
2091 	if (req->rx.capture_vtag)
2092 		regval |= BIT_ULL(5);
2093 	if (req->rx.strip_vtag)
2094 		regval |= BIT_ULL(4);
2095 
2096 	rvu_write64(rvu, blkaddr,
2097 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
2098 	return 0;
2099 }
2100 
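/* Clear a TX VTAG entry's DATA/CTL registers and return the entry to the
 * free pool, provided it is owned by 'pcifunc'.
 */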
2101 static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
2102 			    u16 pcifunc, int index)
2103 {
2104 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2105 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2106 
2107 	if (vlan->entry2pfvf_map[index] != pcifunc)
2108 		return NIX_AF_ERR_PARAM;
2109 
2110 	rvu_write64(rvu, blkaddr,
2111 		    NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
2112 	rvu_write64(rvu, blkaddr,
2113 		    NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
2114 
2115 	vlan->entry2pfvf_map[index] = 0;
2116 	rvu_free_rsrc(&vlan->rsrc, index);
2117 
2118 	return 0;
2119 }
2120 
2121 static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
2122 {
2123 	struct nix_txvlan *vlan;
2124 	struct nix_hw *nix_hw;
2125 	int index, blkaddr;
2126 
2127 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2128 	if (blkaddr < 0)
2129 		return;
2130 
2131 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
	if (!nix_hw)
		return;

2132 	vlan = &nix_hw->txvlan;
2133 
2134 	mutex_lock(&vlan->rsrc_lock);
2135 	/* Scan all the entries and free the ones mapped to 'pcifunc' */
2136 	for (index = 0; index < vlan->rsrc.max; index++) {
2137 		if (vlan->entry2pfvf_map[index] == pcifunc)
2138 			nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
2139 	}
2140 	mutex_unlock(&vlan->rsrc_lock);
2141 }
2142 
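/* Allocate a TX VTAG default entry and program its DATA/CTL registers with
 * the given vtag value and size. Returns the entry index or a negative
 * error code.
 */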
2143 static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
2144 			     u64 vtag, u8 size)
2145 {
2146 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2147 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2148 	u64 regval;
2149 	int index;
2150 
2151 	mutex_lock(&vlan->rsrc_lock);
2152 
2153 	index = rvu_alloc_rsrc(&vlan->rsrc);
2154 	if (index < 0) {
2155 		mutex_unlock(&vlan->rsrc_lock);
2156 		return index;
2157 	}
2158 
2159 	mutex_unlock(&vlan->rsrc_lock);
2160 
2161 	regval = size ? vtag : vtag << 32;
2162 
2163 	rvu_write64(rvu, blkaddr,
2164 		    NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
2165 	rvu_write64(rvu, blkaddr,
2166 		    NIX_AF_TX_VTAG_DEFX_CTL(index), size);
2167 
2168 	return index;
2169 }
2170 
2171 static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
2172 			     struct nix_vtag_config *req)
2173 {
2174 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2175 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2176 	u16 pcifunc = req->hdr.pcifunc;
2177 	int idx0 = req->tx.vtag0_idx;
2178 	int idx1 = req->tx.vtag1_idx;
2179 	int err = 0;
2180 
2181 	if (req->tx.free_vtag0 && req->tx.free_vtag1)
2182 		if (vlan->entry2pfvf_map[idx0] != pcifunc ||
2183 		    vlan->entry2pfvf_map[idx1] != pcifunc)
2184 			return NIX_AF_ERR_PARAM;
2185 
2186 	mutex_lock(&vlan->rsrc_lock);
2187 
2188 	if (req->tx.free_vtag0) {
2189 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
2190 		if (err)
2191 			goto exit;
2192 	}
2193 
2194 	if (req->tx.free_vtag1)
2195 		err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
2196 
2197 exit:
2198 	mutex_unlock(&vlan->rsrc_lock);
2199 	return err;
2200 }
2201 
2202 static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
2203 			   struct nix_vtag_config *req,
2204 			   struct nix_vtag_config_rsp *rsp)
2205 {
2206 	struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
2207 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2208 	u16 pcifunc = req->hdr.pcifunc;
2209 
2210 	if (req->tx.cfg_vtag0) {
2211 		rsp->vtag0_idx =
2212 			nix_tx_vtag_alloc(rvu, blkaddr,
2213 					  req->tx.vtag0, req->vtag_size);
2214 
2215 		if (rsp->vtag0_idx < 0)
2216 			return NIX_AF_ERR_TX_VTAG_NOSPC;
2217 
2218 		vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
2219 	}
2220 
2221 	if (req->tx.cfg_vtag1) {
2222 		rsp->vtag1_idx =
2223 			nix_tx_vtag_alloc(rvu, blkaddr,
2224 					  req->tx.vtag1, req->vtag_size);
2225 
2226 		if (rsp->vtag1_idx < 0)
2227 			goto err_free;
2228 
2229 		vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
2230 	}
2231 
2232 	return 0;
2233 
2234 err_free:
2235 	if (req->tx.cfg_vtag0)
2236 		nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
2237 
2238 	return NIX_AF_ERR_TX_VTAG_NOSPC;
2239 }
2240 
2241 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
2242 				  struct nix_vtag_config *req,
2243 				  struct nix_vtag_config_rsp *rsp)
2244 {
2245 	u16 pcifunc = req->hdr.pcifunc;
2246 	int blkaddr, nixlf, err;
2247 
2248 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2249 	if (err)
2250 		return err;
2251 
2252 	if (req->cfg_type) {
2253 		/* rx vtag configuration */
2254 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
2255 		if (err)
2256 			return NIX_AF_ERR_PARAM;
2257 	} else {
2258 		/* tx vtag configuration */
2259 		if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
2260 		    (req->tx.free_vtag0 || req->tx.free_vtag1))
2261 			return NIX_AF_ERR_PARAM;
2262 
2263 		if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
2264 			return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
2265 
2266 		if (req->tx.free_vtag0 || req->tx.free_vtag1)
2267 			return nix_tx_vtag_decfg(rvu, blkaddr, req);
2268 	}
2269 
2270 	return 0;
2271 }
2272 
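/* Write or initialize one multicast/mirror entry (MCE) context via the NIX
 * admin queue, pointing it at 'pcifunc' with RSS index 0 and the given next
 * index and end-of-list flag.
 */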
2273 static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
2274 			     int mce, u8 op, u16 pcifunc, int next, bool eol)
2275 {
2276 	struct nix_aq_enq_req aq_req;
2277 	int err;
2278 
2279 	aq_req.hdr.pcifunc = 0;
2280 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
2281 	aq_req.op = op;
2282 	aq_req.qidx = mce;
2283 
2284 	/* Use RSS with RSS index 0 */
2285 	aq_req.mce.op = 1;
2286 	aq_req.mce.index = 0;
2287 	aq_req.mce.eol = eol;
2288 	aq_req.mce.pf_func = pcifunc;
2289 	aq_req.mce.next = next;
2290 
2291 	/* All fields valid */
2292 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
2293 
2294 	err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
2295 	if (err) {
2296 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
2297 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
2298 		return err;
2299 	}
2300 	return 0;
2301 }
2302 
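/* Add 'pcifunc' to the tail of a software MCE list or remove it, keeping the
 * list's entry count in sync. Adding an already present entry is a no-op.
 */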
2303 static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
2304 				     u16 pcifunc, bool add)
2305 {
2306 	struct mce *mce, *tail = NULL;
2307 	bool delete = false;
2308 
2309 	/* Scan through the current list */
2310 	hlist_for_each_entry(mce, &mce_list->head, node) {
2311 		/* If already exists, then delete */
2312 		if (mce->pcifunc == pcifunc && !add) {
2313 			delete = true;
2314 			break;
2315 		} else if (mce->pcifunc == pcifunc && add) {
2316 			/* entry already exists */
2317 			return 0;
2318 		}
2319 		tail = mce;
2320 	}
2321 
2322 	if (delete) {
2323 		hlist_del(&mce->node);
2324 		kfree(mce);
2325 		mce_list->count--;
2326 		return 0;
2327 	}
2328 
2329 	if (!add)
2330 		return 0;
2331 
2332 	/* Add a new one to the list, at the tail */
2333 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
2334 	if (!mce)
2335 		return -ENOMEM;
2336 	mce->pcifunc = pcifunc;
2337 	if (!tail)
2338 		hlist_add_head(&mce->node, &mce_list->head);
2339 	else
2340 		hlist_add_behind(&mce->node, &tail->node);
2341 	mce_list->count++;
2342 	return 0;
2343 }
2344 
2345 int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
2346 			struct nix_mce_list *mce_list,
2347 			int mce_idx, int mcam_index, bool add)
2348 {
2349 	int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
2350 	struct npc_mcam *mcam = &rvu->hw->mcam;
2351 	struct nix_mcast *mcast;
2352 	struct nix_hw *nix_hw;
2353 	struct mce *mce;
2354 
2355 	if (!mce_list)
2356 		return -EINVAL;
2357 
2358 	/* Get this PF/VF func's MCE index */
2359 	idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
2360 
2361 	if (idx > (mce_idx + mce_list->max)) {
2362 		dev_err(rvu->dev,
2363 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
2364 			__func__, idx, mce_list->max,
2365 			pcifunc >> RVU_PFVF_PF_SHIFT);
2366 		return -EINVAL;
2367 	}
2368 
2369 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
2370 	if (err)
2371 		return err;
2372 
2373 	mcast = &nix_hw->mcast;
2374 	mutex_lock(&mcast->mce_lock);
2375 
2376 	err = nix_update_mce_list_entry(mce_list, pcifunc, add);
2377 	if (err)
2378 		goto end;
2379 
2380 	/* Disable MCAM entry in NPC */
2381 	if (!mce_list->count) {
2382 		npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
2383 		npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
2384 		goto end;
2385 	}
2386 
2387 	/* Dump the updated list to HW */
2388 	idx = mce_idx;
2389 	last_idx = idx + mce_list->count - 1;
2390 	hlist_for_each_entry(mce, &mce_list->head, node) {
2391 		if (idx > last_idx)
2392 			break;
2393 
2394 		next_idx = idx + 1;
2395 		/* EOL should be set in last MCE */
2396 		err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
2397 					mce->pcifunc, next_idx,
2398 					(next_idx > last_idx) ? true : false);
2399 		if (err)
2400 			goto end;
2401 		idx++;
2402 	}
2403 
2404 end:
2405 	mutex_unlock(&mcast->mce_lock);
2406 	return err;
2407 }
2408 
2409 void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
2410 		      struct nix_mce_list **mce_list, int *mce_idx)
2411 {
2412 	struct rvu_hwinfo *hw = rvu->hw;
2413 	struct rvu_pfvf *pfvf;
2414 
2415 	if (!hw->cap.nix_rx_multicast ||
2416 	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
2417 		*mce_list = NULL;
2418 		*mce_idx = 0;
2419 		return;
2420 	}
2421 
2422 	/* Get this PF/VF func's MCE index */
2423 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
2424 
2425 	if (type == NIXLF_BCAST_ENTRY) {
2426 		*mce_list = &pfvf->bcast_mce_list;
2427 		*mce_idx = pfvf->bcast_mce_idx;
2428 	} else if (type == NIXLF_ALLMULTI_ENTRY) {
2429 		*mce_list = &pfvf->mcast_mce_list;
2430 		*mce_idx = pfvf->mcast_mce_idx;
2431 	} else if (type == NIXLF_PROMISC_ENTRY) {
2432 		*mce_list = &pfvf->promisc_mce_list;
2433 		*mce_idx = pfvf->promisc_mce_idx;
2434 	}  else {
2435 		*mce_list = NULL;
2436 		*mce_idx = 0;
2437 	}
2438 }
2439 
2440 static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
2441 			       int type, bool add)
2442 {
2443 	int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
2444 	struct npc_mcam *mcam = &rvu->hw->mcam;
2445 	struct rvu_hwinfo *hw = rvu->hw;
2446 	struct nix_mce_list *mce_list;
2447 
2448 	/* skip multicast pkt replication for AF's VFs */
2449 	if (is_afvf(pcifunc))
2450 		return 0;
2451 
2452 	if (!hw->cap.nix_rx_multicast)
2453 		return 0;
2454 
2455 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2456 	if (blkaddr < 0)
2457 		return -EINVAL;
2458 
2459 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2460 	if (nixlf < 0)
2461 		return -EINVAL;
2462 
2463 	nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
2464 
2465 	mcam_index = npc_get_nixlf_mcam_index(mcam,
2466 					      pcifunc & ~RVU_PFVF_FUNC_MASK,
2467 					      nixlf, type);
2468 	err = nix_update_mce_list(rvu, pcifunc, mce_list,
2469 				  mce_idx, mcam_index, add);
2470 	return err;
2471 }
2472 
2473 static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
2474 {
2475 	struct nix_mcast *mcast = &nix_hw->mcast;
2476 	int err, pf, numvfs, idx;
2477 	struct rvu_pfvf *pfvf;
2478 	u16 pcifunc;
2479 	u64 cfg;
2480 
2481 	/* Skip PF0 (i.e. AF) */
2482 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
2483 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
2484 		/* If PF is not enabled, nothing to do */
2485 		if (!((cfg >> 20) & 0x01))
2486 			continue;
2487 		/* Get numVFs attached to this PF */
2488 		numvfs = (cfg >> 12) & 0xFF;
2489 
2490 		pfvf = &rvu->pf[pf];
2491 
2492 		/* Is this NIX0/1 block mapped to this PF? */
2493 		if (pfvf->nix_blkaddr != nix_hw->blkaddr)
2494 			continue;
2495 
2496 		/* save start idx of broadcast mce list */
2497 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2498 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
2499 
2500 		/* save start idx of multicast mce list */
2501 		pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2502 		nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
2503 
2504 		/* save the start idx of promisc mce list */
2505 		pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
2506 		nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
2507 
2508 		for (idx = 0; idx < (numvfs + 1); idx++) {
2509 			/* idx-0 is for PF, followed by VFs */
2510 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
2511 			pcifunc |= idx;
2512 			/* Add dummy entries now, so that we don't have to check
2513 			 * whether AQ_OP should be INIT/WRITE later on.
2514 			 * Will be updated when a NIXLF is attached to or
2515 			 * detached from these PF/VFs.
2516 			 */
2517 			err = nix_blk_setup_mce(rvu, nix_hw,
2518 						pfvf->bcast_mce_idx + idx,
2519 						NIX_AQ_INSTOP_INIT,
2520 						pcifunc, 0, true);
2521 			if (err)
2522 				return err;
2523 
2524 			/* add dummy entries to multicast mce list */
2525 			err = nix_blk_setup_mce(rvu, nix_hw,
2526 						pfvf->mcast_mce_idx + idx,
2527 						NIX_AQ_INSTOP_INIT,
2528 						pcifunc, 0, true);
2529 			if (err)
2530 				return err;
2531 
2532 			/* add dummy entries to promisc mce list */
2533 			err = nix_blk_setup_mce(rvu, nix_hw,
2534 						pfvf->promisc_mce_idx + idx,
2535 						NIX_AQ_INSTOP_INIT,
2536 						pcifunc, 0, true);
2537 			if (err)
2538 				return err;
2539 		}
2540 	}
2541 	return 0;
2542 }
2543 
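/* Allocate the MCE context and replication buffer memory for this NIX block,
 * program their base/config registers, reserve a pkind for replayed packets
 * and pre-populate per-PF broadcast/multicast/promisc MCE lists.
 */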
2544 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2545 {
2546 	struct nix_mcast *mcast = &nix_hw->mcast;
2547 	struct rvu_hwinfo *hw = rvu->hw;
2548 	int err, size;
2549 
2550 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
2551 	size = (1ULL << size);
2552 
2553 	/* Alloc memory for multicast/mirror replication entries */
2554 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
2555 			 (256UL << MC_TBL_SIZE), size);
2556 	if (err)
2557 		return -ENOMEM;
2558 
2559 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
2560 		    (u64)mcast->mce_ctx->iova);
2561 
2562 	/* Set max list length equal to max no of VFs per PF  + PF itself */
2563 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
2564 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
2565 
2566 	/* Alloc memory for multicast replication buffers */
2567 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
2568 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
2569 			 (8UL << MC_BUF_CNT), size);
2570 	if (err)
2571 		return -ENOMEM;
2572 
2573 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
2574 		    (u64)mcast->mcast_buf->iova);
2575 
2576 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
2577 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
2578 
2579 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
2580 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
2581 		    BIT_ULL(20) | MC_BUF_CNT);
2582 
2583 	mutex_init(&mcast->mce_lock);
2584 
2585 	return nix_setup_mce_tables(rvu, nix_hw);
2586 }
2587 
2588 static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
2589 {
2590 	struct nix_txvlan *vlan = &nix_hw->txvlan;
2591 	int err;
2592 
2593 	/* Allocate resource bitmap for tx vtag def registers */
2594 	vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
2595 	err = rvu_alloc_bitmap(&vlan->rsrc);
2596 	if (err)
2597 		return -ENOMEM;
2598 
2599 	/* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
2600 	vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
2601 					    sizeof(u16), GFP_KERNEL);
2602 	if (!vlan->entry2pfvf_map)
2603 		goto free_mem;
2604 
2605 	mutex_init(&vlan->rsrc_lock);
2606 	return 0;
2607 
2608 free_mem:
2609 	kfree(vlan->rsrc.bmap);
2610 	return -ENOMEM;
2611 }
2612 
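/* Read the scheduler queue count of each level from hardware constants and
 * allocate the bitmaps and pcifunc maps used to track queue ownership.
 */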
2613 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
2614 {
2615 	struct nix_txsch *txsch;
2616 	int err, lvl, schq;
2617 	u64 cfg, reg;
2618 
2619 	/* Get scheduler queue count of each type and alloc
2620 	 * bitmap for each for alloc/free/attach operations.
2621 	 */
2622 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2623 		txsch = &nix_hw->txsch[lvl];
2624 		txsch->lvl = lvl;
2625 		switch (lvl) {
2626 		case NIX_TXSCH_LVL_SMQ:
2627 			reg = NIX_AF_MDQ_CONST;
2628 			break;
2629 		case NIX_TXSCH_LVL_TL4:
2630 			reg = NIX_AF_TL4_CONST;
2631 			break;
2632 		case NIX_TXSCH_LVL_TL3:
2633 			reg = NIX_AF_TL3_CONST;
2634 			break;
2635 		case NIX_TXSCH_LVL_TL2:
2636 			reg = NIX_AF_TL2_CONST;
2637 			break;
2638 		case NIX_TXSCH_LVL_TL1:
2639 			reg = NIX_AF_TL1_CONST;
2640 			break;
2641 		}
2642 		cfg = rvu_read64(rvu, blkaddr, reg);
2643 		txsch->schq.max = cfg & 0xFFFF;
2644 		err = rvu_alloc_bitmap(&txsch->schq);
2645 		if (err)
2646 			return err;
2647 
2648 		/* Allocate memory for scheduler queues to
2649 		 * PF/VF pcifunc mapping info.
2650 		 */
2651 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
2652 					       sizeof(u32), GFP_KERNEL);
2653 		if (!txsch->pfvf_map)
2654 			return -ENOMEM;
2655 		for (schq = 0; schq < txsch->schq.max; schq++)
2656 			txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
2657 	}
2658 	return 0;
2659 }
2660 
2661 int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
2662 				int blkaddr, u32 cfg)
2663 {
2664 	int fmt_idx;
2665 
2666 	for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
2667 		if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
2668 			return fmt_idx;
2669 	}
2670 	if (fmt_idx >= nix_hw->mark_format.total)
2671 		return -ERANGE;
2672 
2673 	rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
2674 	nix_hw->mark_format.cfg[fmt_idx] = cfg;
2675 	nix_hw->mark_format.in_use++;
2676 	return fmt_idx;
2677 }
2678 
2679 static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
2680 				    int blkaddr)
2681 {
2682 	u64 cfgs[] = {
2683 		[NIX_MARK_CFG_IP_DSCP_RED]         = 0x10003,
2684 		[NIX_MARK_CFG_IP_DSCP_YELLOW]      = 0x11200,
2685 		[NIX_MARK_CFG_IP_DSCP_YELLOW_RED]  = 0x11203,
2686 		[NIX_MARK_CFG_IP_ECN_RED]          = 0x6000c,
2687 		[NIX_MARK_CFG_IP_ECN_YELLOW]       = 0x60c00,
2688 		[NIX_MARK_CFG_IP_ECN_YELLOW_RED]   = 0x60c0c,
2689 		[NIX_MARK_CFG_VLAN_DEI_RED]        = 0x30008,
2690 		[NIX_MARK_CFG_VLAN_DEI_YELLOW]     = 0x30800,
2691 		[NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
2692 	};
2693 	int i, rc;
2694 	u64 total;
2695 
2696 	total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
2697 	nix_hw->mark_format.total = (u8)total;
2698 	nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
2699 					       GFP_KERNEL);
2700 	if (!nix_hw->mark_format.cfg)
2701 		return -ENOMEM;
2702 	for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
2703 		rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
2704 		if (rc < 0)
2705 			dev_err(rvu->dev, "Err %d in setup mark format %d\n",
2706 				i, rc);
2707 	}
2708 
2709 	return 0;
2710 }
2711 
2712 static void rvu_get_lbk_link_max_frs(struct rvu *rvu,  u16 *max_mtu)
2713 {
2714 	/* CN10K supports LBK FIFO size 72 KB */
2715 	if (rvu->hw->lbk_bufsize == 0x12000)
2716 		*max_mtu = CN10K_LBK_LINK_MAX_FRS;
2717 	else
2718 		*max_mtu = NIC_HW_MAX_FRS;
2719 }
2720 
2721 static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
2722 {
2723 	/* RPM supports FIFO len 128 KB */
2724 	if (rvu_cgx_get_fifolen(rvu) == 0x20000)
2725 		*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
2726 	else
2727 		*max_mtu = NIC_HW_MAX_FRS;
2728 }
2729 
2730 int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
2731 				     struct nix_hw_info *rsp)
2732 {
2733 	u16 pcifunc = req->hdr.pcifunc;
2734 	int blkaddr;
2735 
2736 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2737 	if (blkaddr < 0)
2738 		return NIX_AF_ERR_AF_LF_INVALID;
2739 
2740 	if (is_afvf(pcifunc))
2741 		rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
2742 	else
2743 		rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
2744 
2745 	rsp->min_mtu = NIC_HW_MIN_FRS;
2746 	return 0;
2747 }
2748 
2749 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
2750 				   struct msg_rsp *rsp)
2751 {
2752 	u16 pcifunc = req->hdr.pcifunc;
2753 	int i, nixlf, blkaddr, err;
2754 	u64 stats;
2755 
2756 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
2757 	if (err)
2758 		return err;
2759 
2760 	/* Get stats count supported by HW */
2761 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
2762 
2763 	/* Reset tx stats */
2764 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
2765 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
2766 
2767 	/* Reset rx stats */
2768 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
2769 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
2770 
2771 	return 0;
2772 }
2773 
2774 /* Returns the ALG index to be set into NPC_RX_ACTION */
2775 static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
2776 {
2777 	int i;
2778 
2779 	/* Scan over existing algo entries to find a match */
2780 	for (i = 0; i < nix_hw->flowkey.in_use; i++)
2781 		if (nix_hw->flowkey.flowkey[i] == flow_cfg)
2782 			return i;
2783 
2784 	return -ERANGE;
2785 }
2786 
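/* Translate a NIX_FLOW_KEY_TYPE_* bitmap into up to FIELDS_PER_ALG extractor
 * field definitions that together build the RSS hash key, tracking the key
 * offset consumed by each selected field.
 */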
2787 static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
2788 {
2789 	int idx, nr_field, key_off, field_marker, keyoff_marker;
2790 	int max_key_off, max_bit_pos, group_member;
2791 	struct nix_rx_flowkey_alg *field;
2792 	struct nix_rx_flowkey_alg tmp;
2793 	u32 key_type, valid_key;
2794 	int l4_key_offset = 0;
2795 
2796 	if (!alg)
2797 		return -EINVAL;
2798 
2799 #define FIELDS_PER_ALG  5
2800 #define MAX_KEY_OFF	40
2801 	/* Clear all fields */
2802 	memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
2803 
2804 	/* Each of the 32 possible flow key algorithm definitions should
2805 	 * fall into the above incremental config (except ALG0). Otherwise a
2806 	 * single NPC MCAM entry is not sufficient for supporting RSS.
2807 	 *
2808 	 * If a different definition or combination is needed then the NPC
2809 	 * MCAM has to be programmed to filter such pkts and its action should
2810 	 * point to this definition to calculate flowtag or hash.
2811 	 *
2812 	 * The `for` loop goes over _all_ protocol fields and the following
2813 	 * variables depict the state machine's forward progress logic.
2814 	 *
2815 	 * keyoff_marker - Enabled when hash byte length needs to be accounted
2816 	 * in the field->key_offset update.
2817 	 * field_marker - Enabled when a new field needs to be selected.
2818 	 * group_member - Enabled when the protocol is part of a group.
2819 	 */
2820 
2821 	keyoff_marker = 0; max_key_off = 0; group_member = 0;
2822 	nr_field = 0; key_off = 0; field_marker = 1;
2823 	field = &tmp; max_bit_pos = fls(flow_cfg);
2824 	for (idx = 0;
2825 	     idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
2826 	     key_off < MAX_KEY_OFF; idx++) {
2827 		key_type = BIT(idx);
2828 		valid_key = flow_cfg & key_type;
2829 		/* Found a field marker, reset the field values */
2830 		if (field_marker)
2831 			memset(&tmp, 0, sizeof(tmp));
2832 
2833 		field_marker = true;
2834 		keyoff_marker = true;
2835 		switch (key_type) {
2836 		case NIX_FLOW_KEY_TYPE_PORT:
2837 			field->sel_chan = true;
2838 			/* This should be set to 1 when SEL_CHAN is set */
2839 			field->bytesm1 = 1;
2840 			break;
2841 		case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
2842 			field->lid = NPC_LID_LC;
2843 			field->hdr_offset = 9; /* IPv4 protocol field offset */
2844 			field->bytesm1 = 0; /* 1 byte */
2845 			field->ltype_match = NPC_LT_LC_IP;
2846 			field->ltype_mask = 0xF;
2847 			break;
2848 		case NIX_FLOW_KEY_TYPE_IPV4:
2849 		case NIX_FLOW_KEY_TYPE_INNR_IPV4:
2850 			field->lid = NPC_LID_LC;
2851 			field->ltype_match = NPC_LT_LC_IP;
2852 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
2853 				field->lid = NPC_LID_LG;
2854 				field->ltype_match = NPC_LT_LG_TU_IP;
2855 			}
2856 			field->hdr_offset = 12; /* SIP offset */
2857 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
2858 			field->ltype_mask = 0xF; /* Match only IPv4 */
2859 			keyoff_marker = false;
2860 			break;
2861 		case NIX_FLOW_KEY_TYPE_IPV6:
2862 		case NIX_FLOW_KEY_TYPE_INNR_IPV6:
2863 			field->lid = NPC_LID_LC;
2864 			field->ltype_match = NPC_LT_LC_IP6;
2865 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
2866 				field->lid = NPC_LID_LG;
2867 				field->ltype_match = NPC_LT_LG_TU_IP6;
2868 			}
2869 			field->hdr_offset = 8; /* SIP offset */
2870 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
2871 			field->ltype_mask = 0xF; /* Match only IPv6 */
2872 			break;
2873 		case NIX_FLOW_KEY_TYPE_TCP:
2874 		case NIX_FLOW_KEY_TYPE_UDP:
2875 		case NIX_FLOW_KEY_TYPE_SCTP:
2876 		case NIX_FLOW_KEY_TYPE_INNR_TCP:
2877 		case NIX_FLOW_KEY_TYPE_INNR_UDP:
2878 		case NIX_FLOW_KEY_TYPE_INNR_SCTP:
2879 			field->lid = NPC_LID_LD;
2880 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
2881 			    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
2882 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
2883 				field->lid = NPC_LID_LH;
2884 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
2885 
2886 			/* LTYPE enum values for NPC_LID_LD and NPC_LID_LH are
2887 			 * the same, so no need to change the ltype_match, just
2888 			 * change the lid for inner protocols.
2889 			 */
2890 			BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
2891 				     (int)NPC_LT_LH_TU_TCP);
2892 			BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
2893 				     (int)NPC_LT_LH_TU_UDP);
2894 			BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
2895 				     (int)NPC_LT_LH_TU_SCTP);
2896 
2897 			if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
2898 			     key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
2899 			    valid_key) {
2900 				field->ltype_match |= NPC_LT_LD_TCP;
2901 				group_member = true;
2902 			} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
2903 				    key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
2904 				   valid_key) {
2905 				field->ltype_match |= NPC_LT_LD_UDP;
2906 				group_member = true;
2907 			} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2908 				    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
2909 				   valid_key) {
2910 				field->ltype_match |= NPC_LT_LD_SCTP;
2911 				group_member = true;
2912 			}
2913 			field->ltype_mask = ~field->ltype_match;
2914 			if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
2915 			    key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
2916 				/* Handle the case where any of the group item
2917 				 * is enabled in the group but not the final one
2918 				 */
2919 				if (group_member) {
2920 					valid_key = true;
2921 					group_member = false;
2922 				}
2923 			} else {
2924 				field_marker = false;
2925 				keyoff_marker = false;
2926 			}
2927 
2928 			/* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
2929 			 * remember the TCP key offset within the 40 byte hash key.
2930 			 */
2931 			if (key_type == NIX_FLOW_KEY_TYPE_TCP)
2932 				l4_key_offset = key_off;
2933 			break;
2934 		case NIX_FLOW_KEY_TYPE_NVGRE:
2935 			field->lid = NPC_LID_LD;
2936 			field->hdr_offset = 4; /* VSID offset */
2937 			field->bytesm1 = 2;
2938 			field->ltype_match = NPC_LT_LD_NVGRE;
2939 			field->ltype_mask = 0xF;
2940 			break;
2941 		case NIX_FLOW_KEY_TYPE_VXLAN:
2942 		case NIX_FLOW_KEY_TYPE_GENEVE:
2943 			field->lid = NPC_LID_LE;
2944 			field->bytesm1 = 2;
2945 			field->hdr_offset = 4;
2946 			field->ltype_mask = 0xF;
2947 			field_marker = false;
2948 			keyoff_marker = false;
2949 
2950 			if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
2951 				field->ltype_match |= NPC_LT_LE_VXLAN;
2952 				group_member = true;
2953 			}
2954 
2955 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
2956 				field->ltype_match |= NPC_LT_LE_GENEVE;
2957 				group_member = true;
2958 			}
2959 
2960 			if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
2961 				if (group_member) {
2962 					field->ltype_mask = ~field->ltype_match;
2963 					field_marker = true;
2964 					keyoff_marker = true;
2965 					valid_key = true;
2966 					group_member = false;
2967 				}
2968 			}
2969 			break;
2970 		case NIX_FLOW_KEY_TYPE_ETH_DMAC:
2971 		case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
2972 			field->lid = NPC_LID_LA;
2973 			field->ltype_match = NPC_LT_LA_ETHER;
2974 			if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
2975 				field->lid = NPC_LID_LF;
2976 				field->ltype_match = NPC_LT_LF_TU_ETHER;
2977 			}
2978 			field->hdr_offset = 0;
2979 			field->bytesm1 = 5; /* DMAC 6 Byte */
2980 			field->ltype_mask = 0xF;
2981 			break;
2982 		case NIX_FLOW_KEY_TYPE_IPV6_EXT:
2983 			field->lid = NPC_LID_LC;
2984 			field->hdr_offset = 40; /* IPV6 hdr */
2985 			field->bytesm1 = 0; /* 1 Byte ext hdr */
2986 			field->ltype_match = NPC_LT_LC_IP6_EXT;
2987 			field->ltype_mask = 0xF;
2988 			break;
2989 		case NIX_FLOW_KEY_TYPE_GTPU:
2990 			field->lid = NPC_LID_LE;
2991 			field->hdr_offset = 4;
2992 			field->bytesm1 = 3; /* 4 bytes TID */
2993 			field->ltype_match = NPC_LT_LE_GTPU;
2994 			field->ltype_mask = 0xF;
2995 			break;
2996 		case NIX_FLOW_KEY_TYPE_VLAN:
2997 			field->lid = NPC_LID_LB;
2998 			field->hdr_offset = 2; /* Skip TPID (2-bytes) */
2999 			field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
3000 			field->ltype_match = NPC_LT_LB_CTAG;
3001 			field->ltype_mask = 0xF;
3002 			field->fn_mask = 1; /* Mask out the first nibble */
3003 			break;
3004 		case NIX_FLOW_KEY_TYPE_AH:
3005 		case NIX_FLOW_KEY_TYPE_ESP:
3006 			field->hdr_offset = 0;
3007 			field->bytesm1 = 7; /* SPI + sequence number */
3008 			field->ltype_mask = 0xF;
3009 			field->lid = NPC_LID_LE;
3010 			field->ltype_match = NPC_LT_LE_ESP;
3011 			if (key_type == NIX_FLOW_KEY_TYPE_AH) {
3012 				field->lid = NPC_LID_LD;
3013 				field->ltype_match = NPC_LT_LD_AH;
3014 				field->hdr_offset = 4;
3015 				keyoff_marker = false;
3016 			}
3017 			break;
3018 		}
3019 		field->ena = 1;
3020 
3021 		/* Found a valid flow key type */
3022 		if (valid_key) {
3023 			/* Use the key offset of TCP/UDP/SCTP fields
3024 			 * for ESP/AH fields.
3025 			 */
3026 			if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
3027 			    key_type == NIX_FLOW_KEY_TYPE_AH)
3028 				key_off = l4_key_offset;
3029 			field->key_offset = key_off;
3030 			memcpy(&alg[nr_field], field, sizeof(*field));
3031 			max_key_off = max(max_key_off, field->bytesm1 + 1);
3032 
3033 			/* Found a field marker, get the next field */
3034 			if (field_marker)
3035 				nr_field++;
3036 		}
3037 
3038 		/* Found a keyoff marker, update the new key_off */
3039 		if (keyoff_marker) {
3040 			key_off += max_key_off;
3041 			max_key_off = 0;
3042 		}
3043 	}
3044 	/* Processed all the flow key types */
3045 	if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
3046 		return 0;
3047 	else
3048 		return NIX_AF_ERR_RSS_NOSPC_FIELD;
3049 }
3050 
3051 static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
3052 {
3053 	u64 field[FIELDS_PER_ALG];
3054 	struct nix_hw *hw;
3055 	int fid, rc;
3056 
3057 	hw = get_nix_hw(rvu->hw, blkaddr);
3058 	if (!hw)
3059 		return -EINVAL;
3060 
3061 	/* No room to add a new flow hash algorithm */
3062 	if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
3063 		return NIX_AF_ERR_RSS_NOSPC_ALGO;
3064 
3065 	/* Generate algo fields for the given flow_cfg */
3066 	rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
3067 	if (rc)
3068 		return rc;
3069 
3070 	/* Update ALGX_FIELDX register with generated fields */
3071 	for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3072 		rvu_write64(rvu, blkaddr,
3073 			    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
3074 							   fid), field[fid]);
3075 
3076 	/* Store the flow_cfg for further lookup */
3077 	rc = hw->flowkey.in_use;
3078 	hw->flowkey.flowkey[rc] = flow_cfg;
3079 	hw->flowkey.in_use++;
3080 
3081 	return rc;
3082 }
3083 
3084 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
3085 					 struct nix_rss_flowkey_cfg *req,
3086 					 struct nix_rss_flowkey_cfg_rsp *rsp)
3087 {
3088 	u16 pcifunc = req->hdr.pcifunc;
3089 	int alg_idx, nixlf, blkaddr;
3090 	struct nix_hw *nix_hw;
3091 	int err;
3092 
3093 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3094 	if (err)
3095 		return err;
3096 
3097 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3098 	if (!nix_hw)
3099 		return -EINVAL;
3100 
3101 	alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
3102 	/* Failed to get an algo index from the existing list, reserve a new one */
3103 	if (alg_idx < 0) {
3104 		alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
3105 						  req->flowkey_cfg);
3106 		if (alg_idx < 0)
3107 			return alg_idx;
3108 	}
3109 	rsp->alg_idx = alg_idx;
3110 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
3111 				       alg_idx, req->mcam_index);
3112 	return 0;
3113 }
3114 
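/* Clear all RSS flow key algorithm fields and reserve a default set of
 * algorithms covering IPv4/IPv6 2-tuples and TCP/UDP/SCTP 4-tuple
 * combinations.
 */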
3115 static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
3116 {
3117 	u32 flowkey_cfg, minkey_cfg;
3118 	int alg, fid, rc;
3119 
3120 	/* Disable all flow key algx fieldx */
3121 	for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
3122 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
3123 			rvu_write64(rvu, blkaddr,
3124 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
3125 				    0);
3126 	}
3127 
3128 	/* IPv4/IPv6 SIP/DIPs */
3129 	flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
3130 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3131 	if (rc < 0)
3132 		return rc;
3133 
3134 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3135 	minkey_cfg = flowkey_cfg;
3136 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
3137 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3138 	if (rc < 0)
3139 		return rc;
3140 
3141 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3142 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
3143 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3144 	if (rc < 0)
3145 		return rc;
3146 
3147 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
3148 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
3149 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3150 	if (rc < 0)
3151 		return rc;
3152 
3153 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
3154 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3155 			NIX_FLOW_KEY_TYPE_UDP;
3156 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3157 	if (rc < 0)
3158 		return rc;
3159 
3160 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3161 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3162 			NIX_FLOW_KEY_TYPE_SCTP;
3163 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3164 	if (rc < 0)
3165 		return rc;
3166 
3167 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3168 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
3169 			NIX_FLOW_KEY_TYPE_SCTP;
3170 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3171 	if (rc < 0)
3172 		return rc;
3173 
3174 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
3175 	flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
3176 		      NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
3177 	rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
3178 	if (rc < 0)
3179 		return rc;
3180 
3181 	return 0;
3182 }
3183 
3184 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
3185 				      struct nix_set_mac_addr *req,
3186 				      struct msg_rsp *rsp)
3187 {
3188 	bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
3189 	u16 pcifunc = req->hdr.pcifunc;
3190 	int blkaddr, nixlf, err;
3191 	struct rvu_pfvf *pfvf;
3192 
3193 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
3194 	if (err)
3195 		return err;
3196 
3197 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3198 
3199 	/* untrusted VF can't overwrite admin(PF) changes */
3200 	if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3201 	    (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
3202 		dev_warn(rvu->dev,
3203 			 "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
3204 		return -EPERM;
3205 	}
3206 
3207 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
3208 
3209 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
3210 				    pfvf->rx_chan_base, req->mac_addr);
3211 
3212 	if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
3213 		ether_addr_copy(pfvf->default_mac, req->mac_addr);
3214 
3215 	return 0;
3216 }
3217 
3218 int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
3219 				      struct msg_req *req,
3220 				      struct nix_get_mac_addr_rsp *rsp)
3221 {
3222 	u16 pcifunc = req->hdr.pcifunc;
3223 	struct rvu_pfvf *pfvf;
3224 
3225 	if (!is_nixlf_attached(rvu, pcifunc))
3226 		return NIX_AF_ERR_AF_LF_INVALID;
3227 
3228 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3229 
3230 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
3231 
3232 	return 0;
3233 }
3234 
3235 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
3236 				     struct msg_rsp *rsp)
3237 {
3238 	bool allmulti, promisc, nix_rx_multicast;
3239 	u16 pcifunc = req->hdr.pcifunc;
3240 	struct rvu_pfvf *pfvf;
3241 	int nixlf, err;
3242 
3243 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3244 	promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
3245 	allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
3246 	pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
3247 
3248 	nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
3249 
3250 	if (is_vf(pcifunc) && !nix_rx_multicast &&
3251 	    (promisc || allmulti)) {
3252 		dev_warn_ratelimited(rvu->dev,
3253 				     "VF promisc/multicast not supported\n");
3254 		return 0;
3255 	}
3256 
3257 	/* untrusted VF can't configure promisc/allmulti */
3258 	if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
3259 	    (promisc || allmulti))
3260 		return 0;
3261 
3262 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3263 	if (err)
3264 		return err;
3265 
3266 	if (nix_rx_multicast) {
3267 		/* add/del this PF_FUNC to/from mcast pkt replication list */
3268 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
3269 					  allmulti);
3270 		if (err) {
3271 			dev_err(rvu->dev,
3272 				"Failed to update pcifunc 0x%x to multicast list\n",
3273 				pcifunc);
3274 			return err;
3275 		}
3276 
3277 		/* add/del this PF_FUNC to/from promisc pkt replication list */
3278 		err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
3279 					  promisc);
3280 		if (err) {
3281 			dev_err(rvu->dev,
3282 				"Failed to update pcifunc 0x%x to promisc list\n",
3283 				pcifunc);
3284 			return err;
3285 		}
3286 	}
3287 
3288 	/* install/uninstall allmulti entry */
3289 	if (allmulti) {
3290 		rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
3291 					       pfvf->rx_chan_base);
3292 	} else {
3293 		if (!nix_rx_multicast)
3294 			rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
3295 	}
3296 
3297 	/* install/uninstall promisc entry */
3298 	if (promisc) {
3299 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
3300 					      pfvf->rx_chan_base,
3301 					      pfvf->rx_chan_cnt);
3302 	} else {
3303 		if (!nix_rx_multicast)
3304 			rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
3305 	}
3306 
3307 	return 0;
3308 }
3309 
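/* Compute the max/min frame sizes to program on the RX link by taking the
 * requester's values and widening them to cover the PF and all of its VFs.
 */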
3310 static void nix_find_link_frs(struct rvu *rvu,
3311 			      struct nix_frs_cfg *req, u16 pcifunc)
3312 {
3313 	int pf = rvu_get_pf(pcifunc);
3314 	struct rvu_pfvf *pfvf;
3315 	int maxlen, minlen;
3316 	int numvfs, hwvf;
3317 	int vf;
3318 
3319 	/* Update with requester's min/max lengths */
3320 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3321 	pfvf->maxlen = req->maxlen;
3322 	if (req->update_minlen)
3323 		pfvf->minlen = req->minlen;
3324 
3325 	maxlen = req->maxlen;
3326 	minlen = req->update_minlen ? req->minlen : 0;
3327 
3328 	/* Get this PF's numVFs and starting hwvf */
3329 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
3330 
3331 	/* For each VF, compare requested max/minlen */
3332 	for (vf = 0; vf < numvfs; vf++) {
3333 		pfvf =  &rvu->hwvf[hwvf + vf];
3334 		if (pfvf->maxlen > maxlen)
3335 			maxlen = pfvf->maxlen;
3336 		if (req->update_minlen &&
3337 		    pfvf->minlen && pfvf->minlen < minlen)
3338 			minlen = pfvf->minlen;
3339 	}
3340 
3341 	/* Compare requested max/minlen with PF's max/minlen */
3342 	pfvf = &rvu->pf[pf];
3343 	if (pfvf->maxlen > maxlen)
3344 		maxlen = pfvf->maxlen;
3345 	if (req->update_minlen &&
3346 	    pfvf->minlen && pfvf->minlen < minlen)
3347 		minlen = pfvf->minlen;
3348 
3349 	/* Update the request with the max/minlen across the PF and its VFs */
3350 	req->maxlen = maxlen;
3351 	if (req->update_minlen)
3352 		req->minlen = minlen;
3353 }
3354 
3355 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
3356 				    struct msg_rsp *rsp)
3357 {
3358 	struct rvu_hwinfo *hw = rvu->hw;
3359 	u16 pcifunc = req->hdr.pcifunc;
3360 	int pf = rvu_get_pf(pcifunc);
3361 	int blkaddr, schq, link = -1;
3362 	struct nix_txsch *txsch;
3363 	u64 cfg, lmac_fifo_len;
3364 	struct nix_hw *nix_hw;
3365 	u8 cgx = 0, lmac = 0;
3366 	u16 max_mtu;
3367 
3368 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3369 	if (blkaddr < 0)
3370 		return NIX_AF_ERR_AF_LF_INVALID;
3371 
3372 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
3373 	if (!nix_hw)
3374 		return -EINVAL;
3375 
3376 	if (is_afvf(pcifunc))
3377 		rvu_get_lbk_link_max_frs(rvu, &max_mtu);
3378 	else
3379 		rvu_get_lmac_link_max_frs(rvu, &max_mtu);
3380 
3381 	if (!req->sdp_link && req->maxlen > max_mtu)
3382 		return NIX_AF_ERR_FRS_INVALID;
3383 
3384 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
3385 		return NIX_AF_ERR_FRS_INVALID;
3386 
3387 	/* Check if requester wants to update SMQs */
3388 	if (!req->update_smq)
3389 		goto rx_frscfg;
3390 
3391 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
3392 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
3393 	mutex_lock(&rvu->rsrc_lock);
3394 	for (schq = 0; schq < txsch->schq.max; schq++) {
3395 		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
3396 			continue;
3397 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
3398 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
3399 		if (req->update_minlen)
3400 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
3401 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
3402 	}
3403 	mutex_unlock(&rvu->rsrc_lock);
3404 
3405 rx_frscfg:
3406 	/* Check if config is for SDP link */
3407 	if (req->sdp_link) {
3408 		if (!hw->sdp_links)
3409 			return NIX_AF_ERR_RX_LINK_INVALID;
3410 		link = hw->cgx_links + hw->lbk_links;
3411 		goto linkcfg;
3412 	}
3413 
3414 	/* Check if the request is from CGX mapped RVU PF */
3415 	if (is_pf_cgxmapped(rvu, pf)) {
3416 		/* Get CGX and LMAC to which this PF is mapped and find link */
3417 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
3418 		link = (cgx * hw->lmac_per_cgx) + lmac;
3419 	} else if (pf == 0) {
3420 		/* For VFs of PF0, ingress is the LBK port, so config the LBK link */
3421 		link = hw->cgx_links;
3422 	}
3423 
3424 	if (link < 0)
3425 		return NIX_AF_ERR_RX_LINK_INVALID;
3426 
3427 	nix_find_link_frs(rvu, req, pcifunc);
3428 
3429 linkcfg:
3430 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
3431 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
3432 	if (req->update_minlen)
3433 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
3434 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
3435 
3436 	if (req->sdp_link || pf == 0)
3437 		return 0;
3438 
3439 	/* Update transmit credits for CGX links */
3440 	lmac_fifo_len =
3441 		rvu_cgx_get_fifolen(rvu) /
3442 		cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3443 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
3444 	cfg &= ~(0xFFFFFULL << 12);
3445 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
3446 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
3447 	return 0;
3448 }
3449 
3450 int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
3451 				    struct msg_rsp *rsp)
3452 {
3453 	int nixlf, blkaddr, err;
3454 	u64 cfg;
3455 
3456 	err = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, &blkaddr);
3457 	if (err)
3458 		return err;
3459 
3460 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
3461 	/* Set the interface configuration */
3462 	if (req->len_verify & BIT(0))
3463 		cfg |= BIT_ULL(41);
3464 	else
3465 		cfg &= ~BIT_ULL(41);
3466 
3467 	if (req->len_verify & BIT(1))
3468 		cfg |= BIT_ULL(40);
3469 	else
3470 		cfg &= ~BIT_ULL(40);
3471 
3472 	if (req->csum_verify & BIT(0))
3473 		cfg |= BIT_ULL(37);
3474 	else
3475 		cfg &= ~BIT_ULL(37);
3476 
3477 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
3478 
3479 	return 0;
3480 }
3481 
3482 static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
3483 {
3484 	/* CN10k supports 72KB FIFO size and max packet size of 64k */
3485 	if (rvu->hw->lbk_bufsize == 0x12000)
3486 		return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
3487 
3488 	return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
3489 }
3490 
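/* Set default min/max frame sizes on all RX links and initial transmit
 * credits on the CGX and LBK TX links, assuming maximum sized frames.
 */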
3491 static void nix_link_config(struct rvu *rvu, int blkaddr)
3492 {
3493 	struct rvu_hwinfo *hw = rvu->hw;
3494 	int cgx, lmac_cnt, slink, link;
3495 	u16 lbk_max_frs, lmac_max_frs;
3496 	u64 tx_credits;
3497 
3498 	rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
3499 	rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
3500 
3501 	/* Set default min/max packet lengths allowed on NIX Rx links.
3502 	 *
3503 	 * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
3504 	 * as undersize and report them to SW as error pkts, hence
3505 	 * set it to 40 bytes.
3506 	 */
3507 	for (link = 0; link < hw->cgx_links; link++) {
3508 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3509 				((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
3510 	}
3511 
3512 	for (link = hw->cgx_links; link < hw->cgx_links + hw->lbk_links; link++) {
3513 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3514 			    ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
3515 	}
3516 	if (hw->sdp_links) {
3517 		link = hw->cgx_links + hw->lbk_links;
3518 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
3519 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
3520 	}
3521 
3522 	/* Set credits for Tx links assuming max packet length allowed.
3523 	 * This will be reconfigured based on MTU set for PF/VF.
3524 	 */
3525 	for (cgx = 0; cgx < hw->cgx; cgx++) {
3526 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
3527 		tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
3528 			       lmac_max_frs) / 16;
3529 		/* Enable credits and set credit pkt count to max allowed */
3530 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3531 		slink = cgx * hw->lmac_per_cgx;
3532 		for (link = slink; link < (slink + lmac_cnt); link++) {
3533 			rvu_write64(rvu, blkaddr,
3534 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
3535 				    tx_credits);
3536 		}
3537 	}
3538 
3539 	/* Set Tx credits for LBK link */
3540 	slink = hw->cgx_links;
3541 	for (link = slink; link < (slink + hw->lbk_links); link++) {
3542 		tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
3543 		/* Enable credits and set credit pkt count to max allowed */
3544 		tx_credits =  (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
3545 		rvu_write64(rvu, blkaddr,
3546 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
3547 	}
3548 }
3549 
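/* Trigger NIX X2P bus calibration and verify that every available CGX port
 * and the LBK interface responded before clearing the calibrate bit.
 */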
3550 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
3551 {
3552 	int idx, err;
3553 	u64 status;
3554 
3555 	/* Start X2P bus calibration */
3556 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3557 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
3558 	/* Wait for calibration to complete */
3559 	err = rvu_poll_reg(rvu, blkaddr,
3560 			   NIX_AF_STATUS, BIT_ULL(10), false);
3561 	if (err) {
3562 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
3563 		return err;
3564 	}
3565 
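	/* In NIX_AF_STATUS, per-CGX ready indications start at bit 16,
	 * LBK ready is bit 19 and bits [9:0] are treated as calibration
	 * error status, as checked below.
	 */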
3566 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
3567 	/* Check if CGX devices are ready */
3568 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
3569 		/* Skip when cgx port is not available */
3570 		if (!rvu_cgx_pdata(idx, rvu) ||
3571 		    (status & (BIT_ULL(16 + idx))))
3572 			continue;
3573 		dev_err(rvu->dev,
3574 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
3575 		err = -EBUSY;
3576 	}
3577 
3578 	/* Check if LBK is ready */
3579 	if (!(status & BIT_ULL(19))) {
3580 		dev_err(rvu->dev,
3581 			"LBK didn't respond to NIX X2P calibration\n");
3582 		err = -EBUSY;
3583 	}
3584 
3585 	/* Clear 'calibrate_x2p' bit */
3586 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3587 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
3588 	if (err || (status & 0x3FFULL))
3589 		dev_err(rvu->dev,
3590 			"NIX X2P calibration failed, status 0x%llx\n", status);
3591 	if (err)
3592 		return err;
3593 	return 0;
3594 }
3595 
3596 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
3597 {
3598 	u64 cfg;
3599 	int err;
3600 
3601 	/* Set admin queue endianness */
3602 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
3603 #ifdef __BIG_ENDIAN
3604 	cfg |= BIT_ULL(8);
3605 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3606 #else
3607 	cfg &= ~BIT_ULL(8);
3608 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
3609 #endif
3610 
3611 	/* Do not bypass NDC cache */
3612 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
3613 	cfg &= ~0x3FFEULL;
3614 #ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
3615 	/* Disable caching of SQB aka SQEs */
3616 	cfg |= 0x04ULL;
3617 #endif
3618 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
3619 
3620 	/* Result structure can be followed by RQ/SQ/CQ context at
3621 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
3622 	 * operation type. Alloc sufficient result memory for all operations.
3623 	 */
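	/* A rough per-instruction result layout under that assumption:
	 *   [0]    nix_aq_res_s (padded to 128 bytes)
	 *   [+128] RQ/SQ/CQ context
	 *   [+256] write mask
	 */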
3624 	err = rvu_aq_alloc(rvu, &block->aq,
3625 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
3626 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
3627 	if (err)
3628 		return err;
3629 
3630 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
3631 	rvu_write64(rvu, block->addr,
3632 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
3633 	return 0;
3634 }
3635 
3636 static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
3637 {
3638 	const struct npc_lt_def_cfg *ltdefs;
3639 	struct rvu_hwinfo *hw = rvu->hw;
3640 	int blkaddr = nix_hw->blkaddr;
3641 	struct rvu_block *block;
3642 	int err;
3643 	u64 cfg;
3644 
3645 	block = &hw->block[blkaddr];
3646 
3647 	if (is_rvu_96xx_B0(rvu)) {
3648 		/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
3649 		 * internal state when conditional clocks are turned off.
3650 		 * Hence enable them.
3651 		 */
3652 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
3653 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
3654 
3655 		/* Set chan/link to backpressure TL3 instead of TL2 */
3656 		rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
3657 
3658 		/* Disable SQ manager's sticky mode operation (set TM6 = 0).
3659 		 * This sticky mode is known to cause SQ stalls when multiple
3660 		 * SQs mapped to the same SMQ transmit pkts at the same time.
3661 		 */
3662 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
3663 		cfg &= ~BIT_ULL(15);
3664 		rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
3665 	}
3666 
3667 	ltdefs = rvu->kpu.lt_def;
3668 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
3669 	err = nix_calibrate_x2p(rvu, blkaddr);
3670 	if (err)
3671 		return err;
3672 
3673 	/* Initialize admin queue */
3674 	err = nix_aq_init(rvu, block);
3675 	if (err)
3676 		return err;
3677 
3678 	/* Restore CINT timer delay to HW reset values */
3679 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
3680 
3681 	if (is_block_implemented(hw, blkaddr)) {
3682 		err = nix_setup_txschq(rvu, nix_hw, blkaddr);
3683 		if (err)
3684 			return err;
3685 
3686 		err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
3687 		if (err)
3688 			return err;
3689 
3690 		err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
3691 		if (err)
3692 			return err;
3693 
3694 		err = nix_setup_mcast(rvu, nix_hw, blkaddr);
3695 		if (err)
3696 			return err;
3697 
3698 		err = nix_setup_txvlan(rvu, nix_hw);
3699 		if (err)
3700 			return err;
3701 
3702 		/* Configure segmentation offload formats */
3703 		nix_setup_lso(rvu, nix_hw, blkaddr);
3704 
3705 		/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
3706 		 * This helps the HW protocol checker identify headers
3707 		 * and validate their length and checksums.
3708 		 */
3709 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
3710 			    (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
3711 			    ltdefs->rx_ol2.ltype_mask);
3712 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
3713 			    (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
3714 			    ltdefs->rx_oip4.ltype_mask);
3715 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
3716 			    (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
3717 			    ltdefs->rx_iip4.ltype_mask);
3718 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
3719 			    (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
3720 			    ltdefs->rx_oip6.ltype_mask);
3721 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
3722 			    (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
3723 			    ltdefs->rx_iip6.ltype_mask);
3724 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
3725 			    (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
3726 			    ltdefs->rx_otcp.ltype_mask);
3727 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
3728 			    (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
3729 			    ltdefs->rx_itcp.ltype_mask);
3730 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
3731 			    (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
3732 			    ltdefs->rx_oudp.ltype_mask);
3733 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
3734 			    (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
3735 			    ltdefs->rx_iudp.ltype_mask);
3736 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
3737 			    (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
3738 			    ltdefs->rx_osctp.ltype_mask);
3739 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
3740 			    (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
3741 			    ltdefs->rx_isctp.ltype_mask);
3742 
3743 		if (!is_rvu_otx2(rvu)) {
3744 			/* Enable APAD calculation for other protocols
3745 			 * matching APAD0 and APAD1 lt def registers.
3746 			 */
3747 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
3748 				    (ltdefs->rx_apad0.valid << 11) |
3749 				    (ltdefs->rx_apad0.lid << 8) |
3750 				    (ltdefs->rx_apad0.ltype_match << 4) |
3751 				    ltdefs->rx_apad0.ltype_mask);
3752 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
3753 				    (ltdefs->rx_apad1.valid << 11) |
3754 				    (ltdefs->rx_apad1.lid << 8) |
3755 				    (ltdefs->rx_apad1.ltype_match << 4) |
3756 				    ltdefs->rx_apad1.ltype_mask);
3757 
3758 			/* Receive ethertype definition register defines layer
3759 			 * information in NPC_RESULT_S to identify the Ethertype
3760 			 * location in L2 header. Used for Ethertype overwriting
3761 			 * in inline IPsec flow.
3762 			 */
3763 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
3764 				    (ltdefs->rx_et[0].offset << 12) |
3765 				    (ltdefs->rx_et[0].valid << 11) |
3766 				    (ltdefs->rx_et[0].lid << 8) |
3767 				    (ltdefs->rx_et[0].ltype_match << 4) |
3768 				    ltdefs->rx_et[0].ltype_mask);
3769 			rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
3770 				    (ltdefs->rx_et[1].offset << 12) |
3771 				    (ltdefs->rx_et[1].valid << 11) |
3772 				    (ltdefs->rx_et[1].lid << 8) |
3773 				    (ltdefs->rx_et[1].ltype_match << 4) |
3774 				    ltdefs->rx_et[1].ltype_mask);
3775 		}
3776 
3777 		err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
3778 		if (err)
3779 			return err;
3780 
3781 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
3782 		nix_link_config(rvu, blkaddr);
3783 
3784 		/* Enable Channel backpressure */
3785 		rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0));
3786 	}
3787 	return 0;
3788 }
3789 
3790 int rvu_nix_init(struct rvu *rvu)
3791 {
3792 	struct rvu_hwinfo *hw = rvu->hw;
3793 	struct nix_hw *nix_hw;
3794 	int blkaddr = 0, err;
3795 	int i = 0;
3796 
3797 	hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
3798 			       GFP_KERNEL);
3799 	if (!hw->nix)
3800 		return -ENOMEM;
3801 
3802 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3803 	while (blkaddr) {
3804 		nix_hw = &hw->nix[i];
3805 		nix_hw->rvu = rvu;
3806 		nix_hw->blkaddr = blkaddr;
3807 		err = rvu_nix_block_init(rvu, nix_hw);
3808 		if (err)
3809 			return err;
3810 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3811 		i++;
3812 	}
3813 
3814 	return 0;
3815 }
3816 
3817 static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
3818 				  struct rvu_block *block)
3819 {
3820 	struct nix_txsch *txsch;
3821 	struct nix_mcast *mcast;
3822 	struct nix_txvlan *vlan;
3823 	struct nix_hw *nix_hw;
3824 	int lvl;
3825 
3826 	rvu_aq_free(rvu, block->aq);
3827 
3828 	if (is_block_implemented(rvu->hw, blkaddr)) {
3829 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
3830 		if (!nix_hw)
3831 			return;
3832 
3833 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
3834 			txsch = &nix_hw->txsch[lvl];
3835 			kfree(txsch->schq.bmap);
3836 		}
3837 
3838 		nix_ipolicer_freemem(nix_hw);
3839 
3840 		vlan = &nix_hw->txvlan;
3841 		kfree(vlan->rsrc.bmap);
3842 		mutex_destroy(&vlan->rsrc_lock);
3843 		devm_kfree(rvu->dev, vlan->entry2pfvf_map);
3844 
3845 		mcast = &nix_hw->mcast;
3846 		qmem_free(rvu->dev, mcast->mce_ctx);
3847 		qmem_free(rvu->dev, mcast->mcast_buf);
3848 		mutex_destroy(&mcast->mce_lock);
3849 	}
3850 }
3851 
3852 void rvu_nix_freemem(struct rvu *rvu)
3853 {
3854 	struct rvu_hwinfo *hw = rvu->hw;
3855 	struct rvu_block *block;
3856 	int blkaddr = 0;
3857 
3858 	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3859 	while (blkaddr) {
3860 		block = &hw->block[blkaddr];
3861 		rvu_nix_block_freemem(rvu, blkaddr, block);
3862 		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
3863 	}
3864 }
3865 
3866 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
3867 				     struct msg_rsp *rsp)
3868 {
3869 	u16 pcifunc = req->hdr.pcifunc;
3870 	struct rvu_pfvf *pfvf;
3871 	int nixlf, err;
3872 
3873 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3874 	if (err)
3875 		return err;
3876 
3877 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
3878 
3879 	npc_mcam_enable_flows(rvu, pcifunc);
3880 
3881 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3882 	set_bit(NIXLF_INITIALIZED, &pfvf->flags);
3883 
3884 	return rvu_cgx_start_stop_io(rvu, pcifunc, true);
3885 }
3886 
3887 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
3888 				    struct msg_rsp *rsp)
3889 {
3890 	u16 pcifunc = req->hdr.pcifunc;
3891 	struct rvu_pfvf *pfvf;
3892 	int nixlf, err;
3893 
3894 	err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
3895 	if (err)
3896 		return err;
3897 
3898 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3899 
3900 	pfvf = rvu_get_pfvf(rvu, pcifunc);
3901 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3902 
3903 	return rvu_cgx_start_stop_io(rvu, pcifunc, false);
3904 }
3905 
3906 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
3907 {
3908 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
3909 	struct hwctx_disable_req ctx_req;
3910 	int err;
3911 
3912 	ctx_req.hdr.pcifunc = pcifunc;
3913 
3914 	/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
3915 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
3916 	rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
3917 	nix_interface_deinit(rvu, pcifunc, nixlf);
3918 	nix_rx_sync(rvu, blkaddr);
3919 	nix_txschq_free(rvu, pcifunc);
3920 
3921 	clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
3922 
3923 	rvu_cgx_start_stop_io(rvu, pcifunc, false);
3924 
3925 	if (pfvf->sq_ctx) {
3926 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
3927 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3928 		if (err)
3929 			dev_err(rvu->dev, "SQ ctx disable failed\n");
3930 	}
3931 
3932 	if (pfvf->rq_ctx) {
3933 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
3934 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3935 		if (err)
3936 			dev_err(rvu->dev, "RQ ctx disable failed\n");
3937 	}
3938 
3939 	if (pfvf->cq_ctx) {
3940 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
3941 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
3942 		if (err)
3943 			dev_err(rvu->dev, "CQ ctx disable failed\n");
3944 	}
3945 
3946 	nix_ctx_free(rvu, pfvf);
3947 
3948 	nix_free_all_bandprof(rvu, pcifunc);
3949 }
3950 
3951 #define NIX_AF_LFX_TX_CFG_PTP_EN	BIT_ULL(32)
3952 
3953 static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
3954 {
3955 	struct rvu_hwinfo *hw = rvu->hw;
3956 	struct rvu_block *block;
3957 	int blkaddr, pf;
3958 	int nixlf;
3959 	u64 cfg;
3960 
3961 	pf = rvu_get_pf(pcifunc);
3962 	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
3963 		return 0;
3964 
3965 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
3966 	if (blkaddr < 0)
3967 		return NIX_AF_ERR_AF_LF_INVALID;
3968 
3969 	block = &hw->block[blkaddr];
3970 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
3971 	if (nixlf < 0)
3972 		return NIX_AF_ERR_AF_LF_INVALID;
3973 
3974 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
3975 
3976 	if (enable)
3977 		cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
3978 	else
3979 		cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
3980 
3981 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
3982 
3983 	return 0;
3984 }
3985 
3986 int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
3987 					  struct msg_rsp *rsp)
3988 {
3989 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
3990 }
3991 
3992 int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
3993 					   struct msg_rsp *rsp)
3994 {
3995 	return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
3996 }
3997 
3998 int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
3999 					struct nix_lso_format_cfg *req,
4000 					struct nix_lso_format_cfg_rsp *rsp)
4001 {
4002 	u16 pcifunc = req->hdr.pcifunc;
4003 	struct nix_hw *nix_hw;
4004 	struct rvu_pfvf *pfvf;
4005 	int blkaddr, idx, f;
4006 	u64 reg;
4007 
4008 	pfvf = rvu_get_pfvf(rvu, pcifunc);
4009 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
4010 	if (!pfvf->nixlf || blkaddr < 0)
4011 		return NIX_AF_ERR_AF_LF_INVALID;
4012 
4013 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
4014 	if (!nix_hw)
4015 		return -EINVAL;
4016 
4017 	/* Find existing matching LSO format, if any */
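	/* A format matches only if, for every field index, the programmed
	 * field register masked by req->field_mask equals req->fields[f].
	 */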
4018 	for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
4019 		for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
4020 			reg = rvu_read64(rvu, blkaddr,
4021 					 NIX_AF_LSO_FORMATX_FIELDX(idx, f));
4022 			if (req->fields[f] != (reg & req->field_mask))
4023 				break;
4024 		}
4025 
4026 		if (f == NIX_LSO_FIELD_MAX)
4027 			break;
4028 	}
4029 
4030 	if (idx < nix_hw->lso.in_use) {
4031 		/* Match found */
4032 		rsp->lso_format_idx = idx;
4033 		return 0;
4034 	}
4035 
4036 	if (nix_hw->lso.in_use == nix_hw->lso.total)
4037 		return NIX_AF_ERR_LSO_CFG_FAIL;
4038 
4039 	rsp->lso_format_idx = nix_hw->lso.in_use++;
4040 
4041 	for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
4042 		rvu_write64(rvu, blkaddr,
4043 			    NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
4044 			    req->fields[f]);
4045 
4046 	return 0;
4047 }
4048 
4049 void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
4050 {
4051 	bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
4052 
4053 	/* overwrite vf mac address with default_mac */
4054 	if (from_vf)
4055 		ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
4056 }
4057 
4058 /* NIX ingress policer or bandwidth profile APIs */
4059 static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
4060 {
4061 	struct npc_lt_def_cfg defs, *ltdefs;
4062 
4063 	ltdefs = &defs;
4064 	memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
4065 
4066 	/* Extract the PCP and DEI fields from the outer VLAN at byte offset
4067 	 * 2 from the start of LB_PTR (i.e. the TAG).
4068 	 * VLAN0 is the outer VLAN and VLAN1 the inner VLAN. Inner VLAN
4069 	 * fields are considered only when 'Tunnel enable' is set in the profile.
4070 	 */
4071 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
4072 		    (2UL << 12) | (ltdefs->ovlan.lid << 8) |
4073 		    (ltdefs->ovlan.ltype_match << 4) |
4074 		    ltdefs->ovlan.ltype_mask);
4075 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
4076 		    (2UL << 12) | (ltdefs->ivlan.lid << 8) |
4077 		    (ltdefs->ivlan.ltype_match << 4) |
4078 		    ltdefs->ivlan.ltype_mask);
4079 
4080 	/* DSCP field in outer and tunneled IPv4 packets */
4081 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
4082 		    (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
4083 		    (ltdefs->rx_oip4.ltype_match << 4) |
4084 		    ltdefs->rx_oip4.ltype_mask);
4085 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
4086 		    (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
4087 		    (ltdefs->rx_iip4.ltype_match << 4) |
4088 		    ltdefs->rx_iip4.ltype_mask);
4089 
4090 	/* DSCP field (traffic class) in outer and tunneled IPv6 packets */
4091 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
4092 		    (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
4093 		    (ltdefs->rx_oip6.ltype_match << 4) |
4094 		    ltdefs->rx_oip6.ltype_mask);
4095 	rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
4096 		    (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
4097 		    (ltdefs->rx_iip6.ltype_match << 4) |
4098 		    ltdefs->rx_iip6.ltype_mask);
4099 }
4100 
4101 static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
4102 				    int layer, int prof_idx)
4103 {
4104 	struct nix_cn10k_aq_enq_req aq_req;
4105 	int rc;
4106 
4107 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4108 
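	/* The AQ qidx encodes the profile index in bits [13:0] and the
	 * layer in bits [15:14], matching the decode in nix_verify_bandprof().
	 */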
4109 	aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
4110 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4111 	aq_req.op = NIX_AQ_INSTOP_INIT;
4112 
4113 	/* Context is all zeros, submit to AQ */
4114 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4115 				     (struct nix_aq_enq_req *)&aq_req, NULL);
4116 	if (rc)
4117 		dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
4118 			layer, prof_idx);
4119 	return rc;
4120 }
4121 
4122 static int nix_setup_ipolicers(struct rvu *rvu,
4123 			       struct nix_hw *nix_hw, int blkaddr)
4124 {
4125 	struct rvu_hwinfo *hw = rvu->hw;
4126 	struct nix_ipolicer *ipolicer;
4127 	int err, layer, prof_idx;
4128 	u64 cfg;
4129 
4130 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4131 	if (!(cfg & BIT_ULL(61))) {
4132 		hw->cap.ipolicer = false;
4133 		return 0;
4134 	}
4135 
4136 	hw->cap.ipolicer = true;
4137 	nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
4138 					sizeof(*ipolicer), GFP_KERNEL);
4139 	if (!nix_hw->ipolicer)
4140 		return -ENOMEM;
4141 
4142 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
4143 
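	/* NIX_AF_PL_CONST carries the per-layer profile counts: leaf in
	 * bits [15:0], mid in bits [31:16] and top in bits [47:32], as
	 * decoded below.
	 */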
4144 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4145 		ipolicer = &nix_hw->ipolicer[layer];
4146 		switch (layer) {
4147 		case BAND_PROF_LEAF_LAYER:
4148 			ipolicer->band_prof.max = cfg & 0xFFFF;
4149 			break;
4150 		case BAND_PROF_MID_LAYER:
4151 			ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
4152 			break;
4153 		case BAND_PROF_TOP_LAYER:
4154 			ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
4155 			break;
4156 		}
4157 
4158 		if (!ipolicer->band_prof.max)
4159 			continue;
4160 
4161 		err = rvu_alloc_bitmap(&ipolicer->band_prof);
4162 		if (err)
4163 			return err;
4164 
4165 		ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
4166 						  ipolicer->band_prof.max,
4167 						  sizeof(u16), GFP_KERNEL);
4168 		if (!ipolicer->pfvf_map)
4169 			return -ENOMEM;
4170 
4171 		ipolicer->match_id = devm_kcalloc(rvu->dev,
4172 						  ipolicer->band_prof.max,
4173 						  sizeof(u16), GFP_KERNEL);
4174 		if (!ipolicer->match_id)
4175 			return -ENOMEM;
4176 
4177 		for (prof_idx = 0;
4178 		     prof_idx < ipolicer->band_prof.max; prof_idx++) {
4179 			/* Set AF as current owner for INIT ops to succeed */
4180 			ipolicer->pfvf_map[prof_idx] = 0x00;
4181 
4182 			/* The profile context has no enable bit, so there is
4183 			 * no context disable either. INIT the contexts here
4184 			 * so that PF/VF later only have to do a WRITE to set
4185 			 * up policer rates and config.
4186 			 */
4187 			err = nix_init_policer_context(rvu, nix_hw,
4188 						       layer, prof_idx);
4189 			if (err)
4190 				return err;
4191 		}
4192 
4193 		/* Allocate memory for maintaining ref_counts of MID level
4194 		 * profiles; this is needed when aggregating leaf layer
4195 		 * profiles.
4196 		 */
4197 		if (layer != BAND_PROF_MID_LAYER)
4198 			continue;
4199 
4200 		ipolicer->ref_count = devm_kcalloc(rvu->dev,
4201 						   ipolicer->band_prof.max,
4202 						   sizeof(u16), GFP_KERNEL);
4203 	}
4204 
4205 	/* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
4206 	rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
4207 
4208 	nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
4209 
4210 	return 0;
4211 }
4212 
4213 static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
4214 {
4215 	struct nix_ipolicer *ipolicer;
4216 	int layer;
4217 
4218 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4219 		ipolicer = &nix_hw->ipolicer[layer];
4220 
4221 		if (!ipolicer->band_prof.max)
4222 			continue;
4223 
4224 		kfree(ipolicer->band_prof.bmap);
4225 	}
4226 }
4227 
4228 static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
4229 			       struct nix_hw *nix_hw, u16 pcifunc)
4230 {
4231 	struct nix_ipolicer *ipolicer;
4232 	int layer, hi_layer, prof_idx;
4233 
4234 	/* Bits [15:14] in profile index represent layer */
4235 	layer = (req->qidx >> 14) & 0x03;
4236 	prof_idx = req->qidx & 0x3FFF;
4237 
4238 	ipolicer = &nix_hw->ipolicer[layer];
4239 	if (prof_idx >= ipolicer->band_prof.max)
4240 		return -EINVAL;
4241 
4242 	/* Check if the profile is allocated to the requesting PCIFUNC.
4243 	 * AF is an exception; it is allowed to read and update any context.
4244 	 */
4245 	if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
4246 		return -EINVAL;
4247 
4248 	/* If this profile is linked to a higher layer profile then check
4249 	 * whether that profile is also allocated to the requesting
4250 	 * PCIFUNC.
4251 	 */
4252 	if (!req->prof.hl_en)
4253 		return 0;
4254 
4255 	/* Leaf layer profile can link only to mid layer and
4256 	 * mid layer to top layer.
4257 	 */
4258 	if (layer == BAND_PROF_LEAF_LAYER)
4259 		hi_layer = BAND_PROF_MID_LAYER;
4260 	else if (layer == BAND_PROF_MID_LAYER)
4261 		hi_layer = BAND_PROF_TOP_LAYER;
4262 	else
4263 		return -EINVAL;
4264 
4265 	ipolicer = &nix_hw->ipolicer[hi_layer];
4266 	prof_idx = req->prof.band_prof_id;
4267 	if (prof_idx >= ipolicer->band_prof.max ||
4268 	    ipolicer->pfvf_map[prof_idx] != pcifunc)
4269 		return -EINVAL;
4270 
4271 	return 0;
4272 }
4273 
4274 int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
4275 					struct nix_bandprof_alloc_req *req,
4276 					struct nix_bandprof_alloc_rsp *rsp)
4277 {
4278 	int blkaddr, layer, prof, idx, err;
4279 	u16 pcifunc = req->hdr.pcifunc;
4280 	struct nix_ipolicer *ipolicer;
4281 	struct nix_hw *nix_hw;
4282 
4283 	if (!rvu->hw->cap.ipolicer)
4284 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4285 
4286 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4287 	if (err)
4288 		return err;
4289 
4290 	mutex_lock(&rvu->rsrc_lock);
4291 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4292 		if (layer == BAND_PROF_INVAL_LAYER)
4293 			continue;
4294 		if (!req->prof_count[layer])
4295 			continue;
4296 
4297 		ipolicer = &nix_hw->ipolicer[layer];
4298 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
4299 			/* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
4300 			if (idx == MAX_BANDPROF_PER_PFFUNC)
4301 				break;
4302 
4303 			prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4304 			if (prof < 0)
4305 				break;
4306 			rsp->prof_count[layer]++;
4307 			rsp->prof_idx[layer][idx] = prof;
4308 			ipolicer->pfvf_map[prof] = pcifunc;
4309 		}
4310 	}
4311 	mutex_unlock(&rvu->rsrc_lock);
4312 	return 0;
4313 }
4314 
4315 static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
4316 {
4317 	int blkaddr, layer, prof_idx, err;
4318 	struct nix_ipolicer *ipolicer;
4319 	struct nix_hw *nix_hw;
4320 
4321 	if (!rvu->hw->cap.ipolicer)
4322 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4323 
4324 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4325 	if (err)
4326 		return err;
4327 
4328 	mutex_lock(&rvu->rsrc_lock);
4329 	/* Free all the profiles allocated to the PCIFUNC */
4330 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4331 		if (layer == BAND_PROF_INVAL_LAYER)
4332 			continue;
4333 		ipolicer = &nix_hw->ipolicer[layer];
4334 
4335 		for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
4336 			if (ipolicer->pfvf_map[prof_idx] != pcifunc)
4337 				continue;
4338 
4339 			/* Clear ratelimit aggregation, if any */
4340 			if (layer == BAND_PROF_LEAF_LAYER &&
4341 			    ipolicer->match_id[prof_idx])
4342 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4343 
4344 			ipolicer->pfvf_map[prof_idx] = 0x00;
4345 			ipolicer->match_id[prof_idx] = 0;
4346 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4347 		}
4348 	}
4349 	mutex_unlock(&rvu->rsrc_lock);
4350 	return 0;
4351 }
4352 
4353 int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
4354 				       struct nix_bandprof_free_req *req,
4355 				       struct msg_rsp *rsp)
4356 {
4357 	int blkaddr, layer, prof_idx, idx, err;
4358 	u16 pcifunc = req->hdr.pcifunc;
4359 	struct nix_ipolicer *ipolicer;
4360 	struct nix_hw *nix_hw;
4361 
4362 	if (req->free_all)
4363 		return nix_free_all_bandprof(rvu, pcifunc);
4364 
4365 	if (!rvu->hw->cap.ipolicer)
4366 		return NIX_AF_ERR_IPOLICER_NOTSUPP;
4367 
4368 	err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4369 	if (err)
4370 		return err;
4371 
4372 	mutex_lock(&rvu->rsrc_lock);
4373 	/* Free the requested profile indices */
4374 	for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
4375 		if (layer == BAND_PROF_INVAL_LAYER)
4376 			continue;
4377 		if (!req->prof_count[layer])
4378 			continue;
4379 
4380 		ipolicer = &nix_hw->ipolicer[layer];
4381 		for (idx = 0; idx < req->prof_count[layer]; idx++) {
4382 			prof_idx = req->prof_idx[layer][idx];
4383 			if (prof_idx >= ipolicer->band_prof.max ||
4384 			    ipolicer->pfvf_map[prof_idx] != pcifunc)
4385 				continue;
4386 
4387 			/* Clear ratelimit aggregation, if any */
4388 			if (layer == BAND_PROF_LEAF_LAYER &&
4389 			    ipolicer->match_id[prof_idx])
4390 				nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
4391 
4392 			ipolicer->pfvf_map[prof_idx] = 0x00;
4393 			ipolicer->match_id[prof_idx] = 0;
4394 			rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
4395 			if (idx == MAX_BANDPROF_PER_PFFUNC)
4396 				break;
4397 		}
4398 	}
4399 	mutex_unlock(&rvu->rsrc_lock);
4400 	return 0;
4401 }
4402 
4403 int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
4404 			struct nix_cn10k_aq_enq_req *aq_req,
4405 			struct nix_cn10k_aq_enq_rsp *aq_rsp,
4406 			u16 pcifunc, u8 ctype, u32 qidx)
4407 {
4408 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4409 	aq_req->hdr.pcifunc = pcifunc;
4410 	aq_req->ctype = ctype;
4411 	aq_req->op = NIX_AQ_INSTOP_READ;
4412 	aq_req->qidx = qidx;
4413 
4414 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4415 				       (struct nix_aq_enq_req *)aq_req,
4416 				       (struct nix_aq_enq_rsp *)aq_rsp);
4417 }
4418 
4419 static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
4420 					  struct nix_hw *nix_hw,
4421 					  struct nix_cn10k_aq_enq_req *aq_req,
4422 					  struct nix_cn10k_aq_enq_rsp *aq_rsp,
4423 					  u32 leaf_prof, u16 mid_prof)
4424 {
4425 	memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4426 	aq_req->hdr.pcifunc = 0x00;
4427 	aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
4428 	aq_req->op = NIX_AQ_INSTOP_WRITE;
4429 	aq_req->qidx = leaf_prof;
4430 
4431 	aq_req->prof.band_prof_id = mid_prof;
4432 	aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
4433 	aq_req->prof.hl_en = 1;
4434 	aq_req->prof_mask.hl_en = 1;
4435 
4436 	return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4437 				       (struct nix_aq_enq_req *)aq_req,
4438 				       (struct nix_aq_enq_rsp *)aq_rsp);
4439 }
4440 
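/* If the leaf bandwidth profile of an RQ shares its match_id with another
 * leaf profile, both leaves are linked to a common mid-layer profile so
 * that flows steered to different RQs but carrying the same match_id are
 * rate limited as one aggregate; the logic below sets up that chaining.
 */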
4441 int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
4442 				 u16 rq_idx, u16 match_id)
4443 {
4444 	int leaf_prof, mid_prof, leaf_match;
4445 	struct nix_cn10k_aq_enq_req aq_req;
4446 	struct nix_cn10k_aq_enq_rsp aq_rsp;
4447 	struct nix_ipolicer *ipolicer;
4448 	struct nix_hw *nix_hw;
4449 	int blkaddr, idx, rc;
4450 
4451 	if (!rvu->hw->cap.ipolicer)
4452 		return 0;
4453 
4454 	rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
4455 	if (rc)
4456 		return rc;
4457 
4458 	/* Fetch the RQ's context to see if policing is enabled */
4459 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
4460 				 NIX_AQ_CTYPE_RQ, rq_idx);
4461 	if (rc) {
4462 		dev_err(rvu->dev,
4463 			"%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
4464 			__func__, rq_idx, pcifunc);
4465 		return rc;
4466 	}
4467 
4468 	if (!aq_rsp.rq.policer_ena)
4469 		return 0;
4470 
4471 	/* Get the bandwidth profile ID mapped to this RQ */
4472 	leaf_prof = aq_rsp.rq.band_prof_id;
4473 
4474 	ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
4475 	ipolicer->match_id[leaf_prof] = match_id;
4476 
4477 	/* Check if any other leaf profile is marked with same match_id */
4478 	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
4479 		if (idx == leaf_prof)
4480 			continue;
4481 		if (ipolicer->match_id[idx] != match_id)
4482 			continue;
4483 
4484 		leaf_match = idx;
4485 		break;
4486 	}
4487 
4488 	if (idx == ipolicer->band_prof.max)
4489 		return 0;
4490 
4491 	/* Fetch the matching profile's context to check if it's already
4492 	 * mapped to a mid level profile.
4493 	 */
4494 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4495 				 NIX_AQ_CTYPE_BANDPROF, leaf_match);
4496 	if (rc) {
4497 		dev_err(rvu->dev,
4498 			"%s: Failed to fetch context of leaf profile %d\n",
4499 			__func__, leaf_match);
4500 		return rc;
4501 	}
4502 
4503 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
4504 	if (aq_rsp.prof.hl_en) {
4505 		/* Get the mid layer profile index and map leaf_prof to it
4506 		 * as well, so that flows which are steered to different RQs
4507 		 * but marked with the same match_id are rate limited in an
4508 		 * aggregate fashion.
4509 		 */
4510 		mid_prof = aq_rsp.prof.band_prof_id;
4511 		rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4512 						    &aq_req, &aq_rsp,
4513 						    leaf_prof, mid_prof);
4514 		if (rc) {
4515 			dev_err(rvu->dev,
4516 				"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4517 				__func__, leaf_prof, mid_prof);
4518 			goto exit;
4519 		}
4520 
4521 		mutex_lock(&rvu->rsrc_lock);
4522 		ipolicer->ref_count[mid_prof]++;
4523 		mutex_unlock(&rvu->rsrc_lock);
4524 		goto exit;
4525 	}
4526 
4527 	/* Allocate a mid layer profile and
4528 	 * map both 'leaf_prof' and 'leaf_match' profiles to it.
4529 	 */
4530 	mutex_lock(&rvu->rsrc_lock);
4531 	mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
4532 	if (mid_prof < 0) {
4533 		dev_err(rvu->dev,
4534 			"%s: Unable to allocate mid layer profile\n", __func__);
4535 		mutex_unlock(&rvu->rsrc_lock);
4536 		goto exit;
4537 	}
4538 	mutex_unlock(&rvu->rsrc_lock);
4539 	ipolicer->pfvf_map[mid_prof] = 0x00;
4540 	ipolicer->ref_count[mid_prof] = 0;
4541 
4542 	/* Initialize mid layer profile same as 'leaf_prof' */
4543 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4544 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
4545 	if (rc) {
4546 		dev_err(rvu->dev,
4547 			"%s: Failed to fetch context of leaf profile %d\n",
4548 			__func__, leaf_prof);
4549 		goto exit;
4550 	}
4551 
4552 	memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
4553 	aq_req.hdr.pcifunc = 0x00;
4554 	aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
4555 	aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
4556 	aq_req.op = NIX_AQ_INSTOP_WRITE;
4557 	memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
4558 	/* Clear higher layer enable bit in the mid profile, just in case */
4559 	aq_req.prof.hl_en = 0;
4560 	aq_req.prof_mask.hl_en = 1;
4561 
4562 	rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
4563 				     (struct nix_aq_enq_req *)&aq_req, NULL);
4564 	if (rc) {
4565 		dev_err(rvu->dev,
4566 			"%s: Failed to INIT context of mid layer profile %d\n",
4567 			__func__, mid_prof);
4568 		goto exit;
4569 	}
4570 
4571 	/* Map both leaf profiles to this mid layer profile */
4572 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4573 					    &aq_req, &aq_rsp,
4574 					    leaf_prof, mid_prof);
4575 	if (rc) {
4576 		dev_err(rvu->dev,
4577 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4578 			__func__, leaf_prof, mid_prof);
4579 		goto exit;
4580 	}
4581 
4582 	mutex_lock(&rvu->rsrc_lock);
4583 	ipolicer->ref_count[mid_prof]++;
4584 	mutex_unlock(&rvu->rsrc_lock);
4585 
4586 	rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
4587 					    &aq_req, &aq_rsp,
4588 					    leaf_match, mid_prof);
4589 	if (rc) {
4590 		dev_err(rvu->dev,
4591 			"%s: Failed to map leaf(%d) and mid(%d) profiles\n",
4592 			__func__, leaf_match, mid_prof);
4593 		ipolicer->ref_count[mid_prof]--;
4594 		goto exit;
4595 	}
4596 
4597 	mutex_lock(&rvu->rsrc_lock);
4598 	ipolicer->ref_count[mid_prof]++;
4599 	mutex_unlock(&rvu->rsrc_lock);
4600 
4601 exit:
4602 	return rc;
4603 }
4604 
4605 /* Called with the rvu->rsrc_lock mutex held */
4606 static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
4607 				     u32 leaf_prof)
4608 {
4609 	struct nix_cn10k_aq_enq_req aq_req;
4610 	struct nix_cn10k_aq_enq_rsp aq_rsp;
4611 	struct nix_ipolicer *ipolicer;
4612 	u16 mid_prof;
4613 	int rc;
4614 
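	/* The caller holds rsrc_lock (see the comment above this function);
	 * drop it across the AQ context read and re-acquire it before the
	 * policer bookkeeping below is touched.
	 */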
4615 	mutex_unlock(&rvu->rsrc_lock);
4616 
4617 	rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
4618 				 NIX_AQ_CTYPE_BANDPROF, leaf_prof);
4619 
4620 	mutex_lock(&rvu->rsrc_lock);
4621 	if (rc) {
4622 		dev_err(rvu->dev,
4623 			"%s: Failed to fetch context of leaf profile %d\n",
4624 			__func__, leaf_prof);
4625 		return;
4626 	}
4627 
4628 	if (!aq_rsp.prof.hl_en)
4629 		return;
4630 
4631 	mid_prof = aq_rsp.prof.band_prof_id;
4632 	ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
4633 	ipolicer->ref_count[mid_prof]--;
4634 	/* If ref_count is zero, free mid layer profile */
4635 	if (!ipolicer->ref_count[mid_prof]) {
4636 		ipolicer->pfvf_map[mid_prof] = 0x00;
4637 		rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
4638 	}
4639 }
4640