1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
21 
22 enum mc_tbl_sz {
23 	MC_TBL_SZ_256,
24 	MC_TBL_SZ_512,
25 	MC_TBL_SZ_1K,
26 	MC_TBL_SZ_2K,
27 	MC_TBL_SZ_4K,
28 	MC_TBL_SZ_8K,
29 	MC_TBL_SZ_16K,
30 	MC_TBL_SZ_32K,
31 	MC_TBL_SZ_64K,
32 };
33 
34 enum mc_buf_cnt {
35 	MC_BUF_CNT_8,
36 	MC_BUF_CNT_16,
37 	MC_BUF_CNT_32,
38 	MC_BUF_CNT_64,
39 	MC_BUF_CNT_128,
40 	MC_BUF_CNT_256,
41 	MC_BUF_CNT_512,
42 	MC_BUF_CNT_1024,
43 	MC_BUF_CNT_2048,
44 };
45 
46 /* For now only the MC resources needed for broadcast
47  * pkt replication are considered, i.e. 256 HWVFs + 12 PFs.
48  */
49 #define MC_TBL_SIZE	MC_TBL_SZ_512
50 #define MC_BUF_CNT	MC_BUF_CNT_128
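/* Note: per their use in nix_setup_mcast() below, MC_TBL_SIZE selects a
 * (256 << val) entry MCE table and MC_BUF_CNT selects (8 << val)
 * replication buffers; the same raw values are also written into the
 * MCAST CFG registers, so they are presumably the HW encodings as well.
 */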
51 
52 struct mce {
53 	struct hlist_node	node;
54 	u16			idx;
55 	u16			pcifunc;
56 };
57 
58 bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
59 {
60 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
61 	int blkaddr;
62 
63 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
64 	if (!pfvf->nixlf || blkaddr < 0)
65 		return false;
66 	return true;
67 }
68 
69 int rvu_get_nixlf_count(struct rvu *rvu)
70 {
71 	struct rvu_block *block;
72 	int blkaddr;
73 
74 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
75 	if (blkaddr < 0)
76 		return 0;
77 	block = &rvu->hw->block[blkaddr];
78 	return block->lf.max;
79 }
80 
81 static void nix_mce_list_init(struct nix_mce_list *list, int max)
82 {
83 	INIT_HLIST_HEAD(&list->head);
84 	list->count = 0;
85 	list->max = max;
86 }
87 
88 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
89 {
90 	int idx;
91 
92 	if (!mcast)
93 		return 0;
94 
95 	idx = mcast->next_free_mce;
96 	mcast->next_free_mce += count;
97 	return idx;
98 }
99 
100 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
101 {
102 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
103 		return hw->nix0;
104 
105 	return NULL;
106 }
107 
108 static void nix_rx_sync(struct rvu *rvu, int blkaddr)
109 {
110 	int err;
111 
112 	/* Sync all in-flight RX packets to LLC/DRAM */
113 	rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
114 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
115 	if (err)
116 		dev_err(rvu->dev, "NIX RX software sync failed\n");
117 
118 	/* As per a HW erratum in 9xxx A0 silicon, HW may clear the
119 	 * SW_SYNC[ENA] bit too early. Hence wait for 50us more.
120 	 */
121 	if (is_rvu_9xxx_A0(rvu))
122 		usleep_range(50, 60);
123 }
124 
125 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
126 			    int lvl, u16 pcifunc, u16 schq)
127 {
128 	struct nix_txsch *txsch;
129 	struct nix_hw *nix_hw;
130 
131 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
132 	if (!nix_hw)
133 		return false;
134 
135 	txsch = &nix_hw->txsch[lvl];
136 	/* Check out of bounds */
137 	if (schq >= txsch->schq.max)
138 		return false;
139 
140 	mutex_lock(&rvu->rsrc_lock);
141 	if (txsch->pfvf_map[schq] != pcifunc) {
142 		mutex_unlock(&rvu->rsrc_lock);
143 		return false;
144 	}
145 	mutex_unlock(&rvu->rsrc_lock);
146 	return true;
147 }
148 
149 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
150 {
151 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
152 	u8 cgx_id, lmac_id;
153 	int pkind, pf, vf;
154 	int err;
155 
156 	pf = rvu_get_pf(pcifunc);
157 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
158 		return 0;
159 
160 	switch (type) {
161 	case NIX_INTF_TYPE_CGX:
162 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
163 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
164 
165 		pkind = rvu_npc_get_pkind(rvu, pf);
166 		if (pkind < 0) {
167 			dev_err(rvu->dev,
168 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
169 			return -EINVAL;
170 		}
171 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
172 		pfvf->tx_chan_base = pfvf->rx_chan_base;
173 		pfvf->rx_chan_cnt = 1;
174 		pfvf->tx_chan_cnt = 1;
175 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
176 		rvu_npc_set_pkind(rvu, pkind, pfvf);
177 		break;
178 	case NIX_INTF_TYPE_LBK:
179 		vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
180 		pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
181 		pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
182 						NIX_CHAN_LBK_CHX(0, vf + 1);
183 		pfvf->rx_chan_cnt = 1;
184 		pfvf->tx_chan_cnt = 1;
185 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
186 					      pfvf->rx_chan_base, false);
187 		break;
188 	}
189 
190 	/* Add a UCAST forwarding rule in MCAM for the MAC address of
191 	 * the RVU PF/VF to which this NIXLF is attached.
192 	 */
193 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
194 				    pfvf->rx_chan_base, pfvf->mac_addr);
195 
196 	/* Add this PF_FUNC to bcast pkt replication list */
197 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
198 	if (err) {
199 		dev_err(rvu->dev,
200 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
201 			pcifunc);
202 		return err;
203 	}
204 
205 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
206 					  nixlf, pfvf->rx_chan_base);
207 	pfvf->maxlen = NIC_HW_MIN_FRS;
208 	pfvf->minlen = NIC_HW_MIN_FRS;
209 
210 	return 0;
211 }
212 
213 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
214 {
215 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
216 	int err;
217 
218 	pfvf->maxlen = 0;
219 	pfvf->minlen = 0;
220 	pfvf->rxvlan = false;
221 
222 	/* Remove this PF_FUNC from bcast pkt replication list */
223 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
224 	if (err) {
225 		dev_err(rvu->dev,
226 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
227 			pcifunc);
228 	}
229 
230 	/* Free and disable any MCAM entries used by this NIX LF */
231 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
232 }
233 
234 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
235 				 u64 format, bool v4, u64 *fidx)
236 {
237 	struct nix_lso_format field = {0};
238 
239 	/* IP's Length field */
240 	field.layer = NIX_TXLAYER_OL3;
241 	/* In ipv4, length field is at offset 2 bytes, for ipv6 it's 4 */
242 	field.offset = v4 ? 2 : 4;
243 	field.sizem1 = 1; /* i.e 2 bytes */
244 	field.alg = NIX_LSOALG_ADD_PAYLEN;
245 	rvu_write64(rvu, blkaddr,
246 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
247 		    *(u64 *)&field);
248 
249 	/* No ID field in IPv6 header */
250 	if (!v4)
251 		return;
252 
253 	/* IP's ID field */
254 	field.layer = NIX_TXLAYER_OL3;
255 	field.offset = 4;
256 	field.sizem1 = 1; /* i.e 2 bytes */
257 	field.alg = NIX_LSOALG_ADD_SEGNUM;
258 	rvu_write64(rvu, blkaddr,
259 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
260 		    *(u64 *)&field);
261 }
262 
263 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
264 				 u64 format, u64 *fidx)
265 {
266 	struct nix_lso_format field = {0};
267 
268 	/* TCP's sequence number field */
269 	field.layer = NIX_TXLAYER_OL4;
270 	field.offset = 4;
271 	field.sizem1 = 3; /* i.e 4 bytes */
272 	field.alg = NIX_LSOALG_ADD_OFFSET;
273 	rvu_write64(rvu, blkaddr,
274 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
275 		    *(u64 *)&field);
276 
277 	/* TCP's flags field */
278 	field.layer = NIX_TXLAYER_OL4;
279 	field.offset = 12;
280 	field.sizem1 = 0; /* not needed */
281 	field.alg = NIX_LSOALG_TCP_FLAGS;
282 	rvu_write64(rvu, blkaddr,
283 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
284 		    *(u64 *)&field);
285 }
286 
287 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
288 {
289 	u64 cfg, idx, fidx = 0;
290 
291 	/* Enable LSO */
292 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
293 	/* For TSO, set first and middle segment flags to
294 	 * mask out PSH, RST & FIN flags in TCP packet
295 	 */
296 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
297 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
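	/* Assuming the mask bits line up with the TCP flags octet, 0xFFF2
	 * clears FIN (bit 0), RST (bit 2) and PSH (bit 3) while leaving the
	 * remaining flag bits set, matching the comment above.
	 */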
298 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
299 
300 	/* Configure format fields for TCPv4 segmentation offload */
301 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
302 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
303 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
304 
305 	/* Set rest of the fields to NOP */
306 	for (; fidx < 8; fidx++) {
307 		rvu_write64(rvu, blkaddr,
308 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
309 	}
310 
311 	/* Configure format fields for TCPv6 segmentation offload */
312 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
313 	fidx = 0;
314 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
315 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
316 
317 	/* Set rest of the fields to NOP */
318 	for (; fidx < 8; fidx++) {
319 		rvu_write64(rvu, blkaddr,
320 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
321 	}
322 }
323 
324 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
325 {
326 	kfree(pfvf->rq_bmap);
327 	kfree(pfvf->sq_bmap);
328 	kfree(pfvf->cq_bmap);
329 	if (pfvf->rq_ctx)
330 		qmem_free(rvu->dev, pfvf->rq_ctx);
331 	if (pfvf->sq_ctx)
332 		qmem_free(rvu->dev, pfvf->sq_ctx);
333 	if (pfvf->cq_ctx)
334 		qmem_free(rvu->dev, pfvf->cq_ctx);
335 	if (pfvf->rss_ctx)
336 		qmem_free(rvu->dev, pfvf->rss_ctx);
337 	if (pfvf->nix_qints_ctx)
338 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
339 	if (pfvf->cq_ints_ctx)
340 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
341 
342 	pfvf->rq_bmap = NULL;
343 	pfvf->cq_bmap = NULL;
344 	pfvf->sq_bmap = NULL;
345 	pfvf->rq_ctx = NULL;
346 	pfvf->sq_ctx = NULL;
347 	pfvf->cq_ctx = NULL;
348 	pfvf->rss_ctx = NULL;
349 	pfvf->nix_qints_ctx = NULL;
350 	pfvf->cq_ints_ctx = NULL;
351 }
352 
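/* Allocate and configure this LF's RSS indirection table: 'rss_grps'
 * groups of 'rss_sz' entries each are carved out of one contiguous RSSE
 * context region. NIX_AF_LFX_RSS_CFG enables RSS and caching and encodes
 * the total table size, while each NIX_AF_LFX_RSS_GRPX register holds a
 * group's offset and (log2(size) - 1), as written below.
 */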
353 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
354 			      struct rvu_pfvf *pfvf, int nixlf,
355 			      int rss_sz, int rss_grps, int hwctx_size)
356 {
357 	int err, grp, num_indices;
358 
359 	/* RSS is not requested for this NIXLF */
360 	if (!rss_sz)
361 		return 0;
362 	num_indices = rss_sz * rss_grps;
363 
364 	/* Alloc NIX RSS HW context memory and config the base */
365 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
366 	if (err)
367 		return err;
368 
369 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
370 		    (u64)pfvf->rss_ctx->iova);
371 
372 	/* Config full RSS table size, enable RSS and caching */
373 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
374 		    BIT_ULL(36) | BIT_ULL(4) |
375 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
376 	/* Config RSS group offset and sizes */
377 	for (grp = 0; grp < rss_grps; grp++)
378 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
379 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
380 	return 0;
381 }
382 
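/* Submit one instruction to the NIX admin queue and busy-wait for its
 * completion: the instruction is copied into the slot pointed to by the
 * AQ_STATUS head pointer, the doorbell is rung with a count of one and
 * the completion code in the result memory is polled for up to ~1000us.
 */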
383 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
384 			       struct nix_aq_inst_s *inst)
385 {
386 	struct admin_queue *aq = block->aq;
387 	struct nix_aq_res_s *result;
388 	int timeout = 1000;
389 	u64 reg, head;
390 
391 	result = (struct nix_aq_res_s *)aq->res->base;
392 
393 	/* Get the current head pointer, where this instruction is appended */
394 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
395 	head = (reg >> 4) & AQ_PTR_MASK;
396 
397 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
398 	       (void *)inst, aq->inst->entry_sz);
399 	memset(result, 0, sizeof(*result));
400 	/* sync into memory */
401 	wmb();
402 
403 	/* Ring the doorbell and wait for result */
404 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
405 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
406 		cpu_relax();
407 		udelay(1);
408 		timeout--;
409 		if (!timeout)
410 			return -EBUSY;
411 	}
412 
413 	if (result->compcode != NIX_AQ_COMP_GOOD)
414 		/* TODO: Replace this with some error code */
415 		return -EBUSY;
416 
417 	return 0;
418 }
419 
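/* Common path for all NIX AQ operations (RQ/SQ/CQ/RSS/MCE contexts):
 * validates LF ownership and the queue index for the requested context
 * type, builds a nix_aq_inst_s whose result address points at the shared
 * result buffer (context at +128 bytes, write mask at +256 bytes),
 * submits it under aq->lock and then either updates the SW RQ/SQ/CQ
 * enable bitmaps or copies a read result back into 'rsp'.
 */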
420 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
421 			       struct nix_aq_enq_rsp *rsp)
422 {
423 	struct rvu_hwinfo *hw = rvu->hw;
424 	u16 pcifunc = req->hdr.pcifunc;
425 	int nixlf, blkaddr, rc = 0;
426 	struct nix_aq_inst_s inst;
427 	struct rvu_block *block;
428 	struct admin_queue *aq;
429 	struct rvu_pfvf *pfvf;
430 	void *ctx, *mask;
431 	bool ena;
432 	u64 cfg;
433 
434 	pfvf = rvu_get_pfvf(rvu, pcifunc);
435 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
436 	if (!pfvf->nixlf || blkaddr < 0)
437 		return NIX_AF_ERR_AF_LF_INVALID;
438 
439 	block = &hw->block[blkaddr];
440 	aq = block->aq;
441 	if (!aq) {
442 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
443 		return NIX_AF_ERR_AQ_ENQUEUE;
444 	}
445 
446 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
447 	if (nixlf < 0)
448 		return NIX_AF_ERR_AF_LF_INVALID;
449 
450 	switch (req->ctype) {
451 	case NIX_AQ_CTYPE_RQ:
452 		/* Check if index exceeds max no of queues */
453 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
454 			rc = NIX_AF_ERR_AQ_ENQUEUE;
455 		break;
456 	case NIX_AQ_CTYPE_SQ:
457 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
458 			rc = NIX_AF_ERR_AQ_ENQUEUE;
459 		break;
460 	case NIX_AQ_CTYPE_CQ:
461 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
462 			rc = NIX_AF_ERR_AQ_ENQUEUE;
463 		break;
464 	case NIX_AQ_CTYPE_RSS:
465 		/* Check if RSS is enabled and qidx is within range */
466 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
467 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
468 		    (req->qidx >= (256UL << (cfg & 0xF))))
469 			rc = NIX_AF_ERR_AQ_ENQUEUE;
470 		break;
471 	case NIX_AQ_CTYPE_MCE:
472 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
473 		/* Check if index exceeds MCE list length */
474 		if (!hw->nix0->mcast.mce_ctx ||
475 		    (req->qidx >= (256UL << (cfg & 0xF))))
476 			rc = NIX_AF_ERR_AQ_ENQUEUE;
477 
478 		/* Adding MCEs on behalf of PF/VF mailbox requests is not yet
479 		 * supported, so reject them ('rsp' is valid only for requests
480 		 * that came in via the mailbox). */
481 		if (rsp)
482 			rc = NIX_AF_ERR_AQ_ENQUEUE;
483 		break;
484 	default:
485 		rc = NIX_AF_ERR_AQ_ENQUEUE;
486 	}
487 
488 	if (rc)
489 		return rc;
490 
491 	/* Check if the SMQ pointed to by the SQ context belongs to this PF/VF */
492 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
493 	    req->op != NIX_AQ_INSTOP_WRITE) {
494 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
495 				     pcifunc, req->sq.smq))
496 			return NIX_AF_ERR_AQ_ENQUEUE;
497 	}
498 
499 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
500 	inst.lf = nixlf;
501 	inst.cindex = req->qidx;
502 	inst.ctype = req->ctype;
503 	inst.op = req->op;
504 	/* Enqueuing multiple instructions is currently not supported,
505 	 * so always use the first entry in the result memory.
506 	 */
507 	inst.res_addr = (u64)aq->res->iova;
508 
509 	/* Clean result + context memory */
510 	memset(aq->res->base, 0, aq->res->entry_sz);
511 	/* Context needs to be written at RES_ADDR + 128 */
512 	ctx = aq->res->base + 128;
513 	/* Mask needs to be written at RES_ADDR + 256 */
514 	mask = aq->res->base + 256;
515 
516 	switch (req->op) {
517 	case NIX_AQ_INSTOP_WRITE:
518 		if (req->ctype == NIX_AQ_CTYPE_RQ)
519 			memcpy(mask, &req->rq_mask,
520 			       sizeof(struct nix_rq_ctx_s));
521 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
522 			memcpy(mask, &req->sq_mask,
523 			       sizeof(struct nix_sq_ctx_s));
524 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
525 			memcpy(mask, &req->cq_mask,
526 			       sizeof(struct nix_cq_ctx_s));
527 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
528 			memcpy(mask, &req->rss_mask,
529 			       sizeof(struct nix_rsse_s));
530 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
531 			memcpy(mask, &req->mce_mask,
532 			       sizeof(struct nix_rx_mce_s));
533 		/* Fall through */
534 	case NIX_AQ_INSTOP_INIT:
535 		if (req->ctype == NIX_AQ_CTYPE_RQ)
536 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
537 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
538 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
539 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
540 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
541 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
542 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
543 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
544 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
545 		break;
546 	case NIX_AQ_INSTOP_NOP:
547 	case NIX_AQ_INSTOP_READ:
548 	case NIX_AQ_INSTOP_LOCK:
549 	case NIX_AQ_INSTOP_UNLOCK:
550 		break;
551 	default:
552 		rc = NIX_AF_ERR_AQ_ENQUEUE;
553 		return rc;
554 	}
555 
556 	spin_lock(&aq->lock);
557 
558 	/* Submit the instruction to AQ */
559 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
560 	if (rc) {
561 		spin_unlock(&aq->lock);
562 		return rc;
563 	}
564 
565 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
566 	if (req->op == NIX_AQ_INSTOP_INIT) {
567 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
568 			__set_bit(req->qidx, pfvf->rq_bmap);
569 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
570 			__set_bit(req->qidx, pfvf->sq_bmap);
571 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
572 			__set_bit(req->qidx, pfvf->cq_bmap);
573 	}
574 
575 	if (req->op == NIX_AQ_INSTOP_WRITE) {
576 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
577 			ena = (req->rq.ena & req->rq_mask.ena) |
578 				(test_bit(req->qidx, pfvf->rq_bmap) &
579 				~req->rq_mask.ena);
580 			if (ena)
581 				__set_bit(req->qidx, pfvf->rq_bmap);
582 			else
583 				__clear_bit(req->qidx, pfvf->rq_bmap);
584 		}
585 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
586 			ena = (req->sq.ena & req->sq_mask.ena) |
587 				(test_bit(req->qidx, pfvf->sq_bmap) &
588 				~req->sq_mask.ena);
589 			if (ena)
590 				__set_bit(req->qidx, pfvf->sq_bmap);
591 			else
592 				__clear_bit(req->qidx, pfvf->sq_bmap);
593 		}
594 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
595 			ena = (req->cq.ena & req->cq_mask.ena) |
596 				(test_bit(req->qidx, pfvf->cq_bmap) &
597 				~req->cq_mask.ena);
598 			if (ena)
599 				__set_bit(req->qidx, pfvf->cq_bmap);
600 			else
601 				__clear_bit(req->qidx, pfvf->cq_bmap);
602 		}
603 	}
604 
605 	if (rsp) {
606 		/* Copy read context into mailbox */
607 		if (req->op == NIX_AQ_INSTOP_READ) {
608 			if (req->ctype == NIX_AQ_CTYPE_RQ)
609 				memcpy(&rsp->rq, ctx,
610 				       sizeof(struct nix_rq_ctx_s));
611 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
612 				memcpy(&rsp->sq, ctx,
613 				       sizeof(struct nix_sq_ctx_s));
614 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
615 				memcpy(&rsp->cq, ctx,
616 				       sizeof(struct nix_cq_ctx_s));
617 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
618 				memcpy(&rsp->rss, ctx,
619 				       sizeof(struct nix_rsse_s));
620 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
621 				memcpy(&rsp->mce, ctx,
622 				       sizeof(struct nix_rx_mce_s));
623 		}
624 	}
625 
626 	spin_unlock(&aq->lock);
627 	return 0;
628 }
629 
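/* Disable all enabled HW contexts of the requested type (RQ/SQ/CQ) for
 * this LF: for every queue set in the corresponding SW bitmap, issue an
 * AQ WRITE with ena = 0 and only the 'ena' bit set in the write mask.
 */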
630 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
631 {
632 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
633 	struct nix_aq_enq_req aq_req;
634 	unsigned long *bmap;
635 	int qidx, q_cnt = 0;
636 	int err = 0, rc;
637 
638 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
639 		return NIX_AF_ERR_AQ_ENQUEUE;
640 
641 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
642 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
643 
644 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
645 		aq_req.cq.ena = 0;
646 		aq_req.cq_mask.ena = 1;
647 		q_cnt = pfvf->cq_ctx->qsize;
648 		bmap = pfvf->cq_bmap;
649 	}
650 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
651 		aq_req.sq.ena = 0;
652 		aq_req.sq_mask.ena = 1;
653 		q_cnt = pfvf->sq_ctx->qsize;
654 		bmap = pfvf->sq_bmap;
655 	}
656 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
657 		aq_req.rq.ena = 0;
658 		aq_req.rq_mask.ena = 1;
659 		q_cnt = pfvf->rq_ctx->qsize;
660 		bmap = pfvf->rq_bmap;
661 	}
662 
663 	aq_req.ctype = req->ctype;
664 	aq_req.op = NIX_AQ_INSTOP_WRITE;
665 
666 	for (qidx = 0; qidx < q_cnt; qidx++) {
667 		if (!test_bit(qidx, bmap))
668 			continue;
669 		aq_req.qidx = qidx;
670 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
671 		if (rc) {
672 			err = rc;
673 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
674 				(req->ctype == NIX_AQ_CTYPE_CQ) ?
675 				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
676 				"RQ" : "SQ"), qidx);
677 		}
678 	}
679 
680 	return err;
681 }
682 
683 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
684 				struct nix_aq_enq_req *req,
685 				struct nix_aq_enq_rsp *rsp)
686 {
687 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
688 }
689 
690 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
691 				       struct hwctx_disable_req *req,
692 				       struct msg_rsp *rsp)
693 {
694 	return nix_lf_hwctx_disable(rvu, req);
695 }
696 
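/* Mailbox handler for NIXLF allocation: validates the NPA/SSO mappings
 * and RSS parameters, resets the LF, allocates RQ/SQ/CQ/RSS/QINT/CINT
 * HW context memory (entry sizes taken from NIX_AF_CONST2/CONST3) and
 * programs the context base IOVAs, initializes the CGX or LBK interface
 * and finally fills the response with channel, stat and interrupt
 * counts read back from the AF registers.
 */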
697 int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
698 				  struct nix_lf_alloc_req *req,
699 				  struct nix_lf_alloc_rsp *rsp)
700 {
701 	int nixlf, qints, hwctx_size, intf, err, rc = 0;
702 	struct rvu_hwinfo *hw = rvu->hw;
703 	u16 pcifunc = req->hdr.pcifunc;
704 	struct rvu_block *block;
705 	struct rvu_pfvf *pfvf;
706 	u64 cfg, ctx_cfg;
707 	int blkaddr;
708 
709 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
710 		return NIX_AF_ERR_PARAM;
711 
712 	pfvf = rvu_get_pfvf(rvu, pcifunc);
713 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
714 	if (!pfvf->nixlf || blkaddr < 0)
715 		return NIX_AF_ERR_AF_LF_INVALID;
716 
717 	block = &hw->block[blkaddr];
718 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
719 	if (nixlf < 0)
720 		return NIX_AF_ERR_AF_LF_INVALID;
721 
722 	/* Check if requested 'NIXLF <=> NPALF' mapping is valid */
723 	if (req->npa_func) {
724 		/* If default, use 'this' NIXLF's PFFUNC */
725 		if (req->npa_func == RVU_DEFAULT_PF_FUNC)
726 			req->npa_func = pcifunc;
727 		if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
728 			return NIX_AF_INVAL_NPA_PF_FUNC;
729 	}
730 
731 	/* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
732 	if (req->sso_func) {
733 		/* If default, use 'this' NIXLF's PFFUNC */
734 		if (req->sso_func == RVU_DEFAULT_PF_FUNC)
735 			req->sso_func = pcifunc;
736 		if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
737 			return NIX_AF_INVAL_SSO_PF_FUNC;
738 	}
739 
740 	/* If RSS is being enabled, check if the requested config is valid.
741 	 * The RSS table size should be a power of two, otherwise
742 	 * RSS_GRP::OFFSET + adder might go beyond that group, or the
743 	 * entire table may not be usable.
744 	 */
745 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
746 			    !is_power_of_2(req->rss_sz)))
747 		return NIX_AF_ERR_RSS_SIZE_INVALID;
748 
749 	if (req->rss_sz &&
750 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
751 		return NIX_AF_ERR_RSS_GRPS_INVALID;
752 
753 	/* Reset this NIX LF */
754 	err = rvu_lf_reset(rvu, block, nixlf);
755 	if (err) {
756 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
757 			block->addr - BLKADDR_NIX0, nixlf);
758 		return NIX_AF_ERR_LF_RESET;
759 	}
760 
761 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
762 
763 	/* Alloc NIX RQ HW context memory and config the base */
764 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
765 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
766 	if (err)
767 		goto free_mem;
768 
769 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
770 	if (!pfvf->rq_bmap)
771 		goto free_mem;
772 
773 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
774 		    (u64)pfvf->rq_ctx->iova);
775 
776 	/* Set caching and queue count in HW */
777 	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
778 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
779 
780 	/* Alloc NIX SQ HW context memory and config the base */
781 	hwctx_size = 1UL << (ctx_cfg & 0xF);
782 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
783 	if (err)
784 		goto free_mem;
785 
786 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
787 	if (!pfvf->sq_bmap)
788 		goto free_mem;
789 
790 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
791 		    (u64)pfvf->sq_ctx->iova);
792 	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
793 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
794 
795 	/* Alloc NIX CQ HW context memory and config the base */
796 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
797 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
798 	if (err)
799 		goto free_mem;
800 
801 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
802 	if (!pfvf->cq_bmap)
803 		goto free_mem;
804 
805 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
806 		    (u64)pfvf->cq_ctx->iova);
807 	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
808 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
809 
810 	/* Initialize receive side scaling (RSS) */
811 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
812 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
813 				 req->rss_sz, req->rss_grps, hwctx_size);
814 	if (err)
815 		goto free_mem;
816 
817 	/* Alloc memory for CQINT's HW contexts */
818 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
819 	qints = (cfg >> 24) & 0xFFF;
820 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
821 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
822 	if (err)
823 		goto free_mem;
824 
825 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
826 		    (u64)pfvf->cq_ints_ctx->iova);
827 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
828 
829 	/* Alloc memory for QINT's HW contexts */
830 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
831 	qints = (cfg >> 12) & 0xFFF;
832 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
833 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
834 	if (err)
835 		goto free_mem;
836 
837 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
838 		    (u64)pfvf->nix_qints_ctx->iova);
839 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
840 
841 	/* Enable LMTST for this NIX LF */
842 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
843 
844 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
845 	/* Start from a clean value; cfg still holds NIX_AF_CONST2 here */
846 	cfg = req->npa_func;
847 	if (req->sso_func)
848 		cfg |= (u64)req->sso_func << 16;
849 
850 	cfg |= (u64)req->xqe_sz << 33;
851 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
852 
853 	/* Config Rx pkt length, csum checks and apad enable/disable */
854 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
855 
856 	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
857 	err = nix_interface_init(rvu, pcifunc, intf, nixlf);
858 	if (err)
859 		goto free_mem;
860 
861 	/* Disable NPC entries as NIXLF's contexts are not initialized yet */
862 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
863 
864 	goto exit;
865 
866 free_mem:
867 	nix_ctx_free(rvu, pfvf);
868 	rc = -ENOMEM;
869 
870 exit:
871 	/* Set macaddr of this PF/VF */
872 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
873 
874 	/* set SQB size info */
875 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
876 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
877 	rsp->rx_chan_base = pfvf->rx_chan_base;
878 	rsp->tx_chan_base = pfvf->tx_chan_base;
879 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
880 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
881 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
882 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
883 	/* Get HW supported stat count */
884 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
885 	rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
886 	rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
887 	/* Get count of CQ IRQs and error IRQs supported per LF */
888 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
889 	rsp->qints = ((cfg >> 12) & 0xFFF);
890 	rsp->cints = ((cfg >> 24) & 0xFFF);
891 	return rc;
892 }
893 
894 int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
895 				 struct msg_rsp *rsp)
896 {
897 	struct rvu_hwinfo *hw = rvu->hw;
898 	u16 pcifunc = req->hdr.pcifunc;
899 	struct rvu_block *block;
900 	int blkaddr, nixlf, err;
901 	struct rvu_pfvf *pfvf;
902 
903 	pfvf = rvu_get_pfvf(rvu, pcifunc);
904 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
905 	if (!pfvf->nixlf || blkaddr < 0)
906 		return NIX_AF_ERR_AF_LF_INVALID;
907 
908 	block = &hw->block[blkaddr];
909 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
910 	if (nixlf < 0)
911 		return NIX_AF_ERR_AF_LF_INVALID;
912 
913 	nix_interface_deinit(rvu, pcifunc, nixlf);
914 
915 	/* Reset this NIX LF */
916 	err = rvu_lf_reset(rvu, block, nixlf);
917 	if (err) {
918 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
919 			block->addr - BLKADDR_NIX0, nixlf);
920 		return NIX_AF_ERR_LF_RESET;
921 	}
922 
923 	nix_ctx_free(rvu, pfvf);
924 
925 	return 0;
926 }
927 
928 /* Disable shaping of pkts by a scheduler queue
929  * at a given scheduler level.
930  */
931 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
932 				 int lvl, int schq)
933 {
934 	u64  cir_reg = 0, pir_reg = 0;
935 	u64  cfg;
936 
937 	switch (lvl) {
938 	case NIX_TXSCH_LVL_TL1:
939 		cir_reg = NIX_AF_TL1X_CIR(schq);
940 		pir_reg = 0; /* PIR not available at TL1 */
941 		break;
942 	case NIX_TXSCH_LVL_TL2:
943 		cir_reg = NIX_AF_TL2X_CIR(schq);
944 		pir_reg = NIX_AF_TL2X_PIR(schq);
945 		break;
946 	case NIX_TXSCH_LVL_TL3:
947 		cir_reg = NIX_AF_TL3X_CIR(schq);
948 		pir_reg = NIX_AF_TL3X_PIR(schq);
949 		break;
950 	case NIX_TXSCH_LVL_TL4:
951 		cir_reg = NIX_AF_TL4X_CIR(schq);
952 		pir_reg = NIX_AF_TL4X_PIR(schq);
953 		break;
954 	}
955 
956 	if (!cir_reg)
957 		return;
958 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
959 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
960 
961 	if (!pir_reg)
962 		return;
963 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
964 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
965 }
966 
967 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
968 				 int lvl, int schq)
969 {
970 	struct rvu_hwinfo *hw = rvu->hw;
971 	int link;
972 
973 	/* Reset TL4's SDP link config */
974 	if (lvl == NIX_TXSCH_LVL_TL4)
975 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
976 
977 	if (lvl != NIX_TXSCH_LVL_TL2)
978 		return;
979 
980 	/* Reset TL2's CGX or LBK link config */
981 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
982 		rvu_write64(rvu, blkaddr,
983 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
984 }
985 
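/* Mailbox handler for TX scheduler queue allocation: first checks, per
 * scheduler level, that the contiguous + scattered request fits within
 * the free resources, then allocates the queues, records ownership in
 * pfvf_map and resets each allocated queue's link config and shaping.
 */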
986 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
987 				     struct nix_txsch_alloc_req *req,
988 				     struct nix_txsch_alloc_rsp *rsp)
989 {
990 	u16 pcifunc = req->hdr.pcifunc;
991 	struct nix_txsch *txsch;
992 	int lvl, idx, req_schq;
993 	struct rvu_pfvf *pfvf;
994 	struct nix_hw *nix_hw;
995 	int blkaddr, rc = 0;
996 	u16 schq;
997 
998 	pfvf = rvu_get_pfvf(rvu, pcifunc);
999 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1000 	if (!pfvf->nixlf || blkaddr < 0)
1001 		return NIX_AF_ERR_AF_LF_INVALID;
1002 
1003 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1004 	if (!nix_hw)
1005 		return -EINVAL;
1006 
1007 	mutex_lock(&rvu->rsrc_lock);
1008 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1009 		txsch = &nix_hw->txsch[lvl];
1010 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
1011 
1012 		/* There are only 28 TL1s */
1013 		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
1014 			goto err;
1015 
1016 		/* Check if request is valid */
1017 		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
1018 			goto err;
1019 
1020 		/* If contiguous queues are needed, check for availability */
1021 		if (req->schq_contig[lvl] &&
1022 		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
1023 			goto err;
1024 
1025 		/* Check if full request can be accommodated */
1026 		if (req_schq > rvu_rsrc_free_count(&txsch->schq))
1027 			goto err;
1028 	}
1029 
1030 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1031 		txsch = &nix_hw->txsch[lvl];
1032 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
1033 		rsp->schq[lvl] = req->schq[lvl];
1034 
1035 		schq = 0;
1036 		/* Alloc contiguous queues first */
1037 		if (req->schq_contig[lvl]) {
1038 			schq = rvu_alloc_rsrc_contig(&txsch->schq,
1039 						     req->schq_contig[lvl]);
1040 
1041 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
1042 				txsch->pfvf_map[schq] = pcifunc;
1043 				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1044 				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1045 				rsp->schq_contig_list[lvl][idx] = schq;
1046 				schq++;
1047 			}
1048 		}
1049 
1050 		/* Alloc non-contiguous queues */
1051 		for (idx = 0; idx < req->schq[lvl]; idx++) {
1052 			schq = rvu_alloc_rsrc(&txsch->schq);
1053 			txsch->pfvf_map[schq] = pcifunc;
1054 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1055 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
1056 			rsp->schq_list[lvl][idx] = schq;
1057 		}
1058 	}
1059 	goto exit;
1060 err:
1061 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
1062 exit:
1063 	mutex_unlock(&rvu->rsrc_lock);
1064 	return rc;
1065 }
1066 
1067 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1068 {
1069 	int blkaddr, nixlf, lvl, schq, err;
1070 	struct rvu_hwinfo *hw = rvu->hw;
1071 	struct nix_txsch *txsch;
1072 	struct nix_hw *nix_hw;
1073 	u64 cfg;
1074 
1075 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1076 	if (blkaddr < 0)
1077 		return NIX_AF_ERR_AF_LF_INVALID;
1078 
1079 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1080 	if (!nix_hw)
1081 		return -EINVAL;
1082 
1083 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1084 	if (nixlf < 0)
1085 		return NIX_AF_ERR_AF_LF_INVALID;
1086 
1087 	/* Disable TL2/TL4 queue links before SMQ flush */
1088 	mutex_lock(&rvu->rsrc_lock);
1089 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1090 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1091 			continue;
1092 
1093 		txsch = &nix_hw->txsch[lvl];
1094 		for (schq = 0; schq < txsch->schq.max; schq++) {
1095 			if (txsch->pfvf_map[schq] != pcifunc)
1096 				continue;
1097 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1098 		}
1099 	}
1100 
1101 	/* Flush SMQs */
1102 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1103 	for (schq = 0; schq < txsch->schq.max; schq++) {
1104 		if (txsch->pfvf_map[schq] != pcifunc)
1105 			continue;
1106 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1107 		/* Do SMQ flush and set enqueue xoff */
1108 		cfg |= BIT_ULL(50) | BIT_ULL(49);
1109 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1110 
1111 		/* Wait for flush to complete */
1112 		err = rvu_poll_reg(rvu, blkaddr,
1113 				   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1114 		if (err) {
1115 			dev_err(rvu->dev,
1116 				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1117 		}
1118 	}
1119 
1120 	/* Now free scheduler queues to free pool */
1121 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1122 		txsch = &nix_hw->txsch[lvl];
1123 		for (schq = 0; schq < txsch->schq.max; schq++) {
1124 			if (txsch->pfvf_map[schq] != pcifunc)
1125 				continue;
1126 			rvu_free_rsrc(&txsch->schq, schq);
1127 			txsch->pfvf_map[schq] = 0;
1128 		}
1129 	}
1130 	mutex_unlock(&rvu->rsrc_lock);
1131 
1132 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1133 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1134 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1135 	if (err)
1136 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1137 
1138 	return 0;
1139 }
1140 
1141 int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
1142 				    struct nix_txsch_free_req *req,
1143 				    struct msg_rsp *rsp)
1144 {
1145 	return nix_txschq_free(rvu, req->hdr.pcifunc);
1146 }
1147 
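/* Sanity check a single TX schq register write requested by a PF/VF:
 * the register offset must be valid for the given scheduler level, the
 * target schq must be owned by the requester, and if the write sets a
 * parent (MDQ->TL4->TL3->TL2->TL1) that parent schq must be owned by
 * the requester as well.
 */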
1148 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1149 				   int lvl, u64 reg, u64 regval)
1150 {
1151 	u64 regbase = reg & 0xFFFF;
1152 	u16 schq, parent;
1153 
1154 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1155 		return false;
1156 
1157 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1158 	/* Check if this schq belongs to this PF/VF or not */
1159 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1160 		return false;
1161 
1162 	parent = (regval >> 16) & 0x1FF;
1163 	/* Validate MDQ's TL4 parent */
1164 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1165 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1166 		return false;
1167 
1168 	/* Validate TL4's TL3 parent */
1169 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1170 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1171 		return false;
1172 
1173 	/* Validate TL3's TL2 parent */
1174 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1175 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1176 		return false;
1177 
1178 	/* Validate TL2's TL1 parent */
1179 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1180 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1181 		return false;
1182 
1183 	return true;
1184 }
1185 
1186 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
1187 				    struct nix_txschq_config *req,
1188 				    struct msg_rsp *rsp)
1189 {
1190 	struct rvu_hwinfo *hw = rvu->hw;
1191 	u16 pcifunc = req->hdr.pcifunc;
1192 	u64 reg, regval, schq_regbase;
1193 	struct nix_txsch *txsch;
1194 	struct nix_hw *nix_hw;
1195 	int blkaddr, idx, err;
1196 	int nixlf;
1197 
1198 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1199 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1200 		return NIX_AF_INVAL_TXSCHQ_CFG;
1201 
1202 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1203 	if (blkaddr < 0)
1204 		return NIX_AF_ERR_AF_LF_INVALID;
1205 
1206 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1207 	if (!nix_hw)
1208 		return -EINVAL;
1209 
1210 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1211 	if (nixlf < 0)
1212 		return NIX_AF_ERR_AF_LF_INVALID;
1213 
1214 	txsch = &nix_hw->txsch[req->lvl];
1215 	for (idx = 0; idx < req->num_regs; idx++) {
1216 		reg = req->reg[idx];
1217 		regval = req->regval[idx];
1218 		schq_regbase = reg & 0xFFFF;
1219 
1220 		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1221 					    txsch->lvl, reg, regval))
1222 			return NIX_AF_INVAL_TXSCHQ_CFG;
1223 
1224 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1225 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1226 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1227 					   pcifunc, 0);
1228 			regval &= ~(0x7FULL << 24);
1229 			regval |= ((u64)nixlf << 24);
1230 		}
1231 
1232 		rvu_write64(rvu, blkaddr, reg, regval);
1233 
1234 		/* Check for SMQ flush, if so, poll for its completion */
1235 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1236 		    (regval & BIT_ULL(49))) {
1237 			err = rvu_poll_reg(rvu, blkaddr,
1238 					   reg, BIT_ULL(49), true);
1239 			if (err)
1240 				return NIX_AF_SMQ_FLUSH_FAILED;
1241 		}
1242 	}
1243 	return 0;
1244 }
1245 
1246 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1247 			   struct nix_vtag_config *req)
1248 {
1249 	u64 regval = req->vtag_size;
1250 
1251 	if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
1252 		return -EINVAL;
1253 
1254 	if (req->rx.capture_vtag)
1255 		regval |= BIT_ULL(5);
1256 	if (req->rx.strip_vtag)
1257 		regval |= BIT_ULL(4);
1258 
1259 	rvu_write64(rvu, blkaddr,
1260 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1261 	return 0;
1262 }
1263 
1264 int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
1265 				  struct nix_vtag_config *req,
1266 				  struct msg_rsp *rsp)
1267 {
1268 	struct rvu_hwinfo *hw = rvu->hw;
1269 	u16 pcifunc = req->hdr.pcifunc;
1270 	int blkaddr, nixlf, err;
1271 
1272 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1273 	if (blkaddr < 0)
1274 		return NIX_AF_ERR_AF_LF_INVALID;
1275 
1276 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1277 	if (nixlf < 0)
1278 		return NIX_AF_ERR_AF_LF_INVALID;
1279 
1280 	if (req->cfg_type) {
1281 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1282 		if (err)
1283 			return NIX_AF_ERR_PARAM;
1284 	} else {
1285 		/* TODO: handle tx vtag configuration */
1286 		return 0;
1287 	}
1288 
1289 	return 0;
1290 }
1291 
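/* Program one multicast/mirror (MCE) entry via the AQ. Each entry
 * forwards bcast pkts to RQ0 of 'pcifunc' and links to the MCE at
 * 'next'; 'eol' marks the end of the replication list.
 */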
1292 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1293 			 u16 pcifunc, int next, bool eol)
1294 {
1295 	struct nix_aq_enq_req aq_req;
1296 	int err;
1297 
1298 	aq_req.hdr.pcifunc = pcifunc;
1299 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1300 	aq_req.op = op;
1301 	aq_req.qidx = mce;
1302 
1303 	/* Forward bcast pkts to RQ0, RSS not needed */
1304 	aq_req.mce.op = 0;
1305 	aq_req.mce.index = 0;
1306 	aq_req.mce.eol = eol;
1307 	aq_req.mce.pf_func = pcifunc;
1308 	aq_req.mce.next = next;
1309 
1310 	/* All fields valid */
1311 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1312 
1313 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1314 	if (err) {
1315 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1316 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1317 		return err;
1318 	}
1319 	return 0;
1320 }
1321 
1322 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1323 			       u16 pcifunc, int idx, bool add)
1324 {
1325 	struct mce *mce, *tail = NULL;
1326 	bool delete = false;
1327 
1328 	/* Scan through the current list */
1329 	hlist_for_each_entry(mce, &mce_list->head, node) {
1330 		/* If already exists, then delete */
1331 		if (mce->pcifunc == pcifunc && !add) {
1332 			delete = true;
1333 			break;
1334 		}
1335 		tail = mce;
1336 	}
1337 
1338 	if (delete) {
1339 		hlist_del(&mce->node);
1340 		kfree(mce);
1341 		mce_list->count--;
1342 		return 0;
1343 	}
1344 
1345 	if (!add)
1346 		return 0;
1347 
1348 	/* Add a new one to the list, at the tail */
1349 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1350 	if (!mce)
1351 		return -ENOMEM;
1352 	mce->idx = idx;
1353 	mce->pcifunc = pcifunc;
1354 	if (!tail)
1355 		hlist_add_head(&mce->node, &mce_list->head);
1356 	else
1357 		hlist_add_behind(&mce->node, &tail->node);
1358 	mce_list->count++;
1359 	return 0;
1360 }
1361 
1362 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1363 {
1364 	int err = 0, idx, next_idx, count;
1365 	struct nix_mce_list *mce_list;
1366 	struct mce *mce, *next_mce;
1367 	struct nix_mcast *mcast;
1368 	struct nix_hw *nix_hw;
1369 	struct rvu_pfvf *pfvf;
1370 	int blkaddr;
1371 
1372 	/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
1373 	if (is_afvf(pcifunc))
1374 		return 0;
1375 
1376 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1377 	if (blkaddr < 0)
1378 		return 0;
1379 
1380 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1381 	if (!nix_hw)
1382 		return 0;
1383 
1384 	mcast = &nix_hw->mcast;
1385 
1386 	/* Get this PF/VF func's MCE index */
1387 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1388 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1389 
1390 	mce_list = &pfvf->bcast_mce_list;
1391 	if (idx >= (pfvf->bcast_mce_idx + mce_list->max)) {
1392 		dev_err(rvu->dev,
1393 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1394 			__func__, idx, mce_list->max,
1395 			pcifunc >> RVU_PFVF_PF_SHIFT);
1396 		return -EINVAL;
1397 	}
1398 
1399 	mutex_lock(&mcast->mce_lock);
1400 
1401 	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1402 	if (err)
1403 		goto end;
1404 
1405 	/* If the list is now empty there is nothing to dump to HW;
1406 	 * disabling the bcast MCAM entry in NPC is not handled here yet. */
1407 	if (!mce_list->count)
1408 		goto end;
1409 	count = mce_list->count;
1410 
1411 	/* Dump the updated list to HW */
1412 	hlist_for_each_entry(mce, &mce_list->head, node) {
1413 		next_idx = 0;
1414 		count--;
1415 		if (count) {
1416 			next_mce = hlist_entry(mce->node.next,
1417 					       struct mce, node);
1418 			next_idx = next_mce->idx;
1419 		}
1420 		/* EOL should be set in last MCE */
1421 		err = nix_setup_mce(rvu, mce->idx,
1422 				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1423 				    next_idx, count ? false : true);
1424 		if (err)
1425 			goto end;
1426 	}
1427 
1428 end:
1429 	mutex_unlock(&mcast->mce_lock);
1430 	return err;
1431 }
1432 
1433 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1434 {
1435 	struct nix_mcast *mcast = &nix_hw->mcast;
1436 	int err, pf, numvfs, idx;
1437 	struct rvu_pfvf *pfvf;
1438 	u16 pcifunc;
1439 	u64 cfg;
1440 
1441 	/* Skip PF0 (i.e AF) */
1442 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1443 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1444 		/* If PF is not enabled, nothing to do */
1445 		if (!((cfg >> 20) & 0x01))
1446 			continue;
1447 		/* Get numVFs attached to this PF */
1448 		numvfs = (cfg >> 12) & 0xFF;
1449 
1450 		pfvf = &rvu->pf[pf];
1451 		/* Save the start MCE */
1452 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1453 
1454 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1455 
1456 		for (idx = 0; idx < (numvfs + 1); idx++) {
1457 			/* idx-0 is for PF, followed by VFs */
1458 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1459 			pcifunc |= idx;
1460 			/* Add dummy entries now, so that we don't have to check
1461 			 * whether AQ_OP should be INIT or WRITE later on.
1462 			 * These will be updated when a NIXLF is attached to or
1463 			 * detached from these PF/VFs.
1464 			 */
1465 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1466 					    NIX_AQ_INSTOP_INIT,
1467 					    pcifunc, 0, true);
1468 			if (err)
1469 				return err;
1470 		}
1471 	}
1472 	return 0;
1473 }
1474 
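/* One-time multicast setup for this NIX block: allocate the MCE context
 * table and replication buffers (sized from MC_TBL_SIZE and MC_BUF_CNT
 * as noted above), program their base IOVAs and limits, reserve a pkind
 * for internal RX multicast/mirror replay and then build the per-PF
 * broadcast MCE lists.
 */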
1475 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1476 {
1477 	struct nix_mcast *mcast = &nix_hw->mcast;
1478 	struct rvu_hwinfo *hw = rvu->hw;
1479 	int err, size;
1480 
1481 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1482 	size = (1ULL << size);
1483 
1484 	/* Alloc memory for multicast/mirror replication entries */
1485 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1486 			 (256UL << MC_TBL_SIZE), size);
1487 	if (err)
1488 		return -ENOMEM;
1489 
1490 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1491 		    (u64)mcast->mce_ctx->iova);
1492 
1493 	/* Set max list length equal to max no of VFs per PF + PF itself */
1494 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1495 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1496 
1497 	/* Alloc memory for multicast replication buffers */
1498 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1499 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1500 			 (8UL << MC_BUF_CNT), size);
1501 	if (err)
1502 		return -ENOMEM;
1503 
1504 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1505 		    (u64)mcast->mcast_buf->iova);
1506 
1507 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
1508 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1509 
1510 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1511 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
1512 		    BIT_ULL(20) | MC_BUF_CNT);
1513 
1514 	mutex_init(&mcast->mce_lock);
1515 
1516 	return nix_setup_bcast_tables(rvu, nix_hw);
1517 }
1518 
1519 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1520 {
1521 	struct nix_txsch *txsch;
1522 	u64 cfg, reg;
1523 	int err, lvl;
1524 
1525 	/* Get scheduler queue count of each type and alloc
1526 	 * bitmap for each for alloc/free/attach operations.
1527 	 */
1528 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1529 		txsch = &nix_hw->txsch[lvl];
1530 		txsch->lvl = lvl;
1531 		switch (lvl) {
1532 		case NIX_TXSCH_LVL_SMQ:
1533 			reg = NIX_AF_MDQ_CONST;
1534 			break;
1535 		case NIX_TXSCH_LVL_TL4:
1536 			reg = NIX_AF_TL4_CONST;
1537 			break;
1538 		case NIX_TXSCH_LVL_TL3:
1539 			reg = NIX_AF_TL3_CONST;
1540 			break;
1541 		case NIX_TXSCH_LVL_TL2:
1542 			reg = NIX_AF_TL2_CONST;
1543 			break;
1544 		case NIX_TXSCH_LVL_TL1:
1545 			reg = NIX_AF_TL1_CONST;
1546 			break;
1547 		}
1548 		cfg = rvu_read64(rvu, blkaddr, reg);
1549 		txsch->schq.max = cfg & 0xFFFF;
1550 		err = rvu_alloc_bitmap(&txsch->schq);
1551 		if (err)
1552 			return err;
1553 
1554 		/* Allocate memory for scheduler queues to
1555 		 * PF/VF pcifunc mapping info.
1556 		 */
1557 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1558 					       sizeof(u16), GFP_KERNEL);
1559 		if (!txsch->pfvf_map)
1560 			return -ENOMEM;
1561 	}
1562 	return 0;
1563 }
1564 
1565 int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
1566 				   struct msg_rsp *rsp)
1567 {
1568 	struct rvu_hwinfo *hw = rvu->hw;
1569 	u16 pcifunc = req->hdr.pcifunc;
1570 	int i, nixlf, blkaddr;
1571 	u64 stats;
1572 
1573 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1574 	if (blkaddr < 0)
1575 		return NIX_AF_ERR_AF_LF_INVALID;
1576 
1577 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1578 	if (nixlf < 0)
1579 		return NIX_AF_ERR_AF_LF_INVALID;
1580 
1581 	/* Get stats count supported by HW */
1582 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1583 
1584 	/* Reset tx stats */
1585 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1586 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1587 
1588 	/* Reset rx stats */
1589 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1590 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1591 
1592 	return 0;
1593 }
1594 
1595 /* Returns the ALG index to be set into NPC_RX_ACTION */
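/* The PORT (channel) bit in the requested config is ignored when
 * selecting a profile (it is masked off before matching); any
 * unrecognized combination falls back to the channel-only
 * FLOW_KEY_ALG_PORT profile.
 */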
1596 static int get_flowkey_alg_idx(u32 flow_cfg)
1597 {
1598 	u32 ip_cfg;
1599 
1600 	flow_cfg &= ~FLOW_KEY_TYPE_PORT;
1601 	ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
1602 	if (flow_cfg == ip_cfg)
1603 		return FLOW_KEY_ALG_IP;
1604 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
1605 		return FLOW_KEY_ALG_TCP;
1606 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
1607 		return FLOW_KEY_ALG_UDP;
1608 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
1609 		return FLOW_KEY_ALG_SCTP;
1610 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
1611 		return FLOW_KEY_ALG_TCP_UDP;
1612 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
1613 		return FLOW_KEY_ALG_TCP_SCTP;
1614 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
1615 		return FLOW_KEY_ALG_UDP_SCTP;
1616 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
1617 			      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
1618 		return FLOW_KEY_ALG_TCP_UDP_SCTP;
1619 
1620 	return FLOW_KEY_ALG_PORT;
1621 }
1622 
1623 int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
1624 					 struct nix_rss_flowkey_cfg *req,
1625 					 struct msg_rsp *rsp)
1626 {
1627 	struct rvu_hwinfo *hw = rvu->hw;
1628 	u16 pcifunc = req->hdr.pcifunc;
1629 	int alg_idx, nixlf, blkaddr;
1630 
1631 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1632 	if (blkaddr < 0)
1633 		return NIX_AF_ERR_AF_LF_INVALID;
1634 
1635 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1636 	if (nixlf < 0)
1637 		return NIX_AF_ERR_AF_LF_INVALID;
1638 
1639 	alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
1640 
1641 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
1642 				       alg_idx, req->mcam_index);
1643 	return 0;
1644 }
1645 
1646 static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
1647 {
1648 	struct nix_rx_flowkey_alg *field = NULL;
1649 	int idx, key_type;
1650 
1651 	if (!alg)
1652 		return;
1653 
1654 	/* FIELD0: IPv4
1655 	 * FIELD1: IPv6
1656 	 * FIELD2: TCP/UDP/SCTP/ALL
1657 	 * FIELD3: Unused
1658 	 * FIELD4: Unused
1659 	 *
1660 	 * Each of the 32 possible flow key algorithm definitions should
1661 	 * fall into the above incremental config (except ALG0), otherwise a
1662 	 * single NPC MCAM entry is not sufficient to support RSS.
1663 	 *
1664 	 * If a different definition or combination is needed, then the NPC
1665 	 * MCAM has to be programmed to filter such pkts and its action
1666 	 * should point to this definition to calculate the flowtag or hash.
1667 	 */
1668 	for (idx = 0; idx < 32; idx++) {
1669 		key_type = flow_cfg & BIT_ULL(idx);
1670 		if (!key_type)
1671 			continue;
1672 		switch (key_type) {
1673 		case FLOW_KEY_TYPE_PORT:
1674 			field = &alg[0];
1675 			field->sel_chan = true;
1676 			/* This should be set to 1, when SEL_CHAN is set */
1677 			field->bytesm1 = 1;
1678 			break;
1679 		case FLOW_KEY_TYPE_IPV4:
1680 			field = &alg[0];
1681 			field->lid = NPC_LID_LC;
1682 			field->ltype_match = NPC_LT_LC_IP;
1683 			field->hdr_offset = 12; /* SIP offset */
1684 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
1685 			field->ltype_mask = 0xF; /* Match only IPv4 */
1686 			break;
1687 		case FLOW_KEY_TYPE_IPV6:
1688 			field = &alg[1];
1689 			field->lid = NPC_LID_LC;
1690 			field->ltype_match = NPC_LT_LC_IP6;
1691 			field->hdr_offset = 8; /* SIP offset */
1692 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
1693 			field->ltype_mask = 0xF; /* Match only IPv6 */
1694 			break;
1695 		case FLOW_KEY_TYPE_TCP:
1696 		case FLOW_KEY_TYPE_UDP:
1697 		case FLOW_KEY_TYPE_SCTP:
1698 			field = &alg[2];
1699 			field->lid = NPC_LID_LD;
1700 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
1701 			if (key_type == FLOW_KEY_TYPE_TCP)
1702 				field->ltype_match |= NPC_LT_LD_TCP;
1703 			else if (key_type == FLOW_KEY_TYPE_UDP)
1704 				field->ltype_match |= NPC_LT_LD_UDP;
1705 			else if (key_type == FLOW_KEY_TYPE_SCTP)
1706 				field->ltype_match |= NPC_LT_LD_SCTP;
1707 			field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
1708 			field->ltype_mask = ~field->ltype_match;
1709 			break;
1710 		}
1711 		if (field)
1712 			field->ena = 1;
1713 		field = NULL;
1714 	}
1715 }
1716 
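/* Pre-program the fixed set of RSS flow key extraction profiles
 * (FLOW_KEY_ALG_*) into the NIX_AF_RX_FLOW_KEY_ALG()_FIELD() registers;
 * get_flowkey_alg_idx() above maps a PF/VF's requested flowkey config
 * onto one of these profiles.
 */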
1717 static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
1718 {
1719 #define FIELDS_PER_ALG	5
1720 	u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
1721 	u32 flowkey_cfg, minkey_cfg;
1722 	int alg, fid;
1723 
1724 	memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
1725 
1726 	/* Only incoming channel number */
1727 	flowkey_cfg = FLOW_KEY_TYPE_PORT;
1728 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
1729 
1730 	/* For an incoming pkt, if none of the fields match then the flowkey
1731 	 * will be zero and hence the generated tag will also be zero.
1732 	 * The RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
1733 	 * then be used to queue the packet.
1734 	 */
1735 
1736 	/* IPv4/IPv6 SIP/DIPs */
1737 	flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
1738 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
1739 
1740 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1741 	minkey_cfg = flowkey_cfg;
1742 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
1743 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
1744 
1745 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1746 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
1747 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
1748 
1749 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1750 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
1751 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
1752 
1753 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
1754 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
1755 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
1756 
1757 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1758 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
1759 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
1760 
1761 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1762 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
1763 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
1764 
1765 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1766 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
1767 		      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
1768 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
1769 			   flowkey_cfg);
1770 
1771 	for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
1772 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
1773 			rvu_write64(rvu, blkaddr,
1774 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
1775 				    field[alg][fid]);
1776 	}
1777 }
1778 
1779 int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
1780 				      struct nix_set_mac_addr *req,
1781 				      struct msg_rsp *rsp)
1782 {
1783 	struct rvu_hwinfo *hw = rvu->hw;
1784 	u16 pcifunc = req->hdr.pcifunc;
1785 	struct rvu_pfvf *pfvf;
1786 	int blkaddr, nixlf;
1787 
1788 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1789 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1790 	if (!pfvf->nixlf || blkaddr < 0)
1791 		return NIX_AF_ERR_AF_LF_INVALID;
1792 
1793 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1794 	if (nixlf < 0)
1795 		return NIX_AF_ERR_AF_LF_INVALID;
1796 
1797 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1798 
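	/* Reinstall the unicast MCAM entry so Rx steering uses the new MAC */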
1799 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
1800 				    pfvf->rx_chan_base, req->mac_addr);
1801 
1802 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
1803 
1804 	return 0;
1805 }
1806 
1807 int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
1808 				     struct msg_rsp *rsp)
1809 {
1810 	bool allmulti = false, disable_promisc = false;
1811 	struct rvu_hwinfo *hw = rvu->hw;
1812 	u16 pcifunc = req->hdr.pcifunc;
1813 	struct rvu_pfvf *pfvf;
1814 	int blkaddr, nixlf;
1815 
1816 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1817 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1818 	if (!pfvf->nixlf || blkaddr < 0)
1819 		return NIX_AF_ERR_AF_LF_INVALID;
1820 
1821 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1822 	if (nixlf < 0)
1823 		return NIX_AF_ERR_AF_LF_INVALID;
1824 
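	/* PROMISC takes precedence over ALLMULTI; if neither mode is
	 * requested, the existing promisc MCAM entry is disabled below.
	 */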
1825 	if (req->mode & NIX_RX_MODE_PROMISC)
1826 		allmulti = false;
1827 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
1828 		allmulti = true;
1829 	else
1830 		disable_promisc = true;
1831 
1832 	if (disable_promisc)
1833 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
1834 	else
1835 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
1836 					      pfvf->rx_chan_base, allmulti);
1837 
1838 	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
1839 
1840 	return 0;
1841 }
1842 
1843 static void nix_find_link_frs(struct rvu *rvu,
1844 			      struct nix_frs_cfg *req, u16 pcifunc)
1845 {
1846 	int pf = rvu_get_pf(pcifunc);
1847 	struct rvu_pfvf *pfvf;
1848 	int maxlen, minlen;
1849 	int numvfs, hwvf;
1850 	int vf;
1851 
1852 	/* Update with requester's min/max lengths */
1853 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1854 	pfvf->maxlen = req->maxlen;
1855 	if (req->update_minlen)
1856 		pfvf->minlen = req->minlen;
1857 
1858 	maxlen = req->maxlen;
1859 	minlen = req->update_minlen ? req->minlen : 0;
1860 
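	/* Scan this PF and all of its VFs so that the link ends up
	 * configured with the largest maxlen and, if requested, the
	 * smallest non-zero minlen currently in use.
	 */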
1861 	/* Get this PF's numVFs and starting hwvf */
1862 	rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
1863 
1864 	/* For each VF, compare requested max/minlen */
1865 	for (vf = 0; vf < numvfs; vf++) {
1866 		pfvf = &rvu->hwvf[hwvf + vf];
1867 		if (pfvf->maxlen > maxlen)
1868 			maxlen = pfvf->maxlen;
1869 		if (req->update_minlen &&
1870 		    pfvf->minlen && pfvf->minlen < minlen)
1871 			minlen = pfvf->minlen;
1872 	}
1873 
1874 	/* Compare requested max/minlen with PF's max/minlen */
1875 	pfvf = &rvu->pf[pf];
1876 	if (pfvf->maxlen > maxlen)
1877 		maxlen = pfvf->maxlen;
1878 	if (req->update_minlen &&
1879 	    pfvf->minlen && pfvf->minlen < minlen)
1880 		minlen = pfvf->minlen;
1881 
1882 	/* Update the request with the max/min across the PF and its VFs */
1883 	req->maxlen = maxlen;
1884 	if (req->update_minlen)
1885 		req->minlen = minlen;
1886 }
1887 
1888 int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
1889 				    struct msg_rsp *rsp)
1890 {
1891 	struct rvu_hwinfo *hw = rvu->hw;
1892 	u16 pcifunc = req->hdr.pcifunc;
1893 	int pf = rvu_get_pf(pcifunc);
1894 	int blkaddr, schq, link = -1;
1895 	struct nix_txsch *txsch;
1896 	u64 cfg, lmac_fifo_len;
1897 	struct nix_hw *nix_hw;
1898 	u8 cgx = 0, lmac = 0;
1899 
1900 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1901 	if (blkaddr < 0)
1902 		return NIX_AF_ERR_AF_LF_INVALID;
1903 
1904 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1905 	if (!nix_hw)
1906 		return -EINVAL;
1907 
1908 	if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
1909 		return NIX_AF_ERR_FRS_INVALID;
1910 
1911 	if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
1912 		return NIX_AF_ERR_FRS_INVALID;
1913 
1914 	/* Check if the requester wants to update SMQs */
1915 	if (!req->update_smq)
1916 		goto rx_frscfg;
1917 
1918 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
1919 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1920 	mutex_lock(&rvu->rsrc_lock);
1921 	for (schq = 0; schq < txsch->schq.max; schq++) {
1922 		if (txsch->pfvf_map[schq] != pcifunc)
1923 			continue;
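		/* Per the masks used below, SMQ_CFG holds MINLEN in
		 * bits [6:0] and MAXLEN in bits [23:8].
		 */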
1924 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1925 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
1926 		if (req->update_minlen)
1927 			cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
1928 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1929 	}
1930 	mutex_unlock(&rvu->rsrc_lock);
1931 
1932 rx_frscfg:
1933 	/* Check if config is for SDP link */
1934 	if (req->sdp_link) {
1935 		if (!hw->sdp_links)
1936 			return NIX_AF_ERR_RX_LINK_INVALID;
1937 		link = hw->cgx_links + hw->lbk_links;
1938 		goto linkcfg;
1939 	}
1940 
1941 	/* Check if the request is from CGX mapped RVU PF */
1942 	if (is_pf_cgxmapped(rvu, pf)) {
1943 		/* Get CGX and LMAC to which this PF is mapped and find link */
1944 		rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
1945 		link = (cgx * hw->lmac_per_cgx) + lmac;
1946 	} else if (pf == 0) {
1947 		/* For VFs of PF0, ingress is the LBK port, so configure the LBK link */
1948 		link = hw->cgx_links;
1949 	}
1950 
1951 	if (link < 0)
1952 		return NIX_AF_ERR_RX_LINK_INVALID;
1953 
1954 	nix_find_link_frs(rvu, req, pcifunc);
1955 
1956 linkcfg:
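	/* Per the masks used below, RX_LINKX_CFG holds MINLEN in
	 * bits [15:0] and MAXLEN in bits [31:16].
	 */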
1957 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
1958 	cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
1959 	if (req->update_minlen)
1960 		cfg = (cfg & ~0xFFFFULL) | req->minlen;
1961 	rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
1962 
1963 	if (req->sdp_link || pf == 0)
1964 		return 0;
1965 
1966 	/* Update transmit credits for CGX links */
1967 	lmac_fifo_len =
1968 		CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
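	/* Per the computation below, credits are in units of 16 bytes and
	 * occupy bits [31:12] of the NORM/EXPR_CREDIT registers; one
	 * max-sized frame worth of LMAC FIFO space is held in reserve.
	 */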
1969 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
1970 	cfg &= ~(0xFFFFFULL << 12);
1971 	cfg |=  ((lmac_fifo_len - req->maxlen) / 16) << 12;
1972 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
1973 	rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
1974 
1975 	return 0;
1976 }
1977 
1978 int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
1979 				      struct msg_rsp *rsp)
1980 {
1981 	struct npc_mcam_alloc_entry_req alloc_req = { };
1982 	struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
1983 	struct npc_mcam_free_entry_req free_req = { };
1984 	u16 pcifunc = req->hdr.pcifunc;
1985 	int blkaddr, nixlf, err;
1986 	struct rvu_pfvf *pfvf;
1987 
1988 	/* LBK VFs do not have a separate MCAM UCAST entry, hence
1989 	 * skip allocating a rxvlan entry for them.
1990 	 */
1991 	if (is_afvf(pcifunc))
1992 		return 0;
1993 
1994 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1995 	if (pfvf->rxvlan)
1996 		return 0;
1997 
1998 	/* alloc new mcam entry */
1999 	alloc_req.hdr.pcifunc = pcifunc;
2000 	alloc_req.count = 1;
2001 
2002 	err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
2003 						    &alloc_rsp);
2004 	if (err)
2005 		return err;
2006 
2007 	/* update entry to enable rxvlan offload */
2008 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2009 	if (blkaddr < 0) {
2010 		err = NIX_AF_ERR_AF_LF_INVALID;
2011 		goto free_entry;
2012 	}
2013 
2014 	nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
2015 	if (nixlf < 0) {
2016 		err = NIX_AF_ERR_AF_LF_INVALID;
2017 		goto free_entry;
2018 	}
2019 
2020 	pfvf->rxvlan_index = alloc_rsp.entry_list[0];
2021 	/* 'rxvlan' only indicates that rxvlan_index holds a valid MCAM entry */
2022 	pfvf->rxvlan = true;
2023 
2024 	err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
2025 	if (err)
2026 		goto free_entry;
2027 
2028 	return 0;
2029 free_entry:
2030 	free_req.hdr.pcifunc = pcifunc;
2031 	free_req.entry = alloc_rsp.entry_list[0];
2032 	rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
2033 	pfvf->rxvlan = false;
2034 	return err;
2035 }
2036 
2037 static void nix_link_config(struct rvu *rvu, int blkaddr)
2038 {
2039 	struct rvu_hwinfo *hw = rvu->hw;
2040 	int cgx, lmac_cnt, slink, link;
2041 	u64 tx_credits;
2042 
2043 	/* Set default min/max packet lengths allowed on NIX Rx links.
2044 	 *
2045 	 * With the HW reset minlen value of 60 bytes, HW treats ARP pkts
2046 	 * as undersized and reports them to SW as error pkts; hence
2047 	 * minlen is set to 40 bytes here.
2048 	 */
2049 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
2050 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2051 			    NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2052 	}
2053 
2054 	if (hw->sdp_links) {
2055 		link = hw->cgx_links + hw->lbk_links;
2056 		rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
2057 			    SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
2058 	}
2059 
2060 	/* Set credits for Tx links assuming the max allowed packet length.
2061 	 * These are reconfigured later based on the MTU set for each PF/VF.
2062 	 */
2063 	for (cgx = 0; cgx < hw->cgx; cgx++) {
2064 		lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
2065 		tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
2066 		/* Enable credits and set credit pkt count to max allowed */
2067 		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2068 		slink = cgx * hw->lmac_per_cgx;
2069 		for (link = slink; link < (slink + lmac_cnt); link++) {
2070 			rvu_write64(rvu, blkaddr,
2071 				    NIX_AF_TX_LINKX_NORM_CREDIT(link),
2072 				    tx_credits);
2073 			rvu_write64(rvu, blkaddr,
2074 				    NIX_AF_TX_LINKX_EXPR_CREDIT(link),
2075 				    tx_credits);
2076 		}
2077 	}
2078 
2079 	/* Set Tx credits for LBK link */
2080 	slink = hw->cgx_links;
2081 	for (link = slink; link < (slink + hw->lbk_links); link++) {
2082 		tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
2083 		/* Enable credits and set credit pkt count to max allowed */
2084 		tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
2085 		rvu_write64(rvu, blkaddr,
2086 			    NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
2087 		rvu_write64(rvu, blkaddr,
2088 			    NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
2089 	}
2090 }
2091 
2092 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
2093 {
2094 	int idx, err;
2095 	u64 status;
2096 
2097 	/* Start X2P bus calibration */
2098 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2099 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
2100 	/* Wait for calibration to complete */
2101 	err = rvu_poll_reg(rvu, blkaddr,
2102 			   NIX_AF_STATUS, BIT_ULL(10), false);
2103 	if (err) {
2104 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
2105 		return err;
2106 	}
2107 
2108 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
2109 	/* Check if CGX devices are ready */
2110 	for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
2111 		/* Skip when cgx port is not available */
2112 		if (!rvu_cgx_pdata(idx, rvu) ||
2113 		    (status & (BIT_ULL(16 + idx))))
2114 			continue;
2115 		dev_err(rvu->dev,
2116 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
2117 		err = -EBUSY;
2118 	}
2119 
2120 	/* Check if LBK is ready */
2121 	if (!(status & BIT_ULL(19))) {
2122 		dev_err(rvu->dev,
2123 			"LBK didn't respond to NIX X2P calibration\n");
2124 		err = -EBUSY;
2125 	}
2126 
2127 	/* Clear 'calibrate_x2p' bit */
2128 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2129 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
2130 	if (err || (status & 0x3FFULL))
2131 		dev_err(rvu->dev,
2132 			"NIX X2P calibration failed, status 0x%llx\n", status);
2133 	if (err)
2134 		return err;
2135 	return 0;
2136 }
2137 
2138 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
2139 {
2140 	u64 cfg;
2141 	int err;
2142 
2143 	/* Set admin queue endianness */
2144 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
2145 #ifdef __BIG_ENDIAN
2146 	cfg |= BIT_ULL(8);
2147 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2148 #else
2149 	cfg &= ~BIT_ULL(8);
2150 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
2151 #endif
2152 
2153 	/* Do not bypass NDC cache */
2154 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
2155 	cfg &= ~0x3FFEULL;
2156 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
2157 
2158 	/* The result structure can be followed by an RQ/SQ/CQ context at
2159 	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending
2160 	 * on the operation type. Alloc sufficient result memory for all operations.
2161 	 */
2162 	err = rvu_aq_alloc(rvu, &block->aq,
2163 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
2164 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
2165 	if (err)
2166 		return err;
2167 
2168 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
2169 	rvu_write64(rvu, block->addr,
2170 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
2171 	return 0;
2172 }
2173 
2174 int rvu_nix_init(struct rvu *rvu)
2175 {
2176 	struct rvu_hwinfo *hw = rvu->hw;
2177 	struct rvu_block *block;
2178 	int blkaddr, err;
2179 	u64 cfg;
2180 
2181 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2182 	if (blkaddr < 0)
2183 		return 0;
2184 	block = &hw->block[blkaddr];
2185 
2186 	/* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
2187 	 * internal state when conditional clocks are turned off.
2188 	 * Hence enable them.
2189 	 */
2190 	if (is_rvu_9xxx_A0(rvu))
2191 		rvu_write64(rvu, blkaddr, NIX_AF_CFG,
2192 			    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
2193 
2194 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
2195 	err = nix_calibrate_x2p(rvu, blkaddr);
2196 	if (err)
2197 		return err;
2198 
2199 	/* Set num of links of each type */
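	/* Per the field extraction below, NIX_AF_CONST[15:12] holds the
	 * number of CGX blocks and [11:8] the LMAC count per CGX; a single
	 * LBK link and a single SDP link are used.
	 */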
2200 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
2201 	hw->cgx = (cfg >> 12) & 0xF;
2202 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
2203 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
2204 	hw->lbk_links = 1;
2205 	hw->sdp_links = 1;
2206 
2207 	/* Initialize admin queue */
2208 	err = nix_aq_init(rvu, block);
2209 	if (err)
2210 		return err;
2211 
2212 	/* Restore CINT timer delay to HW reset values */
2213 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
2214 
2215 	/* Configure segmentation offload formats */
2216 	nix_setup_lso(rvu, blkaddr);
2217 
2218 	if (blkaddr == BLKADDR_NIX0) {
2219 		hw->nix0 = devm_kzalloc(rvu->dev,
2220 					sizeof(struct nix_hw), GFP_KERNEL);
2221 		if (!hw->nix0)
2222 			return -ENOMEM;
2223 
2224 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
2225 		if (err)
2226 			return err;
2227 
2228 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
2229 		if (err)
2230 			return err;
2231 
2232 		/* Configure NPC layer info for outer L2, IP, TCP and UDP.
2233 		 * This helps the HW protocol checker identify headers
2234 		 * and validate their lengths and checksums.
2235 		 */
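		/* Each NIX_AF_RX_DEF_O* value below encodes
		 * (layer ID << 8) | (layer type << 4) | layer type mask.
		 */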
2236 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
2237 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
2238 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
2239 			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
2240 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
2241 			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
2242 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
2243 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
2244 
2245 		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
2246 
2247 		/* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
2248 		nix_link_config(rvu, blkaddr);
2249 	}
2250 	return 0;
2251 }
2252 
2253 void rvu_nix_freemem(struct rvu *rvu)
2254 {
2255 	struct rvu_hwinfo *hw = rvu->hw;
2256 	struct rvu_block *block;
2257 	struct nix_txsch *txsch;
2258 	struct nix_mcast *mcast;
2259 	struct nix_hw *nix_hw;
2260 	int blkaddr, lvl;
2261 
2262 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
2263 	if (blkaddr < 0)
2264 		return;
2265 
2266 	block = &hw->block[blkaddr];
2267 	rvu_aq_free(rvu, block->aq);
2268 
2269 	if (blkaddr == BLKADDR_NIX0) {
2270 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
2271 		if (!nix_hw)
2272 			return;
2273 
2274 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
2275 			txsch = &nix_hw->txsch[lvl];
2276 			kfree(txsch->schq.bmap);
2277 		}
2278 
2279 		mcast = &nix_hw->mcast;
2280 		qmem_free(rvu->dev, mcast->mce_ctx);
2281 		qmem_free(rvu->dev, mcast->mcast_buf);
2282 		mutex_destroy(&mcast->mce_lock);
2283 	}
2284 }
2285 
2286 static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
2287 {
2288 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
2289 	struct rvu_hwinfo *hw = rvu->hw;
2290 	int blkaddr;
2291 
2292 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
2293 	if (!pfvf->nixlf || blkaddr < 0)
2294 		return NIX_AF_ERR_AF_LF_INVALID;
2295 
2296 	*nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
2297 	if (*nixlf < 0)
2298 		return NIX_AF_ERR_AF_LF_INVALID;
2299 
2300 	return 0;
2301 }
2302 
2303 int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
2304 				     struct msg_rsp *rsp)
2305 {
2306 	u16 pcifunc = req->hdr.pcifunc;
2307 	int nixlf, err;
2308 
2309 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
2310 	if (err)
2311 		return err;
2312 
2313 	rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
2314 	return 0;
2315 }
2316 
2317 int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
2318 				    struct msg_rsp *rsp)
2319 {
2320 	u16 pcifunc = req->hdr.pcifunc;
2321 	int nixlf, err;
2322 
2323 	err = nix_get_nixlf(rvu, pcifunc, &nixlf);
2324 	if (err)
2325 		return err;
2326 
2327 	rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
2328 	return 0;
2329 }
2330 
2331 void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
2332 {
2333 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
2334 	struct hwctx_disable_req ctx_req;
2335 	int err;
2336 
2337 	ctx_req.hdr.pcifunc = pcifunc;
2338 
2339 	/* Clean up NPC MCAM entries and free the Tx scheduler queues in use */
2340 	nix_interface_deinit(rvu, pcifunc, nixlf);
2341 	nix_rx_sync(rvu, blkaddr);
2342 	nix_txschq_free(rvu, pcifunc);
2343 
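	/* Disable any SQ/RQ/CQ HW contexts still enabled for this LF */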
2344 	if (pfvf->sq_ctx) {
2345 		ctx_req.ctype = NIX_AQ_CTYPE_SQ;
2346 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
2347 		if (err)
2348 			dev_err(rvu->dev, "SQ ctx disable failed\n");
2349 	}
2350 
2351 	if (pfvf->rq_ctx) {
2352 		ctx_req.ctype = NIX_AQ_CTYPE_RQ;
2353 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
2354 		if (err)
2355 			dev_err(rvu->dev, "RQ ctx disable failed\n");
2356 	}
2357 
2358 	if (pfvf->cq_ctx) {
2359 		ctx_req.ctype = NIX_AQ_CTYPE_CQ;
2360 		err = nix_lf_hwctx_disable(rvu, &ctx_req);
2361 		if (err)
2362 			dev_err(rvu->dev, "CQ ctx disable failed\n");
2363 	}
2364 
2365 	nix_ctx_free(rvu, pfvf);
2366 }
2367