1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Admin Function driver
3  *
4  * Copyright (C) 2018 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 
14 #include "rvu_struct.h"
15 #include "rvu_reg.h"
16 #include "rvu.h"
17 #include "npc.h"
18 #include "cgx.h"
19 
20 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
21 
22 enum mc_tbl_sz {
23 	MC_TBL_SZ_256,
24 	MC_TBL_SZ_512,
25 	MC_TBL_SZ_1K,
26 	MC_TBL_SZ_2K,
27 	MC_TBL_SZ_4K,
28 	MC_TBL_SZ_8K,
29 	MC_TBL_SZ_16K,
30 	MC_TBL_SZ_32K,
31 	MC_TBL_SZ_64K,
32 };
33 
34 enum mc_buf_cnt {
35 	MC_BUF_CNT_8,
36 	MC_BUF_CNT_16,
37 	MC_BUF_CNT_32,
38 	MC_BUF_CNT_64,
39 	MC_BUF_CNT_128,
40 	MC_BUF_CNT_256,
41 	MC_BUF_CNT_512,
42 	MC_BUF_CNT_1024,
43 	MC_BUF_CNT_2048,
44 };
45 
46 /* For now, consider only the MC resources needed for broadcast
47  * pkt replication, i.e. 256 HWVFs + 12 PFs.
48  */
49 #define MC_TBL_SIZE	MC_TBL_SZ_512
50 #define MC_BUF_CNT	MC_BUF_CNT_128
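
/* Note: these enums are register encodings, not raw counts. As programmed
 * in nix_setup_mcast(), the MCE table holds (256 << MC_TBL_SIZE) entries
 * (MC_TBL_SZ_512 -> 512) and (8 << MC_BUF_CNT) replication buffers are
 * allocated (MC_BUF_CNT_128 -> 128).
 */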
51 
52 struct mce {
53 	struct hlist_node	node;
54 	u16			idx;
55 	u16			pcifunc;
56 };
57 
58 int rvu_get_nixlf_count(struct rvu *rvu)
59 {
60 	struct rvu_block *block;
61 	int blkaddr;
62 
63 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
64 	if (blkaddr < 0)
65 		return 0;
66 	block = &rvu->hw->block[blkaddr];
67 	return block->lf.max;
68 }
69 
70 static void nix_mce_list_init(struct nix_mce_list *list, int max)
71 {
72 	INIT_HLIST_HEAD(&list->head);
73 	list->count = 0;
74 	list->max = max;
75 }
76 
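/* Simple bump allocator over the MCE context: entries are carved out once
 * at init time (see nix_setup_bcast_tables()) and are never freed back.
 */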
77 static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
78 {
79 	int idx;
80 
81 	if (!mcast)
82 		return 0;
83 
84 	idx = mcast->next_free_mce;
85 	mcast->next_free_mce += count;
86 	return idx;
87 }
88 
89 static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
90 {
91 	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
92 		return hw->nix0;
93 
94 	return NULL;
95 }
96 
97 static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
98 			    int lvl, u16 pcifunc, u16 schq)
99 {
100 	struct nix_txsch *txsch;
101 	struct nix_hw *nix_hw;
102 
103 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
104 	if (!nix_hw)
105 		return false;
106 
107 	txsch = &nix_hw->txsch[lvl];
108 	/* Check out of bounds */
109 	if (schq >= txsch->schq.max)
110 		return false;
111 
112 	spin_lock(&rvu->rsrc_lock);
113 	if (txsch->pfvf_map[schq] != pcifunc) {
114 		spin_unlock(&rvu->rsrc_lock);
115 		return false;
116 	}
117 	spin_unlock(&rvu->rsrc_lock);
118 	return true;
119 }
120 
121 static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
122 {
123 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
124 	u8 cgx_id, lmac_id;
125 	int pkind, pf;
126 	int err;
127 
128 	pf = rvu_get_pf(pcifunc);
129 	if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
130 		return 0;
131 
132 	switch (type) {
133 	case NIX_INTF_TYPE_CGX:
134 		pfvf->cgx_lmac = rvu->pf2cgxlmac_map[pf];
135 		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
136 
137 		pkind = rvu_npc_get_pkind(rvu, pf);
138 		if (pkind < 0) {
139 			dev_err(rvu->dev,
140 				"PF_Func 0x%x: Invalid pkind\n", pcifunc);
141 			return -EINVAL;
142 		}
143 		pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
144 		pfvf->tx_chan_base = pfvf->rx_chan_base;
145 		pfvf->rx_chan_cnt = 1;
146 		pfvf->tx_chan_cnt = 1;
147 		cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
148 		rvu_npc_set_pkind(rvu, pkind, pfvf);
149 		break;
150 	case NIX_INTF_TYPE_LBK:
151 		break;
152 	}
153 
154 	/* Add a UCAST forwarding rule in MCAM with this NIXLF attached
155 	 * RVU PF/VF's MAC address.
156 	 */
157 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
158 				    pfvf->rx_chan_base, pfvf->mac_addr);
159 
160 	/* Add this PF_FUNC to bcast pkt replication list */
161 	err = nix_update_bcast_mce_list(rvu, pcifunc, true);
162 	if (err) {
163 		dev_err(rvu->dev,
164 			"Bcast list, failed to enable PF_FUNC 0x%x\n",
165 			pcifunc);
166 		return err;
167 	}
168 
169 	rvu_npc_install_bcast_match_entry(rvu, pcifunc,
170 					  nixlf, pfvf->rx_chan_base);
171 
172 	return 0;
173 }
174 
175 static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
176 {
177 	int err;
178 
179 	/* Remove this PF_FUNC from bcast pkt replication list */
180 	err = nix_update_bcast_mce_list(rvu, pcifunc, false);
181 	if (err) {
182 		dev_err(rvu->dev,
183 			"Bcast list, failed to disable PF_FUNC 0x%x\n",
184 			pcifunc);
185 	}
186 
187 	/* Free and disable any MCAM entries used by this NIX LF */
188 	rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
189 }
190 
191 static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
192 				 u64 format, bool v4, u64 *fidx)
193 {
194 	struct nix_lso_format field = {0};
195 
196 	/* IP's Length field */
197 	field.layer = NIX_TXLAYER_OL3;
198 	/* In IPv4 the length field is at byte offset 2; in IPv6 it's at 4 */
199 	field.offset = v4 ? 2 : 4;
200 	field.sizem1 = 1; /* i.e 2 bytes */
201 	field.alg = NIX_LSOALG_ADD_PAYLEN;
202 	rvu_write64(rvu, blkaddr,
203 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
204 		    *(u64 *)&field);
205 
206 	/* No ID field in IPv6 header */
207 	if (!v4)
208 		return;
209 
210 	/* IP's ID field */
211 	field.layer = NIX_TXLAYER_OL3;
212 	field.offset = 4;
213 	field.sizem1 = 1; /* i.e 2 bytes */
214 	field.alg = NIX_LSOALG_ADD_SEGNUM;
215 	rvu_write64(rvu, blkaddr,
216 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
217 		    *(u64 *)&field);
218 }
219 
220 static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
221 				 u64 format, u64 *fidx)
222 {
223 	struct nix_lso_format field = {0};
224 
225 	/* TCP's sequence number field */
226 	field.layer = NIX_TXLAYER_OL4;
227 	field.offset = 4;
228 	field.sizem1 = 3; /* i.e 4 bytes */
229 	field.alg = NIX_LSOALG_ADD_OFFSET;
230 	rvu_write64(rvu, blkaddr,
231 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
232 		    *(u64 *)&field);
233 
234 	/* TCP's flags field */
235 	field.layer = NIX_TXLAYER_OL4;
236 	field.offset = 12;
237 	field.sizem1 = 0; /* not needed */
238 	field.alg = NIX_LSOALG_TCP_FLAGS;
239 	rvu_write64(rvu, blkaddr,
240 		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
241 		    *(u64 *)&field);
242 }
243 
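/* The two helpers above fill one NIX_AF_LSO_FORMAT entry per TSO flavour.
 * For every transmitted segment the HW applies the chosen algorithm to the
 * selected header field: ADD_PAYLEN fixes up the IP length, ADD_SEGNUM
 * advances the IPv4 ID, ADD_OFFSET advances the TCP sequence number by the
 * payload already sent, and TCP_FLAGS applies the per-segment flag masks
 * set in NIX_AF_LSO_CFG below (this is the standard TSO fixup set, inferred
 * from the field placements above).
 */
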
244 static void nix_setup_lso(struct rvu *rvu, int blkaddr)
245 {
246 	u64 cfg, idx, fidx = 0;
247 
248 	/* Enable LSO */
249 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
250 	/* For TSO, set first and middle segment flags to
251 	 * mask out PSH, RST & FIN flags in TCP packet
252 	 */
253 	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
254 	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
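	/* Assuming these mask bits line up with the TCP header flag bits,
	 * 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) and leaves
	 * the remaining flags untouched.
	 */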
255 	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
256 
257 	/* Configure format fields for TCPv4 segmentation offload */
258 	idx = NIX_LSO_FORMAT_IDX_TSOV4;
259 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
260 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
261 
262 	/* Set rest of the fields to NOP */
263 	for (; fidx < 8; fidx++) {
264 		rvu_write64(rvu, blkaddr,
265 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
266 	}
267 
268 	/* Configure format fields for TCPv6 segmentation offload */
269 	idx = NIX_LSO_FORMAT_IDX_TSOV6;
270 	fidx = 0;
271 	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
272 	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
273 
274 	/* Set rest of the fields to NOP */
275 	for (; fidx < 8; fidx++) {
276 		rvu_write64(rvu, blkaddr,
277 			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
278 	}
279 }
280 
281 static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
282 {
283 	kfree(pfvf->rq_bmap);
284 	kfree(pfvf->sq_bmap);
285 	kfree(pfvf->cq_bmap);
286 	if (pfvf->rq_ctx)
287 		qmem_free(rvu->dev, pfvf->rq_ctx);
288 	if (pfvf->sq_ctx)
289 		qmem_free(rvu->dev, pfvf->sq_ctx);
290 	if (pfvf->cq_ctx)
291 		qmem_free(rvu->dev, pfvf->cq_ctx);
292 	if (pfvf->rss_ctx)
293 		qmem_free(rvu->dev, pfvf->rss_ctx);
294 	if (pfvf->nix_qints_ctx)
295 		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
296 	if (pfvf->cq_ints_ctx)
297 		qmem_free(rvu->dev, pfvf->cq_ints_ctx);
298 
299 	pfvf->rq_bmap = NULL;
300 	pfvf->cq_bmap = NULL;
301 	pfvf->sq_bmap = NULL;
302 	pfvf->rq_ctx = NULL;
303 	pfvf->sq_ctx = NULL;
304 	pfvf->cq_ctx = NULL;
305 	pfvf->rss_ctx = NULL;
306 	pfvf->nix_qints_ctx = NULL;
307 	pfvf->cq_ints_ctx = NULL;
308 }
309 
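/* Worked example (illustrative): with rss_sz = 256 and rss_grps = 8 the
 * LF below gets num_indices = 2048 RSSE entries; NIX_AF_LFX_RSS_CFG's size
 * field is programmed as ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) and
 * each group gets offset rss_sz * grp with size code ilog2(rss_sz) - 1.
 */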
310 static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
311 			      struct rvu_pfvf *pfvf, int nixlf,
312 			      int rss_sz, int rss_grps, int hwctx_size)
313 {
314 	int err, grp, num_indices;
315 
316 	/* RSS is not requested for this NIXLF */
317 	if (!rss_sz)
318 		return 0;
319 	num_indices = rss_sz * rss_grps;
320 
321 	/* Alloc NIX RSS HW context memory and config the base */
322 	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
323 	if (err)
324 		return err;
325 
326 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
327 		    (u64)pfvf->rss_ctx->iova);
328 
329 	/* Config full RSS table size, enable RSS and caching */
330 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
331 		    BIT_ULL(36) | BIT_ULL(4) |
332 		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
333 	/* Config RSS group offset and sizes */
334 	for (grp = 0; grp < rss_grps; grp++)
335 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
336 			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
337 	return 0;
338 }
339 
340 static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
341 			       struct nix_aq_inst_s *inst)
342 {
343 	struct admin_queue *aq = block->aq;
344 	struct nix_aq_res_s *result;
345 	int timeout = 1000;
346 	u64 reg, head;
347 
348 	result = (struct nix_aq_res_s *)aq->res->base;
349 
350 	/* Get current head pointer where to append this instruction */
351 	reg = rvu_read64(rvu, block->addr, NIX_AF_AQ_STATUS);
352 	head = (reg >> 4) & AQ_PTR_MASK;
353 
354 	memcpy((void *)(aq->inst->base + (head * aq->inst->entry_sz)),
355 	       (void *)inst, aq->inst->entry_sz);
356 	memset(result, 0, sizeof(*result));
357 	/* sync into memory */
358 	wmb();
359 
360 	/* Ring the doorbell and wait for result */
361 	rvu_write64(rvu, block->addr, NIX_AF_AQ_DOOR, 1);
362 	while (result->compcode == NIX_AQ_COMP_NOTDONE) {
363 		cpu_relax();
364 		udelay(1);
365 		timeout--;
366 		if (!timeout)
367 			return -EBUSY;
368 	}
369 
370 	if (result->compcode != NIX_AQ_COMP_GOOD)
371 		/* TODO: Replace this with some error code */
372 		return -EBUSY;
373 
374 	return 0;
375 }
376 
377 static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
378 			       struct nix_aq_enq_rsp *rsp)
379 {
380 	struct rvu_hwinfo *hw = rvu->hw;
381 	u16 pcifunc = req->hdr.pcifunc;
382 	int nixlf, blkaddr, rc = 0;
383 	struct nix_aq_inst_s inst;
384 	struct rvu_block *block;
385 	struct admin_queue *aq;
386 	struct rvu_pfvf *pfvf;
387 	void *ctx, *mask;
388 	bool ena;
389 	u64 cfg;
390 
391 	pfvf = rvu_get_pfvf(rvu, pcifunc);
392 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
393 	if (!pfvf->nixlf || blkaddr < 0)
394 		return NIX_AF_ERR_AF_LF_INVALID;
395 
396 	block = &hw->block[blkaddr];
397 	aq = block->aq;
398 	if (!aq) {
399 		dev_warn(rvu->dev, "%s: NIX AQ not initialized\n", __func__);
400 		return NIX_AF_ERR_AQ_ENQUEUE;
401 	}
402 
403 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
404 	if (nixlf < 0)
405 		return NIX_AF_ERR_AF_LF_INVALID;
406 
407 	switch (req->ctype) {
408 	case NIX_AQ_CTYPE_RQ:
409 		/* Check if index exceeds max no of queues */
410 		if (!pfvf->rq_ctx || req->qidx >= pfvf->rq_ctx->qsize)
411 			rc = NIX_AF_ERR_AQ_ENQUEUE;
412 		break;
413 	case NIX_AQ_CTYPE_SQ:
414 		if (!pfvf->sq_ctx || req->qidx >= pfvf->sq_ctx->qsize)
415 			rc = NIX_AF_ERR_AQ_ENQUEUE;
416 		break;
417 	case NIX_AQ_CTYPE_CQ:
418 		if (!pfvf->cq_ctx || req->qidx >= pfvf->cq_ctx->qsize)
419 			rc = NIX_AF_ERR_AQ_ENQUEUE;
420 		break;
421 	case NIX_AQ_CTYPE_RSS:
422 		/* Check if RSS is enabled and qidx is within range */
423 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf));
424 		if (!(cfg & BIT_ULL(4)) || !pfvf->rss_ctx ||
425 		    (req->qidx >= (256UL << (cfg & 0xF))))
426 			rc = NIX_AF_ERR_AQ_ENQUEUE;
427 		break;
428 	case NIX_AQ_CTYPE_MCE:
429 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
430 		/* Check if index exceeds MCE list length */
431 		if (!hw->nix0->mcast.mce_ctx ||
432 		    (req->qidx >= (256UL << (cfg & 0xF))))
433 			rc = NIX_AF_ERR_AQ_ENQUEUE;
434 
435 		/* Adding multicast lists for requests from PF/VFs is not
436 		 * yet supported, so reject such requests.
437 		 */
438 		if (rsp)
439 			rc = NIX_AF_ERR_AQ_ENQUEUE;
440 		break;
441 	default:
442 		rc = NIX_AF_ERR_AQ_ENQUEUE;
443 	}
444 
445 	if (rc)
446 		return rc;
447 
448 	/* Check whether the SMQ pointed to by the SQ belongs to this PF/VF */
449 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
450 	    req->op != NIX_AQ_INSTOP_WRITE) {
451 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
452 				     pcifunc, req->sq.smq))
453 			return NIX_AF_ERR_AQ_ENQUEUE;
454 	}
455 
456 	memset(&inst, 0, sizeof(struct nix_aq_inst_s));
457 	inst.lf = nixlf;
458 	inst.cindex = req->qidx;
459 	inst.ctype = req->ctype;
460 	inst.op = req->op;
461 	/* Currently we are not supporting enqueuing multiple instructions,
462 	 * so always choose first entry in result memory.
463 	 */
464 	inst.res_addr = (u64)aq->res->iova;
465 
466 	/* Clean result + context memory */
467 	memset(aq->res->base, 0, aq->res->entry_sz);
468 	/* Context needs to be written at RES_ADDR + 128 */
469 	ctx = aq->res->base + 128;
470 	/* Mask needs to be written at RES_ADDR + 256 */
471 	mask = aq->res->base + 256;
472 
473 	switch (req->op) {
474 	case NIX_AQ_INSTOP_WRITE:
475 		if (req->ctype == NIX_AQ_CTYPE_RQ)
476 			memcpy(mask, &req->rq_mask,
477 			       sizeof(struct nix_rq_ctx_s));
478 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
479 			memcpy(mask, &req->sq_mask,
480 			       sizeof(struct nix_sq_ctx_s));
481 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
482 			memcpy(mask, &req->cq_mask,
483 			       sizeof(struct nix_cq_ctx_s));
484 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
485 			memcpy(mask, &req->rss_mask,
486 			       sizeof(struct nix_rsse_s));
487 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
488 			memcpy(mask, &req->mce_mask,
489 			       sizeof(struct nix_rx_mce_s));
490 		/* Fall through */
491 	case NIX_AQ_INSTOP_INIT:
492 		if (req->ctype == NIX_AQ_CTYPE_RQ)
493 			memcpy(ctx, &req->rq, sizeof(struct nix_rq_ctx_s));
494 		else if (req->ctype == NIX_AQ_CTYPE_SQ)
495 			memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
496 		else if (req->ctype == NIX_AQ_CTYPE_CQ)
497 			memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
498 		else if (req->ctype == NIX_AQ_CTYPE_RSS)
499 			memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
500 		else if (req->ctype == NIX_AQ_CTYPE_MCE)
501 			memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
502 		break;
503 	case NIX_AQ_INSTOP_NOP:
504 	case NIX_AQ_INSTOP_READ:
505 	case NIX_AQ_INSTOP_LOCK:
506 	case NIX_AQ_INSTOP_UNLOCK:
507 		break;
508 	default:
509 		rc = NIX_AF_ERR_AQ_ENQUEUE;
510 		return rc;
511 	}
512 
513 	spin_lock(&aq->lock);
514 
515 	/* Submit the instruction to AQ */
516 	rc = nix_aq_enqueue_wait(rvu, block, &inst);
517 	if (rc) {
518 		spin_unlock(&aq->lock);
519 		return rc;
520 	}
521 
522 	/* Set RQ/SQ/CQ bitmap if respective queue hw context is enabled */
523 	if (req->op == NIX_AQ_INSTOP_INIT) {
524 		if (req->ctype == NIX_AQ_CTYPE_RQ && req->rq.ena)
525 			__set_bit(req->qidx, pfvf->rq_bmap);
526 		if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
527 			__set_bit(req->qidx, pfvf->sq_bmap);
528 		if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
529 			__set_bit(req->qidx, pfvf->cq_bmap);
530 	}
531 
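	/* For WRITE ops only the masked-in bits change, so recompute the
	 * tracked enable state: take the new ena bit where the mask selects
	 * it, otherwise keep whatever the bitmap already says.
	 */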
532 	if (req->op == NIX_AQ_INSTOP_WRITE) {
533 		if (req->ctype == NIX_AQ_CTYPE_RQ) {
534 			ena = (req->rq.ena & req->rq_mask.ena) |
535 				(test_bit(req->qidx, pfvf->rq_bmap) &
536 				~req->rq_mask.ena);
537 			if (ena)
538 				__set_bit(req->qidx, pfvf->rq_bmap);
539 			else
540 				__clear_bit(req->qidx, pfvf->rq_bmap);
541 		}
542 		if (req->ctype == NIX_AQ_CTYPE_SQ) {
543 			ena = (req->sq.ena & req->sq_mask.ena) |
544 				(test_bit(req->qidx, pfvf->sq_bmap) &
545 				~req->sq_mask.ena);
546 			if (ena)
547 				__set_bit(req->qidx, pfvf->sq_bmap);
548 			else
549 				__clear_bit(req->qidx, pfvf->sq_bmap);
550 		}
551 		if (req->ctype == NIX_AQ_CTYPE_CQ) {
552 			ena = (req->cq.ena & req->cq_mask.ena) |
553 				(test_bit(req->qidx, pfvf->cq_bmap) &
554 				~req->cq_mask.ena);
555 			if (ena)
556 				__set_bit(req->qidx, pfvf->cq_bmap);
557 			else
558 				__clear_bit(req->qidx, pfvf->cq_bmap);
559 		}
560 	}
561 
562 	if (rsp) {
563 		/* Copy read context into mailbox */
564 		if (req->op == NIX_AQ_INSTOP_READ) {
565 			if (req->ctype == NIX_AQ_CTYPE_RQ)
566 				memcpy(&rsp->rq, ctx,
567 				       sizeof(struct nix_rq_ctx_s));
568 			else if (req->ctype == NIX_AQ_CTYPE_SQ)
569 				memcpy(&rsp->sq, ctx,
570 				       sizeof(struct nix_sq_ctx_s));
571 			else if (req->ctype == NIX_AQ_CTYPE_CQ)
572 				memcpy(&rsp->cq, ctx,
573 				       sizeof(struct nix_cq_ctx_s));
574 			else if (req->ctype == NIX_AQ_CTYPE_RSS)
575 				memcpy(&rsp->rss, ctx,
576 				       sizeof(struct nix_rsse_s));
577 			else if (req->ctype == NIX_AQ_CTYPE_MCE)
578 				memcpy(&rsp->mce, ctx,
579 				       sizeof(struct nix_rx_mce_s));
580 		}
581 	}
582 
583 	spin_unlock(&aq->lock);
584 	return 0;
585 }
586 
587 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
588 {
589 	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
590 	struct nix_aq_enq_req aq_req;
591 	unsigned long *bmap;
592 	int qidx, q_cnt = 0;
593 	int err = 0, rc;
594 
595 	if (!pfvf->cq_ctx || !pfvf->sq_ctx || !pfvf->rq_ctx)
596 		return NIX_AF_ERR_AQ_ENQUEUE;
597 
598 	memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
599 	aq_req.hdr.pcifunc = req->hdr.pcifunc;
600 
601 	if (req->ctype == NIX_AQ_CTYPE_CQ) {
602 		aq_req.cq.ena = 0;
603 		aq_req.cq_mask.ena = 1;
604 		q_cnt = pfvf->cq_ctx->qsize;
605 		bmap = pfvf->cq_bmap;
606 	}
607 	if (req->ctype == NIX_AQ_CTYPE_SQ) {
608 		aq_req.sq.ena = 0;
609 		aq_req.sq_mask.ena = 1;
610 		q_cnt = pfvf->sq_ctx->qsize;
611 		bmap = pfvf->sq_bmap;
612 	}
613 	if (req->ctype == NIX_AQ_CTYPE_RQ) {
614 		aq_req.rq.ena = 0;
615 		aq_req.rq_mask.ena = 1;
616 		q_cnt = pfvf->rq_ctx->qsize;
617 		bmap = pfvf->rq_bmap;
618 	}
619 
620 	aq_req.ctype = req->ctype;
621 	aq_req.op = NIX_AQ_INSTOP_WRITE;
622 
623 	for (qidx = 0; qidx < q_cnt; qidx++) {
624 		if (!test_bit(qidx, bmap))
625 			continue;
626 		aq_req.qidx = qidx;
627 		rc = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
628 		if (rc) {
629 			err = rc;
630 			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
631 				(req->ctype == NIX_AQ_CTYPE_CQ) ?
632 				"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
633 				"RQ" : "SQ"), qidx);
634 		}
635 	}
636 
637 	return err;
638 }
639 
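/* Mailbox entry point for PF/VF AQ requests. Illustrative only (not taken
 * from an in-tree consumer): a PF/VF driver reading back RQ0's context
 * would send something like
 *
 *	struct nix_aq_enq_req req = { 0 };
 *
 *	req.ctype = NIX_AQ_CTYPE_RQ;
 *	req.op = NIX_AQ_INSTOP_READ;
 *	req.qidx = 0;
 *
 * and rvu_nix_aq_enq_inst() copies the context back into rsp->rq.
 */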
640 int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
641 				struct nix_aq_enq_req *req,
642 				struct nix_aq_enq_rsp *rsp)
643 {
644 	return rvu_nix_aq_enq_inst(rvu, req, rsp);
645 }
646 
647 int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
648 				       struct hwctx_disable_req *req,
649 				       struct msg_rsp *rsp)
650 {
651 	return nix_lf_hwctx_disable(rvu, req);
652 }
653 
654 int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
655 				  struct nix_lf_alloc_req *req,
656 				  struct nix_lf_alloc_rsp *rsp)
657 {
658 	int nixlf, qints, hwctx_size, err, rc = 0;
659 	struct rvu_hwinfo *hw = rvu->hw;
660 	u16 pcifunc = req->hdr.pcifunc;
661 	struct rvu_block *block;
662 	struct rvu_pfvf *pfvf;
663 	u64 cfg, ctx_cfg;
664 	int blkaddr;
665 
666 	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
667 		return NIX_AF_ERR_PARAM;
668 
669 	pfvf = rvu_get_pfvf(rvu, pcifunc);
670 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
671 	if (!pfvf->nixlf || blkaddr < 0)
672 		return NIX_AF_ERR_AF_LF_INVALID;
673 
674 	block = &hw->block[blkaddr];
675 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
676 	if (nixlf < 0)
677 		return NIX_AF_ERR_AF_LF_INVALID;
678 
679 	/* If RSS is being enabled, check if the requested config is valid.
680 	 * RSS table size should be a power of two, otherwise
681 	 * RSS_GRP::OFFSET + adder might go beyond that group or
682 	 * the entire table won't be usable.
683 	 */
684 	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
685 			    !is_power_of_2(req->rss_sz)))
686 		return NIX_AF_ERR_RSS_SIZE_INVALID;
687 
688 	if (req->rss_sz &&
689 	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
690 		return NIX_AF_ERR_RSS_GRPS_INVALID;
691 
692 	/* Reset this NIX LF */
693 	err = rvu_lf_reset(rvu, block, nixlf);
694 	if (err) {
695 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
696 			block->addr - BLKADDR_NIX0, nixlf);
697 		return NIX_AF_ERR_LF_RESET;
698 	}
699 
700 	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
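	/* NIX_AF_CONST3 packs log2(context size) values into 4-bit nibbles;
	 * the shifts below pick out the SQ (bits 3:0), RQ (7:4), CQ (11:8),
	 * RSSE (15:12), QINT (23:20) and CINT (27:24) context sizes.
	 */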
701 
702 	/* Alloc NIX RQ HW context memory and config the base */
703 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
704 	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
705 	if (err)
706 		goto free_mem;
707 
708 	pfvf->rq_bmap = kcalloc(req->rq_cnt, sizeof(long), GFP_KERNEL);
709 	if (!pfvf->rq_bmap)
710 		goto free_mem;
711 
712 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
713 		    (u64)pfvf->rq_ctx->iova);
714 
715 	/* Set caching and queue count in HW */
716 	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
717 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
718 
719 	/* Alloc NIX SQ HW context memory and config the base */
720 	hwctx_size = 1UL << (ctx_cfg & 0xF);
721 	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
722 	if (err)
723 		goto free_mem;
724 
725 	pfvf->sq_bmap = kcalloc(req->sq_cnt, sizeof(long), GFP_KERNEL);
726 	if (!pfvf->sq_bmap)
727 		goto free_mem;
728 
729 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
730 		    (u64)pfvf->sq_ctx->iova);
731 	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
732 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
733 
734 	/* Alloc NIX CQ HW context memory and config the base */
735 	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
736 	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
737 	if (err)
738 		goto free_mem;
739 
740 	pfvf->cq_bmap = kcalloc(req->cq_cnt, sizeof(long), GFP_KERNEL);
741 	if (!pfvf->cq_bmap)
742 		goto free_mem;
743 
744 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
745 		    (u64)pfvf->cq_ctx->iova);
746 	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
747 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
748 
749 	/* Initialize receive side scaling (RSS) */
750 	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
751 	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
752 				 req->rss_sz, req->rss_grps, hwctx_size);
753 	if (err)
754 		goto free_mem;
755 
756 	/* Alloc memory for CQINT's HW contexts */
757 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
758 	qints = (cfg >> 24) & 0xFFF;
759 	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
760 	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
761 	if (err)
762 		goto free_mem;
763 
764 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
765 		    (u64)pfvf->cq_ints_ctx->iova);
766 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
767 
768 	/* Alloc memory for QINT's HW contexts */
769 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
770 	qints = (cfg >> 12) & 0xFFF;
771 	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
772 	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
773 	if (err)
774 		goto free_mem;
775 
776 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
777 		    (u64)pfvf->nix_qints_ctx->iova);
778 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
779 
780 	/* Enable LMTST for this NIX LF */
781 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
782 
783 	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
784 	 * If the requester has sent 'RVU_DEFAULT_PF_FUNC', use this NIX LF's
785 	 * own PCIFUNC.
786 	 */
787 	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
788 		cfg = pcifunc;
789 	else
790 		cfg = req->npa_func;
791 
792 	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
793 		cfg |= (u64)pcifunc << 16;
794 	else
795 		cfg |= (u64)req->sso_func << 16;
796 
797 	cfg |= (u64)req->xqe_sz << 33;
798 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);
799 
800 	/* Config Rx pkt length, csum checks and apad enable/disable */
801 	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
802 
803 	err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
804 	if (err)
805 		goto free_mem;
806 
807 	goto exit;
808 
809 free_mem:
810 	nix_ctx_free(rvu, pfvf);
811 	rc = -ENOMEM;
812 
813 exit:
814 	/* Set macaddr of this PF/VF */
815 	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);
816 
817 	/* set SQB size info */
818 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
819 	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
820 	rsp->rx_chan_base = pfvf->rx_chan_base;
821 	rsp->tx_chan_base = pfvf->tx_chan_base;
822 	rsp->rx_chan_cnt = pfvf->rx_chan_cnt;
823 	rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
824 	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
825 	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
826 	return rc;
827 }
828 
829 int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
830 				 struct msg_rsp *rsp)
831 {
832 	struct rvu_hwinfo *hw = rvu->hw;
833 	u16 pcifunc = req->hdr.pcifunc;
834 	struct rvu_block *block;
835 	int blkaddr, nixlf, err;
836 	struct rvu_pfvf *pfvf;
837 
838 	pfvf = rvu_get_pfvf(rvu, pcifunc);
839 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
840 	if (!pfvf->nixlf || blkaddr < 0)
841 		return NIX_AF_ERR_AF_LF_INVALID;
842 
843 	block = &hw->block[blkaddr];
844 	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
845 	if (nixlf < 0)
846 		return NIX_AF_ERR_AF_LF_INVALID;
847 
848 	nix_interface_deinit(rvu, pcifunc, nixlf);
849 
850 	/* Reset this NIX LF */
851 	err = rvu_lf_reset(rvu, block, nixlf);
852 	if (err) {
853 		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
854 			block->addr - BLKADDR_NIX0, nixlf);
855 		return NIX_AF_ERR_LF_RESET;
856 	}
857 
858 	nix_ctx_free(rvu, pfvf);
859 
860 	return 0;
861 }
862 
863 /* Disable shaping of pkts by a scheduler queue
864  * at a given scheduler level.
865  */
866 static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr,
867 				 int lvl, int schq)
868 {
869 	u64  cir_reg = 0, pir_reg = 0;
870 	u64  cfg;
871 
872 	switch (lvl) {
873 	case NIX_TXSCH_LVL_TL1:
874 		cir_reg = NIX_AF_TL1X_CIR(schq);
875 		pir_reg = 0; /* PIR not available at TL1 */
876 		break;
877 	case NIX_TXSCH_LVL_TL2:
878 		cir_reg = NIX_AF_TL2X_CIR(schq);
879 		pir_reg = NIX_AF_TL2X_PIR(schq);
880 		break;
881 	case NIX_TXSCH_LVL_TL3:
882 		cir_reg = NIX_AF_TL3X_CIR(schq);
883 		pir_reg = NIX_AF_TL3X_PIR(schq);
884 		break;
885 	case NIX_TXSCH_LVL_TL4:
886 		cir_reg = NIX_AF_TL4X_CIR(schq);
887 		pir_reg = NIX_AF_TL4X_PIR(schq);
888 		break;
889 	}
890 
891 	if (!cir_reg)
892 		return;
893 	cfg = rvu_read64(rvu, blkaddr, cir_reg);
894 	rvu_write64(rvu, blkaddr, cir_reg, cfg & ~BIT_ULL(0));
895 
896 	if (!pir_reg)
897 		return;
898 	cfg = rvu_read64(rvu, blkaddr, pir_reg);
899 	rvu_write64(rvu, blkaddr, pir_reg, cfg & ~BIT_ULL(0));
900 }
901 
902 static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
903 				 int lvl, int schq)
904 {
905 	struct rvu_hwinfo *hw = rvu->hw;
906 	int link;
907 
908 	/* Reset TL4's SDP link config */
909 	if (lvl == NIX_TXSCH_LVL_TL4)
910 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
911 
912 	if (lvl != NIX_TXSCH_LVL_TL2)
913 		return;
914 
915 	/* Reset TL2's CGX or LBK link config */
916 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
917 		rvu_write64(rvu, blkaddr,
918 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
919 }
920 
921 int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
922 				     struct nix_txsch_alloc_req *req,
923 				     struct nix_txsch_alloc_rsp *rsp)
924 {
925 	u16 pcifunc = req->hdr.pcifunc;
926 	struct nix_txsch *txsch;
927 	int lvl, idx, req_schq;
928 	struct rvu_pfvf *pfvf;
929 	struct nix_hw *nix_hw;
930 	int blkaddr, rc = 0;
931 	u16 schq;
932 
933 	pfvf = rvu_get_pfvf(rvu, pcifunc);
934 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
935 	if (!pfvf->nixlf || blkaddr < 0)
936 		return NIX_AF_ERR_AF_LF_INVALID;
937 
938 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
939 	if (!nix_hw)
940 		return -EINVAL;
941 
942 	spin_lock(&rvu->rsrc_lock);
943 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
944 		txsch = &nix_hw->txsch[lvl];
945 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
946 
947 		/* There are only 28 TL1s */
948 		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
949 			goto err;
950 
951 		/* Check if request is valid */
952 		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
953 			goto err;
954 
955 		/* If contiguous queues are needed, check for availability */
956 		if (req->schq_contig[lvl] &&
957 		    !rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
958 			goto err;
959 
960 		/* Check if full request can be accommodated */
961 		if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
962 			goto err;
963 	}
964 
965 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
966 		txsch = &nix_hw->txsch[lvl];
967 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
968 		rsp->schq[lvl] = req->schq[lvl];
969 
970 		schq = 0;
971 		/* Alloc contiguous queues first */
972 		if (req->schq_contig[lvl]) {
973 			schq = rvu_alloc_rsrc_contig(&txsch->schq,
974 						     req->schq_contig[lvl]);
975 
976 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
977 				txsch->pfvf_map[schq] = pcifunc;
978 				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
979 				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
980 				rsp->schq_contig_list[lvl][idx] = schq;
981 				schq++;
982 			}
983 		}
984 
985 		/* Alloc non-contiguous queues */
986 		for (idx = 0; idx < req->schq[lvl]; idx++) {
987 			schq = rvu_alloc_rsrc(&txsch->schq);
988 			txsch->pfvf_map[schq] = pcifunc;
989 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
990 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
991 			rsp->schq_list[lvl][idx] = schq;
992 		}
993 	}
994 	goto exit;
995 err:
996 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
997 exit:
998 	spin_unlock(&rvu->rsrc_lock);
999 	return rc;
1000 }
1001 
1002 static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
1003 {
1004 	int blkaddr, nixlf, lvl, schq, err;
1005 	struct rvu_hwinfo *hw = rvu->hw;
1006 	struct nix_txsch *txsch;
1007 	struct nix_hw *nix_hw;
1008 	u64 cfg;
1009 
1010 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1011 	if (blkaddr < 0)
1012 		return NIX_AF_ERR_AF_LF_INVALID;
1013 
1014 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1015 	if (!nix_hw)
1016 		return -EINVAL;
1017 
1018 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1019 	if (nixlf < 0)
1020 		return NIX_AF_ERR_AF_LF_INVALID;
1021 
1022 	/* Disable TL2/TL4 queue links before SMQ flush */
1023 	spin_lock(&rvu->rsrc_lock);
1024 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1025 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
1026 			continue;
1027 
1028 		txsch = &nix_hw->txsch[lvl];
1029 		for (schq = 0; schq < txsch->schq.max; schq++) {
1030 			if (txsch->pfvf_map[schq] != pcifunc)
1031 				continue;
1032 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
1033 		}
1034 	}
1035 
1036 	/* Flush SMQs */
1037 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
1038 	for (schq = 0; schq < txsch->schq.max; schq++) {
1039 		if (txsch->pfvf_map[schq] != pcifunc)
1040 			continue;
1041 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
1042 		/* Do SMQ flush and set enqueue xoff */
1043 		cfg |= BIT_ULL(50) | BIT_ULL(49);
1044 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
1045 
1046 		/* Wait for flush to complete */
1047 		err = rvu_poll_reg(rvu, blkaddr,
1048 				   NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
1049 		if (err) {
1050 			dev_err(rvu->dev,
1051 				"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
1052 		}
1053 	}
1054 
1055 	/* Now free scheduler queues to free pool */
1056 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1057 		txsch = &nix_hw->txsch[lvl];
1058 		for (schq = 0; schq < txsch->schq.max; schq++) {
1059 			if (txsch->pfvf_map[schq] != pcifunc)
1060 				continue;
1061 			rvu_free_rsrc(&txsch->schq, schq);
1062 			txsch->pfvf_map[schq] = 0;
1063 		}
1064 	}
1065 	spin_unlock(&rvu->rsrc_lock);
1066 
1067 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
1068 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
1069 	err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
1070 	if (err)
1071 		dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
1072 
1073 	return 0;
1074 }
1075 
1076 int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
1077 				    struct nix_txsch_free_req *req,
1078 				    struct msg_rsp *rsp)
1079 {
1080 	return nix_txschq_free(rvu, req->hdr.pcifunc);
1081 }
1082 
1083 static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
1084 				   int lvl, u64 reg, u64 regval)
1085 {
1086 	u64 regbase = reg & 0xFFFF;
1087 	u16 schq, parent;
1088 
1089 	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
1090 		return false;
1091 
1092 	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
1093 	/* Check if this schq belongs to this PF/VF or not */
1094 	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
1095 		return false;
1096 
1097 	parent = (regval >> 16) & 0x1FF;
1098 	/* Validate MDQ's TL4 parent */
1099 	if (regbase == NIX_AF_MDQX_PARENT(0) &&
1100 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
1101 		return false;
1102 
1103 	/* Validate TL4's TL3 parent */
1104 	if (regbase == NIX_AF_TL4X_PARENT(0) &&
1105 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
1106 		return false;
1107 
1108 	/* Validate TL3's TL2 parent */
1109 	if (regbase == NIX_AF_TL3X_PARENT(0) &&
1110 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
1111 		return false;
1112 
1113 	/* Validate TL2's TL1 parent */
1114 	if (regbase == NIX_AF_TL2X_PARENT(0) &&
1115 	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
1116 		return false;
1117 
1118 	return true;
1119 }
1120 
1121 int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
1122 				    struct nix_txschq_config *req,
1123 				    struct msg_rsp *rsp)
1124 {
1125 	struct rvu_hwinfo *hw = rvu->hw;
1126 	u16 pcifunc = req->hdr.pcifunc;
1127 	u64 reg, regval, schq_regbase;
1128 	struct nix_txsch *txsch;
1129 	struct nix_hw *nix_hw;
1130 	int blkaddr, idx, err;
1131 	int nixlf;
1132 
1133 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
1134 	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
1135 		return NIX_AF_INVAL_TXSCHQ_CFG;
1136 
1137 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1138 	if (blkaddr < 0)
1139 		return NIX_AF_ERR_AF_LF_INVALID;
1140 
1141 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1142 	if (!nix_hw)
1143 		return -EINVAL;
1144 
1145 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1146 	if (nixlf < 0)
1147 		return NIX_AF_ERR_AF_LF_INVALID;
1148 
1149 	txsch = &nix_hw->txsch[req->lvl];
1150 	for (idx = 0; idx < req->num_regs; idx++) {
1151 		reg = req->reg[idx];
1152 		regval = req->regval[idx];
1153 		schq_regbase = reg & 0xFFFF;
1154 
1155 		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
1156 					    txsch->lvl, reg, regval))
1157 			return NIX_AF_INVAL_TXSCHQ_CFG;
1158 
1159 		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
1160 		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
1161 			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
1162 					   pcifunc, 0);
1163 			regval &= ~(0x7FULL << 24);
1164 			regval |= ((u64)nixlf << 24);
1165 		}
1166 
1167 		rvu_write64(rvu, blkaddr, reg, regval);
1168 
1169 		/* Check for SMQ flush, if so, poll for its completion */
1170 		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
1171 		    (regval & BIT_ULL(49))) {
1172 			err = rvu_poll_reg(rvu, blkaddr,
1173 					   reg, BIT_ULL(49), true);
1174 			if (err)
1175 				return NIX_AF_SMQ_FLUSH_FAILED;
1176 		}
1177 	}
1178 	return 0;
1179 }
1180 
1181 static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
1182 			   struct nix_vtag_config *req)
1183 {
1184 	u64 regval = 0;
1185 
1186 #define NIX_VTAGTYPE_MAX 0x8ull
1187 #define NIX_VTAGSIZE_MASK 0x7ull
1188 #define NIX_VTAGSTRIP_CAP_MASK 0x30ull
1189 
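	/* In NIX_AF_LFX_RX_VTAG_TYPEX, bits [2:0] hold the vtag size code
	 * while bits [5:4] (i.e. NIX_VTAGSTRIP_CAP_MASK) are the strip and
	 * capture enables toggled below.
	 */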
1190 	if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
1191 	    req->vtag_size > VTAGSIZE_T8)
1192 		return -EINVAL;
1193 
1194 	regval = rvu_read64(rvu, blkaddr,
1195 			    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
1196 
1197 	if (req->rx.strip_vtag && req->rx.capture_vtag)
1198 		regval |= BIT_ULL(4) | BIT_ULL(5);
1199 	else if (req->rx.strip_vtag)
1200 		regval |= BIT_ULL(4);
1201 	else
1202 		regval &= ~(BIT_ULL(4) | BIT_ULL(5));
1203 
1204 	regval &= ~NIX_VTAGSIZE_MASK;
1205 	regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
1206 
1207 	rvu_write64(rvu, blkaddr,
1208 		    NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
1209 	return 0;
1210 }
1211 
1212 int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
1213 				  struct nix_vtag_config *req,
1214 				  struct msg_rsp *rsp)
1215 {
1216 	struct rvu_hwinfo *hw = rvu->hw;
1217 	u16 pcifunc = req->hdr.pcifunc;
1218 	int blkaddr, nixlf, err;
1219 
1220 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1221 	if (blkaddr < 0)
1222 		return NIX_AF_ERR_AF_LF_INVALID;
1223 
1224 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1225 	if (nixlf < 0)
1226 		return NIX_AF_ERR_AF_LF_INVALID;
1227 
1228 	if (req->cfg_type) {
1229 		err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
1230 		if (err)
1231 			return NIX_AF_ERR_PARAM;
1232 	} else {
1233 		/* TODO: handle tx vtag configuration */
1234 		return 0;
1235 	}
1236 
1237 	return 0;
1238 }
1239 
1240 static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
1241 			 u16 pcifunc, int next, bool eol)
1242 {
1243 	struct nix_aq_enq_req aq_req;
1244 	int err;
1245 
1246 	aq_req.hdr.pcifunc = pcifunc;
1247 	aq_req.ctype = NIX_AQ_CTYPE_MCE;
1248 	aq_req.op = op;
1249 	aq_req.qidx = mce;
1250 
1251 	/* Forward bcast pkts to RQ0, RSS not needed */
1252 	aq_req.mce.op = 0;
1253 	aq_req.mce.index = 0;
1254 	aq_req.mce.eol = eol;
1255 	aq_req.mce.pf_func = pcifunc;
1256 	aq_req.mce.next = next;
1257 
1258 	/* All fields valid */
1259 	*(u64 *)(&aq_req.mce_mask) = ~0ULL;
1260 
1261 	err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
1262 	if (err) {
1263 		dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
1264 			rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
1265 		return err;
1266 	}
1267 	return 0;
1268 }
1269 
1270 static int nix_update_mce_list(struct nix_mce_list *mce_list,
1271 			       u16 pcifunc, int idx, bool add)
1272 {
1273 	struct mce *mce, *tail = NULL;
1274 	bool delete = false;
1275 
1276 	/* Scan through the current list */
1277 	hlist_for_each_entry(mce, &mce_list->head, node) {
1278 		/* If already exists, then delete */
1279 		if (mce->pcifunc == pcifunc && !add) {
1280 			delete = true;
1281 			break;
1282 		}
1283 		tail = mce;
1284 	}
1285 
1286 	if (delete) {
1287 		hlist_del(&mce->node);
1288 		kfree(mce);
1289 		mce_list->count--;
1290 		return 0;
1291 	}
1292 
1293 	if (!add)
1294 		return 0;
1295 
1296 	/* Add a new one to the list, at the tail */
1297 	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
1298 	if (!mce)
1299 		return -ENOMEM;
1300 	mce->idx = idx;
1301 	mce->pcifunc = pcifunc;
1302 	if (!tail)
1303 		hlist_add_head(&mce->node, &mce_list->head);
1304 	else
1305 		hlist_add_behind(&mce->node, &tail->node);
1306 	mce_list->count++;
1307 	return 0;
1308 }
1309 
1310 static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
1311 {
1312 	int err = 0, idx, next_idx, count;
1313 	struct nix_mce_list *mce_list;
1314 	struct mce *mce, *next_mce;
1315 	struct nix_mcast *mcast;
1316 	struct nix_hw *nix_hw;
1317 	struct rvu_pfvf *pfvf;
1318 	int blkaddr;
1319 
1320 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1321 	if (blkaddr < 0)
1322 		return 0;
1323 
1324 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
1325 	if (!nix_hw)
1326 		return 0;
1327 
1328 	mcast = &nix_hw->mcast;
1329 
1330 	/* Get this PF/VF func's MCE index */
1331 	pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
1332 	idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
1333 
1334 	mce_list = &pfvf->bcast_mce_list;
1335 	if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
1336 		dev_err(rvu->dev,
1337 			"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
1338 			__func__, idx, mce_list->max,
1339 			pcifunc >> RVU_PFVF_PF_SHIFT);
1340 		return -EINVAL;
1341 	}
1342 
1343 	spin_lock(&mcast->mce_lock);
1344 
1345 	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
1346 	if (err)
1347 		goto end;
1348 
1349 	/* Disable MCAM entry in NPC */
1350 
1351 	if (!mce_list->count)
1352 		goto end;
1353 	count = mce_list->count;
1354 
1355 	/* Dump the updated list to HW */
1356 	hlist_for_each_entry(mce, &mce_list->head, node) {
1357 		next_idx = 0;
1358 		count--;
1359 		if (count) {
1360 			next_mce = hlist_entry(mce->node.next,
1361 					       struct mce, node);
1362 			next_idx = next_mce->idx;
1363 		}
1364 		/* EOL should be set in last MCE */
1365 		err = nix_setup_mce(rvu, mce->idx,
1366 				    NIX_AQ_INSTOP_WRITE, mce->pcifunc,
1367 				    next_idx, count ? false : true);
1368 		if (err)
1369 			goto end;
1370 	}
1371 
1372 end:
1373 	spin_unlock(&mcast->mce_lock);
1374 	return err;
1375 }
1376 
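/* Each CGX-mapped PF gets a contiguous run of (numvfs + 1) MCEs starting at
 * pfvf->bcast_mce_idx: base + 0 is the PF itself and base + n is the VF
 * whose pcifunc FUNC field is n, matching the
 * "bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK)" lookup above.
 */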
1377 static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
1378 {
1379 	struct nix_mcast *mcast = &nix_hw->mcast;
1380 	int err, pf, numvfs, idx;
1381 	struct rvu_pfvf *pfvf;
1382 	u16 pcifunc;
1383 	u64 cfg;
1384 
1385 	/* Skip PF0 (i.e AF) */
1386 	for (pf = 1; pf < (rvu->cgx_mapped_pfs + 1); pf++) {
1387 		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
1388 		/* If PF is not enabled, nothing to do */
1389 		if (!((cfg >> 20) & 0x01))
1390 			continue;
1391 		/* Get numVFs attached to this PF */
1392 		numvfs = (cfg >> 12) & 0xFF;
1393 
1394 		pfvf = &rvu->pf[pf];
1395 		/* Save the start MCE */
1396 		pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
1397 
1398 		nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
1399 
1400 		for (idx = 0; idx < (numvfs + 1); idx++) {
1401 			/* idx-0 is for PF, followed by VFs */
1402 			pcifunc = (pf << RVU_PFVF_PF_SHIFT);
1403 			pcifunc |= idx;
1404 			/* Add dummy entries now, so that we don't have to check
1405 			 * whether AQ_OP should be INIT or WRITE later on.
1406 			 * Will be updated when a NIXLF is attached to or
1407 			 * detached from these PF/VFs.
1408 			 */
1409 			err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
1410 					    NIX_AQ_INSTOP_INIT,
1411 					    pcifunc, 0, true);
1412 			if (err)
1413 				return err;
1414 		}
1415 	}
1416 	return 0;
1417 }
1418 
1419 static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1420 {
1421 	struct nix_mcast *mcast = &nix_hw->mcast;
1422 	struct rvu_hwinfo *hw = rvu->hw;
1423 	int err, size;
1424 
1425 	size = (rvu_read64(rvu, blkaddr, NIX_AF_CONST3) >> 16) & 0x0F;
1426 	size = (1ULL << size);
1427 
1428 	/* Alloc memory for multicast/mirror replication entries */
1429 	err = qmem_alloc(rvu->dev, &mcast->mce_ctx,
1430 			 (256UL << MC_TBL_SIZE), size);
1431 	if (err)
1432 		return -ENOMEM;
1433 
1434 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BASE,
1435 		    (u64)mcast->mce_ctx->iova);
1436 
1437 	/* Set max list length equal to max no of VFs per PF + PF itself */
1438 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG,
1439 		    BIT_ULL(36) | (hw->max_vfs_per_pf << 4) | MC_TBL_SIZE);
1440 
1441 	/* Alloc memory for multicast replication buffers */
1442 	size = rvu_read64(rvu, blkaddr, NIX_AF_MC_MIRROR_CONST) & 0xFFFF;
1443 	err = qmem_alloc(rvu->dev, &mcast->mcast_buf,
1444 			 (8UL << MC_BUF_CNT), size);
1445 	if (err)
1446 		return -ENOMEM;
1447 
1448 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_BASE,
1449 		    (u64)mcast->mcast_buf->iova);
1450 
1451 	/* Alloc pkind for NIX internal RX multicast/mirror replay */
1452 	mcast->replay_pkind = rvu_alloc_rsrc(&hw->pkind.rsrc);
1453 
1454 	rvu_write64(rvu, blkaddr, NIX_AF_RX_MCAST_BUF_CFG,
1455 		    BIT_ULL(63) | (mcast->replay_pkind << 24) |
1456 		    BIT_ULL(20) | MC_BUF_CNT);
1457 
1458 	spin_lock_init(&mcast->mce_lock);
1459 
1460 	return nix_setup_bcast_tables(rvu, nix_hw);
1461 }
1462 
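/* One pfvf_map[] slot per HW scheduler queue records the owning PCIFUNC
 * (0 when free); it is checked by is_valid_txschq() and cleared again in
 * nix_txschq_free().
 */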
1463 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
1464 {
1465 	struct nix_txsch *txsch;
1466 	u64 cfg, reg;
1467 	int err, lvl;
1468 
1469 	/* Get scheduler queue count of each type and alloc
1470 	 * bitmap for each for alloc/free/attach operations.
1471 	 */
1472 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1473 		txsch = &nix_hw->txsch[lvl];
1474 		txsch->lvl = lvl;
1475 		switch (lvl) {
1476 		case NIX_TXSCH_LVL_SMQ:
1477 			reg = NIX_AF_MDQ_CONST;
1478 			break;
1479 		case NIX_TXSCH_LVL_TL4:
1480 			reg = NIX_AF_TL4_CONST;
1481 			break;
1482 		case NIX_TXSCH_LVL_TL3:
1483 			reg = NIX_AF_TL3_CONST;
1484 			break;
1485 		case NIX_TXSCH_LVL_TL2:
1486 			reg = NIX_AF_TL2_CONST;
1487 			break;
1488 		case NIX_TXSCH_LVL_TL1:
1489 			reg = NIX_AF_TL1_CONST;
1490 			break;
1491 		}
1492 		cfg = rvu_read64(rvu, blkaddr, reg);
1493 		txsch->schq.max = cfg & 0xFFFF;
1494 		err = rvu_alloc_bitmap(&txsch->schq);
1495 		if (err)
1496 			return err;
1497 
1498 		/* Allocate memory for scheduler queues to
1499 		 * PF/VF pcifunc mapping info.
1500 		 */
1501 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
1502 					       sizeof(u16), GFP_KERNEL);
1503 		if (!txsch->pfvf_map)
1504 			return -ENOMEM;
1505 	}
1506 	return 0;
1507 }
1508 
1509 int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
1510 				   struct msg_rsp *rsp)
1511 {
1512 	struct rvu_hwinfo *hw = rvu->hw;
1513 	u16 pcifunc = req->hdr.pcifunc;
1514 	int i, nixlf, blkaddr;
1515 	u64 stats;
1516 
1517 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1518 	if (blkaddr < 0)
1519 		return NIX_AF_ERR_AF_LF_INVALID;
1520 
1521 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1522 	if (nixlf < 0)
1523 		return NIX_AF_ERR_AF_LF_INVALID;
1524 
1525 	/* Get stats count supported by HW */
1526 	stats = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
1527 
1528 	/* Reset tx stats */
1529 	for (i = 0; i < ((stats >> 24) & 0xFF); i++)
1530 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, i), 0);
1531 
1532 	/* Reset rx stats */
1533 	for (i = 0; i < ((stats >> 32) & 0xFF); i++)
1534 		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, i), 0);
1535 
1536 	return 0;
1537 }
1538 
1539 /* Returns the ALG index to be set into NPC_RX_ACTION */
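/* Example: flowkey_cfg = PORT | IPV4 | IPV6 | TCP first has the PORT bit
 * dropped, then matches ip_cfg | TCP and selects FLOW_KEY_ALG_TCP; any
 * combination not listed below falls back to FLOW_KEY_ALG_PORT.
 */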
1540 static int get_flowkey_alg_idx(u32 flow_cfg)
1541 {
1542 	u32 ip_cfg;
1543 
1544 	flow_cfg &= ~FLOW_KEY_TYPE_PORT;
1545 	ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
1546 	if (flow_cfg == ip_cfg)
1547 		return FLOW_KEY_ALG_IP;
1548 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
1549 		return FLOW_KEY_ALG_TCP;
1550 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
1551 		return FLOW_KEY_ALG_UDP;
1552 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
1553 		return FLOW_KEY_ALG_SCTP;
1554 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
1555 		return FLOW_KEY_ALG_TCP_UDP;
1556 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
1557 		return FLOW_KEY_ALG_TCP_SCTP;
1558 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
1559 		return FLOW_KEY_ALG_UDP_SCTP;
1560 	else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
1561 			      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
1562 		return FLOW_KEY_ALG_TCP_UDP_SCTP;
1563 
1564 	return FLOW_KEY_ALG_PORT;
1565 }
1566 
1567 int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
1568 					 struct nix_rss_flowkey_cfg *req,
1569 					 struct msg_rsp *rsp)
1570 {
1571 	struct rvu_hwinfo *hw = rvu->hw;
1572 	u16 pcifunc = req->hdr.pcifunc;
1573 	int alg_idx, nixlf, blkaddr;
1574 
1575 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1576 	if (blkaddr < 0)
1577 		return NIX_AF_ERR_AF_LF_INVALID;
1578 
1579 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1580 	if (nixlf < 0)
1581 		return NIX_AF_ERR_AF_LF_INVALID;
1582 
1583 	alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
1584 
1585 	rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
1586 				       alg_idx, req->mcam_index);
1587 	return 0;
1588 }
1589 
1590 static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
1591 {
1592 	struct nix_rx_flowkey_alg *field = NULL;
1593 	int idx, key_type;
1594 
1595 	if (!alg)
1596 		return;
1597 
1598 	/* FIELD0: IPv4
1599 	 * FIELD1: IPv6
1600 	 * FIELD2: TCP/UDP/SCTP/ALL
1601 	 * FIELD3: Unused
1602 	 * FIELD4: Unused
1603 	 *
1604 	 * Each of the 32 possible flow key algorithm definitions should
1605 	 * fall into above incremental config (except ALG0). Otherwise a
1606 	 * single NPC MCAM entry is not sufficient for supporting RSS.
1607 	 *
1608 	 * If a different definition or combination is needed, then the NPC
1609 	 * MCAM has to be programmed to filter such pkts and its action should
1610 	 * point to this definition to calculate the flowtag or hash.
1611 	 */
1612 	for (idx = 0; idx < 32; idx++) {
1613 		key_type = flow_cfg & BIT_ULL(idx);
1614 		if (!key_type)
1615 			continue;
1616 		switch (key_type) {
1617 		case FLOW_KEY_TYPE_PORT:
1618 			field = &alg[0];
1619 			field->sel_chan = true;
1620 			/* This should be set to 1 when SEL_CHAN is set */
1621 			field->bytesm1 = 1;
1622 			break;
1623 		case FLOW_KEY_TYPE_IPV4:
1624 			field = &alg[0];
1625 			field->lid = NPC_LID_LC;
1626 			field->ltype_match = NPC_LT_LC_IP;
1627 			field->hdr_offset = 12; /* SIP offset */
1628 			field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
1629 			field->ltype_mask = 0xF; /* Match only IPv4 */
1630 			break;
1631 		case FLOW_KEY_TYPE_IPV6:
1632 			field = &alg[1];
1633 			field->lid = NPC_LID_LC;
1634 			field->ltype_match = NPC_LT_LC_IP6;
1635 			field->hdr_offset = 8; /* SIP offset */
1636 			field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
1637 			field->ltype_mask = 0xF; /* Match only IPv6 */
1638 			break;
1639 		case FLOW_KEY_TYPE_TCP:
1640 		case FLOW_KEY_TYPE_UDP:
1641 		case FLOW_KEY_TYPE_SCTP:
1642 			field = &alg[2];
1643 			field->lid = NPC_LID_LD;
1644 			field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
1645 			if (key_type == FLOW_KEY_TYPE_TCP)
1646 				field->ltype_match |= NPC_LT_LD_TCP;
1647 			else if (key_type == FLOW_KEY_TYPE_UDP)
1648 				field->ltype_match |= NPC_LT_LD_UDP;
1649 			else if (key_type == FLOW_KEY_TYPE_SCTP)
1650 				field->ltype_match |= NPC_LT_LD_SCTP;
1651 			field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
1652 			field->ltype_mask = ~field->ltype_match;
1653 			break;
1654 		}
1655 		if (field)
1656 			field->ena = 1;
1657 		field = NULL;
1658 	}
1659 }
1660 
1661 static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
1662 {
1663 #define FIELDS_PER_ALG	5
1664 	u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
1665 	u32 flowkey_cfg, minkey_cfg;
1666 	int alg, fid;
1667 
1668 	memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
1669 
1670 	/* Only incoming channel number */
1671 	flowkey_cfg = FLOW_KEY_TYPE_PORT;
1672 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
1673 
1674 	/* For an incoming pkt, if none of the fields match, the flowkey
1675 	 * will be zero and hence the generated tag will also be zero.
1676 	 * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
1677 	 * be used to queue the packet.
1678 	 */
1679 
1680 	/* IPv4/IPv6 SIP/DIPs */
1681 	flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
1682 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
1683 
1684 	/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1685 	minkey_cfg = flowkey_cfg;
1686 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
1687 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
1688 
1689 	/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1690 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
1691 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
1692 
1693 	/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
1694 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
1695 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
1696 
1697 	/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
1698 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
1699 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
1700 
1701 	/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1702 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
1703 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
1704 
1705 	/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1706 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
1707 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
1708 
1709 	/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
1710 	flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
1711 		      FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
1712 	set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
1713 			   flowkey_cfg);
1714 
1715 	for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
1716 		for (fid = 0; fid < FIELDS_PER_ALG; fid++)
1717 			rvu_write64(rvu, blkaddr,
1718 				    NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
1719 				    field[alg][fid]);
1720 	}
1721 }
1722 
1723 int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
1724 				      struct nix_set_mac_addr *req,
1725 				      struct msg_rsp *rsp)
1726 {
1727 	struct rvu_hwinfo *hw = rvu->hw;
1728 	u16 pcifunc = req->hdr.pcifunc;
1729 	struct rvu_pfvf *pfvf;
1730 	int blkaddr, nixlf;
1731 
1732 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1733 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1734 	if (!pfvf->nixlf || blkaddr < 0)
1735 		return NIX_AF_ERR_AF_LF_INVALID;
1736 
1737 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1738 	if (nixlf < 0)
1739 		return NIX_AF_ERR_AF_LF_INVALID;
1740 
1741 	ether_addr_copy(pfvf->mac_addr, req->mac_addr);
1742 
1743 	rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
1744 				    pfvf->rx_chan_base, req->mac_addr);
1745 	return 0;
1746 }
1747 
1748 int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
1749 				     struct msg_rsp *rsp)
1750 {
1751 	bool allmulti = false, disable_promisc = false;
1752 	struct rvu_hwinfo *hw = rvu->hw;
1753 	u16 pcifunc = req->hdr.pcifunc;
1754 	struct rvu_pfvf *pfvf;
1755 	int blkaddr, nixlf;
1756 
1757 	pfvf = rvu_get_pfvf(rvu, pcifunc);
1758 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
1759 	if (!pfvf->nixlf || blkaddr < 0)
1760 		return NIX_AF_ERR_AF_LF_INVALID;
1761 
1762 	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
1763 	if (nixlf < 0)
1764 		return NIX_AF_ERR_AF_LF_INVALID;
1765 
1766 	if (req->mode & NIX_RX_MODE_PROMISC)
1767 		allmulti = false;
1768 	else if (req->mode & NIX_RX_MODE_ALLMULTI)
1769 		allmulti = true;
1770 	else
1771 		disable_promisc = true;
1772 
1773 	if (disable_promisc)
1774 		rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
1775 	else
1776 		rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
1777 					      pfvf->rx_chan_base, allmulti);
1778 	return 0;
1779 }
1780 
1781 static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
1782 {
1783 	int idx, err;
1784 	u64 status;
1785 
1786 	/* Start X2P bus calibration */
1787 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1788 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
1789 	/* Wait for calibration to complete */
1790 	err = rvu_poll_reg(rvu, blkaddr,
1791 			   NIX_AF_STATUS, BIT_ULL(10), false);
1792 	if (err) {
1793 		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
1794 		return err;
1795 	}
1796 
1797 	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
1798 	/* Check if CGX devices are ready */
1799 	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
1800 		if (status & (BIT_ULL(16 + idx)))
1801 			continue;
1802 		dev_err(rvu->dev,
1803 			"CGX%d didn't respond to NIX X2P calibration\n", idx);
1804 		err = -EBUSY;
1805 	}
1806 
1807 	/* Check if LBK is ready */
1808 	if (!(status & BIT_ULL(19))) {
1809 		dev_err(rvu->dev,
1810 			"LBK didn't respond to NIX X2P calibration\n");
1811 		err = -EBUSY;
1812 	}
1813 
1814 	/* Clear 'calibrate_x2p' bit */
1815 	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
1816 		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
1817 	if (err || (status & 0x3FFULL))
1818 		dev_err(rvu->dev,
1819 			"NIX X2P calibration failed, status 0x%llx\n", status);
1820 	if (err)
1821 		return err;
1822 	return 0;
1823 }
1824 
1825 static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
1826 {
1827 	u64 cfg;
1828 	int err;
1829 
1830 	/* Set admin queue endianness */
1831 	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
1832 #ifdef __BIG_ENDIAN
1833 	cfg |= BIT_ULL(1);
1834 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1835 #else
1836 	cfg &= ~BIT_ULL(1);
1837 	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
1838 #endif
1839 
1840 	/* Do not bypass NDC cache */
1841 	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
1842 	cfg &= ~0x3FFEULL;
1843 	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
1844 
1845 	/* Result structure can be followed by RQ/SQ/CQ context at
1846 	 * RES + 128bytes and a write mask at RES + 256 bytes, depending on
1847 	 * operation type. Alloc sufficient result memory for all operations.
1848 	 */
1849 	err = rvu_aq_alloc(rvu, &block->aq,
1850 			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
1851 			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
1852 	if (err)
1853 		return err;
1854 
1855 	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
1856 	rvu_write64(rvu, block->addr,
1857 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
1858 	return 0;
1859 }
1860 
1861 int rvu_nix_init(struct rvu *rvu)
1862 {
1863 	struct rvu_hwinfo *hw = rvu->hw;
1864 	struct rvu_block *block;
1865 	int blkaddr, err;
1866 	u64 cfg;
1867 
1868 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1869 	if (blkaddr < 0)
1870 		return 0;
1871 	block = &hw->block[blkaddr];
1872 
1873 	/* Calibrate X2P bus to check if CGX/LBK links are fine */
1874 	err = nix_calibrate_x2p(rvu, blkaddr);
1875 	if (err)
1876 		return err;
1877 
1878 	/* Set num of links of each type */
1879 	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
1880 	hw->cgx = (cfg >> 12) & 0xF;
1881 	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
1882 	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
1883 	hw->lbk_links = 1;
1884 	hw->sdp_links = 1;
1885 
1886 	/* Initialize admin queue */
1887 	err = nix_aq_init(rvu, block);
1888 	if (err)
1889 		return err;
1890 
1891 	/* Restore CINT timer delay to HW reset values */
1892 	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
1893 
1894 	/* Configure segmentation offload formats */
1895 	nix_setup_lso(rvu, blkaddr);
1896 
1897 	if (blkaddr == BLKADDR_NIX0) {
1898 		hw->nix0 = devm_kzalloc(rvu->dev,
1899 					sizeof(struct nix_hw), GFP_KERNEL);
1900 		if (!hw->nix0)
1901 			return -ENOMEM;
1902 
1903 		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
1904 		if (err)
1905 			return err;
1906 
1907 		err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
1908 		if (err)
1909 			return err;
1910 
1911 		/* Config Outer L2, IP, TCP and UDP's NPC layer info.
1912 		 * This helps the HW protocol checker identify headers
1913 		 * and validate lengths and checksums.
1914 		 */
1915 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
1916 			    (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
1917 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
1918 			    (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
1919 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
1920 			    (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
1921 		rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
1922 			    (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
1923 
1924 		nix_rx_flowkey_alg_cfg(rvu, blkaddr);
1925 	}
1926 	return 0;
1927 }
1928 
1929 void rvu_nix_freemem(struct rvu *rvu)
1930 {
1931 	struct rvu_hwinfo *hw = rvu->hw;
1932 	struct rvu_block *block;
1933 	struct nix_txsch *txsch;
1934 	struct nix_mcast *mcast;
1935 	struct nix_hw *nix_hw;
1936 	int blkaddr, lvl;
1937 
1938 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
1939 	if (blkaddr < 0)
1940 		return;
1941 
1942 	block = &hw->block[blkaddr];
1943 	rvu_aq_free(rvu, block->aq);
1944 
1945 	if (blkaddr == BLKADDR_NIX0) {
1946 		nix_hw = get_nix_hw(rvu->hw, blkaddr);
1947 		if (!nix_hw)
1948 			return;
1949 
1950 		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1951 			txsch = &nix_hw->txsch[lvl];
1952 			kfree(txsch->schq.bmap);
1953 		}
1954 
1955 		mcast = &nix_hw->mcast;
1956 		qmem_free(rvu->dev, mcast->mce_ctx);
1957 		qmem_free(rvu->dev, mcast->mcast_buf);
1958 	}
1959 }
1960