// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>

#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"

static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr,
				 u64 format, bool v4, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* IP's Length field */
	field.layer = NIX_TXLAYER_OL3;
	/* In IPv4 the length field is at byte offset 2; in IPv6 it's at offset 4 */
	field.offset = v4 ? 2 : 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_PAYLEN;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* No ID field in IPv6 header */
	if (!v4)
		return;

	/* IP's ID field */
	field.layer = NIX_TXLAYER_OL3;
	field.offset = 4;
	field.sizem1 = 1; /* i.e 2 bytes */
	field.alg = NIX_LSOALG_ADD_SEGNUM;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
				 u64 format, u64 *fidx)
{
	struct nix_lso_format field = {0};

	/* TCP's sequence number field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 4;
	field.sizem1 = 3; /* i.e 4 bytes */
	field.alg = NIX_LSOALG_ADD_OFFSET;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);

	/* TCP's flags field */
	field.layer = NIX_TXLAYER_OL4;
	field.offset = 12;
	field.sizem1 = 0; /* not needed */
	field.alg = NIX_LSOALG_TCP_FLAGS;
	rvu_write64(rvu, blkaddr,
		    NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
		    *(u64 *)&field);
}

static void nix_setup_lso(struct rvu *rvu, int blkaddr)
{
	u64 cfg, idx, fidx = 0;

	/* Enable LSO */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
	/* For TSO, set first and middle segment flags to
	 * mask out PSH, RST & FIN flags in TCP packet
	 */
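	/* 0xFFF2 clears FIN (bit 0), RST (bit 2) and PSH (bit 3) in each
	 * 16-bit flags mask while leaving the remaining flag bits set.
	 */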
	cfg &= ~((0xFFFFULL << 32) | (0xFFFFULL << 16));
	cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
	rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));

	/* Configure format fields for TCPv4 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV4;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}

	/* Configure format fields for TCPv6 segmentation offload */
	idx = NIX_LSO_FORMAT_IDX_TSOV6;
	fidx = 0;
	nix_setup_lso_tso_l3(rvu, blkaddr, idx, false, &fidx);
	nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);

	/* Set rest of the fields to NOP */
	for (; fidx < 8; fidx++) {
		rvu_write64(rvu, blkaddr,
			    NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
	}
}

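/* Free all HW context memory (RQ/SQ/CQ/RSS and interrupt contexts)
 * allocated for this NIX LF and clear the cached pointers.
 */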
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
{
	if (pfvf->rq_ctx)
		qmem_free(rvu->dev, pfvf->rq_ctx);
	if (pfvf->sq_ctx)
		qmem_free(rvu->dev, pfvf->sq_ctx);
	if (pfvf->cq_ctx)
		qmem_free(rvu->dev, pfvf->cq_ctx);
	if (pfvf->rss_ctx)
		qmem_free(rvu->dev, pfvf->rss_ctx);
	if (pfvf->nix_qints_ctx)
		qmem_free(rvu->dev, pfvf->nix_qints_ctx);
	if (pfvf->cq_ints_ctx)
		qmem_free(rvu->dev, pfvf->cq_ints_ctx);

	pfvf->rq_ctx = NULL;
	pfvf->sq_ctx = NULL;
	pfvf->cq_ctx = NULL;
	pfvf->rss_ctx = NULL;
	pfvf->nix_qints_ctx = NULL;
	pfvf->cq_ints_ctx = NULL;
}

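/* Allocate the RSS indirection table context for a NIX LF and program
 * its base IOVA, overall table size and per-group offset/size.
 */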
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
			      struct rvu_pfvf *pfvf, int nixlf,
			      int rss_sz, int rss_grps, int hwctx_size)
{
	int err, grp, num_indices;

	/* RSS is not requested for this NIXLF */
	if (!rss_sz)
		return 0;
	num_indices = rss_sz * rss_grps;

	/* Alloc NIX RSS HW context memory and config the base */
	err = qmem_alloc(rvu->dev, &pfvf->rss_ctx, num_indices, hwctx_size);
	if (err)
		return err;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_BASE(nixlf),
		    (u64)pfvf->rss_ctx->iova);

	/* Config full RSS table size, enable RSS and caching */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
		    BIT_ULL(36) | BIT_ULL(4) |
		    ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
	/* Config RSS group offset and sizes */
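	/* Each group's size is encoded as log2(rss_sz) - 1 at bit 16 and
	 * its offset into the indirection table is rss_sz * grp.
	 */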
	for (grp = 0; grp < rss_grps; grp++)
		rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
			    ((ilog2(rss_sz) - 1) << 16) | (rss_sz * grp));
	return 0;
}

int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
				  struct nix_lf_alloc_req *req,
				  struct nix_lf_alloc_rsp *rsp)
{
	int nixlf, qints, hwctx_size, err, rc = 0;
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	struct rvu_pfvf *pfvf;
	u64 cfg, ctx_cfg;
	int blkaddr;

	if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
		return NIX_AF_ERR_PARAM;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* If RSS is being enabled, check if the requested config is valid.
	 * The RSS table size should be a power of two, otherwise
	 * RSS_GRP::OFFSET + adder might go beyond that group or the
	 * entire table won't be usable.
	 */
	if (req->rss_sz && (req->rss_sz > MAX_RSS_INDIR_TBL_SIZE ||
			    !is_power_of_2(req->rss_sz)))
		return NIX_AF_ERR_RSS_SIZE_INVALID;

	if (req->rss_sz &&
	    (!req->rss_grps || req->rss_grps > MAX_RSS_GROUPS))
		return NIX_AF_ERR_RSS_GRPS_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

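	/* NIX_AF_CONST3 advertises the log2 size of each HW context type in
	 * 4-bit fields; the relevant nibble is extracted below to size each
	 * qmem allocation.
	 */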
	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);

	/* Alloc NIX RQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->rq_ctx, req->rq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_BASE(nixlf),
		    (u64)pfvf->rq_ctx->iova);

	/* Set caching and queue count in HW */
	cfg = BIT_ULL(36) | (req->rq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);

	/* Alloc NIX SQ HW context memory and config the base */
	hwctx_size = 1UL << (ctx_cfg & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->sq_ctx, req->sq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
		    (u64)pfvf->sq_ctx->iova);
	cfg = BIT_ULL(36) | (req->sq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);

	/* Alloc NIX CQ HW context memory and config the base */
	hwctx_size = 1UL << ((ctx_cfg >> 8) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ctx, req->cq_cnt, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
		    (u64)pfvf->cq_ctx->iova);
	cfg = BIT_ULL(36) | (req->cq_cnt - 1);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);

	/* Initialize receive side scaling (RSS) */
	hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
	err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
				 req->rss_sz, req->rss_grps, hwctx_size);
	if (err)
		goto free_mem;

	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		goto free_mem;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));

	/* Enable LMTST for this NIX LF */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));

	/* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC.
	 * If the requester has sent 'RVU_DEFAULT_PF_FUNC', use this
	 * NIX LF's own PCIFUNC.
	 */
	if (req->npa_func == RVU_DEFAULT_PF_FUNC)
		cfg = pcifunc;
	else
		cfg = req->npa_func;

	if (req->sso_func == RVU_DEFAULT_PF_FUNC)
		cfg |= (u64)pcifunc << 16;
	else
		cfg |= (u64)req->sso_func << 16;

	cfg |= (u64)req->xqe_sz << 33;
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CFG(nixlf), cfg);

	/* Config Rx pkt length, csum checks and apad enable/disable */
	rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);

	goto exit;

free_mem:
	nix_ctx_free(rvu, pfvf);
	rc = -ENOMEM;

exit:
	/* Set macaddr of this PF/VF */
	ether_addr_copy(rsp->mac_addr, pfvf->mac_addr);

	/* set SQB size info */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQ_CONST);
	rsp->sqb_size = (cfg >> 34) & 0xFFFF;
	rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
	rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
	return rc;
}

int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
				 struct msg_rsp *rsp)
{
	struct rvu_hwinfo *hw = rvu->hw;
	u16 pcifunc = req->hdr.pcifunc;
	struct rvu_block *block;
	int blkaddr, nixlf, err;
	struct rvu_pfvf *pfvf;

	pfvf = rvu_get_pfvf(rvu, pcifunc);
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (!pfvf->nixlf || blkaddr < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	block = &hw->block[blkaddr];
	nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
	if (nixlf < 0)
		return NIX_AF_ERR_AF_LF_INVALID;

	/* Reset this NIX LF */
	err = rvu_lf_reset(rvu, block, nixlf);
	if (err) {
		dev_err(rvu->dev, "Failed to reset NIX%d LF%d\n",
			block->addr - BLKADDR_NIX0, nixlf);
		return NIX_AF_ERR_LF_RESET;
	}

	nix_ctx_free(rvu, pfvf);

	return 0;
}

static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
	if (blkaddr == BLKADDR_NIX0 && hw->nix0)
		return hw->nix0;

	return NULL;
}

static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
	struct nix_txsch *txsch;
	u64 cfg, reg;
	int err, lvl;

	/* Get the scheduler queue count of each type and allocate a
	 * bitmap for each, used for alloc/free/attach operations.
	 */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		txsch = &nix_hw->txsch[lvl];
		txsch->lvl = lvl;
		switch (lvl) {
		case NIX_TXSCH_LVL_SMQ:
			reg = NIX_AF_MDQ_CONST;
			break;
		case NIX_TXSCH_LVL_TL4:
			reg = NIX_AF_TL4_CONST;
			break;
		case NIX_TXSCH_LVL_TL3:
			reg = NIX_AF_TL3_CONST;
			break;
		case NIX_TXSCH_LVL_TL2:
			reg = NIX_AF_TL2_CONST;
			break;
		case NIX_TXSCH_LVL_TL1:
			reg = NIX_AF_TL1_CONST;
			break;
		}
		cfg = rvu_read64(rvu, blkaddr, reg);
		txsch->schq.max = cfg & 0xFFFF;
		err = rvu_alloc_bitmap(&txsch->schq);
		if (err)
			return err;

		/* Allocate memory for scheduler queues to
		 * PF/VF pcifunc mapping info.
		 */
		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
					       sizeof(u16), GFP_KERNEL);
		if (!txsch->pfvf_map)
			return -ENOMEM;
	}
	return 0;
}

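/* Run X2P bus calibration and verify that every CGX link and the LBK
 * block responded; the calibrate bit is cleared again before returning.
 */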
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
	int idx, err;
	u64 status;

	/* Start X2P bus calibration */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) | BIT_ULL(9));
	/* Wait for calibration to complete */
	err = rvu_poll_reg(rvu, blkaddr,
			   NIX_AF_STATUS, BIT_ULL(10), false);
	if (err) {
		dev_err(rvu->dev, "NIX X2P bus calibration failed\n");
		return err;
	}

	status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
	/* Check if CGX devices are ready */
	for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
		if (status & (BIT_ULL(16 + idx)))
			continue;
		dev_err(rvu->dev,
			"CGX%d didn't respond to NIX X2P calibration\n", idx);
		err = -EBUSY;
	}

	/* Check if LBK is ready */
	if (!(status & BIT_ULL(19))) {
		dev_err(rvu->dev,
			"LBK didn't respond to NIX X2P calibration\n");
		err = -EBUSY;
	}

	/* Clear 'calibrate_x2p' bit */
	rvu_write64(rvu, blkaddr, NIX_AF_CFG,
		    rvu_read64(rvu, blkaddr, NIX_AF_CFG) & ~BIT_ULL(9));
	if (err || (status & 0x3FFULL))
		dev_err(rvu->dev,
			"NIX X2P calibration failed, status 0x%llx\n", status);
	if (err)
		return err;
	return 0;
}

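/* Initialize the NIX admin queue: set AQ endianness, keep the NDC cache
 * in use and allocate instruction/result queue memory.
 */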
static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
{
	u64 cfg;
	int err;

	/* Set admin queue endianness */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
	cfg |= BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
	cfg &= ~BIT_ULL(1);
	rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif

	/* Do not bypass NDC cache */
	cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
	cfg &= ~0x3FFEULL;
	rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);

	/* The result structure can be followed by RQ/SQ/CQ context at
	 * RES + 128 bytes and a write mask at RES + 256 bytes, depending on
	 * the operation type. Alloc sufficient result memory for all operations.
	 */
	err = rvu_aq_alloc(rvu, &block->aq,
			   Q_COUNT(AQ_SIZE), sizeof(struct nix_aq_inst_s),
			   ALIGN(sizeof(struct nix_aq_res_s), 128) + 256);
	if (err)
		return err;

	rvu_write64(rvu, block->addr, NIX_AF_AQ_CFG, AQ_SIZE);
	rvu_write64(rvu, block->addr,
		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
	return 0;
}

int rvu_nix_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	int blkaddr, err;
	u64 cfg;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return 0;
	block = &hw->block[blkaddr];

	/* Calibrate X2P bus to check if CGX/LBK links are fine */
	err = nix_calibrate_x2p(rvu, blkaddr);
	if (err)
		return err;

	/* Set num of links of each type */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
	hw->cgx = (cfg >> 12) & 0xF;
	hw->lmac_per_cgx = (cfg >> 8) & 0xF;
	hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
	hw->lbk_links = 1;
	hw->sdp_links = 1;

	/* Initialize admin queue */
	err = nix_aq_init(rvu, block);
	if (err)
		return err;

	/* Restore CINT timer delay to HW reset values */
	rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);

	/* Configure segmentation offload formats */
	nix_setup_lso(rvu, blkaddr);

	if (blkaddr == BLKADDR_NIX0) {
		hw->nix0 = devm_kzalloc(rvu->dev,
					sizeof(struct nix_hw), GFP_KERNEL);
		if (!hw->nix0)
			return -ENOMEM;

		err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
		if (err)
			return err;
	}
	return 0;
}

void rvu_nix_freemem(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	struct rvu_block *block;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;
	int blkaddr, lvl;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	block = &hw->block[blkaddr];
	rvu_aq_free(rvu, block->aq);

	if (blkaddr == BLKADDR_NIX0) {
		nix_hw = get_nix_hw(rvu->hw, blkaddr);
		if (!nix_hw)
			return;

		for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
			txsch = &nix_hw->txsch[lvl];
			kfree(txsch->schq.bmap);
		}
	}
}