11ac5a404SSelvin Xavier /*
21ac5a404SSelvin Xavier  * Broadcom NetXtreme-E RoCE driver.
31ac5a404SSelvin Xavier  *
41ac5a404SSelvin Xavier  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
51ac5a404SSelvin Xavier  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
61ac5a404SSelvin Xavier  *
71ac5a404SSelvin Xavier  * This software is available to you under a choice of one of two
81ac5a404SSelvin Xavier  * licenses.  You may choose to be licensed under the terms of the GNU
91ac5a404SSelvin Xavier  * General Public License (GPL) Version 2, available from the file
101ac5a404SSelvin Xavier  * COPYING in the main directory of this source tree, or the
111ac5a404SSelvin Xavier  * BSD license below:
121ac5a404SSelvin Xavier  *
131ac5a404SSelvin Xavier  * Redistribution and use in source and binary forms, with or without
141ac5a404SSelvin Xavier  * modification, are permitted provided that the following conditions
151ac5a404SSelvin Xavier  * are met:
161ac5a404SSelvin Xavier  *
171ac5a404SSelvin Xavier  * 1. Redistributions of source code must retain the above copyright
181ac5a404SSelvin Xavier  *    notice, this list of conditions and the following disclaimer.
191ac5a404SSelvin Xavier  * 2. Redistributions in binary form must reproduce the above copyright
201ac5a404SSelvin Xavier  *    notice, this list of conditions and the following disclaimer in
211ac5a404SSelvin Xavier  *    the documentation and/or other materials provided with the
221ac5a404SSelvin Xavier  *    distribution.
231ac5a404SSelvin Xavier  *
241ac5a404SSelvin Xavier  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
251ac5a404SSelvin Xavier  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
261ac5a404SSelvin Xavier  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
271ac5a404SSelvin Xavier  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
281ac5a404SSelvin Xavier  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
291ac5a404SSelvin Xavier  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
301ac5a404SSelvin Xavier  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
311ac5a404SSelvin Xavier  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
321ac5a404SSelvin Xavier  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
331ac5a404SSelvin Xavier  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
341ac5a404SSelvin Xavier  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
351ac5a404SSelvin Xavier  *
361ac5a404SSelvin Xavier  * Description: QPLib resource manager
371ac5a404SSelvin Xavier  */
381ac5a404SSelvin Xavier 
3908920b8fSJoe Perches #define dev_fmt(fmt) "QPLIB: " fmt
4008920b8fSJoe Perches 
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/overflow.h>
#include <linux/vmalloc.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"
521ac5a404SSelvin Xavier 
531ac5a404SSelvin Xavier static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
541ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats);
551ac5a404SSelvin Xavier static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
561ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats);
571ac5a404SSelvin Xavier 
581ac5a404SSelvin Xavier /* PBL */
590c4dcd60SDevesh Sharma static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
601ac5a404SSelvin Xavier 		       bool is_umem)
611ac5a404SSelvin Xavier {
620c4dcd60SDevesh Sharma 	struct pci_dev *pdev = res->pdev;
631ac5a404SSelvin Xavier 	int i;
641ac5a404SSelvin Xavier 
651ac5a404SSelvin Xavier 	if (!is_umem) {
661ac5a404SSelvin Xavier 		for (i = 0; i < pbl->pg_count; i++) {
671ac5a404SSelvin Xavier 			if (pbl->pg_arr[i])
681ac5a404SSelvin Xavier 				dma_free_coherent(&pdev->dev, pbl->pg_size,
691ac5a404SSelvin Xavier 						  (void *)((unsigned long)
701ac5a404SSelvin Xavier 						   pbl->pg_arr[i] &
711ac5a404SSelvin Xavier 						  PAGE_MASK),
721ac5a404SSelvin Xavier 						  pbl->pg_map_arr[i]);
731ac5a404SSelvin Xavier 			else
741ac5a404SSelvin Xavier 				dev_warn(&pdev->dev,
7508920b8fSJoe Perches 					 "PBL free pg_arr[%d] empty?!\n", i);
761ac5a404SSelvin Xavier 			pbl->pg_arr[i] = NULL;
771ac5a404SSelvin Xavier 		}
781ac5a404SSelvin Xavier 	}
790c4dcd60SDevesh Sharma 	vfree(pbl->pg_arr);
801ac5a404SSelvin Xavier 	pbl->pg_arr = NULL;
810c4dcd60SDevesh Sharma 	vfree(pbl->pg_map_arr);
821ac5a404SSelvin Xavier 	pbl->pg_map_arr = NULL;
831ac5a404SSelvin Xavier 	pbl->pg_count = 0;
841ac5a404SSelvin Xavier 	pbl->pg_size = 0;
851ac5a404SSelvin Xavier }
861ac5a404SSelvin Xavier 
870c4dcd60SDevesh Sharma static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
880c4dcd60SDevesh Sharma 					   struct bnxt_qplib_sg_info *sginfo)
891ac5a404SSelvin Xavier {
900c4dcd60SDevesh Sharma 	struct scatterlist *sghead = sginfo->sghead;
91161ebe24SShiraz, Saleem 	struct sg_dma_page_iter sg_iter;
920c4dcd60SDevesh Sharma 	int i = 0;
930c4dcd60SDevesh Sharma 
940c4dcd60SDevesh Sharma 	for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
950c4dcd60SDevesh Sharma 		pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
960c4dcd60SDevesh Sharma 		pbl->pg_arr[i] = NULL;
970c4dcd60SDevesh Sharma 		pbl->pg_count++;
980c4dcd60SDevesh Sharma 		i++;
990c4dcd60SDevesh Sharma 	}
1000c4dcd60SDevesh Sharma }
1010c4dcd60SDevesh Sharma 
1020c4dcd60SDevesh Sharma static int __alloc_pbl(struct bnxt_qplib_res *res,
1030c4dcd60SDevesh Sharma 		       struct bnxt_qplib_pbl *pbl,
1040c4dcd60SDevesh Sharma 		       struct bnxt_qplib_sg_info *sginfo)
1050c4dcd60SDevesh Sharma {
1060c4dcd60SDevesh Sharma 	struct pci_dev *pdev = res->pdev;
1070c4dcd60SDevesh Sharma 	struct scatterlist *sghead;
1081ac5a404SSelvin Xavier 	bool is_umem = false;
1096be2067dSYueHaibing 	u32 pages;
1101ac5a404SSelvin Xavier 	int i;
1111ac5a404SSelvin Xavier 
1120c4dcd60SDevesh Sharma 	if (sginfo->nopte)
1130c4dcd60SDevesh Sharma 		return 0;
1140c4dcd60SDevesh Sharma 	pages = sginfo->npages;
1150c4dcd60SDevesh Sharma 	sghead = sginfo->sghead;
1161ac5a404SSelvin Xavier 	/* page ptr arrays */
1170c4dcd60SDevesh Sharma 	pbl->pg_arr = vmalloc(pages * sizeof(void *));
1181ac5a404SSelvin Xavier 	if (!pbl->pg_arr)
1191ac5a404SSelvin Xavier 		return -ENOMEM;
1201ac5a404SSelvin Xavier 
1210c4dcd60SDevesh Sharma 	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
1221ac5a404SSelvin Xavier 	if (!pbl->pg_map_arr) {
1230c4dcd60SDevesh Sharma 		vfree(pbl->pg_arr);
1241ac5a404SSelvin Xavier 		pbl->pg_arr = NULL;
1251ac5a404SSelvin Xavier 		return -ENOMEM;
1261ac5a404SSelvin Xavier 	}
1271ac5a404SSelvin Xavier 	pbl->pg_count = 0;
1280c4dcd60SDevesh Sharma 	pbl->pg_size = sginfo->pgsize;
1291ac5a404SSelvin Xavier 
1301ac5a404SSelvin Xavier 	if (!sghead) {
1311ac5a404SSelvin Xavier 		for (i = 0; i < pages; i++) {
132750afb08SLuis Chamberlain 			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1331ac5a404SSelvin Xavier 							    pbl->pg_size,
1341ac5a404SSelvin Xavier 							    &pbl->pg_map_arr[i],
1351ac5a404SSelvin Xavier 							    GFP_KERNEL);
1361ac5a404SSelvin Xavier 			if (!pbl->pg_arr[i])
1371ac5a404SSelvin Xavier 				goto fail;
1381ac5a404SSelvin Xavier 			pbl->pg_count++;
1391ac5a404SSelvin Xavier 		}
1401ac5a404SSelvin Xavier 	} else {
1411ac5a404SSelvin Xavier 		is_umem = true;
1420c4dcd60SDevesh Sharma 		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
1431ac5a404SSelvin Xavier 	}
1441ac5a404SSelvin Xavier 
1451ac5a404SSelvin Xavier 	return 0;
1461ac5a404SSelvin Xavier fail:
1470c4dcd60SDevesh Sharma 	__free_pbl(res, pbl, is_umem);
1481ac5a404SSelvin Xavier 	return -ENOMEM;
1491ac5a404SSelvin Xavier }
1501ac5a404SSelvin Xavier 
1511ac5a404SSelvin Xavier /* HWQ */
1520c4dcd60SDevesh Sharma void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
1530c4dcd60SDevesh Sharma 			 struct bnxt_qplib_hwq *hwq)
1541ac5a404SSelvin Xavier {
1551ac5a404SSelvin Xavier 	int i;
1561ac5a404SSelvin Xavier 
1571ac5a404SSelvin Xavier 	if (!hwq->max_elements)
1581ac5a404SSelvin Xavier 		return;
1591ac5a404SSelvin Xavier 	if (hwq->level >= PBL_LVL_MAX)
1601ac5a404SSelvin Xavier 		return;
1611ac5a404SSelvin Xavier 
1621ac5a404SSelvin Xavier 	for (i = 0; i < hwq->level + 1; i++) {
1631ac5a404SSelvin Xavier 		if (i == hwq->level)
1640c4dcd60SDevesh Sharma 			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
1651ac5a404SSelvin Xavier 		else
1660c4dcd60SDevesh Sharma 			__free_pbl(res, &hwq->pbl[i], false);
1671ac5a404SSelvin Xavier 	}
1681ac5a404SSelvin Xavier 
1691ac5a404SSelvin Xavier 	hwq->level = PBL_LVL_MAX;
1701ac5a404SSelvin Xavier 	hwq->max_elements = 0;
1711ac5a404SSelvin Xavier 	hwq->element_size = 0;
1721ac5a404SSelvin Xavier 	hwq->prod = 0;
1731ac5a404SSelvin Xavier 	hwq->cons = 0;
1741ac5a404SSelvin Xavier 	hwq->cp_bit = 0;
1751ac5a404SSelvin Xavier }
1761ac5a404SSelvin Xavier 
1771ac5a404SSelvin Xavier /* All HWQs are power of 2 in size */
1780c4dcd60SDevesh Sharma 
1790c4dcd60SDevesh Sharma int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
1800c4dcd60SDevesh Sharma 			      struct bnxt_qplib_hwq_attr *hwq_attr)
1811ac5a404SSelvin Xavier {
1820c4dcd60SDevesh Sharma 	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
1830c4dcd60SDevesh Sharma 	struct bnxt_qplib_sg_info sginfo = {};
1840c4dcd60SDevesh Sharma 	u32 depth, stride, npbl, npde;
1851ac5a404SSelvin Xavier 	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
1865aa84840SSelvin Xavier 	struct scatterlist *sghead = NULL;
1870c4dcd60SDevesh Sharma 	struct bnxt_qplib_res *res;
1880c4dcd60SDevesh Sharma 	struct pci_dev *pdev;
1890c4dcd60SDevesh Sharma 	int i, rc, lvl;
1901ac5a404SSelvin Xavier 
1910c4dcd60SDevesh Sharma 	res = hwq_attr->res;
1920c4dcd60SDevesh Sharma 	pdev = res->pdev;
1930c4dcd60SDevesh Sharma 	sghead = hwq_attr->sginfo->sghead;
1940c4dcd60SDevesh Sharma 	pg_size = hwq_attr->sginfo->pgsize;
1951ac5a404SSelvin Xavier 	hwq->level = PBL_LVL_MAX;
1961ac5a404SSelvin Xavier 
1970c4dcd60SDevesh Sharma 	depth = roundup_pow_of_two(hwq_attr->depth);
1980c4dcd60SDevesh Sharma 	stride = roundup_pow_of_two(hwq_attr->stride);
1990c4dcd60SDevesh Sharma 	if (hwq_attr->aux_depth) {
2000c4dcd60SDevesh Sharma 		aux_slots = hwq_attr->aux_depth;
2010c4dcd60SDevesh Sharma 		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
2020c4dcd60SDevesh Sharma 		aux_pages = (aux_slots * aux_size) / pg_size;
2030c4dcd60SDevesh Sharma 		if ((aux_slots * aux_size) % pg_size)
2041ac5a404SSelvin Xavier 			aux_pages++;
2051ac5a404SSelvin Xavier 	}
2065aa84840SSelvin Xavier 
2071ac5a404SSelvin Xavier 	if (!sghead) {
2081ac5a404SSelvin Xavier 		hwq->is_user = false;
2090c4dcd60SDevesh Sharma 		npages = (depth * stride) / pg_size + aux_pages;
2100c4dcd60SDevesh Sharma 		if ((depth * stride) % pg_size)
2110c4dcd60SDevesh Sharma 			npages++;
2120c4dcd60SDevesh Sharma 		if (!npages)
2131ac5a404SSelvin Xavier 			return -EINVAL;
2140c4dcd60SDevesh Sharma 		hwq_attr->sginfo->npages = npages;
2151ac5a404SSelvin Xavier 	} else {
2161ac5a404SSelvin Xavier 		hwq->is_user = true;
2170c4dcd60SDevesh Sharma 		npages = hwq_attr->sginfo->npages;
2180c4dcd60SDevesh Sharma 		npages = (npages * PAGE_SIZE) /
2190c4dcd60SDevesh Sharma 			  BIT_ULL(hwq_attr->sginfo->pgshft);
2200c4dcd60SDevesh Sharma 		if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
2210c4dcd60SDevesh Sharma 		     BIT_ULL(hwq_attr->sginfo->pgshft))
2220c4dcd60SDevesh Sharma 			if (!npages)
2230c4dcd60SDevesh Sharma 				npages++;
2241ac5a404SSelvin Xavier 	}
2251ac5a404SSelvin Xavier 
2260c4dcd60SDevesh Sharma 	if (npages == MAX_PBL_LVL_0_PGS) {
2270c4dcd60SDevesh Sharma 		/* This request is Level 0, map PTE */
2280c4dcd60SDevesh Sharma 		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
2291ac5a404SSelvin Xavier 		if (rc)
2301ac5a404SSelvin Xavier 			goto fail;
2311ac5a404SSelvin Xavier 		hwq->level = PBL_LVL_0;
2320c4dcd60SDevesh Sharma 	}
2331ac5a404SSelvin Xavier 
2340c4dcd60SDevesh Sharma 	if (npages > MAX_PBL_LVL_0_PGS) {
2350c4dcd60SDevesh Sharma 		if (npages > MAX_PBL_LVL_1_PGS) {
2360c4dcd60SDevesh Sharma 			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
2370c4dcd60SDevesh Sharma 				    0 : PTU_PTE_VALID;
2381ac5a404SSelvin Xavier 			/* 2 levels of indirection */
2390c4dcd60SDevesh Sharma 			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
2400c4dcd60SDevesh Sharma 			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
2410c4dcd60SDevesh Sharma 				npbl++;
2420c4dcd60SDevesh Sharma 			npde = npbl >> MAX_PDL_LVL_SHIFT;
2430c4dcd60SDevesh Sharma 			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
2440c4dcd60SDevesh Sharma 				npde++;
2450c4dcd60SDevesh Sharma 			/* Alloc PDE pages */
2460c4dcd60SDevesh Sharma 			sginfo.pgsize = npde * pg_size;
2470c4dcd60SDevesh Sharma 			sginfo.npages = 1;
2480c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
2490c4dcd60SDevesh Sharma 
2500c4dcd60SDevesh Sharma 			/* Alloc PBL pages */
2510c4dcd60SDevesh Sharma 			sginfo.npages = npbl;
2520c4dcd60SDevesh Sharma 			sginfo.pgsize = PAGE_SIZE;
2530c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
2541ac5a404SSelvin Xavier 			if (rc)
2551ac5a404SSelvin Xavier 				goto fail;
2560c4dcd60SDevesh Sharma 			/* Fill PDL with PBL page pointers */
2571ac5a404SSelvin Xavier 			dst_virt_ptr =
2581ac5a404SSelvin Xavier 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
2591ac5a404SSelvin Xavier 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
2600c4dcd60SDevesh Sharma 			if (hwq_attr->type == HWQ_TYPE_MR) {
2610c4dcd60SDevesh Sharma 			/* For MR it is expected that we supply only 1 contigous
2620c4dcd60SDevesh Sharma 			 * page i.e only 1 entry in the PDL that will contain
2630c4dcd60SDevesh Sharma 			 * all the PBLs for the user supplied memory region
2640c4dcd60SDevesh Sharma 			 */
2650c4dcd60SDevesh Sharma 				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
2660c4dcd60SDevesh Sharma 				     i++)
2670c4dcd60SDevesh Sharma 					dst_virt_ptr[0][i] = src_phys_ptr[i] |
2680c4dcd60SDevesh Sharma 						flag;
2690c4dcd60SDevesh Sharma 			} else {
2700c4dcd60SDevesh Sharma 				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
2710c4dcd60SDevesh Sharma 				     i++)
2721ac5a404SSelvin Xavier 					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
2730c4dcd60SDevesh Sharma 						src_phys_ptr[i] |
2740c4dcd60SDevesh Sharma 						PTU_PDE_VALID;
2750c4dcd60SDevesh Sharma 			}
2760c4dcd60SDevesh Sharma 			/* Alloc or init PTEs */
2770c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
2780c4dcd60SDevesh Sharma 					 hwq_attr->sginfo);
2791ac5a404SSelvin Xavier 			if (rc)
2801ac5a404SSelvin Xavier 				goto fail;
2810c4dcd60SDevesh Sharma 			hwq->level = PBL_LVL_2;
2820c4dcd60SDevesh Sharma 			if (hwq_attr->sginfo->nopte)
2830c4dcd60SDevesh Sharma 				goto done;
2840c4dcd60SDevesh Sharma 			/* Fill PBLs with PTE pointers */
2851ac5a404SSelvin Xavier 			dst_virt_ptr =
2861ac5a404SSelvin Xavier 				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
2871ac5a404SSelvin Xavier 			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
2881ac5a404SSelvin Xavier 			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
2891ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
2901ac5a404SSelvin Xavier 					src_phys_ptr[i] | PTU_PTE_VALID;
2911ac5a404SSelvin Xavier 			}
2920c4dcd60SDevesh Sharma 			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
2931ac5a404SSelvin Xavier 				/* Find the last pg of the size */
2941ac5a404SSelvin Xavier 				i = hwq->pbl[PBL_LVL_2].pg_count;
2951ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
2961ac5a404SSelvin Xavier 								  PTU_PTE_LAST;
2971ac5a404SSelvin Xavier 				if (i > 1)
2981ac5a404SSelvin Xavier 					dst_virt_ptr[PTR_PG(i - 2)]
2991ac5a404SSelvin Xavier 						    [PTR_IDX(i - 2)] |=
3001ac5a404SSelvin Xavier 						    PTU_PTE_NEXT_TO_LAST;
3011ac5a404SSelvin Xavier 			}
3020c4dcd60SDevesh Sharma 		} else { /* pages < 512 npbl = 1, npde = 0 */
3030c4dcd60SDevesh Sharma 			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
3040c4dcd60SDevesh Sharma 				    0 : PTU_PTE_VALID;
3051ac5a404SSelvin Xavier 
3061ac5a404SSelvin Xavier 			/* 1 level of indirection */
3070c4dcd60SDevesh Sharma 			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
3080c4dcd60SDevesh Sharma 			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
3090c4dcd60SDevesh Sharma 				npbl++;
3100c4dcd60SDevesh Sharma 			sginfo.npages = npbl;
3110c4dcd60SDevesh Sharma 			sginfo.pgsize = PAGE_SIZE;
3120c4dcd60SDevesh Sharma 			/* Alloc PBL page */
3130c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
3141ac5a404SSelvin Xavier 			if (rc)
3151ac5a404SSelvin Xavier 				goto fail;
3160c4dcd60SDevesh Sharma 			/* Alloc or init  PTEs */
3170c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
3180c4dcd60SDevesh Sharma 					 hwq_attr->sginfo);
3190c4dcd60SDevesh Sharma 			if (rc)
3200c4dcd60SDevesh Sharma 				goto fail;
3210c4dcd60SDevesh Sharma 			hwq->level = PBL_LVL_1;
3220c4dcd60SDevesh Sharma 			if (hwq_attr->sginfo->nopte)
3230c4dcd60SDevesh Sharma 				goto done;
3240c4dcd60SDevesh Sharma 			/* Fill PBL with PTE pointers */
3251ac5a404SSelvin Xavier 			dst_virt_ptr =
3261ac5a404SSelvin Xavier 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
3271ac5a404SSelvin Xavier 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
3280c4dcd60SDevesh Sharma 			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
3291ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
3301ac5a404SSelvin Xavier 					src_phys_ptr[i] | flag;
3310c4dcd60SDevesh Sharma 			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
3321ac5a404SSelvin Xavier 				/* Find the last pg of the size */
3331ac5a404SSelvin Xavier 				i = hwq->pbl[PBL_LVL_1].pg_count;
3341ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
3351ac5a404SSelvin Xavier 								  PTU_PTE_LAST;
3361ac5a404SSelvin Xavier 				if (i > 1)
3371ac5a404SSelvin Xavier 					dst_virt_ptr[PTR_PG(i - 2)]
3381ac5a404SSelvin Xavier 						    [PTR_IDX(i - 2)] |=
3391ac5a404SSelvin Xavier 						    PTU_PTE_NEXT_TO_LAST;
3401ac5a404SSelvin Xavier 			}
3411ac5a404SSelvin Xavier 		}
3421ac5a404SSelvin Xavier 	}
3430c4dcd60SDevesh Sharma done:
3441ac5a404SSelvin Xavier 	hwq->prod = 0;
3451ac5a404SSelvin Xavier 	hwq->cons = 0;
3460c4dcd60SDevesh Sharma 	hwq->pdev = pdev;
3470c4dcd60SDevesh Sharma 	hwq->depth = hwq_attr->depth;
3480c4dcd60SDevesh Sharma 	hwq->max_elements = depth;
3490c4dcd60SDevesh Sharma 	hwq->element_size = stride;
350fddcbbb0SDevesh Sharma 	hwq->qe_ppg = pg_size / stride;
3511ac5a404SSelvin Xavier 	/* For direct access to the elements */
3520c4dcd60SDevesh Sharma 	lvl = hwq->level;
3530c4dcd60SDevesh Sharma 	if (hwq_attr->sginfo->nopte && hwq->level)
3540c4dcd60SDevesh Sharma 		lvl = hwq->level - 1;
3550c4dcd60SDevesh Sharma 	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
3560c4dcd60SDevesh Sharma 	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
3570c4dcd60SDevesh Sharma 	spin_lock_init(&hwq->lock);
3581ac5a404SSelvin Xavier 
3591ac5a404SSelvin Xavier 	return 0;
3601ac5a404SSelvin Xavier fail:
3610c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, hwq);
3621ac5a404SSelvin Xavier 	return -ENOMEM;
3631ac5a404SSelvin Xavier }
3641ac5a404SSelvin Xavier 
3651ac5a404SSelvin Xavier /* Context Tables */
3660c4dcd60SDevesh Sharma void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
3671ac5a404SSelvin Xavier 			 struct bnxt_qplib_ctx *ctx)
3681ac5a404SSelvin Xavier {
3691ac5a404SSelvin Xavier 	int i;
3701ac5a404SSelvin Xavier 
3710c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
3720c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
3730c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
3740c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
3750c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
3761ac5a404SSelvin Xavier 	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
3770c4dcd60SDevesh Sharma 		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
3780c4dcd60SDevesh Sharma 	/* restore original pde level before destroy */
3790c4dcd60SDevesh Sharma 	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
3800c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
3810c4dcd60SDevesh Sharma 	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
3820c4dcd60SDevesh Sharma }
3830c4dcd60SDevesh Sharma 
3840c4dcd60SDevesh Sharma static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
3850c4dcd60SDevesh Sharma 				      struct bnxt_qplib_ctx *ctx)
3860c4dcd60SDevesh Sharma {
3870c4dcd60SDevesh Sharma 	struct bnxt_qplib_hwq_attr hwq_attr = {};
3880c4dcd60SDevesh Sharma 	struct bnxt_qplib_sg_info sginfo = {};
3890c4dcd60SDevesh Sharma 	struct bnxt_qplib_tqm_ctx *tqmctx;
3900c4dcd60SDevesh Sharma 	int rc = 0;
3910c4dcd60SDevesh Sharma 	int i;
3920c4dcd60SDevesh Sharma 
3930c4dcd60SDevesh Sharma 	tqmctx = &ctx->tqm_ctx;
3940c4dcd60SDevesh Sharma 
3950c4dcd60SDevesh Sharma 	sginfo.pgsize = PAGE_SIZE;
3960c4dcd60SDevesh Sharma 	sginfo.pgshft = PAGE_SHIFT;
3970c4dcd60SDevesh Sharma 	hwq_attr.sginfo = &sginfo;
3980c4dcd60SDevesh Sharma 	hwq_attr.res = res;
3990c4dcd60SDevesh Sharma 	hwq_attr.type = HWQ_TYPE_CTX;
4000c4dcd60SDevesh Sharma 	hwq_attr.depth = 512;
4010c4dcd60SDevesh Sharma 	hwq_attr.stride = sizeof(u64);
4020c4dcd60SDevesh Sharma 	/* Alloc pdl buffer */
4030c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
4040c4dcd60SDevesh Sharma 	if (rc)
4050c4dcd60SDevesh Sharma 		goto out;
4060c4dcd60SDevesh Sharma 	/* Save original pdl level */
4070c4dcd60SDevesh Sharma 	tqmctx->pde_level = tqmctx->pde.level;
4080c4dcd60SDevesh Sharma 
4090c4dcd60SDevesh Sharma 	hwq_attr.stride = 1;
4100c4dcd60SDevesh Sharma 	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
4110c4dcd60SDevesh Sharma 		if (!tqmctx->qcount[i])
4120c4dcd60SDevesh Sharma 			continue;
4130c4dcd60SDevesh Sharma 		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
4140c4dcd60SDevesh Sharma 		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
4150c4dcd60SDevesh Sharma 		if (rc)
4160c4dcd60SDevesh Sharma 			goto out;
4170c4dcd60SDevesh Sharma 	}
4180c4dcd60SDevesh Sharma out:
4190c4dcd60SDevesh Sharma 	return rc;
4200c4dcd60SDevesh Sharma }
4210c4dcd60SDevesh Sharma 
/* Program the TQM page directory (PDE) with the page tables of each
 * per-type TQM ring.  Ring i owns a fixed window of PDE slots starting
 * at entry j = i * MAX_TQM_ALLOC_BLK_SIZE.  Finally pde.level is
 * rewritten to reflect how deep the programmed tables are (the original
 * level is preserved in pde_level for teardown).
 */
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;	/* first ring index with a non-empty table */
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			/* Two-level ring: point this ring's PDE window at
			 * every PBL_LVL_1 page of the ring.
			 */
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			/* Zero/one-level ring: a single root page entry */
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}
4640c4dcd60SDevesh Sharma 
4650c4dcd60SDevesh Sharma static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
4660c4dcd60SDevesh Sharma 				      struct bnxt_qplib_ctx *ctx)
4670c4dcd60SDevesh Sharma {
4680c4dcd60SDevesh Sharma 	int rc = 0;
4690c4dcd60SDevesh Sharma 
4700c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
4710c4dcd60SDevesh Sharma 	if (rc)
4720c4dcd60SDevesh Sharma 		goto fail;
4730c4dcd60SDevesh Sharma 
4740c4dcd60SDevesh Sharma 	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
4750c4dcd60SDevesh Sharma fail:
4760c4dcd60SDevesh Sharma 	return rc;
4771ac5a404SSelvin Xavier }
4781ac5a404SSelvin Xavier 
4791ac5a404SSelvin Xavier /*
4801ac5a404SSelvin Xavier  * Routine: bnxt_qplib_alloc_ctx
4811ac5a404SSelvin Xavier  * Description:
4821ac5a404SSelvin Xavier  *     Context tables are memories which are used by the chip fw.
4831ac5a404SSelvin Xavier  *     The 6 tables defined are:
4841ac5a404SSelvin Xavier  *             QPC ctx - holds QP states
4851ac5a404SSelvin Xavier  *             MRW ctx - holds memory region and window
4861ac5a404SSelvin Xavier  *             SRQ ctx - holds shared RQ states
4871ac5a404SSelvin Xavier  *             CQ ctx - holds completion queue states
4881ac5a404SSelvin Xavier  *             TQM ctx - holds Tx Queue Manager context
4891ac5a404SSelvin Xavier  *             TIM ctx - holds timer context
4901ac5a404SSelvin Xavier  *     Depending on the size of the tbl requested, either a 1 Page Buffer List
4911ac5a404SSelvin Xavier  *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
4921ac5a404SSelvin Xavier  *     instead.
4931ac5a404SSelvin Xavier  *     Table might be employed as follows:
4941ac5a404SSelvin Xavier  *             For 0      < ctx size <= 1 PAGE, 0 level of ind is used
4951ac5a404SSelvin Xavier  *             For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
4961ac5a404SSelvin Xavier  *             For 512    < ctx size <= MAX, 2 levels of ind is used
4971ac5a404SSelvin Xavier  * Returns:
4981ac5a404SSelvin Xavier  *     0 if success, else -ERRORS
4991ac5a404SSelvin Xavier  */
5000c4dcd60SDevesh Sharma int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
5011ac5a404SSelvin Xavier 			 struct bnxt_qplib_ctx *ctx,
502e0387e1dSDevesh Sharma 			 bool virt_fn, bool is_p5)
5031ac5a404SSelvin Xavier {
5040c4dcd60SDevesh Sharma 	struct bnxt_qplib_hwq_attr hwq_attr = {};
5050c4dcd60SDevesh Sharma 	struct bnxt_qplib_sg_info sginfo = {};
5060c4dcd60SDevesh Sharma 	int rc = 0;
5071ac5a404SSelvin Xavier 
508e0387e1dSDevesh Sharma 	if (virt_fn || is_p5)
5091ac5a404SSelvin Xavier 		goto stats_alloc;
5101ac5a404SSelvin Xavier 
5111ac5a404SSelvin Xavier 	/* QPC Tables */
5120c4dcd60SDevesh Sharma 	sginfo.pgsize = PAGE_SIZE;
5130c4dcd60SDevesh Sharma 	sginfo.pgshft = PAGE_SHIFT;
5140c4dcd60SDevesh Sharma 	hwq_attr.sginfo = &sginfo;
5150c4dcd60SDevesh Sharma 
5160c4dcd60SDevesh Sharma 	hwq_attr.res = res;
5170c4dcd60SDevesh Sharma 	hwq_attr.depth = ctx->qpc_count;
5180c4dcd60SDevesh Sharma 	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
5190c4dcd60SDevesh Sharma 	hwq_attr.type = HWQ_TYPE_CTX;
5200c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
5211ac5a404SSelvin Xavier 	if (rc)
5221ac5a404SSelvin Xavier 		goto fail;
5231ac5a404SSelvin Xavier 
5241ac5a404SSelvin Xavier 	/* MRW Tables */
5250c4dcd60SDevesh Sharma 	hwq_attr.depth = ctx->mrw_count;
5260c4dcd60SDevesh Sharma 	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
5270c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
5281ac5a404SSelvin Xavier 	if (rc)
5291ac5a404SSelvin Xavier 		goto fail;
5301ac5a404SSelvin Xavier 
5311ac5a404SSelvin Xavier 	/* SRQ Tables */
5320c4dcd60SDevesh Sharma 	hwq_attr.depth = ctx->srqc_count;
5330c4dcd60SDevesh Sharma 	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
5340c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
5351ac5a404SSelvin Xavier 	if (rc)
5361ac5a404SSelvin Xavier 		goto fail;
5371ac5a404SSelvin Xavier 
5381ac5a404SSelvin Xavier 	/* CQ Tables */
5390c4dcd60SDevesh Sharma 	hwq_attr.depth = ctx->cq_count;
5400c4dcd60SDevesh Sharma 	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
5410c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
5421ac5a404SSelvin Xavier 	if (rc)
5431ac5a404SSelvin Xavier 		goto fail;
5441ac5a404SSelvin Xavier 
5451ac5a404SSelvin Xavier 	/* TQM Buffer */
5460c4dcd60SDevesh Sharma 	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
5471ac5a404SSelvin Xavier 	if (rc)
5481ac5a404SSelvin Xavier 		goto fail;
5491ac5a404SSelvin Xavier 	/* TIM Buffer */
5501ac5a404SSelvin Xavier 	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
5510c4dcd60SDevesh Sharma 	hwq_attr.depth = ctx->qpc_count * 16;
5520c4dcd60SDevesh Sharma 	hwq_attr.stride = 1;
5530c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
5541ac5a404SSelvin Xavier 	if (rc)
5551ac5a404SSelvin Xavier 		goto fail;
5561ac5a404SSelvin Xavier stats_alloc:
5571ac5a404SSelvin Xavier 	/* Stats */
5580c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
5591ac5a404SSelvin Xavier 	if (rc)
5601ac5a404SSelvin Xavier 		goto fail;
5611ac5a404SSelvin Xavier 
5621ac5a404SSelvin Xavier 	return 0;
5631ac5a404SSelvin Xavier 
5641ac5a404SSelvin Xavier fail:
5650c4dcd60SDevesh Sharma 	bnxt_qplib_free_ctx(res, ctx);
5661ac5a404SSelvin Xavier 	return rc;
5671ac5a404SSelvin Xavier }
5681ac5a404SSelvin Xavier 
5691ac5a404SSelvin Xavier /* GUID */
5701ac5a404SSelvin Xavier void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
5711ac5a404SSelvin Xavier {
5721ac5a404SSelvin Xavier 	u8 mac[ETH_ALEN];
5731ac5a404SSelvin Xavier 
5741ac5a404SSelvin Xavier 	/* MAC-48 to EUI-64 mapping */
5751ac5a404SSelvin Xavier 	memcpy(mac, dev_addr, ETH_ALEN);
5761ac5a404SSelvin Xavier 	guid[0] = mac[0] ^ 2;
5771ac5a404SSelvin Xavier 	guid[1] = mac[1];
5781ac5a404SSelvin Xavier 	guid[2] = mac[2];
5791ac5a404SSelvin Xavier 	guid[3] = 0xff;
5801ac5a404SSelvin Xavier 	guid[4] = 0xfe;
5811ac5a404SSelvin Xavier 	guid[5] = mac[3];
5821ac5a404SSelvin Xavier 	guid[6] = mac[4];
5831ac5a404SSelvin Xavier 	guid[7] = mac[5];
5841ac5a404SSelvin Xavier }
5851ac5a404SSelvin Xavier 
5861ac5a404SSelvin Xavier static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
5871ac5a404SSelvin Xavier 				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
5881ac5a404SSelvin Xavier {
5891ac5a404SSelvin Xavier 	kfree(sgid_tbl->tbl);
5901ac5a404SSelvin Xavier 	kfree(sgid_tbl->hw_id);
5911ac5a404SSelvin Xavier 	kfree(sgid_tbl->ctx);
5925fac5b1bSKalesh AP 	kfree(sgid_tbl->vlan);
5931ac5a404SSelvin Xavier 	sgid_tbl->tbl = NULL;
5941ac5a404SSelvin Xavier 	sgid_tbl->hw_id = NULL;
5951ac5a404SSelvin Xavier 	sgid_tbl->ctx = NULL;
5965fac5b1bSKalesh AP 	sgid_tbl->vlan = NULL;
5971ac5a404SSelvin Xavier 	sgid_tbl->max = 0;
5981ac5a404SSelvin Xavier 	sgid_tbl->active = 0;
5991ac5a404SSelvin Xavier }
6001ac5a404SSelvin Xavier 
6011ac5a404SSelvin Xavier static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
6021ac5a404SSelvin Xavier 				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
6031ac5a404SSelvin Xavier 				     u16 max)
6041ac5a404SSelvin Xavier {
605c56b593dSSelvin Xavier 	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
6061ac5a404SSelvin Xavier 	if (!sgid_tbl->tbl)
6071ac5a404SSelvin Xavier 		return -ENOMEM;
6081ac5a404SSelvin Xavier 
6091ac5a404SSelvin Xavier 	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
6101ac5a404SSelvin Xavier 	if (!sgid_tbl->hw_id)
6111ac5a404SSelvin Xavier 		goto out_free1;
6121ac5a404SSelvin Xavier 
6131ac5a404SSelvin Xavier 	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
6141ac5a404SSelvin Xavier 	if (!sgid_tbl->ctx)
6151ac5a404SSelvin Xavier 		goto out_free2;
6161ac5a404SSelvin Xavier 
6175fac5b1bSKalesh AP 	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
6185fac5b1bSKalesh AP 	if (!sgid_tbl->vlan)
6195fac5b1bSKalesh AP 		goto out_free3;
6205fac5b1bSKalesh AP 
6211ac5a404SSelvin Xavier 	sgid_tbl->max = max;
6221ac5a404SSelvin Xavier 	return 0;
6235fac5b1bSKalesh AP out_free3:
6245fac5b1bSKalesh AP 	kfree(sgid_tbl->ctx);
6255fac5b1bSKalesh AP 	sgid_tbl->ctx = NULL;
6261ac5a404SSelvin Xavier out_free2:
6271ac5a404SSelvin Xavier 	kfree(sgid_tbl->hw_id);
6281ac5a404SSelvin Xavier 	sgid_tbl->hw_id = NULL;
6291ac5a404SSelvin Xavier out_free1:
6301ac5a404SSelvin Xavier 	kfree(sgid_tbl->tbl);
6311ac5a404SSelvin Xavier 	sgid_tbl->tbl = NULL;
6321ac5a404SSelvin Xavier 	return -ENOMEM;
6331ac5a404SSelvin Xavier };
6341ac5a404SSelvin Xavier 
6351ac5a404SSelvin Xavier static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
6361ac5a404SSelvin Xavier 					struct bnxt_qplib_sgid_tbl *sgid_tbl)
6371ac5a404SSelvin Xavier {
6381ac5a404SSelvin Xavier 	int i;
6391ac5a404SSelvin Xavier 
6401ac5a404SSelvin Xavier 	for (i = 0; i < sgid_tbl->max; i++) {
6411ac5a404SSelvin Xavier 		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
6421ac5a404SSelvin Xavier 			   sizeof(bnxt_qplib_gid_zero)))
643c56b593dSSelvin Xavier 			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
644c56b593dSSelvin Xavier 					    sgid_tbl->tbl[i].vlan_id, true);
6451ac5a404SSelvin Xavier 	}
646c56b593dSSelvin Xavier 	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
6471ac5a404SSelvin Xavier 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
6485fac5b1bSKalesh AP 	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
6491ac5a404SSelvin Xavier 	sgid_tbl->active = 0;
6501ac5a404SSelvin Xavier }
6511ac5a404SSelvin Xavier 
6521ac5a404SSelvin Xavier static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
6531ac5a404SSelvin Xavier 				     struct net_device *netdev)
6541ac5a404SSelvin Xavier {
655c56b593dSSelvin Xavier 	u32 i;
656c56b593dSSelvin Xavier 
657c56b593dSSelvin Xavier 	for (i = 0; i < sgid_tbl->max; i++)
658c56b593dSSelvin Xavier 		sgid_tbl->tbl[i].vlan_id = 0xffff;
659c56b593dSSelvin Xavier 
6601ac5a404SSelvin Xavier 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
6611ac5a404SSelvin Xavier }
6621ac5a404SSelvin Xavier 
6631ac5a404SSelvin Xavier static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
6641ac5a404SSelvin Xavier 				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
6651ac5a404SSelvin Xavier {
6661ac5a404SSelvin Xavier 	if (!pkey_tbl->tbl)
66708920b8fSJoe Perches 		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
6681ac5a404SSelvin Xavier 	else
6691ac5a404SSelvin Xavier 		kfree(pkey_tbl->tbl);
6701ac5a404SSelvin Xavier 
6711ac5a404SSelvin Xavier 	pkey_tbl->tbl = NULL;
6721ac5a404SSelvin Xavier 	pkey_tbl->max = 0;
6731ac5a404SSelvin Xavier 	pkey_tbl->active = 0;
6741ac5a404SSelvin Xavier }
6751ac5a404SSelvin Xavier 
6761ac5a404SSelvin Xavier static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
6771ac5a404SSelvin Xavier 				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
6781ac5a404SSelvin Xavier 				     u16 max)
6791ac5a404SSelvin Xavier {
6801ac5a404SSelvin Xavier 	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
6811ac5a404SSelvin Xavier 	if (!pkey_tbl->tbl)
6821ac5a404SSelvin Xavier 		return -ENOMEM;
6831ac5a404SSelvin Xavier 
6841ac5a404SSelvin Xavier 	pkey_tbl->max = max;
6851ac5a404SSelvin Xavier 	return 0;
6861ac5a404SSelvin Xavier };
6871ac5a404SSelvin Xavier 
6881ac5a404SSelvin Xavier /* PDs */
6891ac5a404SSelvin Xavier int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
6901ac5a404SSelvin Xavier {
6911ac5a404SSelvin Xavier 	u32 bit_num;
6921ac5a404SSelvin Xavier 
6931ac5a404SSelvin Xavier 	bit_num = find_first_bit(pdt->tbl, pdt->max);
6941ac5a404SSelvin Xavier 	if (bit_num == pdt->max)
6951ac5a404SSelvin Xavier 		return -ENOMEM;
6961ac5a404SSelvin Xavier 
6971ac5a404SSelvin Xavier 	/* Found unused PD */
6981ac5a404SSelvin Xavier 	clear_bit(bit_num, pdt->tbl);
6991ac5a404SSelvin Xavier 	pd->id = bit_num;
7001ac5a404SSelvin Xavier 	return 0;
7011ac5a404SSelvin Xavier }
7021ac5a404SSelvin Xavier 
7031ac5a404SSelvin Xavier int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
7041ac5a404SSelvin Xavier 			  struct bnxt_qplib_pd_tbl *pdt,
7051ac5a404SSelvin Xavier 			  struct bnxt_qplib_pd *pd)
7061ac5a404SSelvin Xavier {
7071ac5a404SSelvin Xavier 	if (test_and_set_bit(pd->id, pdt->tbl)) {
70808920b8fSJoe Perches 		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
7091ac5a404SSelvin Xavier 			 pd->id);
7101ac5a404SSelvin Xavier 		return -EINVAL;
7111ac5a404SSelvin Xavier 	}
7121ac5a404SSelvin Xavier 	pd->id = 0;
7131ac5a404SSelvin Xavier 	return 0;
7141ac5a404SSelvin Xavier }
7151ac5a404SSelvin Xavier 
7161ac5a404SSelvin Xavier static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
7171ac5a404SSelvin Xavier {
7181ac5a404SSelvin Xavier 	kfree(pdt->tbl);
7191ac5a404SSelvin Xavier 	pdt->tbl = NULL;
7201ac5a404SSelvin Xavier 	pdt->max = 0;
7211ac5a404SSelvin Xavier }
7221ac5a404SSelvin Xavier 
7231ac5a404SSelvin Xavier static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
7241ac5a404SSelvin Xavier 				   struct bnxt_qplib_pd_tbl *pdt,
7251ac5a404SSelvin Xavier 				   u32 max)
7261ac5a404SSelvin Xavier {
7271ac5a404SSelvin Xavier 	u32 bytes;
7281ac5a404SSelvin Xavier 
7291ac5a404SSelvin Xavier 	bytes = max >> 3;
7301ac5a404SSelvin Xavier 	if (!bytes)
7311ac5a404SSelvin Xavier 		bytes = 1;
7321ac5a404SSelvin Xavier 	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
7331ac5a404SSelvin Xavier 	if (!pdt->tbl)
7341ac5a404SSelvin Xavier 		return -ENOMEM;
7351ac5a404SSelvin Xavier 
7361ac5a404SSelvin Xavier 	pdt->max = max;
7371ac5a404SSelvin Xavier 	memset((u8 *)pdt->tbl, 0xFF, bytes);
7381ac5a404SSelvin Xavier 
7391ac5a404SSelvin Xavier 	return 0;
7401ac5a404SSelvin Xavier }
7411ac5a404SSelvin Xavier 
7421ac5a404SSelvin Xavier /* DPIs */
7431ac5a404SSelvin Xavier int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
7441ac5a404SSelvin Xavier 			 struct bnxt_qplib_dpi     *dpi,
7451ac5a404SSelvin Xavier 			 void                      *app)
7461ac5a404SSelvin Xavier {
7471ac5a404SSelvin Xavier 	u32 bit_num;
7481ac5a404SSelvin Xavier 
7491ac5a404SSelvin Xavier 	bit_num = find_first_bit(dpit->tbl, dpit->max);
7501ac5a404SSelvin Xavier 	if (bit_num == dpit->max)
7511ac5a404SSelvin Xavier 		return -ENOMEM;
7521ac5a404SSelvin Xavier 
7531ac5a404SSelvin Xavier 	/* Found unused DPI */
7541ac5a404SSelvin Xavier 	clear_bit(bit_num, dpit->tbl);
7551ac5a404SSelvin Xavier 	dpit->app_tbl[bit_num] = app;
7561ac5a404SSelvin Xavier 
7571ac5a404SSelvin Xavier 	dpi->dpi = bit_num;
7581ac5a404SSelvin Xavier 	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
7591ac5a404SSelvin Xavier 	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);
7601ac5a404SSelvin Xavier 
7611ac5a404SSelvin Xavier 	return 0;
7621ac5a404SSelvin Xavier }
7631ac5a404SSelvin Xavier 
7641ac5a404SSelvin Xavier int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
7651ac5a404SSelvin Xavier 			   struct bnxt_qplib_dpi_tbl *dpit,
7661ac5a404SSelvin Xavier 			   struct bnxt_qplib_dpi     *dpi)
7671ac5a404SSelvin Xavier {
7681ac5a404SSelvin Xavier 	if (dpi->dpi >= dpit->max) {
76908920b8fSJoe Perches 		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
7701ac5a404SSelvin Xavier 		return -EINVAL;
7711ac5a404SSelvin Xavier 	}
7721ac5a404SSelvin Xavier 	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
77308920b8fSJoe Perches 		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
7741ac5a404SSelvin Xavier 			 dpi->dpi);
7751ac5a404SSelvin Xavier 		return -EINVAL;
7761ac5a404SSelvin Xavier 	}
7771ac5a404SSelvin Xavier 	if (dpit->app_tbl)
7781ac5a404SSelvin Xavier 		dpit->app_tbl[dpi->dpi] = NULL;
7791ac5a404SSelvin Xavier 	memset(dpi, 0, sizeof(*dpi));
7801ac5a404SSelvin Xavier 
7811ac5a404SSelvin Xavier 	return 0;
7821ac5a404SSelvin Xavier }
7831ac5a404SSelvin Xavier 
7841ac5a404SSelvin Xavier static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
7851ac5a404SSelvin Xavier 				    struct bnxt_qplib_dpi_tbl *dpit)
7861ac5a404SSelvin Xavier {
7871ac5a404SSelvin Xavier 	kfree(dpit->tbl);
7881ac5a404SSelvin Xavier 	kfree(dpit->app_tbl);
7891ac5a404SSelvin Xavier 	if (dpit->dbr_bar_reg_iomem)
7901ac5a404SSelvin Xavier 		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
7911ac5a404SSelvin Xavier 	memset(dpit, 0, sizeof(*dpit));
7921ac5a404SSelvin Xavier }
7931ac5a404SSelvin Xavier 
7941ac5a404SSelvin Xavier static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
7951ac5a404SSelvin Xavier 				    struct bnxt_qplib_dpi_tbl *dpit,
7961ac5a404SSelvin Xavier 				    u32                       dbr_offset)
7971ac5a404SSelvin Xavier {
7981ac5a404SSelvin Xavier 	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
7991ac5a404SSelvin Xavier 	resource_size_t bar_reg_base;
8001ac5a404SSelvin Xavier 	u32 dbr_len, bytes;
8011ac5a404SSelvin Xavier 
8021ac5a404SSelvin Xavier 	if (dpit->dbr_bar_reg_iomem) {
80308920b8fSJoe Perches 		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
80408920b8fSJoe Perches 			dbr_bar_reg);
8051ac5a404SSelvin Xavier 		return -EALREADY;
8061ac5a404SSelvin Xavier 	}
8071ac5a404SSelvin Xavier 
8081ac5a404SSelvin Xavier 	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
8091ac5a404SSelvin Xavier 	if (!bar_reg_base) {
81008920b8fSJoe Perches 		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
81108920b8fSJoe Perches 			dbr_bar_reg);
8121ac5a404SSelvin Xavier 		return -ENOMEM;
8131ac5a404SSelvin Xavier 	}
8141ac5a404SSelvin Xavier 
8151ac5a404SSelvin Xavier 	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
8161ac5a404SSelvin Xavier 	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
81708920b8fSJoe Perches 		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
8181ac5a404SSelvin Xavier 		return -ENOMEM;
8191ac5a404SSelvin Xavier 	}
8201ac5a404SSelvin Xavier 
8214bdc0d67SChristoph Hellwig 	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
8221ac5a404SSelvin Xavier 						  dbr_len);
8231ac5a404SSelvin Xavier 	if (!dpit->dbr_bar_reg_iomem) {
8241ac5a404SSelvin Xavier 		dev_err(&res->pdev->dev,
82508920b8fSJoe Perches 			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
8261ac5a404SSelvin Xavier 		return -ENOMEM;
8271ac5a404SSelvin Xavier 	}
8281ac5a404SSelvin Xavier 
8291ac5a404SSelvin Xavier 	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
8301ac5a404SSelvin Xavier 	dpit->max = dbr_len / PAGE_SIZE;
8311ac5a404SSelvin Xavier 
8321ac5a404SSelvin Xavier 	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
833e5b89843SMarkus Elfring 	if (!dpit->app_tbl)
834e5b89843SMarkus Elfring 		goto unmap_io;
8351ac5a404SSelvin Xavier 
8361ac5a404SSelvin Xavier 	bytes = dpit->max >> 3;
8371ac5a404SSelvin Xavier 	if (!bytes)
8381ac5a404SSelvin Xavier 		bytes = 1;
8391ac5a404SSelvin Xavier 
8401ac5a404SSelvin Xavier 	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
8411ac5a404SSelvin Xavier 	if (!dpit->tbl) {
8421ac5a404SSelvin Xavier 		kfree(dpit->app_tbl);
8431ac5a404SSelvin Xavier 		dpit->app_tbl = NULL;
844e5b89843SMarkus Elfring 		goto unmap_io;
8451ac5a404SSelvin Xavier 	}
8461ac5a404SSelvin Xavier 
8471ac5a404SSelvin Xavier 	memset((u8 *)dpit->tbl, 0xFF, bytes);
8481ac5a404SSelvin Xavier 
8491ac5a404SSelvin Xavier 	return 0;
850e5b89843SMarkus Elfring 
851e5b89843SMarkus Elfring unmap_io:
852e5b89843SMarkus Elfring 	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
853e5b89843SMarkus Elfring 	return -ENOMEM;
8541ac5a404SSelvin Xavier }
8551ac5a404SSelvin Xavier 
8561ac5a404SSelvin Xavier /* PKEYs */
8571ac5a404SSelvin Xavier static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
8581ac5a404SSelvin Xavier {
8591ac5a404SSelvin Xavier 	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
8601ac5a404SSelvin Xavier 	pkey_tbl->active = 0;
8611ac5a404SSelvin Xavier }
8621ac5a404SSelvin Xavier 
8631ac5a404SSelvin Xavier static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
8641ac5a404SSelvin Xavier 				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
8651ac5a404SSelvin Xavier {
8661ac5a404SSelvin Xavier 	u16 pkey = 0xFFFF;
8671ac5a404SSelvin Xavier 
8681ac5a404SSelvin Xavier 	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
8691ac5a404SSelvin Xavier 
8701ac5a404SSelvin Xavier 	/* pkey default = 0xFFFF */
8711ac5a404SSelvin Xavier 	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
8721ac5a404SSelvin Xavier }
8731ac5a404SSelvin Xavier 
8741ac5a404SSelvin Xavier /* Stats */
8751ac5a404SSelvin Xavier static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
8761ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats)
8771ac5a404SSelvin Xavier {
8781ac5a404SSelvin Xavier 	if (stats->dma) {
8791ac5a404SSelvin Xavier 		dma_free_coherent(&pdev->dev, stats->size,
8801ac5a404SSelvin Xavier 				  stats->dma, stats->dma_map);
8811ac5a404SSelvin Xavier 	}
8821ac5a404SSelvin Xavier 	memset(stats, 0, sizeof(*stats));
8831ac5a404SSelvin Xavier 	stats->fw_id = -1;
8841ac5a404SSelvin Xavier }
8851ac5a404SSelvin Xavier 
8861ac5a404SSelvin Xavier static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
8871ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats)
8881ac5a404SSelvin Xavier {
8891ac5a404SSelvin Xavier 	memset(stats, 0, sizeof(*stats));
8901ac5a404SSelvin Xavier 	stats->fw_id = -1;
891e0387e1dSDevesh Sharma 	/* 128 byte aligned context memory is required only for 57500.
892e0387e1dSDevesh Sharma 	 * However making this unconditional, it does not harm previous
893e0387e1dSDevesh Sharma 	 * generation.
894e0387e1dSDevesh Sharma 	 */
895e0387e1dSDevesh Sharma 	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
8961ac5a404SSelvin Xavier 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
8971ac5a404SSelvin Xavier 					&stats->dma_map, GFP_KERNEL);
8981ac5a404SSelvin Xavier 	if (!stats->dma) {
89908920b8fSJoe Perches 		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
9001ac5a404SSelvin Xavier 		return -ENOMEM;
9011ac5a404SSelvin Xavier 	}
9021ac5a404SSelvin Xavier 	return 0;
9031ac5a404SSelvin Xavier }
9041ac5a404SSelvin Xavier 
9051ac5a404SSelvin Xavier void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
9061ac5a404SSelvin Xavier {
9071ac5a404SSelvin Xavier 	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
9081ac5a404SSelvin Xavier 	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
9091ac5a404SSelvin Xavier }
9101ac5a404SSelvin Xavier 
9111ac5a404SSelvin Xavier int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
9121ac5a404SSelvin Xavier {
9131ac5a404SSelvin Xavier 	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
9141ac5a404SSelvin Xavier 	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
9151ac5a404SSelvin Xavier 
9161ac5a404SSelvin Xavier 	return 0;
9171ac5a404SSelvin Xavier }
9181ac5a404SSelvin Xavier 
9191ac5a404SSelvin Xavier void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
9201ac5a404SSelvin Xavier {
9211ac5a404SSelvin Xavier 	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
9221ac5a404SSelvin Xavier 	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
9231ac5a404SSelvin Xavier 	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
9241ac5a404SSelvin Xavier 	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
9251ac5a404SSelvin Xavier }
9261ac5a404SSelvin Xavier 
9271ac5a404SSelvin Xavier int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
9281ac5a404SSelvin Xavier 			 struct net_device *netdev,
9291ac5a404SSelvin Xavier 			 struct bnxt_qplib_dev_attr *dev_attr)
9301ac5a404SSelvin Xavier {
9311ac5a404SSelvin Xavier 	int rc = 0;
9321ac5a404SSelvin Xavier 
9331ac5a404SSelvin Xavier 	res->pdev = pdev;
9341ac5a404SSelvin Xavier 	res->netdev = netdev;
9351ac5a404SSelvin Xavier 
9361ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
9371ac5a404SSelvin Xavier 	if (rc)
9381ac5a404SSelvin Xavier 		goto fail;
9391ac5a404SSelvin Xavier 
9401ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
9411ac5a404SSelvin Xavier 	if (rc)
9421ac5a404SSelvin Xavier 		goto fail;
9431ac5a404SSelvin Xavier 
9441ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
9451ac5a404SSelvin Xavier 	if (rc)
9461ac5a404SSelvin Xavier 		goto fail;
9471ac5a404SSelvin Xavier 
9481ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
9491ac5a404SSelvin Xavier 	if (rc)
9501ac5a404SSelvin Xavier 		goto fail;
9511ac5a404SSelvin Xavier 
9521ac5a404SSelvin Xavier 	return 0;
9531ac5a404SSelvin Xavier fail:
9541ac5a404SSelvin Xavier 	bnxt_qplib_free_res(res);
9551ac5a404SSelvin Xavier 	return rc;
9561ac5a404SSelvin Xavier }
957