11ac5a404SSelvin Xavier /*
21ac5a404SSelvin Xavier  * Broadcom NetXtreme-E RoCE driver.
31ac5a404SSelvin Xavier  *
41ac5a404SSelvin Xavier  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
51ac5a404SSelvin Xavier  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
61ac5a404SSelvin Xavier  *
71ac5a404SSelvin Xavier  * This software is available to you under a choice of one of two
81ac5a404SSelvin Xavier  * licenses.  You may choose to be licensed under the terms of the GNU
91ac5a404SSelvin Xavier  * General Public License (GPL) Version 2, available from the file
101ac5a404SSelvin Xavier  * COPYING in the main directory of this source tree, or the
111ac5a404SSelvin Xavier  * BSD license below:
121ac5a404SSelvin Xavier  *
131ac5a404SSelvin Xavier  * Redistribution and use in source and binary forms, with or without
141ac5a404SSelvin Xavier  * modification, are permitted provided that the following conditions
151ac5a404SSelvin Xavier  * are met:
161ac5a404SSelvin Xavier  *
171ac5a404SSelvin Xavier  * 1. Redistributions of source code must retain the above copyright
181ac5a404SSelvin Xavier  *    notice, this list of conditions and the following disclaimer.
191ac5a404SSelvin Xavier  * 2. Redistributions in binary form must reproduce the above copyright
201ac5a404SSelvin Xavier  *    notice, this list of conditions and the following disclaimer in
211ac5a404SSelvin Xavier  *    the documentation and/or other materials provided with the
221ac5a404SSelvin Xavier  *    distribution.
231ac5a404SSelvin Xavier  *
241ac5a404SSelvin Xavier  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
251ac5a404SSelvin Xavier  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
261ac5a404SSelvin Xavier  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
271ac5a404SSelvin Xavier  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
281ac5a404SSelvin Xavier  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
291ac5a404SSelvin Xavier  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
301ac5a404SSelvin Xavier  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
311ac5a404SSelvin Xavier  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
321ac5a404SSelvin Xavier  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
331ac5a404SSelvin Xavier  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
341ac5a404SSelvin Xavier  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
351ac5a404SSelvin Xavier  *
361ac5a404SSelvin Xavier  * Description: QPLib resource manager
371ac5a404SSelvin Xavier  */
381ac5a404SSelvin Xavier 
3908920b8fSJoe Perches #define dev_fmt(fmt) "QPLIB: " fmt
4008920b8fSJoe Perches 
411ac5a404SSelvin Xavier #include <linux/spinlock.h>
421ac5a404SSelvin Xavier #include <linux/pci.h>
431ac5a404SSelvin Xavier #include <linux/interrupt.h>
441ac5a404SSelvin Xavier #include <linux/inetdevice.h>
451ac5a404SSelvin Xavier #include <linux/dma-mapping.h>
461ac5a404SSelvin Xavier #include <linux/if_vlan.h>
4765a16620SJason Gunthorpe #include <linux/vmalloc.h>
486ef999f5SJason Gunthorpe #include <rdma/ib_verbs.h>
496ef999f5SJason Gunthorpe #include <rdma/ib_umem.h>
506ef999f5SJason Gunthorpe 
511ac5a404SSelvin Xavier #include "roce_hsi.h"
521ac5a404SSelvin Xavier #include "qplib_res.h"
531ac5a404SSelvin Xavier #include "qplib_sp.h"
541ac5a404SSelvin Xavier #include "qplib_rcfw.h"
551ac5a404SSelvin Xavier 
561ac5a404SSelvin Xavier static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
571ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats);
581ac5a404SSelvin Xavier static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
591ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats);
601ac5a404SSelvin Xavier 
611ac5a404SSelvin Xavier /* PBL */
620c4dcd60SDevesh Sharma static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
631ac5a404SSelvin Xavier 		       bool is_umem)
641ac5a404SSelvin Xavier {
650c4dcd60SDevesh Sharma 	struct pci_dev *pdev = res->pdev;
661ac5a404SSelvin Xavier 	int i;
671ac5a404SSelvin Xavier 
681ac5a404SSelvin Xavier 	if (!is_umem) {
691ac5a404SSelvin Xavier 		for (i = 0; i < pbl->pg_count; i++) {
701ac5a404SSelvin Xavier 			if (pbl->pg_arr[i])
711ac5a404SSelvin Xavier 				dma_free_coherent(&pdev->dev, pbl->pg_size,
721ac5a404SSelvin Xavier 						  (void *)((unsigned long)
731ac5a404SSelvin Xavier 						   pbl->pg_arr[i] &
741ac5a404SSelvin Xavier 						  PAGE_MASK),
751ac5a404SSelvin Xavier 						  pbl->pg_map_arr[i]);
761ac5a404SSelvin Xavier 			else
771ac5a404SSelvin Xavier 				dev_warn(&pdev->dev,
7808920b8fSJoe Perches 					 "PBL free pg_arr[%d] empty?!\n", i);
791ac5a404SSelvin Xavier 			pbl->pg_arr[i] = NULL;
801ac5a404SSelvin Xavier 		}
811ac5a404SSelvin Xavier 	}
820c4dcd60SDevesh Sharma 	vfree(pbl->pg_arr);
831ac5a404SSelvin Xavier 	pbl->pg_arr = NULL;
840c4dcd60SDevesh Sharma 	vfree(pbl->pg_map_arr);
851ac5a404SSelvin Xavier 	pbl->pg_map_arr = NULL;
861ac5a404SSelvin Xavier 	pbl->pg_count = 0;
871ac5a404SSelvin Xavier 	pbl->pg_size = 0;
881ac5a404SSelvin Xavier }
891ac5a404SSelvin Xavier 
900c4dcd60SDevesh Sharma static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
910c4dcd60SDevesh Sharma 					   struct bnxt_qplib_sg_info *sginfo)
921ac5a404SSelvin Xavier {
936ef999f5SJason Gunthorpe 	struct ib_block_iter biter;
940c4dcd60SDevesh Sharma 	int i = 0;
950c4dcd60SDevesh Sharma 
966ef999f5SJason Gunthorpe 	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
976ef999f5SJason Gunthorpe 		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
980c4dcd60SDevesh Sharma 		pbl->pg_arr[i] = NULL;
990c4dcd60SDevesh Sharma 		pbl->pg_count++;
1000c4dcd60SDevesh Sharma 		i++;
1010c4dcd60SDevesh Sharma 	}
1020c4dcd60SDevesh Sharma }
1030c4dcd60SDevesh Sharma 
1040c4dcd60SDevesh Sharma static int __alloc_pbl(struct bnxt_qplib_res *res,
1050c4dcd60SDevesh Sharma 		       struct bnxt_qplib_pbl *pbl,
1060c4dcd60SDevesh Sharma 		       struct bnxt_qplib_sg_info *sginfo)
1070c4dcd60SDevesh Sharma {
1080c4dcd60SDevesh Sharma 	struct pci_dev *pdev = res->pdev;
1091ac5a404SSelvin Xavier 	bool is_umem = false;
1106be2067dSYueHaibing 	u32 pages;
1111ac5a404SSelvin Xavier 	int i;
1121ac5a404SSelvin Xavier 
1130c4dcd60SDevesh Sharma 	if (sginfo->nopte)
1140c4dcd60SDevesh Sharma 		return 0;
1156ef999f5SJason Gunthorpe 	if (sginfo->umem)
1166ef999f5SJason Gunthorpe 		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
1176ef999f5SJason Gunthorpe 	else
1180c4dcd60SDevesh Sharma 		pages = sginfo->npages;
1191ac5a404SSelvin Xavier 	/* page ptr arrays */
1200c4dcd60SDevesh Sharma 	pbl->pg_arr = vmalloc(pages * sizeof(void *));
1211ac5a404SSelvin Xavier 	if (!pbl->pg_arr)
1221ac5a404SSelvin Xavier 		return -ENOMEM;
1231ac5a404SSelvin Xavier 
1240c4dcd60SDevesh Sharma 	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
1251ac5a404SSelvin Xavier 	if (!pbl->pg_map_arr) {
1260c4dcd60SDevesh Sharma 		vfree(pbl->pg_arr);
1271ac5a404SSelvin Xavier 		pbl->pg_arr = NULL;
1281ac5a404SSelvin Xavier 		return -ENOMEM;
1291ac5a404SSelvin Xavier 	}
1301ac5a404SSelvin Xavier 	pbl->pg_count = 0;
1310c4dcd60SDevesh Sharma 	pbl->pg_size = sginfo->pgsize;
1321ac5a404SSelvin Xavier 
1336ef999f5SJason Gunthorpe 	if (!sginfo->umem) {
1341ac5a404SSelvin Xavier 		for (i = 0; i < pages; i++) {
135750afb08SLuis Chamberlain 			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1361ac5a404SSelvin Xavier 							    pbl->pg_size,
1371ac5a404SSelvin Xavier 							    &pbl->pg_map_arr[i],
1381ac5a404SSelvin Xavier 							    GFP_KERNEL);
1391ac5a404SSelvin Xavier 			if (!pbl->pg_arr[i])
1401ac5a404SSelvin Xavier 				goto fail;
1411ac5a404SSelvin Xavier 			pbl->pg_count++;
1421ac5a404SSelvin Xavier 		}
1431ac5a404SSelvin Xavier 	} else {
1441ac5a404SSelvin Xavier 		is_umem = true;
1450c4dcd60SDevesh Sharma 		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
1461ac5a404SSelvin Xavier 	}
1471ac5a404SSelvin Xavier 
1481ac5a404SSelvin Xavier 	return 0;
1491ac5a404SSelvin Xavier fail:
1500c4dcd60SDevesh Sharma 	__free_pbl(res, pbl, is_umem);
1511ac5a404SSelvin Xavier 	return -ENOMEM;
1521ac5a404SSelvin Xavier }
1531ac5a404SSelvin Xavier 
1541ac5a404SSelvin Xavier /* HWQ */
1550c4dcd60SDevesh Sharma void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
1560c4dcd60SDevesh Sharma 			 struct bnxt_qplib_hwq *hwq)
1571ac5a404SSelvin Xavier {
1581ac5a404SSelvin Xavier 	int i;
1591ac5a404SSelvin Xavier 
1601ac5a404SSelvin Xavier 	if (!hwq->max_elements)
1611ac5a404SSelvin Xavier 		return;
1621ac5a404SSelvin Xavier 	if (hwq->level >= PBL_LVL_MAX)
1631ac5a404SSelvin Xavier 		return;
1641ac5a404SSelvin Xavier 
1651ac5a404SSelvin Xavier 	for (i = 0; i < hwq->level + 1; i++) {
1661ac5a404SSelvin Xavier 		if (i == hwq->level)
1670c4dcd60SDevesh Sharma 			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
1681ac5a404SSelvin Xavier 		else
1690c4dcd60SDevesh Sharma 			__free_pbl(res, &hwq->pbl[i], false);
1701ac5a404SSelvin Xavier 	}
1711ac5a404SSelvin Xavier 
1721ac5a404SSelvin Xavier 	hwq->level = PBL_LVL_MAX;
1731ac5a404SSelvin Xavier 	hwq->max_elements = 0;
1741ac5a404SSelvin Xavier 	hwq->element_size = 0;
1751ac5a404SSelvin Xavier 	hwq->prod = 0;
1761ac5a404SSelvin Xavier 	hwq->cons = 0;
1771ac5a404SSelvin Xavier 	hwq->cp_bit = 0;
1781ac5a404SSelvin Xavier }
1791ac5a404SSelvin Xavier 
1801ac5a404SSelvin Xavier /* All HWQs are power of 2 in size */
1810c4dcd60SDevesh Sharma 
1820c4dcd60SDevesh Sharma int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
1830c4dcd60SDevesh Sharma 			      struct bnxt_qplib_hwq_attr *hwq_attr)
1841ac5a404SSelvin Xavier {
1850c4dcd60SDevesh Sharma 	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
1860c4dcd60SDevesh Sharma 	struct bnxt_qplib_sg_info sginfo = {};
1870c4dcd60SDevesh Sharma 	u32 depth, stride, npbl, npde;
1881ac5a404SSelvin Xavier 	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
1890c4dcd60SDevesh Sharma 	struct bnxt_qplib_res *res;
1900c4dcd60SDevesh Sharma 	struct pci_dev *pdev;
1910c4dcd60SDevesh Sharma 	int i, rc, lvl;
1921ac5a404SSelvin Xavier 
1930c4dcd60SDevesh Sharma 	res = hwq_attr->res;
1940c4dcd60SDevesh Sharma 	pdev = res->pdev;
1950c4dcd60SDevesh Sharma 	pg_size = hwq_attr->sginfo->pgsize;
1961ac5a404SSelvin Xavier 	hwq->level = PBL_LVL_MAX;
1971ac5a404SSelvin Xavier 
1980c4dcd60SDevesh Sharma 	depth = roundup_pow_of_two(hwq_attr->depth);
1990c4dcd60SDevesh Sharma 	stride = roundup_pow_of_two(hwq_attr->stride);
2000c4dcd60SDevesh Sharma 	if (hwq_attr->aux_depth) {
2010c4dcd60SDevesh Sharma 		aux_slots = hwq_attr->aux_depth;
2020c4dcd60SDevesh Sharma 		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
2030c4dcd60SDevesh Sharma 		aux_pages = (aux_slots * aux_size) / pg_size;
2040c4dcd60SDevesh Sharma 		if ((aux_slots * aux_size) % pg_size)
2051ac5a404SSelvin Xavier 			aux_pages++;
2061ac5a404SSelvin Xavier 	}
2075aa84840SSelvin Xavier 
2086ef999f5SJason Gunthorpe 	if (!hwq_attr->sginfo->umem) {
2091ac5a404SSelvin Xavier 		hwq->is_user = false;
2100c4dcd60SDevesh Sharma 		npages = (depth * stride) / pg_size + aux_pages;
2110c4dcd60SDevesh Sharma 		if ((depth * stride) % pg_size)
2120c4dcd60SDevesh Sharma 			npages++;
2130c4dcd60SDevesh Sharma 		if (!npages)
2141ac5a404SSelvin Xavier 			return -EINVAL;
2150c4dcd60SDevesh Sharma 		hwq_attr->sginfo->npages = npages;
2161ac5a404SSelvin Xavier 	} else {
2176ef999f5SJason Gunthorpe 		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
2186ef999f5SJason Gunthorpe 			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
2196ef999f5SJason Gunthorpe 
2201ac5a404SSelvin Xavier 		hwq->is_user = true;
2216ef999f5SJason Gunthorpe 		npages = sginfo_num_pages;
2220c4dcd60SDevesh Sharma 		npages = (npages * PAGE_SIZE) /
2230c4dcd60SDevesh Sharma 			  BIT_ULL(hwq_attr->sginfo->pgshft);
2246ef999f5SJason Gunthorpe 		if ((sginfo_num_pages * PAGE_SIZE) %
2250c4dcd60SDevesh Sharma 		     BIT_ULL(hwq_attr->sginfo->pgshft))
2260c4dcd60SDevesh Sharma 			if (!npages)
2270c4dcd60SDevesh Sharma 				npages++;
2281ac5a404SSelvin Xavier 	}
2291ac5a404SSelvin Xavier 
2300c4dcd60SDevesh Sharma 	if (npages == MAX_PBL_LVL_0_PGS) {
2310c4dcd60SDevesh Sharma 		/* This request is Level 0, map PTE */
2320c4dcd60SDevesh Sharma 		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
2331ac5a404SSelvin Xavier 		if (rc)
2341ac5a404SSelvin Xavier 			goto fail;
2351ac5a404SSelvin Xavier 		hwq->level = PBL_LVL_0;
2360c4dcd60SDevesh Sharma 	}
2371ac5a404SSelvin Xavier 
2380c4dcd60SDevesh Sharma 	if (npages > MAX_PBL_LVL_0_PGS) {
2390c4dcd60SDevesh Sharma 		if (npages > MAX_PBL_LVL_1_PGS) {
2400c4dcd60SDevesh Sharma 			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
2410c4dcd60SDevesh Sharma 				    0 : PTU_PTE_VALID;
2421ac5a404SSelvin Xavier 			/* 2 levels of indirection */
2430c4dcd60SDevesh Sharma 			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
2440c4dcd60SDevesh Sharma 			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
2450c4dcd60SDevesh Sharma 				npbl++;
2460c4dcd60SDevesh Sharma 			npde = npbl >> MAX_PDL_LVL_SHIFT;
2470c4dcd60SDevesh Sharma 			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
2480c4dcd60SDevesh Sharma 				npde++;
2490c4dcd60SDevesh Sharma 			/* Alloc PDE pages */
2500c4dcd60SDevesh Sharma 			sginfo.pgsize = npde * pg_size;
2510c4dcd60SDevesh Sharma 			sginfo.npages = 1;
2520c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
2530c4dcd60SDevesh Sharma 
2540c4dcd60SDevesh Sharma 			/* Alloc PBL pages */
2550c4dcd60SDevesh Sharma 			sginfo.npages = npbl;
2560c4dcd60SDevesh Sharma 			sginfo.pgsize = PAGE_SIZE;
2570c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
2581ac5a404SSelvin Xavier 			if (rc)
2591ac5a404SSelvin Xavier 				goto fail;
2600c4dcd60SDevesh Sharma 			/* Fill PDL with PBL page pointers */
2611ac5a404SSelvin Xavier 			dst_virt_ptr =
2621ac5a404SSelvin Xavier 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
2631ac5a404SSelvin Xavier 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
2640c4dcd60SDevesh Sharma 			if (hwq_attr->type == HWQ_TYPE_MR) {
2650c4dcd60SDevesh Sharma 			/* For MR it is expected that we supply only 1 contigous
2660c4dcd60SDevesh Sharma 			 * page i.e only 1 entry in the PDL that will contain
2670c4dcd60SDevesh Sharma 			 * all the PBLs for the user supplied memory region
2680c4dcd60SDevesh Sharma 			 */
2690c4dcd60SDevesh Sharma 				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
2700c4dcd60SDevesh Sharma 				     i++)
2710c4dcd60SDevesh Sharma 					dst_virt_ptr[0][i] = src_phys_ptr[i] |
2720c4dcd60SDevesh Sharma 						flag;
2730c4dcd60SDevesh Sharma 			} else {
2740c4dcd60SDevesh Sharma 				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
2750c4dcd60SDevesh Sharma 				     i++)
2761ac5a404SSelvin Xavier 					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
2770c4dcd60SDevesh Sharma 						src_phys_ptr[i] |
2780c4dcd60SDevesh Sharma 						PTU_PDE_VALID;
2790c4dcd60SDevesh Sharma 			}
2800c4dcd60SDevesh Sharma 			/* Alloc or init PTEs */
2810c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
2820c4dcd60SDevesh Sharma 					 hwq_attr->sginfo);
2831ac5a404SSelvin Xavier 			if (rc)
2841ac5a404SSelvin Xavier 				goto fail;
2850c4dcd60SDevesh Sharma 			hwq->level = PBL_LVL_2;
2860c4dcd60SDevesh Sharma 			if (hwq_attr->sginfo->nopte)
2870c4dcd60SDevesh Sharma 				goto done;
2880c4dcd60SDevesh Sharma 			/* Fill PBLs with PTE pointers */
2891ac5a404SSelvin Xavier 			dst_virt_ptr =
2901ac5a404SSelvin Xavier 				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
2911ac5a404SSelvin Xavier 			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
2921ac5a404SSelvin Xavier 			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
2931ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
2941ac5a404SSelvin Xavier 					src_phys_ptr[i] | PTU_PTE_VALID;
2951ac5a404SSelvin Xavier 			}
2960c4dcd60SDevesh Sharma 			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
2971ac5a404SSelvin Xavier 				/* Find the last pg of the size */
2981ac5a404SSelvin Xavier 				i = hwq->pbl[PBL_LVL_2].pg_count;
2991ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
3001ac5a404SSelvin Xavier 								  PTU_PTE_LAST;
3011ac5a404SSelvin Xavier 				if (i > 1)
3021ac5a404SSelvin Xavier 					dst_virt_ptr[PTR_PG(i - 2)]
3031ac5a404SSelvin Xavier 						    [PTR_IDX(i - 2)] |=
3041ac5a404SSelvin Xavier 						    PTU_PTE_NEXT_TO_LAST;
3051ac5a404SSelvin Xavier 			}
3060c4dcd60SDevesh Sharma 		} else { /* pages < 512 npbl = 1, npde = 0 */
3070c4dcd60SDevesh Sharma 			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
3080c4dcd60SDevesh Sharma 				    0 : PTU_PTE_VALID;
3091ac5a404SSelvin Xavier 
3101ac5a404SSelvin Xavier 			/* 1 level of indirection */
3110c4dcd60SDevesh Sharma 			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
3120c4dcd60SDevesh Sharma 			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
3130c4dcd60SDevesh Sharma 				npbl++;
3140c4dcd60SDevesh Sharma 			sginfo.npages = npbl;
3150c4dcd60SDevesh Sharma 			sginfo.pgsize = PAGE_SIZE;
3160c4dcd60SDevesh Sharma 			/* Alloc PBL page */
3170c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
3181ac5a404SSelvin Xavier 			if (rc)
3191ac5a404SSelvin Xavier 				goto fail;
3200c4dcd60SDevesh Sharma 			/* Alloc or init  PTEs */
3210c4dcd60SDevesh Sharma 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
3220c4dcd60SDevesh Sharma 					 hwq_attr->sginfo);
3230c4dcd60SDevesh Sharma 			if (rc)
3240c4dcd60SDevesh Sharma 				goto fail;
3250c4dcd60SDevesh Sharma 			hwq->level = PBL_LVL_1;
3260c4dcd60SDevesh Sharma 			if (hwq_attr->sginfo->nopte)
3270c4dcd60SDevesh Sharma 				goto done;
3280c4dcd60SDevesh Sharma 			/* Fill PBL with PTE pointers */
3291ac5a404SSelvin Xavier 			dst_virt_ptr =
3301ac5a404SSelvin Xavier 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
3311ac5a404SSelvin Xavier 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
3320c4dcd60SDevesh Sharma 			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
3331ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
3341ac5a404SSelvin Xavier 					src_phys_ptr[i] | flag;
3350c4dcd60SDevesh Sharma 			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
3361ac5a404SSelvin Xavier 				/* Find the last pg of the size */
3371ac5a404SSelvin Xavier 				i = hwq->pbl[PBL_LVL_1].pg_count;
3381ac5a404SSelvin Xavier 				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
3391ac5a404SSelvin Xavier 								  PTU_PTE_LAST;
3401ac5a404SSelvin Xavier 				if (i > 1)
3411ac5a404SSelvin Xavier 					dst_virt_ptr[PTR_PG(i - 2)]
3421ac5a404SSelvin Xavier 						    [PTR_IDX(i - 2)] |=
3431ac5a404SSelvin Xavier 						    PTU_PTE_NEXT_TO_LAST;
3441ac5a404SSelvin Xavier 			}
3451ac5a404SSelvin Xavier 		}
3461ac5a404SSelvin Xavier 	}
3470c4dcd60SDevesh Sharma done:
3481ac5a404SSelvin Xavier 	hwq->prod = 0;
3491ac5a404SSelvin Xavier 	hwq->cons = 0;
3500c4dcd60SDevesh Sharma 	hwq->pdev = pdev;
3510c4dcd60SDevesh Sharma 	hwq->depth = hwq_attr->depth;
3520c4dcd60SDevesh Sharma 	hwq->max_elements = depth;
3530c4dcd60SDevesh Sharma 	hwq->element_size = stride;
354fddcbbb0SDevesh Sharma 	hwq->qe_ppg = pg_size / stride;
3551ac5a404SSelvin Xavier 	/* For direct access to the elements */
3560c4dcd60SDevesh Sharma 	lvl = hwq->level;
3570c4dcd60SDevesh Sharma 	if (hwq_attr->sginfo->nopte && hwq->level)
3580c4dcd60SDevesh Sharma 		lvl = hwq->level - 1;
3590c4dcd60SDevesh Sharma 	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
3600c4dcd60SDevesh Sharma 	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
3610c4dcd60SDevesh Sharma 	spin_lock_init(&hwq->lock);
3621ac5a404SSelvin Xavier 
3631ac5a404SSelvin Xavier 	return 0;
3641ac5a404SSelvin Xavier fail:
3650c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, hwq);
3661ac5a404SSelvin Xavier 	return -ENOMEM;
3671ac5a404SSelvin Xavier }
3681ac5a404SSelvin Xavier 
3691ac5a404SSelvin Xavier /* Context Tables */
3700c4dcd60SDevesh Sharma void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
3711ac5a404SSelvin Xavier 			 struct bnxt_qplib_ctx *ctx)
3721ac5a404SSelvin Xavier {
3731ac5a404SSelvin Xavier 	int i;
3741ac5a404SSelvin Xavier 
3750c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
3760c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
3770c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
3780c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
3790c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
3801ac5a404SSelvin Xavier 	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
3810c4dcd60SDevesh Sharma 		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
3820c4dcd60SDevesh Sharma 	/* restore original pde level before destroy */
3830c4dcd60SDevesh Sharma 	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
3840c4dcd60SDevesh Sharma 	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
3850c4dcd60SDevesh Sharma 	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
3860c4dcd60SDevesh Sharma }
3870c4dcd60SDevesh Sharma 
3880c4dcd60SDevesh Sharma static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
3890c4dcd60SDevesh Sharma 				      struct bnxt_qplib_ctx *ctx)
3900c4dcd60SDevesh Sharma {
3910c4dcd60SDevesh Sharma 	struct bnxt_qplib_hwq_attr hwq_attr = {};
3920c4dcd60SDevesh Sharma 	struct bnxt_qplib_sg_info sginfo = {};
3930c4dcd60SDevesh Sharma 	struct bnxt_qplib_tqm_ctx *tqmctx;
3940c4dcd60SDevesh Sharma 	int rc = 0;
3950c4dcd60SDevesh Sharma 	int i;
3960c4dcd60SDevesh Sharma 
3970c4dcd60SDevesh Sharma 	tqmctx = &ctx->tqm_ctx;
3980c4dcd60SDevesh Sharma 
3990c4dcd60SDevesh Sharma 	sginfo.pgsize = PAGE_SIZE;
4000c4dcd60SDevesh Sharma 	sginfo.pgshft = PAGE_SHIFT;
4010c4dcd60SDevesh Sharma 	hwq_attr.sginfo = &sginfo;
4020c4dcd60SDevesh Sharma 	hwq_attr.res = res;
4030c4dcd60SDevesh Sharma 	hwq_attr.type = HWQ_TYPE_CTX;
4040c4dcd60SDevesh Sharma 	hwq_attr.depth = 512;
4050c4dcd60SDevesh Sharma 	hwq_attr.stride = sizeof(u64);
4060c4dcd60SDevesh Sharma 	/* Alloc pdl buffer */
4070c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
4080c4dcd60SDevesh Sharma 	if (rc)
4090c4dcd60SDevesh Sharma 		goto out;
4100c4dcd60SDevesh Sharma 	/* Save original pdl level */
4110c4dcd60SDevesh Sharma 	tqmctx->pde_level = tqmctx->pde.level;
4120c4dcd60SDevesh Sharma 
4130c4dcd60SDevesh Sharma 	hwq_attr.stride = 1;
4140c4dcd60SDevesh Sharma 	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
4150c4dcd60SDevesh Sharma 		if (!tqmctx->qcount[i])
4160c4dcd60SDevesh Sharma 			continue;
4170c4dcd60SDevesh Sharma 		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
4180c4dcd60SDevesh Sharma 		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
4190c4dcd60SDevesh Sharma 		if (rc)
4200c4dcd60SDevesh Sharma 			goto out;
4210c4dcd60SDevesh Sharma 	}
4220c4dcd60SDevesh Sharma out:
4230c4dcd60SDevesh Sharma 	return rc;
4240c4dcd60SDevesh Sharma }
4250c4dcd60SDevesh Sharma 
/* Program the TQM page-directory (PDE) table with the bus addresses of
 * each TQM ring's page-table pages.  Each ring owns a fixed window of
 * MAX_TQM_ALLOC_BLK_SIZE PDE slots, indexed by PTR_PG()/PTR_IDX().
 * Afterwards ctx->pde.level is rewritten to reflect the depth the PDE
 * now represents (the free path restores the saved pde_level).
 */
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	/* i walks the rings; j is the ring's starting PDE slot. */
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			/* Two-level ring: one PDE per LVL_1 page. */
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			/* Shallow ring: a single PDE pointing at the LVL_0
			 * page suffices.
			 */
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}
4680c4dcd60SDevesh Sharma 
4690c4dcd60SDevesh Sharma static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
4700c4dcd60SDevesh Sharma 				      struct bnxt_qplib_ctx *ctx)
4710c4dcd60SDevesh Sharma {
4720c4dcd60SDevesh Sharma 	int rc = 0;
4730c4dcd60SDevesh Sharma 
4740c4dcd60SDevesh Sharma 	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
4750c4dcd60SDevesh Sharma 	if (rc)
4760c4dcd60SDevesh Sharma 		goto fail;
4770c4dcd60SDevesh Sharma 
4780c4dcd60SDevesh Sharma 	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
4790c4dcd60SDevesh Sharma fail:
4800c4dcd60SDevesh Sharma 	return rc;
4811ac5a404SSelvin Xavier }
4821ac5a404SSelvin Xavier 
4831ac5a404SSelvin Xavier /*
4841ac5a404SSelvin Xavier  * Routine: bnxt_qplib_alloc_ctx
4851ac5a404SSelvin Xavier  * Description:
4861ac5a404SSelvin Xavier  *     Context tables are memories which are used by the chip fw.
4871ac5a404SSelvin Xavier  *     The 6 tables defined are:
4881ac5a404SSelvin Xavier  *             QPC ctx - holds QP states
4891ac5a404SSelvin Xavier  *             MRW ctx - holds memory region and window
4901ac5a404SSelvin Xavier  *             SRQ ctx - holds shared RQ states
4911ac5a404SSelvin Xavier  *             CQ ctx - holds completion queue states
4921ac5a404SSelvin Xavier  *             TQM ctx - holds Tx Queue Manager context
4931ac5a404SSelvin Xavier  *             TIM ctx - holds timer context
4941ac5a404SSelvin Xavier  *     Depending on the size of the tbl requested, either a 1 Page Buffer List
4951ac5a404SSelvin Xavier  *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
4961ac5a404SSelvin Xavier  *     instead.
4971ac5a404SSelvin Xavier  *     Table might be employed as follows:
4981ac5a404SSelvin Xavier  *             For 0      < ctx size <= 1 PAGE, 0 level of ind is used
4991ac5a404SSelvin Xavier  *             For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
5001ac5a404SSelvin Xavier  *             For 512    < ctx size <= MAX, 2 levels of ind is used
5011ac5a404SSelvin Xavier  * Returns:
5021ac5a404SSelvin Xavier  *     0 if success, else -ERRORS
5031ac5a404SSelvin Xavier  */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	/* VFs and P5 chips skip host-backed context tables; only the
	 * statistics context is allocated for them.
	 */
	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	/* hwq_attr is intentionally reused for every table below; only the
	 * depth and stride change between allocations.
	 */
	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer: 16 bytes per QP context entry */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	/* Unwind everything allocated so far (free handles empty tables). */
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}
5721ac5a404SSelvin Xavier 
5731ac5a404SSelvin Xavier /* GUID */
5741ac5a404SSelvin Xavier void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
5751ac5a404SSelvin Xavier {
5761ac5a404SSelvin Xavier 	u8 mac[ETH_ALEN];
5771ac5a404SSelvin Xavier 
5781ac5a404SSelvin Xavier 	/* MAC-48 to EUI-64 mapping */
5791ac5a404SSelvin Xavier 	memcpy(mac, dev_addr, ETH_ALEN);
5801ac5a404SSelvin Xavier 	guid[0] = mac[0] ^ 2;
5811ac5a404SSelvin Xavier 	guid[1] = mac[1];
5821ac5a404SSelvin Xavier 	guid[2] = mac[2];
5831ac5a404SSelvin Xavier 	guid[3] = 0xff;
5841ac5a404SSelvin Xavier 	guid[4] = 0xfe;
5851ac5a404SSelvin Xavier 	guid[5] = mac[3];
5861ac5a404SSelvin Xavier 	guid[6] = mac[4];
5871ac5a404SSelvin Xavier 	guid[7] = mac[5];
5881ac5a404SSelvin Xavier }
5891ac5a404SSelvin Xavier 
5901ac5a404SSelvin Xavier static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
5911ac5a404SSelvin Xavier 				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
5921ac5a404SSelvin Xavier {
5931ac5a404SSelvin Xavier 	kfree(sgid_tbl->tbl);
5941ac5a404SSelvin Xavier 	kfree(sgid_tbl->hw_id);
5951ac5a404SSelvin Xavier 	kfree(sgid_tbl->ctx);
5965fac5b1bSKalesh AP 	kfree(sgid_tbl->vlan);
5971ac5a404SSelvin Xavier 	sgid_tbl->tbl = NULL;
5981ac5a404SSelvin Xavier 	sgid_tbl->hw_id = NULL;
5991ac5a404SSelvin Xavier 	sgid_tbl->ctx = NULL;
6005fac5b1bSKalesh AP 	sgid_tbl->vlan = NULL;
6011ac5a404SSelvin Xavier 	sgid_tbl->max = 0;
6021ac5a404SSelvin Xavier 	sgid_tbl->active = 0;
6031ac5a404SSelvin Xavier }
6041ac5a404SSelvin Xavier 
6051ac5a404SSelvin Xavier static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
6061ac5a404SSelvin Xavier 				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
6071ac5a404SSelvin Xavier 				     u16 max)
6081ac5a404SSelvin Xavier {
609c56b593dSSelvin Xavier 	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
6101ac5a404SSelvin Xavier 	if (!sgid_tbl->tbl)
6111ac5a404SSelvin Xavier 		return -ENOMEM;
6121ac5a404SSelvin Xavier 
6131ac5a404SSelvin Xavier 	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
6141ac5a404SSelvin Xavier 	if (!sgid_tbl->hw_id)
6151ac5a404SSelvin Xavier 		goto out_free1;
6161ac5a404SSelvin Xavier 
6171ac5a404SSelvin Xavier 	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
6181ac5a404SSelvin Xavier 	if (!sgid_tbl->ctx)
6191ac5a404SSelvin Xavier 		goto out_free2;
6201ac5a404SSelvin Xavier 
6215fac5b1bSKalesh AP 	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
6225fac5b1bSKalesh AP 	if (!sgid_tbl->vlan)
6235fac5b1bSKalesh AP 		goto out_free3;
6245fac5b1bSKalesh AP 
6251ac5a404SSelvin Xavier 	sgid_tbl->max = max;
6261ac5a404SSelvin Xavier 	return 0;
6275fac5b1bSKalesh AP out_free3:
6285fac5b1bSKalesh AP 	kfree(sgid_tbl->ctx);
6295fac5b1bSKalesh AP 	sgid_tbl->ctx = NULL;
6301ac5a404SSelvin Xavier out_free2:
6311ac5a404SSelvin Xavier 	kfree(sgid_tbl->hw_id);
6321ac5a404SSelvin Xavier 	sgid_tbl->hw_id = NULL;
6331ac5a404SSelvin Xavier out_free1:
6341ac5a404SSelvin Xavier 	kfree(sgid_tbl->tbl);
6351ac5a404SSelvin Xavier 	sgid_tbl->tbl = NULL;
6361ac5a404SSelvin Xavier 	return -ENOMEM;
6371ac5a404SSelvin Xavier };
6381ac5a404SSelvin Xavier 
6391ac5a404SSelvin Xavier static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
6401ac5a404SSelvin Xavier 					struct bnxt_qplib_sgid_tbl *sgid_tbl)
6411ac5a404SSelvin Xavier {
6421ac5a404SSelvin Xavier 	int i;
6431ac5a404SSelvin Xavier 
6441ac5a404SSelvin Xavier 	for (i = 0; i < sgid_tbl->max; i++) {
6451ac5a404SSelvin Xavier 		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
6461ac5a404SSelvin Xavier 			   sizeof(bnxt_qplib_gid_zero)))
647c56b593dSSelvin Xavier 			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
648c56b593dSSelvin Xavier 					    sgid_tbl->tbl[i].vlan_id, true);
6491ac5a404SSelvin Xavier 	}
650c56b593dSSelvin Xavier 	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
6511ac5a404SSelvin Xavier 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
6525fac5b1bSKalesh AP 	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
6531ac5a404SSelvin Xavier 	sgid_tbl->active = 0;
6541ac5a404SSelvin Xavier }
6551ac5a404SSelvin Xavier 
6561ac5a404SSelvin Xavier static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
6571ac5a404SSelvin Xavier 				     struct net_device *netdev)
6581ac5a404SSelvin Xavier {
659c56b593dSSelvin Xavier 	u32 i;
660c56b593dSSelvin Xavier 
661c56b593dSSelvin Xavier 	for (i = 0; i < sgid_tbl->max; i++)
662c56b593dSSelvin Xavier 		sgid_tbl->tbl[i].vlan_id = 0xffff;
663c56b593dSSelvin Xavier 
6641ac5a404SSelvin Xavier 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
6651ac5a404SSelvin Xavier }
6661ac5a404SSelvin Xavier 
6671ac5a404SSelvin Xavier static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
6681ac5a404SSelvin Xavier 				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
6691ac5a404SSelvin Xavier {
6701ac5a404SSelvin Xavier 	if (!pkey_tbl->tbl)
67108920b8fSJoe Perches 		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
6721ac5a404SSelvin Xavier 	else
6731ac5a404SSelvin Xavier 		kfree(pkey_tbl->tbl);
6741ac5a404SSelvin Xavier 
6751ac5a404SSelvin Xavier 	pkey_tbl->tbl = NULL;
6761ac5a404SSelvin Xavier 	pkey_tbl->max = 0;
6771ac5a404SSelvin Xavier 	pkey_tbl->active = 0;
6781ac5a404SSelvin Xavier }
6791ac5a404SSelvin Xavier 
6801ac5a404SSelvin Xavier static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
6811ac5a404SSelvin Xavier 				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
6821ac5a404SSelvin Xavier 				     u16 max)
6831ac5a404SSelvin Xavier {
6841ac5a404SSelvin Xavier 	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
6851ac5a404SSelvin Xavier 	if (!pkey_tbl->tbl)
6861ac5a404SSelvin Xavier 		return -ENOMEM;
6871ac5a404SSelvin Xavier 
6881ac5a404SSelvin Xavier 	pkey_tbl->max = max;
6891ac5a404SSelvin Xavier 	return 0;
6901ac5a404SSelvin Xavier };
6911ac5a404SSelvin Xavier 
6921ac5a404SSelvin Xavier /* PDs */
6931ac5a404SSelvin Xavier int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
6941ac5a404SSelvin Xavier {
6951ac5a404SSelvin Xavier 	u32 bit_num;
6961ac5a404SSelvin Xavier 
6971ac5a404SSelvin Xavier 	bit_num = find_first_bit(pdt->tbl, pdt->max);
6981ac5a404SSelvin Xavier 	if (bit_num == pdt->max)
6991ac5a404SSelvin Xavier 		return -ENOMEM;
7001ac5a404SSelvin Xavier 
7011ac5a404SSelvin Xavier 	/* Found unused PD */
7021ac5a404SSelvin Xavier 	clear_bit(bit_num, pdt->tbl);
7031ac5a404SSelvin Xavier 	pd->id = bit_num;
7041ac5a404SSelvin Xavier 	return 0;
7051ac5a404SSelvin Xavier }
7061ac5a404SSelvin Xavier 
7071ac5a404SSelvin Xavier int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
7081ac5a404SSelvin Xavier 			  struct bnxt_qplib_pd_tbl *pdt,
7091ac5a404SSelvin Xavier 			  struct bnxt_qplib_pd *pd)
7101ac5a404SSelvin Xavier {
7111ac5a404SSelvin Xavier 	if (test_and_set_bit(pd->id, pdt->tbl)) {
71208920b8fSJoe Perches 		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
7131ac5a404SSelvin Xavier 			 pd->id);
7141ac5a404SSelvin Xavier 		return -EINVAL;
7151ac5a404SSelvin Xavier 	}
7161ac5a404SSelvin Xavier 	pd->id = 0;
7171ac5a404SSelvin Xavier 	return 0;
7181ac5a404SSelvin Xavier }
7191ac5a404SSelvin Xavier 
7201ac5a404SSelvin Xavier static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
7211ac5a404SSelvin Xavier {
7221ac5a404SSelvin Xavier 	kfree(pdt->tbl);
7231ac5a404SSelvin Xavier 	pdt->tbl = NULL;
7241ac5a404SSelvin Xavier 	pdt->max = 0;
7251ac5a404SSelvin Xavier }
7261ac5a404SSelvin Xavier 
7271ac5a404SSelvin Xavier static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
7281ac5a404SSelvin Xavier 				   struct bnxt_qplib_pd_tbl *pdt,
7291ac5a404SSelvin Xavier 				   u32 max)
7301ac5a404SSelvin Xavier {
7311ac5a404SSelvin Xavier 	u32 bytes;
7321ac5a404SSelvin Xavier 
7331ac5a404SSelvin Xavier 	bytes = max >> 3;
7341ac5a404SSelvin Xavier 	if (!bytes)
7351ac5a404SSelvin Xavier 		bytes = 1;
7361ac5a404SSelvin Xavier 	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
7371ac5a404SSelvin Xavier 	if (!pdt->tbl)
7381ac5a404SSelvin Xavier 		return -ENOMEM;
7391ac5a404SSelvin Xavier 
7401ac5a404SSelvin Xavier 	pdt->max = max;
7411ac5a404SSelvin Xavier 	memset((u8 *)pdt->tbl, 0xFF, bytes);
7421ac5a404SSelvin Xavier 
7431ac5a404SSelvin Xavier 	return 0;
7441ac5a404SSelvin Xavier }
7451ac5a404SSelvin Xavier 
7461ac5a404SSelvin Xavier /* DPIs */
7471ac5a404SSelvin Xavier int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
7481ac5a404SSelvin Xavier 			 struct bnxt_qplib_dpi     *dpi,
7491ac5a404SSelvin Xavier 			 void                      *app)
7501ac5a404SSelvin Xavier {
7511ac5a404SSelvin Xavier 	u32 bit_num;
7521ac5a404SSelvin Xavier 
7531ac5a404SSelvin Xavier 	bit_num = find_first_bit(dpit->tbl, dpit->max);
7541ac5a404SSelvin Xavier 	if (bit_num == dpit->max)
7551ac5a404SSelvin Xavier 		return -ENOMEM;
7561ac5a404SSelvin Xavier 
7571ac5a404SSelvin Xavier 	/* Found unused DPI */
7581ac5a404SSelvin Xavier 	clear_bit(bit_num, dpit->tbl);
7591ac5a404SSelvin Xavier 	dpit->app_tbl[bit_num] = app;
7601ac5a404SSelvin Xavier 
7611ac5a404SSelvin Xavier 	dpi->dpi = bit_num;
7621ac5a404SSelvin Xavier 	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
7631ac5a404SSelvin Xavier 	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);
7641ac5a404SSelvin Xavier 
7651ac5a404SSelvin Xavier 	return 0;
7661ac5a404SSelvin Xavier }
7671ac5a404SSelvin Xavier 
7681ac5a404SSelvin Xavier int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
7691ac5a404SSelvin Xavier 			   struct bnxt_qplib_dpi_tbl *dpit,
7701ac5a404SSelvin Xavier 			   struct bnxt_qplib_dpi     *dpi)
7711ac5a404SSelvin Xavier {
7721ac5a404SSelvin Xavier 	if (dpi->dpi >= dpit->max) {
77308920b8fSJoe Perches 		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
7741ac5a404SSelvin Xavier 		return -EINVAL;
7751ac5a404SSelvin Xavier 	}
7761ac5a404SSelvin Xavier 	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
77708920b8fSJoe Perches 		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
7781ac5a404SSelvin Xavier 			 dpi->dpi);
7791ac5a404SSelvin Xavier 		return -EINVAL;
7801ac5a404SSelvin Xavier 	}
7811ac5a404SSelvin Xavier 	if (dpit->app_tbl)
7821ac5a404SSelvin Xavier 		dpit->app_tbl[dpi->dpi] = NULL;
7831ac5a404SSelvin Xavier 	memset(dpi, 0, sizeof(*dpi));
7841ac5a404SSelvin Xavier 
7851ac5a404SSelvin Xavier 	return 0;
7861ac5a404SSelvin Xavier }
7871ac5a404SSelvin Xavier 
7881ac5a404SSelvin Xavier static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res     *res,
7891ac5a404SSelvin Xavier 				    struct bnxt_qplib_dpi_tbl *dpit)
7901ac5a404SSelvin Xavier {
7911ac5a404SSelvin Xavier 	kfree(dpit->tbl);
7921ac5a404SSelvin Xavier 	kfree(dpit->app_tbl);
7931ac5a404SSelvin Xavier 	if (dpit->dbr_bar_reg_iomem)
7941ac5a404SSelvin Xavier 		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
7951ac5a404SSelvin Xavier 	memset(dpit, 0, sizeof(*dpit));
7961ac5a404SSelvin Xavier }
7971ac5a404SSelvin Xavier 
7981ac5a404SSelvin Xavier static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
7991ac5a404SSelvin Xavier 				    struct bnxt_qplib_dpi_tbl *dpit,
8001ac5a404SSelvin Xavier 				    u32                       dbr_offset)
8011ac5a404SSelvin Xavier {
8021ac5a404SSelvin Xavier 	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
8031ac5a404SSelvin Xavier 	resource_size_t bar_reg_base;
8041ac5a404SSelvin Xavier 	u32 dbr_len, bytes;
8051ac5a404SSelvin Xavier 
8061ac5a404SSelvin Xavier 	if (dpit->dbr_bar_reg_iomem) {
80708920b8fSJoe Perches 		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
80808920b8fSJoe Perches 			dbr_bar_reg);
8091ac5a404SSelvin Xavier 		return -EALREADY;
8101ac5a404SSelvin Xavier 	}
8111ac5a404SSelvin Xavier 
8121ac5a404SSelvin Xavier 	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
8131ac5a404SSelvin Xavier 	if (!bar_reg_base) {
81408920b8fSJoe Perches 		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
81508920b8fSJoe Perches 			dbr_bar_reg);
8161ac5a404SSelvin Xavier 		return -ENOMEM;
8171ac5a404SSelvin Xavier 	}
8181ac5a404SSelvin Xavier 
8191ac5a404SSelvin Xavier 	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
8201ac5a404SSelvin Xavier 	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
82108920b8fSJoe Perches 		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
8221ac5a404SSelvin Xavier 		return -ENOMEM;
8231ac5a404SSelvin Xavier 	}
8241ac5a404SSelvin Xavier 
8254bdc0d67SChristoph Hellwig 	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
8261ac5a404SSelvin Xavier 						  dbr_len);
8271ac5a404SSelvin Xavier 	if (!dpit->dbr_bar_reg_iomem) {
8281ac5a404SSelvin Xavier 		dev_err(&res->pdev->dev,
82908920b8fSJoe Perches 			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
8301ac5a404SSelvin Xavier 		return -ENOMEM;
8311ac5a404SSelvin Xavier 	}
8321ac5a404SSelvin Xavier 
8331ac5a404SSelvin Xavier 	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
8341ac5a404SSelvin Xavier 	dpit->max = dbr_len / PAGE_SIZE;
8351ac5a404SSelvin Xavier 
8361ac5a404SSelvin Xavier 	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
837e5b89843SMarkus Elfring 	if (!dpit->app_tbl)
838e5b89843SMarkus Elfring 		goto unmap_io;
8391ac5a404SSelvin Xavier 
8401ac5a404SSelvin Xavier 	bytes = dpit->max >> 3;
8411ac5a404SSelvin Xavier 	if (!bytes)
8421ac5a404SSelvin Xavier 		bytes = 1;
8431ac5a404SSelvin Xavier 
8441ac5a404SSelvin Xavier 	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
8451ac5a404SSelvin Xavier 	if (!dpit->tbl) {
8461ac5a404SSelvin Xavier 		kfree(dpit->app_tbl);
8471ac5a404SSelvin Xavier 		dpit->app_tbl = NULL;
848e5b89843SMarkus Elfring 		goto unmap_io;
8491ac5a404SSelvin Xavier 	}
8501ac5a404SSelvin Xavier 
8511ac5a404SSelvin Xavier 	memset((u8 *)dpit->tbl, 0xFF, bytes);
8521ac5a404SSelvin Xavier 
8531ac5a404SSelvin Xavier 	return 0;
854e5b89843SMarkus Elfring 
855e5b89843SMarkus Elfring unmap_io:
856e5b89843SMarkus Elfring 	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
857e5b89843SMarkus Elfring 	return -ENOMEM;
8581ac5a404SSelvin Xavier }
8591ac5a404SSelvin Xavier 
8601ac5a404SSelvin Xavier /* PKEYs */
8611ac5a404SSelvin Xavier static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
8621ac5a404SSelvin Xavier {
8631ac5a404SSelvin Xavier 	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
8641ac5a404SSelvin Xavier 	pkey_tbl->active = 0;
8651ac5a404SSelvin Xavier }
8661ac5a404SSelvin Xavier 
8671ac5a404SSelvin Xavier static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
8681ac5a404SSelvin Xavier 				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
8691ac5a404SSelvin Xavier {
8701ac5a404SSelvin Xavier 	u16 pkey = 0xFFFF;
8711ac5a404SSelvin Xavier 
8721ac5a404SSelvin Xavier 	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
8731ac5a404SSelvin Xavier 
8741ac5a404SSelvin Xavier 	/* pkey default = 0xFFFF */
8751ac5a404SSelvin Xavier 	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
8761ac5a404SSelvin Xavier }
8771ac5a404SSelvin Xavier 
8781ac5a404SSelvin Xavier /* Stats */
8791ac5a404SSelvin Xavier static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
8801ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats)
8811ac5a404SSelvin Xavier {
8821ac5a404SSelvin Xavier 	if (stats->dma) {
8831ac5a404SSelvin Xavier 		dma_free_coherent(&pdev->dev, stats->size,
8841ac5a404SSelvin Xavier 				  stats->dma, stats->dma_map);
8851ac5a404SSelvin Xavier 	}
8861ac5a404SSelvin Xavier 	memset(stats, 0, sizeof(*stats));
8871ac5a404SSelvin Xavier 	stats->fw_id = -1;
8881ac5a404SSelvin Xavier }
8891ac5a404SSelvin Xavier 
8901ac5a404SSelvin Xavier static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
8911ac5a404SSelvin Xavier 				      struct bnxt_qplib_stats *stats)
8921ac5a404SSelvin Xavier {
8931ac5a404SSelvin Xavier 	memset(stats, 0, sizeof(*stats));
8941ac5a404SSelvin Xavier 	stats->fw_id = -1;
895e0387e1dSDevesh Sharma 	/* 128 byte aligned context memory is required only for 57500.
896e0387e1dSDevesh Sharma 	 * However making this unconditional, it does not harm previous
897e0387e1dSDevesh Sharma 	 * generation.
898e0387e1dSDevesh Sharma 	 */
899e0387e1dSDevesh Sharma 	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
9001ac5a404SSelvin Xavier 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
9011ac5a404SSelvin Xavier 					&stats->dma_map, GFP_KERNEL);
9021ac5a404SSelvin Xavier 	if (!stats->dma) {
90308920b8fSJoe Perches 		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
9041ac5a404SSelvin Xavier 		return -ENOMEM;
9051ac5a404SSelvin Xavier 	}
9061ac5a404SSelvin Xavier 	return 0;
9071ac5a404SSelvin Xavier }
9081ac5a404SSelvin Xavier 
9091ac5a404SSelvin Xavier void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
9101ac5a404SSelvin Xavier {
9111ac5a404SSelvin Xavier 	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
9121ac5a404SSelvin Xavier 	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
9131ac5a404SSelvin Xavier }
9141ac5a404SSelvin Xavier 
9151ac5a404SSelvin Xavier int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
9161ac5a404SSelvin Xavier {
9171ac5a404SSelvin Xavier 	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
9181ac5a404SSelvin Xavier 	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
9191ac5a404SSelvin Xavier 
9201ac5a404SSelvin Xavier 	return 0;
9211ac5a404SSelvin Xavier }
9221ac5a404SSelvin Xavier 
9231ac5a404SSelvin Xavier void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
9241ac5a404SSelvin Xavier {
9251ac5a404SSelvin Xavier 	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
9261ac5a404SSelvin Xavier 	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
9271ac5a404SSelvin Xavier 	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
9281ac5a404SSelvin Xavier 	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
9291ac5a404SSelvin Xavier }
9301ac5a404SSelvin Xavier 
9311ac5a404SSelvin Xavier int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
9321ac5a404SSelvin Xavier 			 struct net_device *netdev,
9331ac5a404SSelvin Xavier 			 struct bnxt_qplib_dev_attr *dev_attr)
9341ac5a404SSelvin Xavier {
9351ac5a404SSelvin Xavier 	int rc = 0;
9361ac5a404SSelvin Xavier 
9371ac5a404SSelvin Xavier 	res->pdev = pdev;
9381ac5a404SSelvin Xavier 	res->netdev = netdev;
9391ac5a404SSelvin Xavier 
9401ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
9411ac5a404SSelvin Xavier 	if (rc)
9421ac5a404SSelvin Xavier 		goto fail;
9431ac5a404SSelvin Xavier 
9441ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
9451ac5a404SSelvin Xavier 	if (rc)
9461ac5a404SSelvin Xavier 		goto fail;
9471ac5a404SSelvin Xavier 
9481ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
9491ac5a404SSelvin Xavier 	if (rc)
9501ac5a404SSelvin Xavier 		goto fail;
9511ac5a404SSelvin Xavier 
9521ac5a404SSelvin Xavier 	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
9531ac5a404SSelvin Xavier 	if (rc)
9541ac5a404SSelvin Xavier 		goto fail;
9551ac5a404SSelvin Xavier 
9561ac5a404SSelvin Xavier 	return 0;
9571ac5a404SSelvin Xavier fail:
9581ac5a404SSelvin Xavier 	bnxt_qplib_free_res(res);
9591ac5a404SSelvin Xavier 	return rc;
9601ac5a404SSelvin Xavier }
961