11ac5a404SSelvin Xavier /*
21ac5a404SSelvin Xavier * Broadcom NetXtreme-E RoCE driver.
31ac5a404SSelvin Xavier *
41ac5a404SSelvin Xavier * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
51ac5a404SSelvin Xavier * Broadcom refers to Broadcom Limited and/or its subsidiaries.
61ac5a404SSelvin Xavier *
71ac5a404SSelvin Xavier * This software is available to you under a choice of one of two
81ac5a404SSelvin Xavier * licenses. You may choose to be licensed under the terms of the GNU
91ac5a404SSelvin Xavier * General Public License (GPL) Version 2, available from the file
101ac5a404SSelvin Xavier * COPYING in the main directory of this source tree, or the
111ac5a404SSelvin Xavier * BSD license below:
121ac5a404SSelvin Xavier *
131ac5a404SSelvin Xavier * Redistribution and use in source and binary forms, with or without
141ac5a404SSelvin Xavier * modification, are permitted provided that the following conditions
151ac5a404SSelvin Xavier * are met:
161ac5a404SSelvin Xavier *
171ac5a404SSelvin Xavier * 1. Redistributions of source code must retain the above copyright
181ac5a404SSelvin Xavier * notice, this list of conditions and the following disclaimer.
191ac5a404SSelvin Xavier * 2. Redistributions in binary form must reproduce the above copyright
201ac5a404SSelvin Xavier * notice, this list of conditions and the following disclaimer in
211ac5a404SSelvin Xavier * the documentation and/or other materials provided with the
221ac5a404SSelvin Xavier * distribution.
231ac5a404SSelvin Xavier *
241ac5a404SSelvin Xavier * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
251ac5a404SSelvin Xavier * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
261ac5a404SSelvin Xavier * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
271ac5a404SSelvin Xavier * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
281ac5a404SSelvin Xavier * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
291ac5a404SSelvin Xavier * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
301ac5a404SSelvin Xavier * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
311ac5a404SSelvin Xavier * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
321ac5a404SSelvin Xavier * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
331ac5a404SSelvin Xavier * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
341ac5a404SSelvin Xavier * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
351ac5a404SSelvin Xavier *
361ac5a404SSelvin Xavier * Description: QPLib resource manager
371ac5a404SSelvin Xavier */
381ac5a404SSelvin Xavier
3908920b8fSJoe Perches #define dev_fmt(fmt) "QPLIB: " fmt
4008920b8fSJoe Perches
411ac5a404SSelvin Xavier #include <linux/spinlock.h>
421ac5a404SSelvin Xavier #include <linux/pci.h>
431ac5a404SSelvin Xavier #include <linux/interrupt.h>
441ac5a404SSelvin Xavier #include <linux/inetdevice.h>
451ac5a404SSelvin Xavier #include <linux/dma-mapping.h>
461ac5a404SSelvin Xavier #include <linux/if_vlan.h>
4765a16620SJason Gunthorpe #include <linux/vmalloc.h>
486ef999f5SJason Gunthorpe #include <rdma/ib_verbs.h>
496ef999f5SJason Gunthorpe #include <rdma/ib_umem.h>
506ef999f5SJason Gunthorpe
511ac5a404SSelvin Xavier #include "roce_hsi.h"
521ac5a404SSelvin Xavier #include "qplib_res.h"
531ac5a404SSelvin Xavier #include "qplib_sp.h"
541ac5a404SSelvin Xavier #include "qplib_rcfw.h"
551ac5a404SSelvin Xavier
561ac5a404SSelvin Xavier static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
571ac5a404SSelvin Xavier struct bnxt_qplib_stats *stats);
581ac5a404SSelvin Xavier static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
590c23af52SNaresh Kumar PBS struct bnxt_qplib_chip_ctx *cctx,
601ac5a404SSelvin Xavier struct bnxt_qplib_stats *stats);
611ac5a404SSelvin Xavier
621ac5a404SSelvin Xavier /* PBL */
/* Release a page buffer list: free the DMA pages (unless they belong to a
 * user-memory region) and the bookkeeping arrays, then reset the PBL state.
 */
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			void *pg = pbl->pg_arr[i];

			if (!pg) {
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			} else {
				/* pg_arr may carry low-order flag bits; mask
				 * back to the page-aligned CPU address.
				 */
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)pg &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			}
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	vfree(pbl->pg_map_arr);
	pbl->pg_arr = NULL;
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}
901ac5a404SSelvin Xavier
bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl * pbl,struct bnxt_qplib_sg_info * sginfo)910c4dcd60SDevesh Sharma static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
920c4dcd60SDevesh Sharma struct bnxt_qplib_sg_info *sginfo)
931ac5a404SSelvin Xavier {
946ef999f5SJason Gunthorpe struct ib_block_iter biter;
950c4dcd60SDevesh Sharma int i = 0;
960c4dcd60SDevesh Sharma
976ef999f5SJason Gunthorpe rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
986ef999f5SJason Gunthorpe pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
990c4dcd60SDevesh Sharma pbl->pg_arr[i] = NULL;
1000c4dcd60SDevesh Sharma pbl->pg_count++;
1010c4dcd60SDevesh Sharma i++;
1020c4dcd60SDevesh Sharma }
1030c4dcd60SDevesh Sharma }
1040c4dcd60SDevesh Sharma
__alloc_pbl(struct bnxt_qplib_res * res,struct bnxt_qplib_pbl * pbl,struct bnxt_qplib_sg_info * sginfo)1050c4dcd60SDevesh Sharma static int __alloc_pbl(struct bnxt_qplib_res *res,
1060c4dcd60SDevesh Sharma struct bnxt_qplib_pbl *pbl,
1070c4dcd60SDevesh Sharma struct bnxt_qplib_sg_info *sginfo)
1080c4dcd60SDevesh Sharma {
1090c4dcd60SDevesh Sharma struct pci_dev *pdev = res->pdev;
1101ac5a404SSelvin Xavier bool is_umem = false;
1116be2067dSYueHaibing u32 pages;
1121ac5a404SSelvin Xavier int i;
1131ac5a404SSelvin Xavier
1140c4dcd60SDevesh Sharma if (sginfo->nopte)
1150c4dcd60SDevesh Sharma return 0;
1166ef999f5SJason Gunthorpe if (sginfo->umem)
1176ef999f5SJason Gunthorpe pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
1186ef999f5SJason Gunthorpe else
1190c4dcd60SDevesh Sharma pages = sginfo->npages;
1201ac5a404SSelvin Xavier /* page ptr arrays */
121666f526bSJulia Lawall pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
1221ac5a404SSelvin Xavier if (!pbl->pg_arr)
1231ac5a404SSelvin Xavier return -ENOMEM;
1241ac5a404SSelvin Xavier
125666f526bSJulia Lawall pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
1261ac5a404SSelvin Xavier if (!pbl->pg_map_arr) {
1270c4dcd60SDevesh Sharma vfree(pbl->pg_arr);
1281ac5a404SSelvin Xavier pbl->pg_arr = NULL;
1291ac5a404SSelvin Xavier return -ENOMEM;
1301ac5a404SSelvin Xavier }
1311ac5a404SSelvin Xavier pbl->pg_count = 0;
1320c4dcd60SDevesh Sharma pbl->pg_size = sginfo->pgsize;
1331ac5a404SSelvin Xavier
1346ef999f5SJason Gunthorpe if (!sginfo->umem) {
1351ac5a404SSelvin Xavier for (i = 0; i < pages; i++) {
136750afb08SLuis Chamberlain pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1371ac5a404SSelvin Xavier pbl->pg_size,
1381ac5a404SSelvin Xavier &pbl->pg_map_arr[i],
1391ac5a404SSelvin Xavier GFP_KERNEL);
1401ac5a404SSelvin Xavier if (!pbl->pg_arr[i])
1411ac5a404SSelvin Xavier goto fail;
1421ac5a404SSelvin Xavier pbl->pg_count++;
1431ac5a404SSelvin Xavier }
1441ac5a404SSelvin Xavier } else {
1451ac5a404SSelvin Xavier is_umem = true;
1460c4dcd60SDevesh Sharma bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
1471ac5a404SSelvin Xavier }
1481ac5a404SSelvin Xavier
1491ac5a404SSelvin Xavier return 0;
1501ac5a404SSelvin Xavier fail:
1510c4dcd60SDevesh Sharma __free_pbl(res, pbl, is_umem);
1521ac5a404SSelvin Xavier return -ENOMEM;
1531ac5a404SSelvin Xavier }
1541ac5a404SSelvin Xavier
1551ac5a404SSelvin Xavier /* HWQ */
bnxt_qplib_free_hwq(struct bnxt_qplib_res * res,struct bnxt_qplib_hwq * hwq)1560c4dcd60SDevesh Sharma void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
1570c4dcd60SDevesh Sharma struct bnxt_qplib_hwq *hwq)
1581ac5a404SSelvin Xavier {
1591ac5a404SSelvin Xavier int i;
1601ac5a404SSelvin Xavier
1611ac5a404SSelvin Xavier if (!hwq->max_elements)
1621ac5a404SSelvin Xavier return;
1631ac5a404SSelvin Xavier if (hwq->level >= PBL_LVL_MAX)
1641ac5a404SSelvin Xavier return;
1651ac5a404SSelvin Xavier
1661ac5a404SSelvin Xavier for (i = 0; i < hwq->level + 1; i++) {
1671ac5a404SSelvin Xavier if (i == hwq->level)
1680c4dcd60SDevesh Sharma __free_pbl(res, &hwq->pbl[i], hwq->is_user);
1691ac5a404SSelvin Xavier else
1700c4dcd60SDevesh Sharma __free_pbl(res, &hwq->pbl[i], false);
1711ac5a404SSelvin Xavier }
1721ac5a404SSelvin Xavier
1731ac5a404SSelvin Xavier hwq->level = PBL_LVL_MAX;
1741ac5a404SSelvin Xavier hwq->max_elements = 0;
1751ac5a404SSelvin Xavier hwq->element_size = 0;
1761ac5a404SSelvin Xavier hwq->prod = 0;
1771ac5a404SSelvin Xavier hwq->cons = 0;
1781ac5a404SSelvin Xavier hwq->cp_bit = 0;
1791ac5a404SSelvin Xavier }
1801ac5a404SSelvin Xavier
1811ac5a404SSelvin Xavier /* All HWQs are power of 2 in size */
1820c4dcd60SDevesh Sharma
bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq * hwq,struct bnxt_qplib_hwq_attr * hwq_attr)1830c4dcd60SDevesh Sharma int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
1840c4dcd60SDevesh Sharma struct bnxt_qplib_hwq_attr *hwq_attr)
1851ac5a404SSelvin Xavier {
1860c4dcd60SDevesh Sharma u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
1870c4dcd60SDevesh Sharma struct bnxt_qplib_sg_info sginfo = {};
1880c4dcd60SDevesh Sharma u32 depth, stride, npbl, npde;
1891ac5a404SSelvin Xavier dma_addr_t *src_phys_ptr, **dst_virt_ptr;
1900c4dcd60SDevesh Sharma struct bnxt_qplib_res *res;
1910c4dcd60SDevesh Sharma struct pci_dev *pdev;
1920c4dcd60SDevesh Sharma int i, rc, lvl;
1931ac5a404SSelvin Xavier
1940c4dcd60SDevesh Sharma res = hwq_attr->res;
1950c4dcd60SDevesh Sharma pdev = res->pdev;
1960c4dcd60SDevesh Sharma pg_size = hwq_attr->sginfo->pgsize;
1971ac5a404SSelvin Xavier hwq->level = PBL_LVL_MAX;
1981ac5a404SSelvin Xavier
1990c4dcd60SDevesh Sharma depth = roundup_pow_of_two(hwq_attr->depth);
2000c4dcd60SDevesh Sharma stride = roundup_pow_of_two(hwq_attr->stride);
2010c4dcd60SDevesh Sharma if (hwq_attr->aux_depth) {
2020c4dcd60SDevesh Sharma aux_slots = hwq_attr->aux_depth;
2030c4dcd60SDevesh Sharma aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
2040c4dcd60SDevesh Sharma aux_pages = (aux_slots * aux_size) / pg_size;
2050c4dcd60SDevesh Sharma if ((aux_slots * aux_size) % pg_size)
2061ac5a404SSelvin Xavier aux_pages++;
2071ac5a404SSelvin Xavier }
2085aa84840SSelvin Xavier
2096ef999f5SJason Gunthorpe if (!hwq_attr->sginfo->umem) {
2101ac5a404SSelvin Xavier hwq->is_user = false;
2110c4dcd60SDevesh Sharma npages = (depth * stride) / pg_size + aux_pages;
2120c4dcd60SDevesh Sharma if ((depth * stride) % pg_size)
2130c4dcd60SDevesh Sharma npages++;
2140c4dcd60SDevesh Sharma if (!npages)
2151ac5a404SSelvin Xavier return -EINVAL;
2160c4dcd60SDevesh Sharma hwq_attr->sginfo->npages = npages;
2171ac5a404SSelvin Xavier } else {
21808c7f093SSelvin Xavier npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
21908c7f093SSelvin Xavier hwq_attr->sginfo->pgsize);
2201ac5a404SSelvin Xavier hwq->is_user = true;
2211ac5a404SSelvin Xavier }
2221ac5a404SSelvin Xavier
2232b4ccce6SSelvin Xavier if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
2240c4dcd60SDevesh Sharma /* This request is Level 0, map PTE */
2250c4dcd60SDevesh Sharma rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
2261ac5a404SSelvin Xavier if (rc)
2271ac5a404SSelvin Xavier goto fail;
2281ac5a404SSelvin Xavier hwq->level = PBL_LVL_0;
2292b4ccce6SSelvin Xavier goto done;
2300c4dcd60SDevesh Sharma }
2311ac5a404SSelvin Xavier
2322b4ccce6SSelvin Xavier if (npages >= MAX_PBL_LVL_0_PGS) {
2330c4dcd60SDevesh Sharma if (npages > MAX_PBL_LVL_1_PGS) {
2340c4dcd60SDevesh Sharma u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
2350c4dcd60SDevesh Sharma 0 : PTU_PTE_VALID;
2361ac5a404SSelvin Xavier /* 2 levels of indirection */
2370c4dcd60SDevesh Sharma npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
2380c4dcd60SDevesh Sharma if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
2390c4dcd60SDevesh Sharma npbl++;
2400c4dcd60SDevesh Sharma npde = npbl >> MAX_PDL_LVL_SHIFT;
2410c4dcd60SDevesh Sharma if (npbl % BIT(MAX_PDL_LVL_SHIFT))
2420c4dcd60SDevesh Sharma npde++;
2430c4dcd60SDevesh Sharma /* Alloc PDE pages */
2440c4dcd60SDevesh Sharma sginfo.pgsize = npde * pg_size;
2450c4dcd60SDevesh Sharma sginfo.npages = 1;
2460c4dcd60SDevesh Sharma rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
2470c4dcd60SDevesh Sharma
2480c4dcd60SDevesh Sharma /* Alloc PBL pages */
2490c4dcd60SDevesh Sharma sginfo.npages = npbl;
2500c4dcd60SDevesh Sharma sginfo.pgsize = PAGE_SIZE;
2510c4dcd60SDevesh Sharma rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
2521ac5a404SSelvin Xavier if (rc)
2531ac5a404SSelvin Xavier goto fail;
2540c4dcd60SDevesh Sharma /* Fill PDL with PBL page pointers */
2551ac5a404SSelvin Xavier dst_virt_ptr =
2561ac5a404SSelvin Xavier (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
2571ac5a404SSelvin Xavier src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
2580c4dcd60SDevesh Sharma if (hwq_attr->type == HWQ_TYPE_MR) {
2590c4dcd60SDevesh Sharma /* For MR it is expected that we supply only 1 contigous
2600c4dcd60SDevesh Sharma * page i.e only 1 entry in the PDL that will contain
2610c4dcd60SDevesh Sharma * all the PBLs for the user supplied memory region
2620c4dcd60SDevesh Sharma */
2630c4dcd60SDevesh Sharma for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
2640c4dcd60SDevesh Sharma i++)
2650c4dcd60SDevesh Sharma dst_virt_ptr[0][i] = src_phys_ptr[i] |
2660c4dcd60SDevesh Sharma flag;
2670c4dcd60SDevesh Sharma } else {
2680c4dcd60SDevesh Sharma for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
2690c4dcd60SDevesh Sharma i++)
2701ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
2710c4dcd60SDevesh Sharma src_phys_ptr[i] |
2720c4dcd60SDevesh Sharma PTU_PDE_VALID;
2730c4dcd60SDevesh Sharma }
2740c4dcd60SDevesh Sharma /* Alloc or init PTEs */
2750c4dcd60SDevesh Sharma rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
2760c4dcd60SDevesh Sharma hwq_attr->sginfo);
2771ac5a404SSelvin Xavier if (rc)
2781ac5a404SSelvin Xavier goto fail;
2790c4dcd60SDevesh Sharma hwq->level = PBL_LVL_2;
2800c4dcd60SDevesh Sharma if (hwq_attr->sginfo->nopte)
2810c4dcd60SDevesh Sharma goto done;
2820c4dcd60SDevesh Sharma /* Fill PBLs with PTE pointers */
2831ac5a404SSelvin Xavier dst_virt_ptr =
2841ac5a404SSelvin Xavier (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
2851ac5a404SSelvin Xavier src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
2861ac5a404SSelvin Xavier for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
2871ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
2881ac5a404SSelvin Xavier src_phys_ptr[i] | PTU_PTE_VALID;
2891ac5a404SSelvin Xavier }
2900c4dcd60SDevesh Sharma if (hwq_attr->type == HWQ_TYPE_QUEUE) {
2911ac5a404SSelvin Xavier /* Find the last pg of the size */
2921ac5a404SSelvin Xavier i = hwq->pbl[PBL_LVL_2].pg_count;
2931ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
2941ac5a404SSelvin Xavier PTU_PTE_LAST;
2951ac5a404SSelvin Xavier if (i > 1)
2961ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i - 2)]
2971ac5a404SSelvin Xavier [PTR_IDX(i - 2)] |=
2981ac5a404SSelvin Xavier PTU_PTE_NEXT_TO_LAST;
2991ac5a404SSelvin Xavier }
3000c4dcd60SDevesh Sharma } else { /* pages < 512 npbl = 1, npde = 0 */
3010c4dcd60SDevesh Sharma u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
3020c4dcd60SDevesh Sharma 0 : PTU_PTE_VALID;
3031ac5a404SSelvin Xavier
3041ac5a404SSelvin Xavier /* 1 level of indirection */
3050c4dcd60SDevesh Sharma npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
3060c4dcd60SDevesh Sharma if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
3070c4dcd60SDevesh Sharma npbl++;
3080c4dcd60SDevesh Sharma sginfo.npages = npbl;
3090c4dcd60SDevesh Sharma sginfo.pgsize = PAGE_SIZE;
3100c4dcd60SDevesh Sharma /* Alloc PBL page */
3110c4dcd60SDevesh Sharma rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
3121ac5a404SSelvin Xavier if (rc)
3131ac5a404SSelvin Xavier goto fail;
3140c4dcd60SDevesh Sharma /* Alloc or init PTEs */
3150c4dcd60SDevesh Sharma rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
3160c4dcd60SDevesh Sharma hwq_attr->sginfo);
3170c4dcd60SDevesh Sharma if (rc)
3180c4dcd60SDevesh Sharma goto fail;
3190c4dcd60SDevesh Sharma hwq->level = PBL_LVL_1;
3200c4dcd60SDevesh Sharma if (hwq_attr->sginfo->nopte)
3210c4dcd60SDevesh Sharma goto done;
3220c4dcd60SDevesh Sharma /* Fill PBL with PTE pointers */
3231ac5a404SSelvin Xavier dst_virt_ptr =
3241ac5a404SSelvin Xavier (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
3251ac5a404SSelvin Xavier src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
3260c4dcd60SDevesh Sharma for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
3271ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
3281ac5a404SSelvin Xavier src_phys_ptr[i] | flag;
3290c4dcd60SDevesh Sharma if (hwq_attr->type == HWQ_TYPE_QUEUE) {
3301ac5a404SSelvin Xavier /* Find the last pg of the size */
3311ac5a404SSelvin Xavier i = hwq->pbl[PBL_LVL_1].pg_count;
3321ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
3331ac5a404SSelvin Xavier PTU_PTE_LAST;
3341ac5a404SSelvin Xavier if (i > 1)
3351ac5a404SSelvin Xavier dst_virt_ptr[PTR_PG(i - 2)]
3361ac5a404SSelvin Xavier [PTR_IDX(i - 2)] |=
3371ac5a404SSelvin Xavier PTU_PTE_NEXT_TO_LAST;
3381ac5a404SSelvin Xavier }
3391ac5a404SSelvin Xavier }
3401ac5a404SSelvin Xavier }
3410c4dcd60SDevesh Sharma done:
3421ac5a404SSelvin Xavier hwq->prod = 0;
3431ac5a404SSelvin Xavier hwq->cons = 0;
3440c4dcd60SDevesh Sharma hwq->pdev = pdev;
3450c4dcd60SDevesh Sharma hwq->depth = hwq_attr->depth;
346f52e649eSChandramohan Akula hwq->max_elements = hwq->depth;
3470c4dcd60SDevesh Sharma hwq->element_size = stride;
348fddcbbb0SDevesh Sharma hwq->qe_ppg = pg_size / stride;
3491ac5a404SSelvin Xavier /* For direct access to the elements */
3500c4dcd60SDevesh Sharma lvl = hwq->level;
3510c4dcd60SDevesh Sharma if (hwq_attr->sginfo->nopte && hwq->level)
3520c4dcd60SDevesh Sharma lvl = hwq->level - 1;
3530c4dcd60SDevesh Sharma hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
3540c4dcd60SDevesh Sharma hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
3550c4dcd60SDevesh Sharma spin_lock_init(&hwq->lock);
3561ac5a404SSelvin Xavier
3571ac5a404SSelvin Xavier return 0;
3581ac5a404SSelvin Xavier fail:
3590c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, hwq);
3601ac5a404SSelvin Xavier return -ENOMEM;
3611ac5a404SSelvin Xavier }
3621ac5a404SSelvin Xavier
3631ac5a404SSelvin Xavier /* Context Tables */
bnxt_qplib_free_ctx(struct bnxt_qplib_res * res,struct bnxt_qplib_ctx * ctx)3640c4dcd60SDevesh Sharma void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
3651ac5a404SSelvin Xavier struct bnxt_qplib_ctx *ctx)
3661ac5a404SSelvin Xavier {
3671ac5a404SSelvin Xavier int i;
3681ac5a404SSelvin Xavier
3690c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
3700c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
3710c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
3720c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
3730c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
3741ac5a404SSelvin Xavier for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
3750c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
3760c4dcd60SDevesh Sharma /* restore original pde level before destroy */
3770c4dcd60SDevesh Sharma ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
3780c4dcd60SDevesh Sharma bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
3790c4dcd60SDevesh Sharma bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
3800c4dcd60SDevesh Sharma }
3810c4dcd60SDevesh Sharma
bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res * res,struct bnxt_qplib_ctx * ctx)3820c4dcd60SDevesh Sharma static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
3830c4dcd60SDevesh Sharma struct bnxt_qplib_ctx *ctx)
3840c4dcd60SDevesh Sharma {
3850c4dcd60SDevesh Sharma struct bnxt_qplib_hwq_attr hwq_attr = {};
3860c4dcd60SDevesh Sharma struct bnxt_qplib_sg_info sginfo = {};
3870c4dcd60SDevesh Sharma struct bnxt_qplib_tqm_ctx *tqmctx;
38814611b9bSKalesh AP int rc;
3890c4dcd60SDevesh Sharma int i;
3900c4dcd60SDevesh Sharma
3910c4dcd60SDevesh Sharma tqmctx = &ctx->tqm_ctx;
3920c4dcd60SDevesh Sharma
3930c4dcd60SDevesh Sharma sginfo.pgsize = PAGE_SIZE;
3940c4dcd60SDevesh Sharma sginfo.pgshft = PAGE_SHIFT;
3950c4dcd60SDevesh Sharma hwq_attr.sginfo = &sginfo;
3960c4dcd60SDevesh Sharma hwq_attr.res = res;
3970c4dcd60SDevesh Sharma hwq_attr.type = HWQ_TYPE_CTX;
3980c4dcd60SDevesh Sharma hwq_attr.depth = 512;
3990c4dcd60SDevesh Sharma hwq_attr.stride = sizeof(u64);
4000c4dcd60SDevesh Sharma /* Alloc pdl buffer */
4010c4dcd60SDevesh Sharma rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
4020c4dcd60SDevesh Sharma if (rc)
4030c4dcd60SDevesh Sharma goto out;
4040c4dcd60SDevesh Sharma /* Save original pdl level */
4050c4dcd60SDevesh Sharma tqmctx->pde_level = tqmctx->pde.level;
4060c4dcd60SDevesh Sharma
4070c4dcd60SDevesh Sharma hwq_attr.stride = 1;
4080c4dcd60SDevesh Sharma for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
4090c4dcd60SDevesh Sharma if (!tqmctx->qcount[i])
4100c4dcd60SDevesh Sharma continue;
4110c4dcd60SDevesh Sharma hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
4120c4dcd60SDevesh Sharma rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
4130c4dcd60SDevesh Sharma if (rc)
4140c4dcd60SDevesh Sharma goto out;
4150c4dcd60SDevesh Sharma }
4160c4dcd60SDevesh Sharma out:
4170c4dcd60SDevesh Sharma return rc;
4180c4dcd60SDevesh Sharma }
4190c4dcd60SDevesh Sharma
/* Program the TQM PDE table with the page tables of each allocated TQM
 * queue.  Each queue i owns a fixed window of MAX_TQM_ALLOC_BLK_SIZE PDE
 * slots starting at offset j = i * MAX_TQM_ALLOC_BLK_SIZE.  Finally
 * pde.level is adjusted to reflect how the table was actually programmed.
 */
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;	/* index of the first queue with entries */
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		/* Skip queues that were never allocated */
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			/* 2-level queue: point PDEs at each LVL_1 PBL page */
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			/* 0/1-level queue: a single PDE covers the LVL_0 page */
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}
4620c4dcd60SDevesh Sharma
bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res * res,struct bnxt_qplib_ctx * ctx)4630c4dcd60SDevesh Sharma static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
4640c4dcd60SDevesh Sharma struct bnxt_qplib_ctx *ctx)
4650c4dcd60SDevesh Sharma {
46614611b9bSKalesh AP int rc;
4670c4dcd60SDevesh Sharma
4680c4dcd60SDevesh Sharma rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
4690c4dcd60SDevesh Sharma if (rc)
4700c4dcd60SDevesh Sharma goto fail;
4710c4dcd60SDevesh Sharma
4720c4dcd60SDevesh Sharma bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
4730c4dcd60SDevesh Sharma fail:
4740c4dcd60SDevesh Sharma return rc;
4751ac5a404SSelvin Xavier }
4761ac5a404SSelvin Xavier
4771ac5a404SSelvin Xavier /*
4781ac5a404SSelvin Xavier * Routine: bnxt_qplib_alloc_ctx
4791ac5a404SSelvin Xavier * Description:
4801ac5a404SSelvin Xavier * Context tables are memories which are used by the chip fw.
4811ac5a404SSelvin Xavier * The 6 tables defined are:
4821ac5a404SSelvin Xavier * QPC ctx - holds QP states
4831ac5a404SSelvin Xavier * MRW ctx - holds memory region and window
4841ac5a404SSelvin Xavier * SRQ ctx - holds shared RQ states
4851ac5a404SSelvin Xavier * CQ ctx - holds completion queue states
4861ac5a404SSelvin Xavier * TQM ctx - holds Tx Queue Manager context
4871ac5a404SSelvin Xavier * TIM ctx - holds timer context
4881ac5a404SSelvin Xavier * Depending on the size of the tbl requested, either a 1 Page Buffer List
4891ac5a404SSelvin Xavier * or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
4901ac5a404SSelvin Xavier * instead.
4911ac5a404SSelvin Xavier * Table might be employed as follows:
4921ac5a404SSelvin Xavier * For 0 < ctx size <= 1 PAGE, 0 level of ind is used
4931ac5a404SSelvin Xavier * For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
4941ac5a404SSelvin Xavier * For 512 < ctx size <= MAX, 2 levels of ind is used
4951ac5a404SSelvin Xavier * Returns:
4961ac5a404SSelvin Xavier * 0 if success, else -ERRORS
4971ac5a404SSelvin Xavier */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc;

	/* VFs and P5 chips only need the stats context; the firmware owns
	 * the backing stores in those cases.
	 */
	if (virt_fn || is_p5)
		goto stats_alloc;

	/* Common attributes for all context tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;

	/* QPC Tables */
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	/* TIM Buffer: 16 bytes of timer context per QP */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;

stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}
5661ac5a404SSelvin Xavier
bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res * res,struct bnxt_qplib_sgid_tbl * sgid_tbl)5671ac5a404SSelvin Xavier static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
5681ac5a404SSelvin Xavier struct bnxt_qplib_sgid_tbl *sgid_tbl)
5691ac5a404SSelvin Xavier {
5701ac5a404SSelvin Xavier kfree(sgid_tbl->tbl);
5711ac5a404SSelvin Xavier kfree(sgid_tbl->hw_id);
5721ac5a404SSelvin Xavier kfree(sgid_tbl->ctx);
5735fac5b1bSKalesh AP kfree(sgid_tbl->vlan);
5741ac5a404SSelvin Xavier sgid_tbl->tbl = NULL;
5751ac5a404SSelvin Xavier sgid_tbl->hw_id = NULL;
5761ac5a404SSelvin Xavier sgid_tbl->ctx = NULL;
5775fac5b1bSKalesh AP sgid_tbl->vlan = NULL;
5781ac5a404SSelvin Xavier sgid_tbl->max = 0;
5791ac5a404SSelvin Xavier sgid_tbl->active = 0;
5801ac5a404SSelvin Xavier }
5811ac5a404SSelvin Xavier
bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res * res,struct bnxt_qplib_sgid_tbl * sgid_tbl,u16 max)5821ac5a404SSelvin Xavier static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
5831ac5a404SSelvin Xavier struct bnxt_qplib_sgid_tbl *sgid_tbl,
5841ac5a404SSelvin Xavier u16 max)
5851ac5a404SSelvin Xavier {
586c56b593dSSelvin Xavier sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
5871ac5a404SSelvin Xavier if (!sgid_tbl->tbl)
5881ac5a404SSelvin Xavier return -ENOMEM;
5891ac5a404SSelvin Xavier
5901ac5a404SSelvin Xavier sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
5911ac5a404SSelvin Xavier if (!sgid_tbl->hw_id)
5921ac5a404SSelvin Xavier goto out_free1;
5931ac5a404SSelvin Xavier
5941ac5a404SSelvin Xavier sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
5951ac5a404SSelvin Xavier if (!sgid_tbl->ctx)
5961ac5a404SSelvin Xavier goto out_free2;
5971ac5a404SSelvin Xavier
5985fac5b1bSKalesh AP sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
5995fac5b1bSKalesh AP if (!sgid_tbl->vlan)
6005fac5b1bSKalesh AP goto out_free3;
6015fac5b1bSKalesh AP
6021ac5a404SSelvin Xavier sgid_tbl->max = max;
6031ac5a404SSelvin Xavier return 0;
6045fac5b1bSKalesh AP out_free3:
6055fac5b1bSKalesh AP kfree(sgid_tbl->ctx);
6065fac5b1bSKalesh AP sgid_tbl->ctx = NULL;
6071ac5a404SSelvin Xavier out_free2:
6081ac5a404SSelvin Xavier kfree(sgid_tbl->hw_id);
6091ac5a404SSelvin Xavier sgid_tbl->hw_id = NULL;
6101ac5a404SSelvin Xavier out_free1:
6111ac5a404SSelvin Xavier kfree(sgid_tbl->tbl);
6121ac5a404SSelvin Xavier sgid_tbl->tbl = NULL;
6131ac5a404SSelvin Xavier return -ENOMEM;
6141ac5a404SSelvin Xavier };
6151ac5a404SSelvin Xavier
bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res * res,struct bnxt_qplib_sgid_tbl * sgid_tbl)6161ac5a404SSelvin Xavier static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
6171ac5a404SSelvin Xavier struct bnxt_qplib_sgid_tbl *sgid_tbl)
6181ac5a404SSelvin Xavier {
6191ac5a404SSelvin Xavier int i;
6201ac5a404SSelvin Xavier
6211ac5a404SSelvin Xavier for (i = 0; i < sgid_tbl->max; i++) {
6221ac5a404SSelvin Xavier if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
6231ac5a404SSelvin Xavier sizeof(bnxt_qplib_gid_zero)))
624c56b593dSSelvin Xavier bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
625c56b593dSSelvin Xavier sgid_tbl->tbl[i].vlan_id, true);
6261ac5a404SSelvin Xavier }
627c56b593dSSelvin Xavier memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
6281ac5a404SSelvin Xavier memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
6295fac5b1bSKalesh AP memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
6301ac5a404SSelvin Xavier sgid_tbl->active = 0;
6311ac5a404SSelvin Xavier }
6321ac5a404SSelvin Xavier
bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl * sgid_tbl,struct net_device * netdev)6331ac5a404SSelvin Xavier static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
6341ac5a404SSelvin Xavier struct net_device *netdev)
6351ac5a404SSelvin Xavier {
636c56b593dSSelvin Xavier u32 i;
637c56b593dSSelvin Xavier
638c56b593dSSelvin Xavier for (i = 0; i < sgid_tbl->max; i++)
639c56b593dSSelvin Xavier sgid_tbl->tbl[i].vlan_id = 0xffff;
640c56b593dSSelvin Xavier
6411ac5a404SSelvin Xavier memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
6421ac5a404SSelvin Xavier }
6431ac5a404SSelvin Xavier
6441ac5a404SSelvin Xavier /* PDs */
bnxt_qplib_alloc_pd(struct bnxt_qplib_res * res,struct bnxt_qplib_pd * pd)645213d2b9bSSelvin Xavier int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
6461ac5a404SSelvin Xavier {
647213d2b9bSSelvin Xavier struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
6481ac5a404SSelvin Xavier u32 bit_num;
649213d2b9bSSelvin Xavier int rc = 0;
6501ac5a404SSelvin Xavier
651213d2b9bSSelvin Xavier mutex_lock(&res->pd_tbl_lock);
6521ac5a404SSelvin Xavier bit_num = find_first_bit(pdt->tbl, pdt->max);
653213d2b9bSSelvin Xavier if (bit_num == pdt->max) {
654213d2b9bSSelvin Xavier rc = -ENOMEM;
655213d2b9bSSelvin Xavier goto exit;
656213d2b9bSSelvin Xavier }
6571ac5a404SSelvin Xavier
6581ac5a404SSelvin Xavier /* Found unused PD */
6591ac5a404SSelvin Xavier clear_bit(bit_num, pdt->tbl);
6601ac5a404SSelvin Xavier pd->id = bit_num;
661213d2b9bSSelvin Xavier exit:
662213d2b9bSSelvin Xavier mutex_unlock(&res->pd_tbl_lock);
663213d2b9bSSelvin Xavier return rc;
6641ac5a404SSelvin Xavier }
6651ac5a404SSelvin Xavier
bnxt_qplib_dealloc_pd(struct bnxt_qplib_res * res,struct bnxt_qplib_pd_tbl * pdt,struct bnxt_qplib_pd * pd)6661ac5a404SSelvin Xavier int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
6671ac5a404SSelvin Xavier struct bnxt_qplib_pd_tbl *pdt,
6681ac5a404SSelvin Xavier struct bnxt_qplib_pd *pd)
6691ac5a404SSelvin Xavier {
670213d2b9bSSelvin Xavier int rc = 0;
671213d2b9bSSelvin Xavier
672213d2b9bSSelvin Xavier mutex_lock(&res->pd_tbl_lock);
6731ac5a404SSelvin Xavier if (test_and_set_bit(pd->id, pdt->tbl)) {
67408920b8fSJoe Perches dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
6751ac5a404SSelvin Xavier pd->id);
676213d2b9bSSelvin Xavier rc = -EINVAL;
677213d2b9bSSelvin Xavier goto exit;
6781ac5a404SSelvin Xavier }
6791ac5a404SSelvin Xavier pd->id = 0;
680213d2b9bSSelvin Xavier exit:
681213d2b9bSSelvin Xavier mutex_unlock(&res->pd_tbl_lock);
682213d2b9bSSelvin Xavier return rc;
6831ac5a404SSelvin Xavier }
6841ac5a404SSelvin Xavier
bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl * pdt)6851ac5a404SSelvin Xavier static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
6861ac5a404SSelvin Xavier {
6871ac5a404SSelvin Xavier kfree(pdt->tbl);
6881ac5a404SSelvin Xavier pdt->tbl = NULL;
6891ac5a404SSelvin Xavier pdt->max = 0;
6901ac5a404SSelvin Xavier }
6911ac5a404SSelvin Xavier
/* Allocate and initialize the PD free-id bitmap for @max PDs.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	/* Round up so a max that is not a multiple of 8 still gets a
	 * bitmap covering every PD id: the previous "max >> 3" truncated
	 * and could undersize the buffer that find_first_bit() scans
	 * over pdt->max bits.
	 */
	bytes = (max + 7) >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* All-ones == every PD id free */
	memset(pdt->tbl, 0xFF, bytes);
	mutex_init(&res->pd_tbl_lock);

	return 0;
}
7111ac5a404SSelvin Xavier
7121ac5a404SSelvin Xavier /* DPIs */
/* Allocate a doorbell page index (DPI) and map its doorbell region.
 *
 * @res:  device resource context
 * @dpi:  DPI descriptor to fill in
 * @app:  opaque owner cookie stored in the app table
 * @type: BNXT_QPLIB_DPI_TYPE_* selecting the mapping flavor
 *
 * Returns 0 on success, -ENOMEM when no DPI is free or the doorbell
 * mapping fails. On mapping failure the claimed bit and app slot are
 * rolled back (the original code returned success with a NULL dbr).
 */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_dpi *dpi,
			 void *app, u8 type)
{
	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
	struct bnxt_qplib_reg_desc *reg;
	u32 bit_num;
	u64 umaddr;

	reg = &dpit->wcreg;
	mutex_lock(&res->dpi_tbl_lock);

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max) {
		mutex_unlock(&res->dpi_tbl_lock);
		return -ENOMEM;
	}

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->bit = bit_num;
	dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;

	umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
	dpi->umdbr = umaddr;

	switch (type) {
	case BNXT_QPLIB_DPI_TYPE_KERNEL:
		/* privileged dbr was already mapped just initialize it. */
		dpi->umdbr = dpit->ucreg.bar_base +
			     dpit->ucreg.offset + bit_num * PAGE_SIZE;
		dpi->dbr = dpit->priv_db;
		dpi->dpi = dpi->bit;
		break;
	case BNXT_QPLIB_DPI_TYPE_WC:
		dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
		break;
	default:
		dpi->dbr = ioremap(umaddr, PAGE_SIZE);
		break;
	}

	if (!dpi->dbr) {
		dev_err(&res->pdev->dev, "FP: DBR mapping failed\n");
		/* Roll back the claim so the DPI can be reused */
		dpit->app_tbl[bit_num] = NULL;
		set_bit(bit_num, dpit->tbl);
		mutex_unlock(&res->dpi_tbl_lock);
		return -ENOMEM;
	}

	dpi->type = type;
	mutex_unlock(&res->dpi_tbl_lock);
	return 0;
}
7621ac5a404SSelvin Xavier
/* Release a DPI: unmap its doorbell (user/WC types only) and return its
 * bit to the free bitmap.
 *
 * Returns 0 on success, -EINVAL if the DPI was not marked allocated.
 */
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi *dpi)
{
	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;

	mutex_lock(&res->dpi_tbl_lock);
	/* Kernel-type DPIs share the pre-mapped privileged doorbell, so only
	 * mappings created with ioremap()/ioremap_wc() are unmapped here.
	 * NOTE(review): a zero dpi->dpi also skips the unmap — presumably 0
	 * never identifies a mapped non-kernel DPI; confirm against the
	 * dpi/offset layout in bnxt_qplib_alloc_dpi().
	 */
	if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL)
		pci_iounmap(res->pdev, dpi->dbr);

	/* Already-set bit means this DPI was not allocated (double free?) */
	if (test_and_set_bit(dpi->bit, dpit->tbl)) {
		dev_warn(&res->pdev->dev,
			 "Freeing an unused DPI? dpi = %d, bit = %d\n",
			 dpi->dpi, dpi->bit);
		mutex_unlock(&res->dpi_tbl_lock);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->bit] = NULL;
	memset(dpi, 0, sizeof(*dpi));
	mutex_unlock(&res->dpi_tbl_lock);
	return 0;
}
7851ac5a404SSelvin Xavier
bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res * res,struct bnxt_qplib_dpi_tbl * dpit)7861ac5a404SSelvin Xavier static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
7871ac5a404SSelvin Xavier struct bnxt_qplib_dpi_tbl *dpit)
7881ac5a404SSelvin Xavier {
7891ac5a404SSelvin Xavier kfree(dpit->tbl);
7901ac5a404SSelvin Xavier kfree(dpit->app_tbl);
7910ac20fafSSelvin Xavier dpit->tbl = NULL;
7920ac20fafSSelvin Xavier dpit->app_tbl = NULL;
7930ac20fafSSelvin Xavier dpit->max = 0;
7941ac5a404SSelvin Xavier }
7951ac5a404SSelvin Xavier
bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res * res,struct bnxt_qplib_dev_attr * dev_attr)7961ac5a404SSelvin Xavier static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
7970ac20fafSSelvin Xavier struct bnxt_qplib_dev_attr *dev_attr)
7981ac5a404SSelvin Xavier {
7990ac20fafSSelvin Xavier struct bnxt_qplib_dpi_tbl *dpit;
8000ac20fafSSelvin Xavier struct bnxt_qplib_reg_desc *reg;
8010ac20fafSSelvin Xavier unsigned long bar_len;
8020ac20fafSSelvin Xavier u32 dbr_offset;
8030ac20fafSSelvin Xavier u32 bytes;
8041ac5a404SSelvin Xavier
8050ac20fafSSelvin Xavier dpit = &res->dpi_tbl;
8060ac20fafSSelvin Xavier reg = &dpit->wcreg;
8070ac20fafSSelvin Xavier
8080ac20fafSSelvin Xavier if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
8090ac20fafSSelvin Xavier /* Offest should come from L2 driver */
8100ac20fafSSelvin Xavier dbr_offset = dev_attr->l2_db_size;
8110ac20fafSSelvin Xavier dpit->ucreg.offset = dbr_offset;
8120ac20fafSSelvin Xavier dpit->wcreg.offset = dbr_offset;
8131ac5a404SSelvin Xavier }
8141ac5a404SSelvin Xavier
8150ac20fafSSelvin Xavier bar_len = pci_resource_len(res->pdev, reg->bar_id);
8160ac20fafSSelvin Xavier dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
8170ac20fafSSelvin Xavier if (dev_attr->max_dpi)
8180ac20fafSSelvin Xavier dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);
8191ac5a404SSelvin Xavier
8201ac5a404SSelvin Xavier dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
821e5b89843SMarkus Elfring if (!dpit->app_tbl)
8220ac20fafSSelvin Xavier return -ENOMEM;
8231ac5a404SSelvin Xavier
8241ac5a404SSelvin Xavier bytes = dpit->max >> 3;
8251ac5a404SSelvin Xavier if (!bytes)
8261ac5a404SSelvin Xavier bytes = 1;
8271ac5a404SSelvin Xavier
8281ac5a404SSelvin Xavier dpit->tbl = kmalloc(bytes, GFP_KERNEL);
8291ac5a404SSelvin Xavier if (!dpit->tbl) {
8301ac5a404SSelvin Xavier kfree(dpit->app_tbl);
8311ac5a404SSelvin Xavier dpit->app_tbl = NULL;
8320ac20fafSSelvin Xavier return -ENOMEM;
8331ac5a404SSelvin Xavier }
8341ac5a404SSelvin Xavier
8351ac5a404SSelvin Xavier memset((u8 *)dpit->tbl, 0xFF, bytes);
83664b63265SKashyap Desai mutex_init(&res->dpi_tbl_lock);
8370ac20fafSSelvin Xavier dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;
8381ac5a404SSelvin Xavier
8391ac5a404SSelvin Xavier return 0;
840e5b89843SMarkus Elfring
8411ac5a404SSelvin Xavier }
8421ac5a404SSelvin Xavier
8431ac5a404SSelvin Xavier /* Stats */
bnxt_qplib_free_stats_ctx(struct pci_dev * pdev,struct bnxt_qplib_stats * stats)8441ac5a404SSelvin Xavier static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
8451ac5a404SSelvin Xavier struct bnxt_qplib_stats *stats)
8461ac5a404SSelvin Xavier {
8471ac5a404SSelvin Xavier if (stats->dma) {
8481ac5a404SSelvin Xavier dma_free_coherent(&pdev->dev, stats->size,
8491ac5a404SSelvin Xavier stats->dma, stats->dma_map);
8501ac5a404SSelvin Xavier }
8511ac5a404SSelvin Xavier memset(stats, 0, sizeof(*stats));
8521ac5a404SSelvin Xavier stats->fw_id = -1;
8531ac5a404SSelvin Xavier }
8541ac5a404SSelvin Xavier
bnxt_qplib_alloc_stats_ctx(struct pci_dev * pdev,struct bnxt_qplib_chip_ctx * cctx,struct bnxt_qplib_stats * stats)8551ac5a404SSelvin Xavier static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
8560c23af52SNaresh Kumar PBS struct bnxt_qplib_chip_ctx *cctx,
8571ac5a404SSelvin Xavier struct bnxt_qplib_stats *stats)
8581ac5a404SSelvin Xavier {
8591ac5a404SSelvin Xavier memset(stats, 0, sizeof(*stats));
8601ac5a404SSelvin Xavier stats->fw_id = -1;
8610c23af52SNaresh Kumar PBS stats->size = cctx->hw_stats_size;
8621ac5a404SSelvin Xavier stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
8631ac5a404SSelvin Xavier &stats->dma_map, GFP_KERNEL);
8641ac5a404SSelvin Xavier if (!stats->dma) {
86508920b8fSJoe Perches dev_err(&pdev->dev, "Stats DMA allocation failed\n");
8661ac5a404SSelvin Xavier return -ENOMEM;
8671ac5a404SSelvin Xavier }
8681ac5a404SSelvin Xavier return 0;
8691ac5a404SSelvin Xavier }
8701ac5a404SSelvin Xavier
bnxt_qplib_cleanup_res(struct bnxt_qplib_res * res)8711ac5a404SSelvin Xavier void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
8721ac5a404SSelvin Xavier {
8731ac5a404SSelvin Xavier bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
8741ac5a404SSelvin Xavier }
8751ac5a404SSelvin Xavier
bnxt_qplib_init_res(struct bnxt_qplib_res * res)8761ac5a404SSelvin Xavier int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
8771ac5a404SSelvin Xavier {
8781ac5a404SSelvin Xavier bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
8791ac5a404SSelvin Xavier
8801ac5a404SSelvin Xavier return 0;
8811ac5a404SSelvin Xavier }
8821ac5a404SSelvin Xavier
bnxt_qplib_free_res(struct bnxt_qplib_res * res)8831ac5a404SSelvin Xavier void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
8841ac5a404SSelvin Xavier {
8851ac5a404SSelvin Xavier bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
8861ac5a404SSelvin Xavier bnxt_qplib_free_pd_tbl(&res->pd_tbl);
8871ac5a404SSelvin Xavier bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
8881ac5a404SSelvin Xavier }
8891ac5a404SSelvin Xavier
bnxt_qplib_alloc_res(struct bnxt_qplib_res * res,struct pci_dev * pdev,struct net_device * netdev,struct bnxt_qplib_dev_attr * dev_attr)8901ac5a404SSelvin Xavier int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
8911ac5a404SSelvin Xavier struct net_device *netdev,
8921ac5a404SSelvin Xavier struct bnxt_qplib_dev_attr *dev_attr)
8931ac5a404SSelvin Xavier {
89414611b9bSKalesh AP int rc;
8951ac5a404SSelvin Xavier
8961ac5a404SSelvin Xavier res->pdev = pdev;
8971ac5a404SSelvin Xavier res->netdev = netdev;
8981ac5a404SSelvin Xavier
8991ac5a404SSelvin Xavier rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
9001ac5a404SSelvin Xavier if (rc)
9011ac5a404SSelvin Xavier goto fail;
9021ac5a404SSelvin Xavier
9031ac5a404SSelvin Xavier rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
9041ac5a404SSelvin Xavier if (rc)
9051ac5a404SSelvin Xavier goto fail;
9061ac5a404SSelvin Xavier
9070ac20fafSSelvin Xavier rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr);
9081ac5a404SSelvin Xavier if (rc)
9091ac5a404SSelvin Xavier goto fail;
9101ac5a404SSelvin Xavier
9111ac5a404SSelvin Xavier return 0;
9121ac5a404SSelvin Xavier fail:
9131ac5a404SSelvin Xavier bnxt_qplib_free_res(res);
9141ac5a404SSelvin Xavier return rc;
9151ac5a404SSelvin Xavier }
91635f5ace5SDevesh Sharma
bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res * res)9170ac20fafSSelvin Xavier void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
9180ac20fafSSelvin Xavier {
9190ac20fafSSelvin Xavier struct bnxt_qplib_reg_desc *reg;
9200ac20fafSSelvin Xavier
9210ac20fafSSelvin Xavier reg = &res->dpi_tbl.ucreg;
9220ac20fafSSelvin Xavier if (reg->bar_reg)
9230ac20fafSSelvin Xavier pci_iounmap(res->pdev, reg->bar_reg);
9240ac20fafSSelvin Xavier reg->bar_reg = NULL;
9250ac20fafSSelvin Xavier reg->bar_base = 0;
9260ac20fafSSelvin Xavier reg->len = 0;
9270ac20fafSSelvin Xavier reg->bar_id = 0;
9280ac20fafSSelvin Xavier }
9290ac20fafSSelvin Xavier
bnxt_qplib_map_db_bar(struct bnxt_qplib_res * res)9300ac20fafSSelvin Xavier int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
9310ac20fafSSelvin Xavier {
9320ac20fafSSelvin Xavier struct bnxt_qplib_reg_desc *ucreg;
9330ac20fafSSelvin Xavier struct bnxt_qplib_reg_desc *wcreg;
9340ac20fafSSelvin Xavier
9350ac20fafSSelvin Xavier wcreg = &res->dpi_tbl.wcreg;
9360ac20fafSSelvin Xavier wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
9370ac20fafSSelvin Xavier wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);
9380ac20fafSSelvin Xavier
9390ac20fafSSelvin Xavier ucreg = &res->dpi_tbl.ucreg;
9400ac20fafSSelvin Xavier ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
9410ac20fafSSelvin Xavier ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
9420ac20fafSSelvin Xavier ucreg->len = ucreg->offset + PAGE_SIZE;
9430ac20fafSSelvin Xavier if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
9440ac20fafSSelvin Xavier dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d",
9450ac20fafSSelvin Xavier (int)ucreg->len);
9460ac20fafSSelvin Xavier return -EINVAL;
9470ac20fafSSelvin Xavier }
9480ac20fafSSelvin Xavier ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
9490ac20fafSSelvin Xavier if (!ucreg->bar_reg) {
950d1d7fc3bSColin Ian King dev_err(&res->pdev->dev, "privileged dpi map failed!");
9510ac20fafSSelvin Xavier return -ENOMEM;
9520ac20fafSSelvin Xavier }
9530ac20fafSSelvin Xavier
9540ac20fafSSelvin Xavier return 0;
9550ac20fafSSelvin Xavier }
9560ac20fafSSelvin Xavier
bnxt_qplib_determine_atomics(struct pci_dev * dev)95735f5ace5SDevesh Sharma int bnxt_qplib_determine_atomics(struct pci_dev *dev)
95835f5ace5SDevesh Sharma {
95935f5ace5SDevesh Sharma int comp;
96035f5ace5SDevesh Sharma u16 ctl2;
96135f5ace5SDevesh Sharma
96235f5ace5SDevesh Sharma comp = pci_enable_atomic_ops_to_root(dev,
96335f5ace5SDevesh Sharma PCI_EXP_DEVCAP2_ATOMIC_COMP32);
96435f5ace5SDevesh Sharma if (comp)
96535f5ace5SDevesh Sharma return -EOPNOTSUPP;
96635f5ace5SDevesh Sharma comp = pci_enable_atomic_ops_to_root(dev,
96735f5ace5SDevesh Sharma PCI_EXP_DEVCAP2_ATOMIC_COMP64);
96835f5ace5SDevesh Sharma if (comp)
96935f5ace5SDevesh Sharma return -EOPNOTSUPP;
97035f5ace5SDevesh Sharma pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
97135f5ace5SDevesh Sharma return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
97235f5ace5SDevesh Sharma }
973