1 /* 2 * Broadcom NetXtreme-E RoCE driver. 3 * 4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term 5 * Broadcom refers to Broadcom Limited and/or its subsidiaries. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in 21 * the documentation and/or other materials provided with the 22 * distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35 * 36 * Description: QPLib resource manager 37 */ 38 39 #define dev_fmt(fmt) "QPLIB: " fmt 40 41 #include <linux/spinlock.h> 42 #include <linux/pci.h> 43 #include <linux/interrupt.h> 44 #include <linux/inetdevice.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/if_vlan.h> 47 #include "roce_hsi.h" 48 #include "qplib_res.h" 49 #include "qplib_sp.h" 50 #include "qplib_rcfw.h" 51 52 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, 53 struct bnxt_qplib_stats *stats); 54 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, 55 struct bnxt_qplib_stats *stats); 56 57 /* PBL */ 58 static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, 59 bool is_umem) 60 { 61 int i; 62 63 if (!is_umem) { 64 for (i = 0; i < pbl->pg_count; i++) { 65 if (pbl->pg_arr[i]) 66 dma_free_coherent(&pdev->dev, pbl->pg_size, 67 (void *)((unsigned long) 68 pbl->pg_arr[i] & 69 PAGE_MASK), 70 pbl->pg_map_arr[i]); 71 else 72 dev_warn(&pdev->dev, 73 "PBL free pg_arr[%d] empty?!\n", i); 74 pbl->pg_arr[i] = NULL; 75 } 76 } 77 kfree(pbl->pg_arr); 78 pbl->pg_arr = NULL; 79 kfree(pbl->pg_map_arr); 80 pbl->pg_map_arr = NULL; 81 pbl->pg_count = 0; 82 pbl->pg_size = 0; 83 } 84 85 static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, 86 struct scatterlist *sghead, u32 pages, 87 u32 nmaps, u32 pg_size) 88 { 89 struct sg_dma_page_iter sg_iter; 90 bool is_umem = false; 91 int i; 92 93 /* page ptr arrays */ 94 pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL); 95 if (!pbl->pg_arr) 96 return -ENOMEM; 97 98 pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL); 99 if (!pbl->pg_map_arr) { 100 kfree(pbl->pg_arr); 101 pbl->pg_arr = NULL; 102 return -ENOMEM; 103 } 104 pbl->pg_count = 0; 105 pbl->pg_size = pg_size; 106 107 if (!sghead) { 108 for (i = 0; i < pages; i++) { 109 pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev, 110 pbl->pg_size, 111 &pbl->pg_map_arr[i], 112 GFP_KERNEL); 113 if (!pbl->pg_arr[i]) 114 goto fail; 115 pbl->pg_count++; 116 } 117 } else { 118 i = 0; 119 is_umem = true; 120 for_each_sg_dma_page(sghead, &sg_iter, nmaps, 0) { 121 pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter); 122 pbl->pg_arr[i] = NULL; 123 pbl->pg_count++; 124 i++; 125 } 126 } 127 128 return 0; 129 130 fail: 131 __free_pbl(pdev, pbl, is_umem); 132 return -ENOMEM; 133 } 134 135 /* HWQ */ 136 void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq) 137 { 138 int i; 139 140 if (!hwq->max_elements) 141 return; 142 if (hwq->level >= PBL_LVL_MAX) 143 return; 144 145 for (i = 0; i < hwq->level + 1; i++) { 146 if (i == hwq->level) 147 __free_pbl(pdev, &hwq->pbl[i], hwq->is_user); 148 else 149 __free_pbl(pdev, &hwq->pbl[i], false); 150 } 151 152 hwq->level = PBL_LVL_MAX; 153 hwq->max_elements = 0; 154 hwq->element_size = 0; 155 hwq->prod = 0; 156 hwq->cons = 0; 157 hwq->cp_bit = 0; 158 } 159 160 /* All HWQs are power of 2 in size */ 161 int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq, 162 struct bnxt_qplib_sg_info *sg_info, 163 u32 *elements, u32 element_size, u32 aux, 164 u32 pg_size, enum bnxt_qplib_hwq_type hwq_type) 165 { 166 u32 pages, maps, slots, size, aux_pages = 0, aux_size = 0; 167 dma_addr_t *src_phys_ptr, **dst_virt_ptr; 168 struct scatterlist *sghead = NULL; 169 int i, rc; 170 171 hwq->level = PBL_LVL_MAX; 172 173 slots = roundup_pow_of_two(*elements); 174 if (aux) { 175 aux_size = roundup_pow_of_two(aux); 176 aux_pages = (slots * aux_size) / pg_size; 177 if ((slots * aux_size) % pg_size) 178 aux_pages++; 179 } 180 size = roundup_pow_of_two(element_size); 181 182 if (sg_info) 183 sghead = sg_info->sglist; 184 185 if (!sghead) { 186 hwq->is_user = false; 187 pages = (slots * size) / pg_size + aux_pages; 188 if ((slots * size) % pg_size) 189 pages++; 190 if (!pages) 191 return -EINVAL; 192 maps = 0; 193 } else { 194 hwq->is_user = true; 195 pages = sg_info->npages; 196 maps = sg_info->nmap; 197 } 198 199 /* Alloc the 1st memory block; can be a PDL/PTL/PBL */ 200 if (sghead && (pages == MAX_PBL_LVL_0_PGS)) 201 rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead, 202 pages, maps, pg_size); 203 else 204 rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 205 1, 0, pg_size); 206 if (rc) 207 goto fail; 208 209 hwq->level = PBL_LVL_0; 210 211 if (pages > MAX_PBL_LVL_0_PGS) { 212 if (pages > MAX_PBL_LVL_1_PGS) { 213 /* 2 levels of indirection */ 214 rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL, 215 MAX_PBL_LVL_1_PGS_FOR_LVL_2, 216 0, pg_size); 217 if (rc) 218 goto fail; 219 /* Fill in lvl0 PBL */ 220 dst_virt_ptr = 221 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; 222 src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; 223 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) 224 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = 225 src_phys_ptr[i] | PTU_PDE_VALID; 226 hwq->level = PBL_LVL_1; 227 228 rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead, 229 pages, maps, pg_size); 230 if (rc) 231 goto fail; 232 233 /* Fill in lvl1 PBL */ 234 dst_virt_ptr = 235 (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr; 236 src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr; 237 for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) { 238 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = 239 src_phys_ptr[i] | PTU_PTE_VALID; 240 } 241 if (hwq_type == HWQ_TYPE_QUEUE) { 242 /* Find the last pg of the size */ 243 i = hwq->pbl[PBL_LVL_2].pg_count; 244 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= 245 PTU_PTE_LAST; 246 if (i > 1) 247 dst_virt_ptr[PTR_PG(i - 2)] 248 [PTR_IDX(i - 2)] |= 249 PTU_PTE_NEXT_TO_LAST; 250 } 251 hwq->level = PBL_LVL_2; 252 } else { 253 u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 : 254 PTU_PTE_VALID; 255 256 /* 1 level of indirection */ 257 rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead, 258 pages, maps, pg_size); 259 if (rc) 260 goto fail; 261 /* Fill in lvl0 PBL */ 262 dst_virt_ptr = 263 (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr; 264 src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr; 265 for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) { 266 dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] = 267 src_phys_ptr[i] | flag; 268 } 269 if (hwq_type == HWQ_TYPE_QUEUE) { 270 /* Find the last pg of the size */ 271 i = hwq->pbl[PBL_LVL_1].pg_count; 272 dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |= 273 PTU_PTE_LAST; 274 if (i > 1) 275 dst_virt_ptr[PTR_PG(i - 2)] 276 [PTR_IDX(i - 2)] |= 277 PTU_PTE_NEXT_TO_LAST; 278 } 279 hwq->level = PBL_LVL_1; 280 } 281 } 282 hwq->pdev = pdev; 283 spin_lock_init(&hwq->lock); 284 hwq->prod = 0; 285 hwq->cons = 0; 286 *elements = hwq->max_elements = slots; 287 hwq->element_size = size; 288 289 /* For direct access to the elements */ 290 hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr; 291 hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr; 292 293 return 0; 294 295 fail: 296 bnxt_qplib_free_hwq(pdev, hwq); 297 return -ENOMEM; 298 } 299 300 /* Context Tables */ 301 void bnxt_qplib_free_ctx(struct pci_dev *pdev, 302 struct bnxt_qplib_ctx *ctx) 303 { 304 int i; 305 306 bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl); 307 bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl); 308 bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl); 309 bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl); 310 bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl); 311 for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) 312 bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]); 313 bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde); 314 bnxt_qplib_free_stats_ctx(pdev, &ctx->stats); 315 } 316 317 /* 318 * Routine: bnxt_qplib_alloc_ctx 319 * Description: 320 * Context tables are memories which are used by the chip fw. 321 * The 6 tables defined are: 322 * QPC ctx - holds QP states 323 * MRW ctx - holds memory region and window 324 * SRQ ctx - holds shared RQ states 325 * CQ ctx - holds completion queue states 326 * TQM ctx - holds Tx Queue Manager context 327 * TIM ctx - holds timer context 328 * Depending on the size of the tbl requested, either a 1 Page Buffer List 329 * or a 1-to-2-stage indirection Page Directory List + 1 PBL is used 330 * instead. 331 * Table might be employed as follows: 332 * For 0 < ctx size <= 1 PAGE, 0 level of ind is used 333 * For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used 334 * For 512 < ctx size <= MAX, 2 levels of ind is used 335 * Returns: 336 * 0 if success, else -ERRORS 337 */ 338 int bnxt_qplib_alloc_ctx(struct pci_dev *pdev, 339 struct bnxt_qplib_ctx *ctx, 340 bool virt_fn, bool is_p5) 341 { 342 int i, j, k, rc = 0; 343 int fnz_idx = -1; 344 __le64 **pbl_ptr; 345 346 if (virt_fn || is_p5) 347 goto stats_alloc; 348 349 /* QPC Tables */ 350 ctx->qpc_tbl.max_elements = ctx->qpc_count; 351 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 352 &ctx->qpc_tbl.max_elements, 353 BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0, 354 PAGE_SIZE, HWQ_TYPE_CTX); 355 if (rc) 356 goto fail; 357 358 /* MRW Tables */ 359 ctx->mrw_tbl.max_elements = ctx->mrw_count; 360 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 361 &ctx->mrw_tbl.max_elements, 362 BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0, 363 PAGE_SIZE, HWQ_TYPE_CTX); 364 if (rc) 365 goto fail; 366 367 /* SRQ Tables */ 368 ctx->srqc_tbl.max_elements = ctx->srqc_count; 369 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 370 &ctx->srqc_tbl.max_elements, 371 BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0, 372 PAGE_SIZE, HWQ_TYPE_CTX); 373 if (rc) 374 goto fail; 375 376 /* CQ Tables */ 377 ctx->cq_tbl.max_elements = ctx->cq_count; 378 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 379 &ctx->cq_tbl.max_elements, 380 BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0, 381 PAGE_SIZE, HWQ_TYPE_CTX); 382 if (rc) 383 goto fail; 384 385 /* TQM Buffer */ 386 ctx->tqm_pde.max_elements = 512; 387 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 388 &ctx->tqm_pde.max_elements, sizeof(u64), 389 0, PAGE_SIZE, HWQ_TYPE_CTX); 390 if (rc) 391 goto fail; 392 393 for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) { 394 if (!ctx->tqm_count[i]) 395 continue; 396 ctx->tqm_tbl[i].max_elements = ctx->qpc_count * 397 ctx->tqm_count[i]; 398 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 399 &ctx->tqm_tbl[i].max_elements, 1, 400 0, PAGE_SIZE, HWQ_TYPE_CTX); 401 if (rc) 402 goto fail; 403 } 404 pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr; 405 for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ; 406 i++, j += MAX_TQM_ALLOC_BLK_SIZE) { 407 if (!ctx->tqm_tbl[i].max_elements) 408 continue; 409 if (fnz_idx == -1) 410 fnz_idx = i; 411 switch (ctx->tqm_tbl[i].level) { 412 case PBL_LVL_2: 413 for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count; 414 k++) 415 pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] = 416 cpu_to_le64( 417 ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k] 418 | PTU_PTE_VALID); 419 break; 420 case PBL_LVL_1: 421 case PBL_LVL_0: 422 default: 423 pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64( 424 ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] | 425 PTU_PTE_VALID); 426 break; 427 } 428 } 429 if (fnz_idx == -1) 430 fnz_idx = 0; 431 ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ? 432 PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1; 433 434 /* TIM Buffer */ 435 ctx->tim_tbl.max_elements = ctx->qpc_count * 16; 436 rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 437 &ctx->tim_tbl.max_elements, 1, 438 0, PAGE_SIZE, HWQ_TYPE_CTX); 439 if (rc) 440 goto fail; 441 442 stats_alloc: 443 /* Stats */ 444 rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats); 445 if (rc) 446 goto fail; 447 448 return 0; 449 450 fail: 451 bnxt_qplib_free_ctx(pdev, ctx); 452 return rc; 453 } 454 455 /* GUID */ 456 void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid) 457 { 458 u8 mac[ETH_ALEN]; 459 460 /* MAC-48 to EUI-64 mapping */ 461 memcpy(mac, dev_addr, ETH_ALEN); 462 guid[0] = mac[0] ^ 2; 463 guid[1] = mac[1]; 464 guid[2] = mac[2]; 465 guid[3] = 0xff; 466 guid[4] = 0xfe; 467 guid[5] = mac[3]; 468 guid[6] = mac[4]; 469 guid[7] = mac[5]; 470 } 471 472 static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res, 473 struct bnxt_qplib_sgid_tbl *sgid_tbl) 474 { 475 kfree(sgid_tbl->tbl); 476 kfree(sgid_tbl->hw_id); 477 kfree(sgid_tbl->ctx); 478 kfree(sgid_tbl->vlan); 479 sgid_tbl->tbl = NULL; 480 sgid_tbl->hw_id = NULL; 481 sgid_tbl->ctx = NULL; 482 sgid_tbl->vlan = NULL; 483 sgid_tbl->max = 0; 484 sgid_tbl->active = 0; 485 } 486 487 static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, 488 struct bnxt_qplib_sgid_tbl *sgid_tbl, 489 u16 max) 490 { 491 sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL); 492 if (!sgid_tbl->tbl) 493 return -ENOMEM; 494 495 sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL); 496 if (!sgid_tbl->hw_id) 497 goto out_free1; 498 499 sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL); 500 if (!sgid_tbl->ctx) 501 goto out_free2; 502 503 sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL); 504 if (!sgid_tbl->vlan) 505 goto out_free3; 506 507 sgid_tbl->max = max; 508 return 0; 509 out_free3: 510 kfree(sgid_tbl->ctx); 511 sgid_tbl->ctx = NULL; 512 out_free2: 513 kfree(sgid_tbl->hw_id); 514 sgid_tbl->hw_id = NULL; 515 out_free1: 516 kfree(sgid_tbl->tbl); 517 sgid_tbl->tbl = NULL; 518 return -ENOMEM; 519 }; 520 521 static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res, 522 struct bnxt_qplib_sgid_tbl *sgid_tbl) 523 { 524 int i; 525 526 for (i = 0; i < sgid_tbl->max; i++) { 527 if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero, 528 sizeof(bnxt_qplib_gid_zero))) 529 bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true); 530 } 531 memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); 532 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 533 memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max); 534 sgid_tbl->active = 0; 535 } 536 537 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, 538 struct net_device *netdev) 539 { 540 memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max); 541 memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); 542 } 543 544 static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, 545 struct bnxt_qplib_pkey_tbl *pkey_tbl) 546 { 547 if (!pkey_tbl->tbl) 548 dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); 549 else 550 kfree(pkey_tbl->tbl); 551 552 pkey_tbl->tbl = NULL; 553 pkey_tbl->max = 0; 554 pkey_tbl->active = 0; 555 } 556 557 static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, 558 struct bnxt_qplib_pkey_tbl *pkey_tbl, 559 u16 max) 560 { 561 pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); 562 if (!pkey_tbl->tbl) 563 return -ENOMEM; 564 565 pkey_tbl->max = max; 566 return 0; 567 }; 568 569 /* PDs */ 570 int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) 571 { 572 u32 bit_num; 573 574 bit_num = find_first_bit(pdt->tbl, pdt->max); 575 if (bit_num == pdt->max) 576 return -ENOMEM; 577 578 /* Found unused PD */ 579 clear_bit(bit_num, pdt->tbl); 580 pd->id = bit_num; 581 return 0; 582 } 583 584 int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res, 585 struct bnxt_qplib_pd_tbl *pdt, 586 struct bnxt_qplib_pd *pd) 587 { 588 if (test_and_set_bit(pd->id, pdt->tbl)) { 589 dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n", 590 pd->id); 591 return -EINVAL; 592 } 593 pd->id = 0; 594 return 0; 595 } 596 597 static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt) 598 { 599 kfree(pdt->tbl); 600 pdt->tbl = NULL; 601 pdt->max = 0; 602 } 603 604 static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res, 605 struct bnxt_qplib_pd_tbl *pdt, 606 u32 max) 607 { 608 u32 bytes; 609 610 bytes = max >> 3; 611 if (!bytes) 612 bytes = 1; 613 pdt->tbl = kmalloc(bytes, GFP_KERNEL); 614 if (!pdt->tbl) 615 return -ENOMEM; 616 617 pdt->max = max; 618 memset((u8 *)pdt->tbl, 0xFF, bytes); 619 620 return 0; 621 } 622 623 /* DPIs */ 624 int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit, 625 struct bnxt_qplib_dpi *dpi, 626 void *app) 627 { 628 u32 bit_num; 629 630 bit_num = find_first_bit(dpit->tbl, dpit->max); 631 if (bit_num == dpit->max) 632 return -ENOMEM; 633 634 /* Found unused DPI */ 635 clear_bit(bit_num, dpit->tbl); 636 dpit->app_tbl[bit_num] = app; 637 638 dpi->dpi = bit_num; 639 dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE); 640 dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE); 641 642 return 0; 643 } 644 645 int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res, 646 struct bnxt_qplib_dpi_tbl *dpit, 647 struct bnxt_qplib_dpi *dpi) 648 { 649 if (dpi->dpi >= dpit->max) { 650 dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi); 651 return -EINVAL; 652 } 653 if (test_and_set_bit(dpi->dpi, dpit->tbl)) { 654 dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n", 655 dpi->dpi); 656 return -EINVAL; 657 } 658 if (dpit->app_tbl) 659 dpit->app_tbl[dpi->dpi] = NULL; 660 memset(dpi, 0, sizeof(*dpi)); 661 662 return 0; 663 } 664 665 static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res, 666 struct bnxt_qplib_dpi_tbl *dpit) 667 { 668 kfree(dpit->tbl); 669 kfree(dpit->app_tbl); 670 if (dpit->dbr_bar_reg_iomem) 671 pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 672 memset(dpit, 0, sizeof(*dpit)); 673 } 674 675 static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res, 676 struct bnxt_qplib_dpi_tbl *dpit, 677 u32 dbr_offset) 678 { 679 u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION; 680 resource_size_t bar_reg_base; 681 u32 dbr_len, bytes; 682 683 if (dpit->dbr_bar_reg_iomem) { 684 dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n", 685 dbr_bar_reg); 686 return -EALREADY; 687 } 688 689 bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg); 690 if (!bar_reg_base) { 691 dev_err(&res->pdev->dev, "BAR region %d resc start failed\n", 692 dbr_bar_reg); 693 return -ENOMEM; 694 } 695 696 dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset; 697 if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) { 698 dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len); 699 return -ENOMEM; 700 } 701 702 dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset, 703 dbr_len); 704 if (!dpit->dbr_bar_reg_iomem) { 705 dev_err(&res->pdev->dev, 706 "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg); 707 return -ENOMEM; 708 } 709 710 dpit->unmapped_dbr = bar_reg_base + dbr_offset; 711 dpit->max = dbr_len / PAGE_SIZE; 712 713 dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL); 714 if (!dpit->app_tbl) 715 goto unmap_io; 716 717 bytes = dpit->max >> 3; 718 if (!bytes) 719 bytes = 1; 720 721 dpit->tbl = kmalloc(bytes, GFP_KERNEL); 722 if (!dpit->tbl) { 723 kfree(dpit->app_tbl); 724 dpit->app_tbl = NULL; 725 goto unmap_io; 726 } 727 728 memset((u8 *)dpit->tbl, 0xFF, bytes); 729 730 return 0; 731 732 unmap_io: 733 pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem); 734 return -ENOMEM; 735 } 736 737 /* PKEYs */ 738 static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) 739 { 740 memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); 741 pkey_tbl->active = 0; 742 } 743 744 static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, 745 struct bnxt_qplib_pkey_tbl *pkey_tbl) 746 { 747 u16 pkey = 0xFFFF; 748 749 memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); 750 751 /* pkey default = 0xFFFF */ 752 bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); 753 } 754 755 /* Stats */ 756 static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, 757 struct bnxt_qplib_stats *stats) 758 { 759 if (stats->dma) { 760 dma_free_coherent(&pdev->dev, stats->size, 761 stats->dma, stats->dma_map); 762 } 763 memset(stats, 0, sizeof(*stats)); 764 stats->fw_id = -1; 765 } 766 767 static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, 768 struct bnxt_qplib_stats *stats) 769 { 770 memset(stats, 0, sizeof(*stats)); 771 stats->fw_id = -1; 772 /* 128 byte aligned context memory is required only for 57500. 773 * However making this unconditional, it does not harm previous 774 * generation. 775 */ 776 stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128); 777 stats->dma = dma_alloc_coherent(&pdev->dev, stats->size, 778 &stats->dma_map, GFP_KERNEL); 779 if (!stats->dma) { 780 dev_err(&pdev->dev, "Stats DMA allocation failed\n"); 781 return -ENOMEM; 782 } 783 return 0; 784 } 785 786 void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) 787 { 788 bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); 789 bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); 790 } 791 792 int bnxt_qplib_init_res(struct bnxt_qplib_res *res) 793 { 794 bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); 795 bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); 796 797 return 0; 798 } 799 800 void bnxt_qplib_free_res(struct bnxt_qplib_res *res) 801 { 802 bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); 803 bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); 804 bnxt_qplib_free_pd_tbl(&res->pd_tbl); 805 bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); 806 807 res->netdev = NULL; 808 res->pdev = NULL; 809 } 810 811 int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, 812 struct net_device *netdev, 813 struct bnxt_qplib_dev_attr *dev_attr) 814 { 815 int rc = 0; 816 817 res->pdev = pdev; 818 res->netdev = netdev; 819 820 rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid); 821 if (rc) 822 goto fail; 823 824 rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); 825 if (rc) 826 goto fail; 827 828 rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); 829 if (rc) 830 goto fail; 831 832 rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size); 833 if (rc) 834 goto fail; 835 836 return 0; 837 fail: 838 bnxt_qplib_free_res(res); 839 return rc; 840 } 841