/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
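/* The WQE mode is only selectable on Gen P5 chips; anything older is
 * always forced to the static WQE mode, as the ternary below shows.
 */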
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;

	rdev->chip_ctx = chip_ctx;
	/* rest members to follow eventually */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	bnxt_re_set_drv_mode(rdev, wqe_mode);
	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}

static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
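	/*
	 * num_vf is pre-scaled by 100 so that each (count * vf_pct) / num_vf
	 * expression below computes count * (vf_pct / 100) / nvfs using
	 * integer arithmetic only.
	 */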
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}

static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		      BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	ASSERT_RTNL();
	/* Release the MSIx vectors before queuing unregister */
	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}
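/* Vector BNXT_RE_AEQ_IDX belongs to the RCFW command channel; vectors
 * from BNXT_RE_NQ_IDX upwards each drive one NQ, which is why NQ i is
 * paired with MSI-X vector i + 1 in the loops below (index values per
 * the definitions in bnxt_re.h).
 */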
static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
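/* The RoCE driver has no firmware channel of its own for these HWRM
 * commands; each request is built locally and handed to bnxt_en through
 * the ->bnxt_send_fw_msg() ULP hook.
 */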
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

/* Device */
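/* There is no private marker on the netdev itself; a netdev is treated
 * as a bnxt_re candidate if its ethtool drvinfo reports the "bnxt_en"
 * driver.
 */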
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};
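/* The verb implementations behind these entry points are declared in
 * ib_verbs.h (included above); this table is what ib_set_device_ops()
 * wires into the IB core during registration.
 */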
static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_stats = bnxt_re_ib_alloc_hw_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space */
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_REG_MR)		|
			(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ are directly handled in libbnxt_re */

	rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}
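/* rdev is embedded in the ib_device allocated below; the IB core frees
 * it after bnxt_re_dealloc_driver() has run, not this driver directly.
 */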
static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}
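/* Unaffiliated (function-wide) firmware error events are only
 * acknowledged below; none of them currently triggers any recovery
 * action in the driver.
 */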
static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	if (!srq) {
		ibdev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
			  ROCE_DRV_MODULE_NAME);
		rc = -EINVAL;
		goto done;
	}
	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
done:
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		ibdev_err(NULL, "%s: CQ is NULL, CQN not handled",
			  ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
		rdev->msix_entries[indx].db_offset;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}
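/* Each NQ needs both host-side queue memory (bnxt_qplib_alloc_nq()) and
 * a firmware ring id (bnxt_re_net_ring_alloc()); allocation below and
 * the unwind paths always handle the two as a pair.
 */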
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);

	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected");
		ibdev_warn(&rdev->ibdev,
			   " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}
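/* bnxt_re_dev_stop() below moves every QP to the error state except QP1
 * (the GSI QP) and its shadow QP, which are identified by this helper
 * and left untouched.
 */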
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}
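/* QoS setup in short: read the RoCE app-priority mask from DCB, query
 * firmware for the priority-to-CoS-queue mapping, program the chosen
 * CoS queues, and toggle priority VLAN tagging on the GIDs to match.
 */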
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	/* Config BONO. */
	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
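	/*
	 * Pack the four 16-bit interface version fields into a single u64
	 * (e.g. 1.10.2.95 becomes 0x0001000a0002005f), so later code can
	 * compare interface versions with one integer comparison.
	 */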
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}

static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}
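/* Teardown mirrors bnxt_re_dev_init() in reverse; the BNXT_RE_FLAG_*
 * bits record how far initialization got, so only the stages that
 * actually completed are undone.
 */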
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}

/* worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance with the netdev */
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		rtnl_unlock();
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}
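/* bnxt_re_dev_reg()/bnxt_re_dev_unreg() pair the bnxt_en probe with rdev
 * creation and removal; bnxt_re_add_device()/bnxt_re_remove_device()
 * additionally run the full bnxt_re_dev_init()/bnxt_re_dev_uninit()
 * sequence on top of that.
 */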
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
				  ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
	bnxt_re_dev_uninit(rdev);
	pci_dev_put(rdev->en_dev->pdev);
	bnxt_re_dev_unreg(rdev);
}

static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev, u8 wqe_mode)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev, wqe_mode);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}

static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}

/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
			goto exit;
		}
		goto exit;
	}

	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain may be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;
	bool release = true;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		return NOTIFY_OK;

	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_add_device(&rdev, real_dev,
					BNXT_QPLIB_WQE_MODE_STATIC);
		if (!rc)
			sch_work = true;
		release = false;
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rdev->ibdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			get_device(&rdev->ibdev.dev);
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	if (rdev && release)
		ib_device_put(&rdev->ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register with netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		/* VF device removal should be called before the removal
		 * of PF device. Queue VFs unregister first, so that VFs
		 * shall be removed before the PF during the call of
		 * ib_unregister_driver.
		 */
		if (rdev->is_virtfn)
			ib_unregister_device(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);