/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");

/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
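/* Only Gen P5 chips honor the caller-requested WQE mode (static or
 * variable-size send WQEs) in the helper below; older chips are limited
 * to fixed-size WQEs, so BNXT_QPLIB_WQE_MODE_STATIC is forced for them.
 */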
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;

	rdev->chip_ctx = chip_ctx;
	/* rest of the members to follow eventually */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	bnxt_re_set_drv_mode(rdev, wqe_mode);
	if (bnxt_qplib_determine_atomics(en_dev->pdev))
		ibdev_info(&rdev->ibdev,
			   "platform doesn't support global atomics.");
	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}

static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}
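/* Worked example of the split above (illustrative only; assumes a
 * hypothetical BNXT_RE_PCT_RSVD_FOR_PF of 10 and two VFs): vf_pct = 90
 * and num_vf is scaled to 200, so with qpc_count = 1000 each VF gets
 * (1000 * 90) / 200 = 450 QPs, leaving 10% of the pool for the PF. In
 * the plentiful-MR case the PF instead keeps a fixed
 * BNXT_RE_RESVD_MR_FOR_PF and the remainder is divided evenly.
 */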
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		      BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
	struct bnxt_re_dev *rdev = p;
	struct bnxt *bp;

	if (!rdev)
		return;
	ASSERT_RTNL();

	/* L2 driver invokes this callback during device error/crash or device
	 * reset. The current RoCE driver doesn't recover the device in case of
	 * error. Handle the error by dispatching fatal events to all QPs
	 * (i.e. by calling bnxt_re_dev_stop) and releasing the MSI-X vectors,
	 * as the L2 driver wants to modify the MSI-X table.
	 */
	bp = netdev_priv(rdev->netdev);

	ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");
	/* Check the current device state from L2 structure and move the
	 * device to detached state if FW_FATAL_COND is set.
	 * This prevents more commands to HW during clean-up,
	 * in case the device is already in error.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);

	bnxt_re_dev_stop(rdev);
	bnxt_re_stop_irq(rdev);
	/* Move the device states to detached and avoid sending any more
	 * commands to HW
	 */
	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return;
	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	ASSERT_RTNL();
	/* Release the MSIx vectors before queuing unregister */
	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}

static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will timeout and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
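/* The two helpers above are used together in a fixed pattern throughout
 * this file: zero a request/fw_msg pair on the stack, stamp the HWRM
 * header (completion ring and target id are -1, i.e. unused), point the
 * fw_msg at the request/response buffers, and hand it to the L2 driver.
 * A sketch of the recurring sequence (not an extra API):
 *
 *	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_..., -1, -1);
 *	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req),
 *			    (void *)&resp, sizeof(resp),
 *			    DFLT_HWRM_CMD_TIMEOUT);
 *	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
 */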
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

/* Device */

static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	en_dev = bnxt_ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.create_user_ah = bnxt_re_create_ah,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.device_group = &bnxt_re_dev_attr_group,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
	       (rdev->is_virtfn ?
		BNXT_RE_GEN_P5_VF_NQ_DB : BNXT_RE_GEN_P5_PF_NQ_DB) :
	       rdev->msix_entries[indx].db_offset;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix ; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);

	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected");
		ibdev_warn(&rdev->ibdev,
			   " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* Need to modify the VLAN enable setting of non-VLAN GIDs
		 * only, as the setting is already done for VLAN GIDs while
		 * adding a GID.
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}
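/* Worked example for bnxt_re_parse_cid_map(): with prio_map = 0x28
 * (priorities 3 and 5 set) and a hypothetical cid_map of
 * {0, 1, 2, 3, 4, 5, 6, 7}, the first two set bits are picked up in
 * ascending priority order, so cosq[0] = cid_map[3] = 3 and
 * cosq[1] = cid_map[5] = 5; any further set bits are ignored since at
 * most two traffic classes are supported.
 */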
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	/* Config BONO. */
	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}
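/* The four 16-bit HWRM interface version fields are packed into one u64,
 * one field per 16-bit lane. For example, an interface version of
 * 1.10.2.95 would be stored as
 *	(1ULL << 48) | (10ULL << 32) | (2ULL << 16) | 95
 * i.e. 0x0001000A0002005F, which makes version comparisons a single
 * integer compare.
 */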
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}

static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}

/* Worker thread for polling periodic events. Now used for QoS programming. */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
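/* Device bring-up below proceeds in a fixed order: register with bnxt_en,
 * set up the chip context, reserve MSI-X vectors, create the RCFW command
 * channel and its CREQ ring, allocate context memory and a stats context,
 * initialize RCFW, then allocate and initialize the NQ resources; QoS is
 * configured last (PF only). The error labels in bnxt_re_dev_init() unwind
 * these steps in reverse.
 */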
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance with the netdev */
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		rtnl_unlock();
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}

static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		dev_put(netdev);
}

static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
				  ROCE_DRV_MODULE_NAME);
		rc = PTR_ERR(en_dev);
		goto exit;
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		dev_put(netdev);
		goto exit;
	}
exit:
	return rc;
}

static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
{
	bnxt_re_dev_uninit(rdev);
	pci_dev_put(rdev->en_dev->pdev);
	bnxt_re_dev_unreg(rdev);
}

static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev, u8 wqe_mode)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev, wqe_mode);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}

static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}
/* Handle all deferred netevents tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
			goto exit;
		}
		goto exit;
	}

	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain is subjected to be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;
	bool release = true;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		return NOTIFY_OK;

	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_add_device(&rdev, real_dev,
					BNXT_QPLIB_WQE_MODE_STATIC);
		if (!rc)
			sch_work = true;
		release = false;
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rdev->ibdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			get_device(&rdev->ibdev.dev);
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	if (rdev && release)
		ib_device_put(&rdev->ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		/* VF device removal should be called before the removal
		 * of PF device. Queue VFs unregister first, so that VFs
		 * shall be removed before the PF during the call of
		 * ib_unregister_driver.
		 */
		if (rdev->is_virtfn)
			ib_unregister_device(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);