/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME        "vmw_pvrdma"
#define DRV_VERSION     "1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        &dev_attr_board_id.attr,
        NULL,
};

static const struct attribute_group pvrdma_attr_group = {
        .attrs = pvrdma_class_attributes,
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
        struct pvrdma_dev *dev =
                container_of(device, struct pvrdma_dev, ib_dev);
        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
                 (int) (dev->dsr->caps.fw_ver >> 32),
                 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
                 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
        /* Initialize some device related stuff */
        spin_lock_init(&dev->cmd_lock);
        sema_init(&dev->cmd_sema, 1);
        atomic_set(&dev->num_qps, 0);
        atomic_set(&dev->num_srqs, 0);
        atomic_set(&dev->num_cqs, 0);
        atomic_set(&dev->num_pds, 0);
        atomic_set(&dev->num_ahs, 0);

        return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
                                 struct ib_port_immutable *immutable)
{
        struct pvrdma_dev *dev = to_vdev(ibdev);
        struct ib_port_attr attr;
        int err;

        if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
        else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
                immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
        return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
                                            u8 port_num)
{
        struct net_device *netdev;
        struct pvrdma_dev *dev = to_vdev(ibdev);

        if (port_num != 1)
                return NULL;

        rcu_read_lock();
        netdev = dev->netdev;
        if (netdev)
                dev_hold(netdev);
        rcu_read_unlock();

        return netdev;
}

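/*
 * Register the device with the RDMA core: fill in the ib_device verbs
 * callbacks and uverbs command mask, allocate the CQ/QP (and, when the
 * backend supports SRQs, SRQ) lookup tables, then call ib_register_device().
 */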
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
        int ret = -1;

        dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
        dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
        dev->flags = 0;
        dev->ib_dev.owner = THIS_MODULE;
        dev->ib_dev.num_comp_vectors = 1;
        dev->ib_dev.dev.parent = &dev->pdev->dev;
        dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
                (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
                (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
                (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_AH);

        dev->ib_dev.node_type = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

        dev->ib_dev.query_device = pvrdma_query_device;
        dev->ib_dev.query_port = pvrdma_query_port;
        dev->ib_dev.query_gid = pvrdma_query_gid;
        dev->ib_dev.query_pkey = pvrdma_query_pkey;
        dev->ib_dev.modify_port = pvrdma_modify_port;
        dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
        dev->ib_dev.mmap = pvrdma_mmap;
        dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
        dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
        dev->ib_dev.create_ah = pvrdma_create_ah;
        dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
        dev->ib_dev.create_qp = pvrdma_create_qp;
        dev->ib_dev.modify_qp = pvrdma_modify_qp;
        dev->ib_dev.query_qp = pvrdma_query_qp;
        dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
        dev->ib_dev.post_send = pvrdma_post_send;
        dev->ib_dev.post_recv = pvrdma_post_recv;
        dev->ib_dev.create_cq = pvrdma_create_cq;
        dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
        dev->ib_dev.poll_cq = pvrdma_poll_cq;
        dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
        dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
        dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
        dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
        dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
        dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
        dev->ib_dev.add_gid = pvrdma_add_gid;
        dev->ib_dev.del_gid = pvrdma_del_gid;
        dev->ib_dev.get_netdev = pvrdma_get_netdev;
        dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
        dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
        dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;

        mutex_init(&dev->port_mutex);
        spin_lock_init(&dev->desc_lock);

        dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
                              GFP_KERNEL);
        if (!dev->cq_tbl)
                return ret;
        spin_lock_init(&dev->cq_tbl_lock);

        dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
                              GFP_KERNEL);
        if (!dev->qp_tbl)
                goto err_cq_free;
        spin_lock_init(&dev->qp_tbl_lock);

        /* Check if SRQ is supported by backend */
        if (dev->dsr->caps.max_srq) {
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                        (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

                dev->ib_dev.create_srq = pvrdma_create_srq;
                dev->ib_dev.modify_srq = pvrdma_modify_srq;
                dev->ib_dev.query_srq = pvrdma_query_srq;
                dev->ib_dev.destroy_srq = pvrdma_destroy_srq;

                dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
                                       sizeof(struct pvrdma_srq *),
                                       GFP_KERNEL);
                if (!dev->srq_tbl)
                        goto err_qp_free;
        }
        dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
        spin_lock_init(&dev->srq_tbl_lock);
        rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);

        ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
        if (ret)
                goto err_srq_free;

        dev->ib_active = true;

        return 0;

err_srq_free:
        kfree(dev->srq_tbl);
err_qp_free:
        kfree(dev->qp_tbl);
err_cq_free:
        kfree(dev->cq_tbl);

        return ret;
}

static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
        u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
        struct pvrdma_dev *dev = dev_id;

        dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

        if (!dev->pdev->msix_enabled) {
                /* Legacy intr */
                icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
                if (icr == 0)
                        return IRQ_NONE;
        }

        if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
                complete(&dev->cmd_done);

        return IRQ_HANDLED;
}

static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
        struct pvrdma_qp *qp;
        unsigned long flags;

        spin_lock_irqsave(&dev->qp_tbl_lock, flags);
        qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
        if (qp)
                refcount_inc(&qp->refcnt);
        spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

        if (qp && qp->ibqp.event_handler) {
                struct ib_qp *ibqp = &qp->ibqp;
                struct ib_event e;

                e.device = ibqp->device;
                e.element.qp = ibqp;
                e.event = type; /* 1:1 mapping for now. */
                ibqp->event_handler(&e, ibqp->qp_context);
        }
        if (qp) {
                if (refcount_dec_and_test(&qp->refcnt))
                        complete(&qp->free);
        }
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
        struct pvrdma_cq *cq;
        unsigned long flags;

        spin_lock_irqsave(&dev->cq_tbl_lock, flags);
        cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
        if (cq)
                refcount_inc(&cq->refcnt);
        spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

        if (cq && cq->ibcq.event_handler) {
                struct ib_cq *ibcq = &cq->ibcq;
                struct ib_event e;

                e.device = ibcq->device;
                e.element.cq = ibcq;
                e.event = type; /* 1:1 mapping for now. */
                ibcq->event_handler(&e, ibcq->cq_context);
        }
        if (cq) {
                if (refcount_dec_and_test(&cq->refcnt))
                        complete(&cq->free);
        }
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
        struct pvrdma_srq *srq;
        unsigned long flags;

        spin_lock_irqsave(&dev->srq_tbl_lock, flags);
        if (dev->srq_tbl)
                srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
        else
                srq = NULL;
        if (srq)
                refcount_inc(&srq->refcnt);
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

        if (srq && srq->ibsrq.event_handler) {
                struct ib_srq *ibsrq = &srq->ibsrq;
                struct ib_event e;

                e.device = ibsrq->device;
                e.element.srq = ibsrq;
                e.event = type; /* 1:1 mapping for now. */
                ibsrq->event_handler(&e, ibsrq->srq_context);
        }
        if (srq) {
                if (refcount_dec_and_test(&srq->refcnt))
                        complete(&srq->free);
        }
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
                                  enum ib_event_type event)
{
        struct ib_event ib_event;

        memset(&ib_event, 0, sizeof(ib_event));
        ib_event.device = &dev->ib_dev;
        ib_event.element.port_num = port;
        ib_event.event = event;
        ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
        if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
                dev_warn(&dev->pdev->dev, "event on port %d\n", port);
                return;
        }

        pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
        return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
                                        &dev->async_pdir,
                                        PAGE_SIZE +
                                        sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
        struct pvrdma_dev *dev = dev_id;
        struct pvrdma_ring *ring = &dev->async_ring_state->rx;
        int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
                         PAGE_SIZE / sizeof(struct pvrdma_eqe);
        unsigned int head;

        dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

        /*
         * Don't process events until the IB device is registered. Otherwise
         * we'll try to ib_dispatch_event() on an invalid device.
         */
        if (!dev->ib_active)
                return IRQ_HANDLED;

        while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
                struct pvrdma_eqe *eqe;

                eqe = get_eqe(dev, head);

                switch (eqe->type) {
                case PVRDMA_EVENT_QP_FATAL:
                case PVRDMA_EVENT_QP_REQ_ERR:
                case PVRDMA_EVENT_QP_ACCESS_ERR:
                case PVRDMA_EVENT_COMM_EST:
                case PVRDMA_EVENT_SQ_DRAINED:
                case PVRDMA_EVENT_PATH_MIG:
                case PVRDMA_EVENT_PATH_MIG_ERR:
                case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
                        pvrdma_qp_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_CQ_ERR:
                        pvrdma_cq_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_SRQ_ERR:
                case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
                        pvrdma_srq_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_PORT_ACTIVE:
                case PVRDMA_EVENT_PORT_ERR:
                case PVRDMA_EVENT_LID_CHANGE:
                case PVRDMA_EVENT_PKEY_CHANGE:
                case PVRDMA_EVENT_SM_CHANGE:
                case PVRDMA_EVENT_CLIENT_REREGISTER:
                case PVRDMA_EVENT_GID_CHANGE:
                        pvrdma_dev_event(dev, eqe->info, eqe->type);
                        break;

                case PVRDMA_EVENT_DEVICE_FATAL:
                        pvrdma_dev_event(dev, 1, eqe->type);
                        break;

                default:
                        break;
                }

                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }

        return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
                                           unsigned int i)
{
        return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
                                        &dev->cq_pdir,
                                        PAGE_SIZE +
                                        sizeof(struct pvrdma_cqne) * i);
}

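/*
 * Completion interrupt handler (MSI-X vectors 2 and up): drain the CQ
 * notification ring and call the completion handler of each referenced CQ
 * while holding a temporary reference on it.
 */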
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
        struct pvrdma_dev *dev = dev_id;
        struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
        int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
                         sizeof(struct pvrdma_cqne);
        unsigned int head;
        unsigned long flags;

        dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

        while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
                struct pvrdma_cqne *cqne;
                struct pvrdma_cq *cq;

                cqne = get_cqne(dev, head);
                spin_lock_irqsave(&dev->cq_tbl_lock, flags);
                cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
                if (cq)
                        refcount_inc(&cq->refcnt);
                spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                if (cq) {
                        if (refcount_dec_and_test(&cq->refcnt))
                                complete(&cq->free);
                }
                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }

        return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
        int i;

        dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
        for (i = 0; i < dev->nr_vectors; i++)
                free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
        dev_dbg(&dev->pdev->dev, "enable interrupts\n");
        pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
        dev_dbg(&dev->pdev->dev, "disable interrupts\n");
        pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;
        int ret = 0, i;

        ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
                                    PCI_IRQ_MSIX);
        if (ret < 0) {
                ret = pci_alloc_irq_vectors(pdev, 1, 1,
                                            PCI_IRQ_MSI | PCI_IRQ_LEGACY);
                if (ret < 0)
                        return ret;
        }
        dev->nr_vectors = ret;

        ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
                          pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
        if (ret) {
                dev_err(&dev->pdev->dev,
                        "failed to request interrupt 0\n");
                goto out_free_vectors;
        }

        for (i = 1; i < dev->nr_vectors; i++) {
                ret = request_irq(pci_irq_vector(dev->pdev, i),
                                  i == 1 ? pvrdma_intr1_handler :
                                           pvrdma_intrx_handler,
                                  0, DRV_NAME, dev);
                if (ret) {
                        dev_err(&dev->pdev->dev,
                                "failed to request interrupt %d\n", i);
                        goto free_irqs;
                }
        }

        return 0;

free_irqs:
        while (--i >= 0)
                free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
        pci_free_irq_vectors(pdev);
        return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;

        if (dev->resp_slot)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
                                  dev->dsr->resp_slot_dma);
        if (dev->cmd_slot)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
                                  dev->dsr->cmd_slot_dma);
}

static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
                                   const union ib_gid *gid,
                                   u8 gid_type,
                                   int index)
{
        int ret;
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

        if (!dev->sgid_tbl) {
                dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
                return -EINVAL;
        }

        memset(cmd_bind, 0, sizeof(*cmd_bind));
        cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
        memcpy(cmd_bind->new_gid, gid->raw, 16);
        cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
        cmd_bind->vlan = 0xfff;
        cmd_bind->index = index;
        cmd_bind->gid_type = gid_type;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not create binding, error: %d\n", ret);
                return -EFAULT;
        }
        memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
        return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct pvrdma_dev *dev = to_vdev(attr->device);

        return pvrdma_add_gid_at_index(dev, &attr->gid,
                                       ib_gid_type_to_pvrdma(attr->gid_type),
                                       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
        int ret;
        union pvrdma_cmd_req req;
        struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

        /* Update sgid table. */
        if (!dev->sgid_tbl) {
                dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
                return -EINVAL;
        }

        memset(cmd_dest, 0, sizeof(*cmd_dest));
        cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
        memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
        cmd_dest->index = index;

        ret = pvrdma_cmd_post(dev, &req, NULL, 0);
        if (ret < 0) {
                dev_warn(&dev->pdev->dev,
                         "could not destroy binding, error: %d\n", ret);
                return ret;
        }
        memset(&dev->sgid_tbl[index], 0, 16);
        return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct pvrdma_dev *dev = to_vdev(attr->device);

        dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
                attr->index, dev->netdev->name);

        return pvrdma_del_gid_at_index(dev, attr->index);
}

static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
                                          struct net_device *ndev,
                                          unsigned long event)
{
        struct pci_dev *pdev_net;
        unsigned int slot;

        switch (event) {
        case NETDEV_REBOOT:
        case NETDEV_DOWN:
                pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
                break;
        case NETDEV_UP:
                pvrdma_write_reg(dev, PVRDMA_REG_CTL,
                                 PVRDMA_DEVICE_CTL_UNQUIESCE);

                mb();

                if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
                        dev_err(&dev->pdev->dev,
                                "failed to activate device during link up\n");
                else
                        pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
                break;
        case NETDEV_UNREGISTER:
                dev_put(dev->netdev);
                dev->netdev = NULL;
                break;
        case NETDEV_REGISTER:
                /* vmxnet3 will have same bus, slot. But func will be 0 */
                slot = PCI_SLOT(dev->pdev->devfn);
                pdev_net = pci_get_slot(dev->pdev->bus,
                                        PCI_DEVFN(slot, 0));
                if ((dev->netdev == NULL) &&
                    (pci_get_drvdata(pdev_net) == ndev)) {
                        /* this is our netdev */
                        dev->netdev = ndev;
                        dev_hold(ndev);
                }
                pci_dev_put(pdev_net);
                break;

        default:
                dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
                        event, dev_name(&dev->ib_dev.dev));
                break;
        }
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
        struct pvrdma_netdevice_work *netdev_work;
        struct pvrdma_dev *dev;

        netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

        mutex_lock(&pvrdma_device_list_lock);
        list_for_each_entry(dev, &pvrdma_device_list, device_link) {
                if ((netdev_work->event == NETDEV_REGISTER) ||
                    (dev->netdev == netdev_work->event_netdev)) {
                        pvrdma_netdevice_event_handle(dev,
                                                      netdev_work->event_netdev,
                                                      netdev_work->event);
                        break;
                }
        }
        mutex_unlock(&pvrdma_device_list_lock);

        kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
                                  unsigned long event, void *ptr)
{
        struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
        struct pvrdma_netdevice_work *netdev_work;

        netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
        if (!netdev_work)
                return NOTIFY_BAD;

        INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
        netdev_work->event_netdev = event_netdev;
        netdev_work->event = event;
        queue_work(event_wq, &netdev_work->work);

        return NOTIFY_DONE;
}

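/*
 * Probe sequence: enable the PCI function, map the register and UAR BARs,
 * allocate the device shared region (DSR) plus command/response slots and
 * the async/CQ notification rings, pair up with the vmxnet3 netdev on the
 * same bus/slot, set up interrupts, activate the device, and finally
 * register the IB device and the netdevice notifier.
 */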
static int pvrdma_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
{
        struct pci_dev *pdev_net;
        struct pvrdma_dev *dev;
        int ret;
        unsigned long start;
        unsigned long len;
        dma_addr_t slot_dma = 0;

        dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

        /* Allocate zero-out device */
        dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev) {
                dev_err(&pdev->dev, "failed to allocate IB device\n");
                return -ENOMEM;
        }

        mutex_lock(&pvrdma_device_list_lock);
        list_add(&dev->device_link, &pvrdma_device_list);
        mutex_unlock(&pvrdma_device_list_lock);

        ret = pvrdma_init_device(dev);
        if (ret)
                goto err_free_device;

        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);

        ret = pci_enable_device(pdev);
        if (ret) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto err_free_device;
        }

        dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
                pci_resource_flags(pdev, 0));
        dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
                (unsigned long long)pci_resource_len(pdev, 0));
        dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
                (unsigned long long)pci_resource_start(pdev, 0));
        dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
                pci_resource_flags(pdev, 1));
        dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
                (unsigned long long)pci_resource_len(pdev, 1));
        dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
                (unsigned long long)pci_resource_start(pdev, 1));

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
            !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
                ret = -ENOMEM;
                goto err_free_device;
        }

        ret = pci_request_regions(pdev, DRV_NAME);
        if (ret) {
                dev_err(&pdev->dev, "cannot request PCI resources\n");
                goto err_disable_pdev;
        }

        /* Enable 64-Bit DMA */
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (ret != 0) {
                        dev_err(&pdev->dev,
                                "pci_set_consistent_dma_mask failed\n");
                        goto err_free_resource;
                }
        } else {
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (ret != 0) {
                        dev_err(&pdev->dev,
                                "pci_set_dma_mask failed\n");
                        goto err_free_resource;
                }
        }

        pci_set_master(pdev);

        /* Map register space */
        start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
        len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
        dev->regs = ioremap(start, len);
        if (!dev->regs) {
                dev_err(&pdev->dev, "register mapping failed\n");
                ret = -ENOMEM;
                goto err_free_resource;
        }

        /* Setup per-device UAR. */
        dev->driver_uar.index = 0;
        dev->driver_uar.pfn =
                pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
                PAGE_SHIFT;
        dev->driver_uar.map =
                ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!dev->driver_uar.map) {
                dev_err(&pdev->dev, "failed to remap UAR pages\n");
                ret = -ENOMEM;
                goto err_unmap_regs;
        }

        dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
        dev_info(&pdev->dev, "device version %d, driver version %d\n",
                 dev->dsr_version, PVRDMA_VERSION);

        dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
                                       &dev->dsrbase, GFP_KERNEL);
        if (!dev->dsr) {
                dev_err(&pdev->dev, "failed to allocate shared region\n");
                ret = -ENOMEM;
                goto err_uar_unmap;
        }

        /* Setup the shared region */
        dev->dsr->driver_version = PVRDMA_VERSION;
        dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
                PVRDMA_GOS_BITS_32 :
                PVRDMA_GOS_BITS_64;
        dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
        dev->dsr->gos_info.gos_ver = 1;
        dev->dsr->uar_pfn = dev->driver_uar.pfn;

        /* Command slot. */
        dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                           &slot_dma, GFP_KERNEL);
        if (!dev->cmd_slot) {
                ret = -ENOMEM;
                goto err_free_dsr;
        }

        dev->dsr->cmd_slot_dma = (u64)slot_dma;

        /* Response slot. */
        dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                            &slot_dma, GFP_KERNEL);
        if (!dev->resp_slot) {
                ret = -ENOMEM;
                goto err_free_slots;
        }

        dev->dsr->resp_slot_dma = (u64)slot_dma;

        /* Async event ring */
        dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
                                   dev->dsr->async_ring_pages.num_pages, true);
        if (ret)
                goto err_free_slots;
        dev->async_ring_state = dev->async_pdir.pages[0];
        dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

        /* CQ notification ring */
        dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
        ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
                                   dev->dsr->cq_ring_pages.num_pages, true);
        if (ret)
                goto err_free_async_ring;
        dev->cq_ring_state = dev->cq_pdir.pages[0];
        dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

        /*
         * Write the PA of the shared region to the device. The writes must be
         * ordered such that the high bits are written last. When the writes
         * complete, the device will have filled out the capabilities.
         */

        pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
        pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
                         (u32)((u64)(dev->dsrbase) >> 32));

        /* Make sure the write is complete before reading status. */
        mb();

        /* The driver supports RoCE V1 and V2. */
        if (!PVRDMA_SUPPORTED(dev)) {
                dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
                ret = -EFAULT;
                goto err_free_cq_ring;
        }

        /* Paired vmxnet3 will have same bus, slot. But func will be 0 */
        pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
        if (!pdev_net) {
                dev_err(&pdev->dev, "failed to find paired net device\n");
                ret = -ENODEV;
                goto err_free_cq_ring;
        }

        if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
            pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
                dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
                pci_dev_put(pdev_net);
                ret = -ENODEV;
                goto err_free_cq_ring;
        }

        dev->netdev = pci_get_drvdata(pdev_net);
        pci_dev_put(pdev_net);
        if (!dev->netdev) {
                dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
                ret = -ENODEV;
                goto err_free_cq_ring;
        }
        dev_hold(dev->netdev);

        dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

        /* Interrupt setup */
        ret = pvrdma_alloc_intrs(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate interrupts\n");
                ret = -ENOMEM;
                goto err_free_cq_ring;
        }

        /* Allocate UAR table. */
        ret = pvrdma_uar_table_init(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate UAR table\n");
                ret = -ENOMEM;
                goto err_free_intrs;
        }

        /* Allocate GID table */
        dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
                                sizeof(union ib_gid), GFP_KERNEL);
        if (!dev->sgid_tbl) {
                ret = -ENOMEM;
                goto err_free_uar_table;
        }
        dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

        pvrdma_enable_intrs(dev);

        /* Activate pvrdma device */
        pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

        /* Make sure the write is complete before reading status. */
        mb();

        /* Check if device was successfully activated */
        ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
        if (ret != 0) {
                dev_err(&pdev->dev, "failed to activate device\n");
                ret = -EFAULT;
                goto err_disable_intr;
        }

        /* Register IB device */
        ret = pvrdma_register_device(dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register IB device\n");
                goto err_disable_intr;
        }

        dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
        ret = register_netdevice_notifier(&dev->nb_netdev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register netdevice events\n");
                goto err_unreg_ibdev;
        }

        dev_info(&pdev->dev, "attached to device\n");
        return 0;

err_unreg_ibdev:
        ib_unregister_device(&dev->ib_dev);
err_disable_intr:
        pvrdma_disable_intrs(dev);
        kfree(dev->sgid_tbl);
err_free_uar_table:
        pvrdma_uar_table_cleanup(dev);
err_free_intrs:
        pvrdma_free_irq(dev);
        pci_free_irq_vectors(pdev);
err_free_cq_ring:
        if (dev->netdev) {
                dev_put(dev->netdev);
                dev->netdev = NULL;
        }
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
        pvrdma_free_slots(dev);
err_free_dsr:
        dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
                          dev->dsrbase);
err_uar_unmap:
        iounmap(dev->driver_uar.map);
err_unmap_regs:
        iounmap(dev->regs);
err_free_resource:
        pci_release_regions(pdev);
err_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
err_free_device:
        mutex_lock(&pvrdma_device_list_lock);
        list_del(&dev->device_link);
        mutex_unlock(&pvrdma_device_list_lock);
        ib_dealloc_device(&dev->ib_dev);
        return ret;
}

static void pvrdma_pci_remove(struct pci_dev *pdev)
{
        struct pvrdma_dev *dev = pci_get_drvdata(pdev);

        if (!dev)
                return;

        dev_info(&pdev->dev, "detaching from device\n");

        unregister_netdevice_notifier(&dev->nb_netdev);
        dev->nb_netdev.notifier_call = NULL;

        flush_workqueue(event_wq);

        if (dev->netdev) {
                dev_put(dev->netdev);
                dev->netdev = NULL;
        }

        /* Unregister ib device */
        ib_unregister_device(&dev->ib_dev);

        mutex_lock(&pvrdma_device_list_lock);
        list_del(&dev->device_link);
        mutex_unlock(&pvrdma_device_list_lock);

        pvrdma_disable_intrs(dev);
        pvrdma_free_irq(dev);
        pci_free_irq_vectors(pdev);

        /* Deactivate pvrdma device */
        pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
        pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
        pvrdma_free_slots(dev);

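        /* Unmap MMIO regions and free the remaining software state. */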
        iounmap(dev->regs);
        kfree(dev->sgid_tbl);
        kfree(dev->cq_tbl);
        kfree(dev->srq_tbl);
        kfree(dev->qp_tbl);
        pvrdma_uar_table_cleanup(dev);
        iounmap(dev->driver_uar.map);

        ib_dealloc_device(&dev->ib_dev);

        /* Free pci resources */
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
        { 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
        .name = DRV_NAME,
        .id_table = pvrdma_pci_table,
        .probe = pvrdma_pci_probe,
        .remove = pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
        int err;

        event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
        if (!event_wq)
                return -ENOMEM;

        err = pci_register_driver(&pvrdma_driver);
        if (err)
                destroy_workqueue(event_wq);

        return err;
}

static void __exit pvrdma_cleanup(void)
{
        pci_unregister_driver(&pvrdma_driver);

        destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");