/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (port_num != 1)
		return NULL;

	rcu_read_lock();
	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);
	rcu_read_unlock();

	return netdev;
}

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;
	int i = 0;

	strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid =
		dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	dev->ib_dev.query_device = pvrdma_query_device;
	dev->ib_dev.query_port = pvrdma_query_port;
	dev->ib_dev.query_gid = pvrdma_query_gid;
	dev->ib_dev.query_pkey = pvrdma_query_pkey;
	dev->ib_dev.modify_port = pvrdma_modify_port;
	dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
	dev->ib_dev.mmap = pvrdma_mmap;
	dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
	dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
	dev->ib_dev.create_ah = pvrdma_create_ah;
	dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
	dev->ib_dev.create_qp = pvrdma_create_qp;
	dev->ib_dev.modify_qp = pvrdma_modify_qp;
	dev->ib_dev.query_qp = pvrdma_query_qp;
	dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
	dev->ib_dev.post_send = pvrdma_post_send;
	dev->ib_dev.post_recv = pvrdma_post_recv;
	dev->ib_dev.create_cq = pvrdma_create_cq;
	dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
	dev->ib_dev.poll_cq = pvrdma_poll_cq;
	dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
	dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
	dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
	dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
	dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
	dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
	dev->ib_dev.add_gid = pvrdma_add_gid;
	dev->ib_dev.del_gid = pvrdma_del_gid;
	dev->ib_dev.get_netdev = pvrdma_get_netdev;
	dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
	dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
	dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		dev->ib_dev.create_srq = pvrdma_create_srq;
		dev->ib_dev.modify_srq = pvrdma_modify_srq;
		dev->ib_dev.query_srq = pvrdma_query_srq;
		dev->ib_dev.destroy_srq = pvrdma_destroy_srq;

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
	spin_lock_init(&dev->srq_tbl_lock);

	ret = ib_register_device(&dev->ib_dev, NULL);
	if (ret)
		goto err_srq_free;

	for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 pvrdma_class_attributes[i]);
		if (ret)
			goto err_class;
	}

	dev->ib_active = true;

	return 0;

err_class:
	ib_unregister_device(&dev->ib_dev);
err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
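	/*
	 * Vector 0 services command responses (pvrdma_intr0_handler),
	 * vector 1 services async events (pvrdma_intr1_handler), and any
	 * remaining vectors service CQ notifications (pvrdma_intrx_handler),
	 * as set up by the request_irq() calls below.
	 */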
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* vmxnet3 will have same bus, slot. But func will be 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev->ib_dev.name);
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zeroed-out device */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				       &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
					PVRDMA_GOS_BITS_32 :
					PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);

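	/* Unmap the register and UAR mappings and free the software tables. */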
	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");