/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL,
};

static const struct attribute_group pvrdma_attr_group = {
	.attrs = pvrdma_class_attributes,
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (port_num != 1)
		return NULL;

	rcu_read_lock();
	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);
	rcu_read_unlock();

	return netdev;
}

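/*
 * Verb entry points exposed to the IB core.  The SRQ verbs in
 * pvrdma_dev_srq_ops are added separately in pvrdma_register_device()
 * only when the device reports SRQ support (caps.max_srq != 0).
 */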
static const struct ib_device_ops pvrdma_dev_ops = {
	.add_gid = pvrdma_add_gid,
	.alloc_mr = pvrdma_alloc_mr,
	.alloc_pd = pvrdma_alloc_pd,
	.alloc_ucontext = pvrdma_alloc_ucontext,
	.create_ah = pvrdma_create_ah,
	.create_cq = pvrdma_create_cq,
	.create_qp = pvrdma_create_qp,
	.dealloc_pd = pvrdma_dealloc_pd,
	.dealloc_ucontext = pvrdma_dealloc_ucontext,
	.del_gid = pvrdma_del_gid,
	.dereg_mr = pvrdma_dereg_mr,
	.destroy_ah = pvrdma_destroy_ah,
	.destroy_cq = pvrdma_destroy_cq,
	.destroy_qp = pvrdma_destroy_qp,
	.get_dev_fw_str = pvrdma_get_fw_ver_str,
	.get_dma_mr = pvrdma_get_dma_mr,
	.get_link_layer = pvrdma_port_link_layer,
	.get_netdev = pvrdma_get_netdev,
	.get_port_immutable = pvrdma_port_immutable,
	.map_mr_sg = pvrdma_map_mr_sg,
	.mmap = pvrdma_mmap,
	.modify_port = pvrdma_modify_port,
	.modify_qp = pvrdma_modify_qp,
	.poll_cq = pvrdma_poll_cq,
	.post_recv = pvrdma_post_recv,
	.post_send = pvrdma_post_send,
	.query_device = pvrdma_query_device,
	.query_gid = pvrdma_query_gid,
	.query_pkey = pvrdma_query_pkey,
	.query_port = pvrdma_query_port,
	.query_qp = pvrdma_query_qp,
	.reg_user_mr = pvrdma_reg_user_mr,
	.req_notify_cq = pvrdma_req_notify_cq,
};

static const struct ib_device_ops pvrdma_dev_srq_ops = {
	.create_srq = pvrdma_create_srq,
	.destroy_srq = pvrdma_destroy_srq,
	.modify_srq = pvrdma_modify_srq,
	.query_srq = pvrdma_query_srq,
};

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;

	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
	spin_lock_init(&dev->srq_tbl_lock);
	rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);

	ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
	if (ret)
		goto err_srq_free;

	dev->ib_active = true;

	return 0;

err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

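/*
 * Vector 0 (or the shared INTx/MSI line) signals device command
 * responses.  Without MSI-X the interrupt cause register is read first
 * to check whether this device actually raised the interrupt.
 */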
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

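/*
 * Vector 1 services the asynchronous event ring: each event entry
 * carries a resource number (QP, CQ, SRQ or port) and an event type,
 * which is forwarded to the matching per-resource handler above.
 */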
static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

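/*
 * The remaining vectors service the CQ notification ring: each entry
 * names the CQ whose completion handler should be invoked.
 */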
static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

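/*
 * Allocate interrupt vectors: prefer MSI-X with up to
 * PVRDMA_MAX_INTERRUPTS vectors, falling back to a single MSI or
 * legacy vector.  Vector 0 handles command responses, vector 1 async
 * events, and any further vectors CQ notifications.
 */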
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

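/*
 * GID table changes are pushed to the device with CREATE_BIND and
 * DESTROY_BIND commands; sgid_tbl keeps a shadow copy of the GIDs the
 * device has been given.
 */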
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

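/*
 * Netdevice notifier events for the paired vmxnet3 interface are
 * translated into IB port events; link-up also unquiesces the device,
 * and register/unregister keeps the netdev pairing up to date.
 */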
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* vmxnet3 will have same bus, slot. But func will be 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev_name(&dev->ib_dev.dev));
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

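/*
 * Probe: map the register and UAR BARs, allocate the device shared
 * region plus command/response slots and event rings, hand the shared
 * region to the device, pair with the vmxnet3 netdev in the same PCI
 * slot, set up interrupts and tables, activate the device, and finally
 * register with the IB core.
 */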
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_free_device;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				       &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

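/*
 * Device teardown: unhook the netdev notifier, unregister the IB
 * device, disable and free interrupts, reset the device, then release
 * rings, tables, mappings and PCI resources.
 */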
static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	/* Free the shared region allocated in probe; otherwise it leaks. */
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");