/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr,
			  void **context);
static int pvrdma_del_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  void **context);

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	if (port_num != 1)
		return NULL;

	rcu_read_lock();
	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);
	rcu_read_unlock();

	return netdev;
}

static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;
	int i = 0;

	strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->flags = 0;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	dev->ib_dev.query_device = pvrdma_query_device;
	dev->ib_dev.query_port = pvrdma_query_port;
	dev->ib_dev.query_gid = pvrdma_query_gid;
	dev->ib_dev.query_pkey = pvrdma_query_pkey;
	dev->ib_dev.modify_port = pvrdma_modify_port;
	dev->ib_dev.alloc_ucontext = pvrdma_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = pvrdma_dealloc_ucontext;
	dev->ib_dev.mmap = pvrdma_mmap;
	dev->ib_dev.alloc_pd = pvrdma_alloc_pd;
	dev->ib_dev.dealloc_pd = pvrdma_dealloc_pd;
	dev->ib_dev.create_ah = pvrdma_create_ah;
	dev->ib_dev.destroy_ah = pvrdma_destroy_ah;
	dev->ib_dev.create_qp = pvrdma_create_qp;
	dev->ib_dev.modify_qp = pvrdma_modify_qp;
	dev->ib_dev.query_qp = pvrdma_query_qp;
	dev->ib_dev.destroy_qp = pvrdma_destroy_qp;
	dev->ib_dev.post_send = pvrdma_post_send;
	dev->ib_dev.post_recv = pvrdma_post_recv;
	dev->ib_dev.create_cq = pvrdma_create_cq;
	dev->ib_dev.modify_cq = pvrdma_modify_cq;
	dev->ib_dev.resize_cq = pvrdma_resize_cq;
	dev->ib_dev.destroy_cq = pvrdma_destroy_cq;
	dev->ib_dev.poll_cq = pvrdma_poll_cq;
	dev->ib_dev.req_notify_cq = pvrdma_req_notify_cq;
	dev->ib_dev.get_dma_mr = pvrdma_get_dma_mr;
	dev->ib_dev.reg_user_mr = pvrdma_reg_user_mr;
	dev->ib_dev.dereg_mr = pvrdma_dereg_mr;
	dev->ib_dev.alloc_mr = pvrdma_alloc_mr;
	dev->ib_dev.map_mr_sg = pvrdma_map_mr_sg;
	dev->ib_dev.add_gid = pvrdma_add_gid;
	dev->ib_dev.del_gid = pvrdma_del_gid;
	dev->ib_dev.get_netdev = pvrdma_get_netdev;
	dev->ib_dev.get_port_immutable = pvrdma_port_immutable;
	dev->ib_dev.get_link_layer = pvrdma_port_link_layer;
	dev->ib_dev.get_dev_fw_str = pvrdma_get_fw_ver_str;

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	ret = ib_register_device(&dev->ib_dev, NULL);
	if (ret)
		goto err_qp_free;

	for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 pvrdma_class_attributes[i]);
		if (ret)
			goto err_class;
	}

	dev->ib_active = true;

	return 0;

err_class:
	ib_unregister_device(&dev->ib_dev);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		atomic_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		atomic_dec(&qp->refcnt);
		if (atomic_read(&qp->refcnt) == 0)
			wake_up(&qp->wait);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		atomic_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		atomic_dec(&cq->refcnt);
		if (atomic_read(&cq->refcnt) == 0)
			wake_up(&cq->wait);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

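/*
 * Completion interrupt handler: MSI-X vectors 2 and up land here. Each
 * entry in the CQ notification ring names a CQ whose completion handler
 * should be invoked.
 */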
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			atomic_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		if (cq) {
			atomic_dec(&cq->refcnt);
			/* Wake any waiter once the last reference is dropped. */
			if (atomic_read(&cq->refcnt) == 0)
				wake_up(&cq->wait);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

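/*
 * Interrupt setup: prefer MSI-X with up to PVRDMA_MAX_INTERRUPTS vectors,
 * falling back to a single MSI or legacy interrupt. Vector 0 handles
 * command-channel responses, vector 1 asynchronous events, and any
 * remaining vectors CQ completion notifications.
 */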
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  const union ib_gid *gid,
			  const struct ib_gid_attr *attr,
			  void **context)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	return pvrdma_add_gid_at_index(dev, gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       index);
}

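/*
 * Mirror of pvrdma_add_gid_at_index(): post a DESTROY_BIND command for the
 * cached entry and clear it from the software sgid table.
 */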
static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return ret;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(struct ib_device *ibdev,
			  u8 port_num,
			  unsigned int index,
			  void **context)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, index);
}

static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  unsigned long event)
{
	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev->ib_dev.name);
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if (dev->netdev == netdev_work->event_netdev) {
			pvrdma_netdevice_event_handle(dev, netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

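/*
 * Probe sequence: enable the PCI function, map the register and UAR BARs,
 * allocate the device shared region (DSR) plus the command/response slots
 * and the async/CQ event rings, publish the DSR address to the device,
 * pair up with the vmxnet3 netdev in the same PCI slot (function 0), set
 * up interrupts, activate the device and finally register with the IB core.
 */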
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_disable_pdev;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				      &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	memset(dev->dsr, 0, sizeof(*dev->dsr));
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

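	/*
	 * The command channel is two single pages shared with the device via
	 * the DSR: requests are written to the command slot and the device
	 * places its reply in the response slot, signalled via interrupt 0.
	 */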
	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
err_free_cq_ring:
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);

	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name		= DRV_NAME,
	.id_table	= pvrdma_pci_table,
	.probe		= pvrdma_pci_probe,
	.remove		= pvrdma_pci_remove,
};

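/*
 * Module init: create the ordered workqueue that the netdevice notifier
 * uses to handle events outside atomic context, then register the PCI
 * driver.
 */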
static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");