/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;

	unsigned int irq;
	unsigned int intr_type;
	bool exclusive_vectors;
	struct msix_entry msix_entries[VMCI_MAX_INTRS];

	struct tasklet_struct datagram_tasklet;
	struct tasklet_struct bm_tasklet;

	void *data_buffer;
	void *notification_bitmap;
	dma_addr_t notification_base;
};

/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}
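
/*
 * Return the context ID of this guest.  If no ID has been cached yet, ask
 * the hypervisor: the reply to a VMCI_GET_CONTEXT_ID datagram is the
 * context ID itself, which is then cached for subsequent calls.
 */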
u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;
		get_cid_msg.dst =
		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
				     VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that
	 * possibility. Interrupts are disabled so that an incoming
	 * datagram interrupt cannot be serviced in the middle of the
	 * "rep out" and possibly end up back in this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

/*
 * Gets called with the new context ID when the guest context is updated
 * or the VM is resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
				vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns 0 if
 * the required hypercalls (or fallback hypercalls) are supported by the
 * host, an error code otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Checks that hypercalls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result ? 0 : -ENXIO;
}

/*
 * Reads datagrams from the data in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data
 * in port for the duration of the call.
 */
static void vmci_dispatch_dgs(unsigned long data)
{
	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size = PAGE_SIZE;
	size_t remaining_bytes;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
		    vmci_dev->data_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;
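
	/*
	 * The loop below relies on the device marking the end of a batch
	 * with a datagram whose destination resource is VMCI_INVALID_ID.
	 * When the buffer spans more than one page, such a marker may also
	 * just be padding up to the next page boundary, which the loop
	 * body skips over.
	 */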
	while (dg->dst.resource != VMCI_INVALID_ID ||
	       remaining_bytes > PAGE_SIZE) {
		unsigned dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {

					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly
					 * following calls into the
					 * following bytes.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					vmci_dev->data_buffer +
						remaining_bytes,
					current_dg_in_buffer_size -
						remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					vmci_dev->data_buffer,
					current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
			(size_t) (dg_in_buffer + current_dg_in_buffer_size -
				  (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
				    vmci_dev->data_buffer,
				    current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
 */
static int vmci_enable_msix(struct pci_dev *pdev,
			    struct vmci_guest_device *vmci_dev)
{
	int i;
	int result;

	for (i = 0; i < VMCI_MAX_INTRS; ++i) {
		vmci_dev->msix_entries[i].entry = i;
		vmci_dev->msix_entries[i].vector = i;
	}

	result = pci_enable_msix_exact(pdev,
				       vmci_dev->msix_entries, VMCI_MAX_INTRS);
	if (result == 0)
		vmci_dev->exclusive_vectors = true;
	else if (result == -ENOSPC)
		result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);

	return result;
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply schedule
	 * the datagram tasklet, since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
		tasklet_schedule(&dev->datagram_tasklet);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
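		/*
		 * An ICR of all zeroes means the interrupt was not for us
		 * (the line may be shared); all ones usually means the
		 * device is no longer present.
		 */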
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			tasklet_schedule(&dev->datagram_tasklet);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			tasklet_schedule(&dev->bm_tasklet);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	tasklet_schedule(&dev->bm_tasklet);

	return IRQ_HANDLED;
}

/*
 * Most of the initialization happens here, when the PCI core finds a
 * VMCI device and calls our probe routine.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
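	 * The notification bitmap is a single page that the device itself
	 * updates, which is why it is allocated with dma_alloc_coherent()
	 * and its address is handed to the device further down.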
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
			capabilities |= VMCI_CAPS_NOTIFICATIONS;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

	/* Let the host know which capabilities we intend to use. */
	iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
				 (u32) bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fall back
	 * on legacy interrupts.
	 */
	if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
		vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
		vmci_dev->irq = vmci_dev->msix_entries[0].vector;
	} else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
		vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
		vmci_dev->irq = pdev->irq;
	} else {
		vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
		vmci_dev->irq = pdev->irq;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
			    KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			vmci_dev->irq, error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(vmci_dev->msix_entries[1].vector,
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				vmci_dev->msix_entries[1].vector, error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (capabilities & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/* Enable interrupts. */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);
	return 0;

err_free_irq:
	free_irq(vmci_dev->irq, vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
		pci_disable_msix(pdev);
	else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
		pci_disable_msi(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	free_irq(vmci_dev->irq, vmci_dev);
	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
		if (vmci_dev->exclusive_vectors)
			free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
		pci_disable_msix(pdev);
	} else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
		pci_disable_msi(pdev);
	}

	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */

		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
}

static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};
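
/*
 * Init/exit for the guest personality.  These are called by the core VMCI
 * driver; this file does not register its own module_init()/module_exit().
 */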
int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}