/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;

	bool exclusive_vectors;

	struct tasklet_struct datagram_tasklet;
	struct tasklet_struct bm_tasklet;

	void *data_buffer;
	void *notification_bitmap;
	dma_addr_t notification_base;
};

static bool use_ppn64;

bool vmci_use_ppn64(void)
{
	return use_ppn64;
}

/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}

u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;

		get_cid_msg.dst =
			vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					 VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that possibility.
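	 * The hypercall itself is the iowrite8_rep()/ioread32() pair
	 * below: the datagram (header plus payload) is written byte-wise
	 * to the data-out port and the result is then read back from
	 * VMCI_RESULT_LOW_ADDR.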
	 * Disabling interrupts to avoid incoming datagrams during a
	 * "rep out" and possibly ending up in this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);

/*
 * Gets called with the new context id if it is updated or the VM is
 * resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
			vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns 0 if
 * the required hypercalls (or fallback hypercalls) are supported by the
 * host, an error code otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
			VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Checks that hypercalls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result ? 0 : -ENXIO;
}

/*
 * Reads datagrams from the data-in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data-in
 * port for the duration of the call.
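 *
 * Datagrams in the buffer are laid out back to back, each padded to
 * VMCI_DG_SIZE_ALIGNED() alignment; the dispatch loop treats an entry
 * whose destination resource is VMCI_INVALID_ID as end-of-page padding
 * and skips ahead to the next page boundary.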
 */
static void vmci_dispatch_dgs(unsigned long data)
{
	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size = PAGE_SIZE;
	size_t remaining_bytes;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
		    vmci_dev->data_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	while (dg->dst.resource != VMCI_INVALID_ID ||
	       remaining_bytes > PAGE_SIZE) {
		unsigned dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {

					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly any
					 * following datagrams into
					 * the bytes after it.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
						dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
						dg_in_buffer_size;

				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer +
						remaining_bytes,
					    current_dg_in_buffer_size -
						remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
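			 * We still have to drain its bytes from the data-in
			 * port, though, so the loop below keeps reading full
			 * buffers until the oversized datagram has been
			 * consumed.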
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer,
					    current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
			(size_t) (dg_in_buffer + current_dg_in_buffer_size -
				  (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
				    vmci_dev->data_buffer,
				    current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply schedule
	 * the datagram tasklet, since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->exclusive_vectors) {
		tasklet_schedule(&dev->datagram_tasklet);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			tasklet_schedule(&dev->datagram_tasklet);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			tasklet_schedule(&dev->bm_tasklet);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	tasklet_schedule(&dev->bm_tasklet);

	return IRQ_HANDLED;
}

/*
 * Most of the initialization at module load time is done here.
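 *
 * The probe routine below maps BAR 0, negotiates device capabilities
 * (datagrams, 64-bit PPNs, notifications), allocates the datagram
 * buffer and the notification bitmap, registers the bitmap with the
 * device, sets up MSI-X/MSI/legacy interrupts, and finally unmasks and
 * enables them.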
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned int caps_in_use;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}
	caps_in_use = VMCI_CAPS_DATAGRAM;

	/*
	 * Use 64-bit PPNs if the device supports them.
	 *
	 * There is no check for the return value of dma_set_mask_and_coherent
	 * since this driver can handle the default mask values if
	 * dma_set_mask_and_coherent fails.
	 */
	if (capabilities & VMCI_CAPS_PPN64) {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		use_ppn64 = true;
		caps_in_use |= VMCI_CAPS_PPN64;
	} else {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		use_ppn64 = false;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
			caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);

	/*
	 * Let the host know which capabilities we intend to use.
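	 * Writing caps_in_use back to the capabilities register
	 * acknowledges the subset of the advertised capabilities that
	 * this driver will actually use.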
	 */
	iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
				 bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fall back
	 * on legacy interrupts.
	 */
	error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
				      PCI_IRQ_MSIX);
	if (error < 0) {
		error = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (error < 0)
			goto err_remove_bitmap;
	} else {
		vmci_dev->exclusive_vectors = true;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
			    IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(pci_irq_vector(pdev, 1),
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/*
	 * Enable interrupts.
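	 * The IMR write above selects which interrupt causes (datagram
	 * and, if used, notifications) we care about; this control write
	 * enables interrupt delivery from the device.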
	 */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);
	return 0;

err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	pci_free_irq_vectors(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	if (vmci_dev->exclusive_vectors)
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	pci_free_irq_vectors(pdev);

	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */

		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
}

static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};

int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}