/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_VENDOR_ID_VMWARE		0x15AD
#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;

	unsigned int irq;
	unsigned int intr_type;
	bool exclusive_vectors;
	struct msix_entry msix_entries[VMCI_MAX_INTRS];

	struct tasklet_struct datagram_tasklet;
	struct tasklet_struct bm_tasklet;

	void *data_buffer;
	void *notification_bitmap;
};
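/*
 * Incoming work is split between the two tasklets above:
 * datagram_tasklet drains datagrams from the device's data-in port
 * into data_buffer, while bm_tasklet scans notification_bitmap for
 * raised doorbell flags. The interrupt handlers further down do
 * little more than acknowledge the ICR and schedule these tasklets.
 */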

/* vmci_dev singleton device and supporting data */
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}

u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;
		get_cid_msg.dst =
		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
				     VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that
	 * possibility. We also disable interrupts to avoid incoming
	 * datagrams during a "rep out" and possibly ending up back in
	 * this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);
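
/*
 * A minimal caller sketch, mirroring vmci_get_vm_context_id() above:
 * point the datagram at a hypervisor resource, mark the source
 * anonymous, and send. A zero payload_size means the header is the
 * whole message; for larger messages the payload follows the header
 * in memory and VMCI_DG_SIZE() accounts for it.
 *
 *	struct vmci_datagram dg;
 *
 *	dg.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 *				  VMCI_GET_CONTEXT_ID);
 *	dg.src = VMCI_ANON_SRC_HANDLE;
 *	dg.payload_size = 0;
 *	result = vmci_send_datagram(&dg);
 */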

/*
 * Gets called with the new context id if the context id is updated
 * or the VM is resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
				vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns
 * true if required hypercalls (or fallback hypercalls) are
 * supported by the host, false otherwise.
 */
static bool vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return false;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Check that hypercalls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result;
}
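
/*
 * The resources query sent above is laid out as a datagram header
 * followed by a count and a variable-length array of resource IDs,
 * which is why msg_size is computed as the query header plus
 * VMCI_UTIL_NUM_RESOURCES 32-bit entries. The result is compared
 * against 0x01, i.e. a reply with (presumably) one bit set per
 * supported resource, bit 0 matching resources[0] above.
 */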

/*
 * Reads datagrams from the data-in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data-in
 * port for the duration of the call.
 */
static void vmci_dispatch_dgs(unsigned long data)
{
	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size = PAGE_SIZE;
	size_t remaining_bytes;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
		    vmci_dev->data_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	while (dg->dst.resource != VMCI_INVALID_ID ||
	       remaining_bytes > PAGE_SIZE) {
		unsigned dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {

					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly any
					 * following datagrams into
					 * the subsequent bytes.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
					    dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
					    dg_in_buffer_size;

				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer +
						remaining_bytes,
					    current_dg_in_buffer_size -
						remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * The datagram doesn't fit in the maximal-size
			 * datagram buffer, so we drop it.
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer,
					    current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
		    (size_t) (dg_in_buffer + current_dg_in_buffer_size -
			      (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */

			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
				    vmci_dev->data_buffer,
				    current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}
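
/*
 * Worked example of the sizing heuristic above, assuming 4 KiB pages:
 * a batch of small datagrams is served entirely out of the first
 * PAGE_SIZE bytes of data_buffer. Only when a datagram straddles the
 * end of what was read (or is too large and must be skipped) does
 * current_dg_in_buffer_size grow to VMCI_MAX_DG_SIZE, and it then
 * stays there for the rest of this invocation of the tasklet.
 */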

/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}
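
/*
 * Both vmci_dispatch_dgs() and vmci_process_bitmap() run in tasklet
 * (softirq) context, so they must not sleep; the handlers they reach
 * through vmci_datagram_invoke_guest_handler() and
 * vmci_dbell_scan_notification_entries() are subject to the same
 * constraint.
 */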

/*
 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
 */
static int vmci_enable_msix(struct pci_dev *pdev,
			    struct vmci_guest_device *vmci_dev)
{
	int i;
	int result;

	for (i = 0; i < VMCI_MAX_INTRS; ++i) {
		vmci_dev->msix_entries[i].entry = i;
		vmci_dev->msix_entries[i].vector = i;
	}

	result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
	if (result == 0)
		vmci_dev->exclusive_vectors = true;
	else if (result > 0)
		result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);

	return result;
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply schedule
	 * the datagram tasklet, since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
		tasklet_schedule(&dev->datagram_tasklet);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			tasklet_schedule(&dev->datagram_tasklet);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			tasklet_schedule(&dev->bm_tasklet);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}
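
/*
 * A note on the ICR check above: reading 0 means the (shared) line was
 * not raised by this device, and reading all-ones is the conventional
 * PCI indication that the device is gone (e.g. surprise removal), so
 * both cases return IRQ_NONE without scheduling any work.
 */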

/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	tasklet_schedule(&dev->bm_tasklet);

	return IRQ_HANDLED;
}
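
/*
 * To summarize the interrupt routing: with exclusive MSI-X vectors,
 * vector VMCI_INTR_DATAGRAM lands in vmci_interrupt() and vector
 * VMCI_INTR_NOTIFICATION in vmci_interrupt_bm(), with no ICR read
 * needed. With a single shared vector, MSI, or legacy INTx, everything
 * arrives in vmci_interrupt(), which demultiplexes via the ICR.
 */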

/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase;
	unsigned int capabilities;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}

	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
	if (error) {
		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
		return error;
	}

	iobase = pcim_iomap_table(pdev)[0];

	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
		 (unsigned long)iobase, pdev->irq);

	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */

	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}

	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
			capabilities &= ~VMCI_CAPS_NOTIFICATIONS;
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

	/* Let the host know which capabilities we intend to use. */
	iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	spin_unlock_irq(&vmci_dev_spinlock);

	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		struct page *page =
			vmalloc_to_page(vmci_dev->notification_bitmap);
		unsigned long bitmap_ppn = page_to_pfn(page);
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
				 (u32) bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	if (!vmci_check_host_caps(pdev)) {
		error = -ENXIO;
		goto err_remove_bitmap;
	}
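
	/*
	 * Note the ordering above: vmci_dev_g must be published before
	 * the notification bitmap registration and the host capability
	 * check, since vmci_check_host_caps() sends its query through
	 * vmci_send_datagram(), which returns VMCI_ERROR_UNAVAILABLE
	 * while vmci_dev_g is still NULL.
	 */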

	/* Enable device. */

	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fall
	 * back on legacy interrupts.
	 */
	if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
		vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
		vmci_dev->irq = vmci_dev->msix_entries[0].vector;
	} else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
		vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
		vmci_dev->irq = pdev->irq;
	} else {
		vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
		vmci_dev->irq = pdev->irq;
	}

	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
			    KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			vmci_dev->irq, error);
		goto err_disable_msi;
	}

	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(vmci_dev->msix_entries[1].vector,
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				vmci_dev->msix_entries[1].vector, error);
			goto err_free_irq;
		}
	}

	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);
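
	/*
	 * Interrupts are unmasked only below, after every handler and
	 * tasklet is in place, so neither a datagram nor a notification
	 * interrupt can fire into a half-initialized device.
	 */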

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (capabilities & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

	/* Enable interrupts. */
	iowrite32(VMCI_CONTROL_INT_ENABLE,
		  vmci_dev->iobase + VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);
	return 0;

err_free_irq:
	free_irq(vmci_dev->irq, vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
		pci_disable_msix(pdev);
	else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
		pci_disable_msi(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		iowrite32(VMCI_CONTROL_RESET,
			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
		vfree(vmci_dev->notification_bitmap);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}
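
/*
 * The error labels above unwind in reverse order of setup: IRQs first,
 * then MSI/MSI-X, the event subscription, the notification bitmap
 * (after resetting the device so it no longer references the bitmap),
 * the vmci_dev_g pointer, and finally the datagram buffer.
 */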

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	free_irq(vmci_dev->irq, vmci_dev);
	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
		if (vmci_dev->exclusive_vectors)
			free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
		pci_disable_msix(pdev);
	} else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
		pci_disable_msi(pdev);
	}

	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */
		vfree(vmci_dev->notification_bitmap);
	}

	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
}

static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};

int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}
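
/*
 * Note that vmci_guest_init()/vmci_guest_exit() are not module_init()/
 * module_exit() hooks themselves; they are expected to be called from
 * the shared VMCI driver core (their declarations presumably live in
 * "vmci_driver.h", included above), which decides whether the guest
 * and/or host personality of the driver should be active.
 */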