18bf50399SGeorge Zhang /* 28bf50399SGeorge Zhang * VMware VMCI Driver 38bf50399SGeorge Zhang * 48bf50399SGeorge Zhang * Copyright (C) 2012 VMware, Inc. All rights reserved. 58bf50399SGeorge Zhang * 68bf50399SGeorge Zhang * This program is free software; you can redistribute it and/or modify it 78bf50399SGeorge Zhang * under the terms of the GNU General Public License as published by the 88bf50399SGeorge Zhang * Free Software Foundation version 2 and no later version. 98bf50399SGeorge Zhang * 108bf50399SGeorge Zhang * This program is distributed in the hope that it will be useful, but 118bf50399SGeorge Zhang * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 128bf50399SGeorge Zhang * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 138bf50399SGeorge Zhang * for more details. 148bf50399SGeorge Zhang */ 158bf50399SGeorge Zhang 168bf50399SGeorge Zhang #include <linux/vmw_vmci_defs.h> 178bf50399SGeorge Zhang #include <linux/vmw_vmci_api.h> 188bf50399SGeorge Zhang #include <linux/moduleparam.h> 198bf50399SGeorge Zhang #include <linux/miscdevice.h> 208bf50399SGeorge Zhang #include <linux/interrupt.h> 218bf50399SGeorge Zhang #include <linux/highmem.h> 228bf50399SGeorge Zhang #include <linux/atomic.h> 238bf50399SGeorge Zhang #include <linux/kernel.h> 248bf50399SGeorge Zhang #include <linux/module.h> 258bf50399SGeorge Zhang #include <linux/mutex.h> 268bf50399SGeorge Zhang #include <linux/sched.h> 278bf50399SGeorge Zhang #include <linux/file.h> 288bf50399SGeorge Zhang #include <linux/init.h> 298bf50399SGeorge Zhang #include <linux/poll.h> 308bf50399SGeorge Zhang #include <linux/pci.h> 318bf50399SGeorge Zhang #include <linux/smp.h> 328bf50399SGeorge Zhang #include <linux/fs.h> 338bf50399SGeorge Zhang #include <linux/io.h> 348bf50399SGeorge Zhang 358bf50399SGeorge Zhang #include "vmci_handle_array.h" 368bf50399SGeorge Zhang #include "vmci_queue_pair.h" 378bf50399SGeorge Zhang #include "vmci_datagram.h" 388bf50399SGeorge Zhang 
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

/* Resource types a VMX can ask the host to act on (see notify ioctls). */
enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

/* Actions on a notify resource. */
enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	    /* User VA. */
	u64 consume_page_file;	    /* User VA. */
	u64 produce_page_file_size; /* Size of the file name array. */
	u64 consume_page_file_size; /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state, allocated on open() of /dev/vmci and
 * freed on the matching close().
 */
struct vmci_host_dev {
	struct vmci_ctx *context;	/* Set by IOCTL_VMCI_INIT_CONTEXT */
	int user_version;		/* Set by IOCTL_VMCI_VERSION2 */
	enum vmci_obj_type ct_type;	/* VMCIOBJ_CONTEXT once initialized */
	struct mutex lock;		/* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
/* Number of live contexts created via IOCTL_VMCI_INIT_CONTEXT. */
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}

/*
 * Called on open of /dev/vmci.  Allocates the per-fd state; the
 * context itself is only created later by IOCTL_VMCI_INIT_CONTEXT.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.  Tears down the context (if one was created) and frees
 * the per-fd state.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented when
		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
		 * ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	unsigned int mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = POLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer, and
 * returns the new length in userBufferSize. If the copy to the
 * user buffer fails, the functions still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles
				       (handle_array), *user_buf_size);

	return VMCI_SUCCESS;
}

/*
 * Sets up a given context for notify to work.  Pins the physical
 * page backing the user-space notify boolean and maps it into
 * kernel space so the driver can signal the VMX without a copy.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	struct page *page;
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we explicit
	 * about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock physical page backing a given user VA.
	 *
	 * FIX: this used to pass PAGE_ALIGN(uva), which rounds *up* to the
	 * next page boundary.  For any uva that is not page aligned that
	 * pins the page *after* the one holding the notify flag, while the
	 * kmap() offset below is computed within the page that contains
	 * uva — so the mapped pointer would point into the wrong page.
	 * Pin the page that actually contains uva instead.
	 */
	down_read(&current->mm->mmap_sem);
	retval = get_user_pages(current, current->mm,
				uva & PAGE_MASK,
				1, 1, 0, &page, NULL);
	up_read(&current->mm->mmap_sem);
	if (retval != 1)
		return VMCI_ERROR_GENERIC;

	/*
	 * Map the locked page and set up notify pointer.
	 */
	context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

/*
 * Implements IOCTL_VMCI_VERSION and IOCTL_VMCI_VERSION2.  The
 * VERSION2 variant first records the VMX's advertised version.
 * Returns the version number the caller should use (see comment
 * below for the negotiation rules).
 */
static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;
		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0 tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in an newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required that VMX and VMCI kernel module were
	 * version sync'd.  All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}

/* Per-ioctl debug logging; ioctl_name must be in scope at the call site. */
#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

/*
 * Implements IOCTL_VMCI_INIT_CONTEXT: creates the VMCI context
 * for this fd and writes the (possibly host-chosen) cid back to
 * user space.  May only be issued once per fd.
 */
static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy cid to userlevel, we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

/*
 * Implements IOCTL_VMCI_DATAGRAM_SEND: copies a datagram from
 * user space, validates its size, and dispatches it on behalf of
 * this fd's context.  The dispatch result is written back into
 * the user's snd_rcv_info block.
 */
static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = kmalloc(send_info.len, GFP_KERNEL);
	if (!dg) {
		vmci_ioctl_err(
			"cannot allocate memory to dispatch datagram\n");
		return -ENOMEM;
	}

	if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
			   send_info.len)) {
		vmci_ioctl_err("error getting datagram\n");
		kfree(dg);
		return -EFAULT;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

/*
 * Implements IOCTL_VMCI_DATAGRAM_RECEIVE: dequeues the next
 * pending datagram for this context and copies it to the
 * user-supplied buffer.
 */
static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
}

/*
 * Implements IOCTL_VMCI_QUEUEPAIR_ALLOC.  Pre-NOVMVM VMX'en use
 * the legacy vmvm info layout (no page store); newer ones supply
 * a page store of PPNs.  On a put_user() failure after a
 * successful allocation the queue pair is detached again so no
 * resource is leaked.
 */
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

/*
 * Implements IOCTL_VMCI_QUEUEPAIR_SETVA (NOVMVM VMX'en only): a
 * non-zero va (re)maps the queue pair at that address, a zero va
 * unmaps it.
 */
static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

/*
 * Implements IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE, which is only
 * supported for VMX versions in [HOSTQP, NOVMVM).
 */
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller.  Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns.  And then,
	 * only if the ioctl() result indicates no error.  We send up the
	 * SUCCESS status before calling SetPageStore() store because failing
	 * to copy up the result code means unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms.  For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (anymore)
	 * data into the queue_pair.  That means an additional test in the
	 * VMCI_Enqueue() code path.  Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write a result field of the
		 * caller's info block.  So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed).  So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't.  Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

/*
 * Implements IOCTL_VMCI_QUEUEPAIR_DETACH.  Pre-NOVMVM VMX'en are
 * told VMCI_SUCCESS_LAST_DETACH so they tear down their side.
 */
static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}

/*
 * Implements IOCTL_VMCI_CTX_ADD_NOTIFICATION: subscribes this
 * context to notifications from the given remote context id.
 */
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}
-EFAULT : 0; 6838bf50399SGeorge Zhang } 6848bf50399SGeorge Zhang 6858bf50399SGeorge Zhang static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev, 6868bf50399SGeorge Zhang const char *ioctl_name, 6878bf50399SGeorge Zhang void __user *uptr) 6888bf50399SGeorge Zhang { 6898bf50399SGeorge Zhang struct vmci_ctx_info ar_info; 6908bf50399SGeorge Zhang struct vmci_ctx_info __user *info = uptr; 6918bf50399SGeorge Zhang u32 cid; 6928bf50399SGeorge Zhang int result; 6938bf50399SGeorge Zhang 6948bf50399SGeorge Zhang if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 6958bf50399SGeorge Zhang vmci_ioctl_err("only valid for contexts\n"); 6968bf50399SGeorge Zhang return -EINVAL; 6978bf50399SGeorge Zhang } 6988bf50399SGeorge Zhang 6998bf50399SGeorge Zhang if (copy_from_user(&ar_info, uptr, sizeof(ar_info))) 7008bf50399SGeorge Zhang return -EFAULT; 7018bf50399SGeorge Zhang 7028bf50399SGeorge Zhang cid = vmci_ctx_get_id(vmci_host_dev->context); 7038bf50399SGeorge Zhang result = vmci_ctx_remove_notification(cid, 7048bf50399SGeorge Zhang ar_info.remote_cid); 7058bf50399SGeorge Zhang 7068bf50399SGeorge Zhang return put_user(result, &info->result) ? 
-EFAULT : 0; 7078bf50399SGeorge Zhang } 7088bf50399SGeorge Zhang 7098bf50399SGeorge Zhang static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev, 7108bf50399SGeorge Zhang const char *ioctl_name, 7118bf50399SGeorge Zhang void __user *uptr) 7128bf50399SGeorge Zhang { 7138bf50399SGeorge Zhang struct vmci_ctx_chkpt_buf_info get_info; 7148bf50399SGeorge Zhang u32 cid; 7158bf50399SGeorge Zhang void *cpt_buf; 7168bf50399SGeorge Zhang int retval; 7178bf50399SGeorge Zhang 7188bf50399SGeorge Zhang if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 7198bf50399SGeorge Zhang vmci_ioctl_err("only valid for contexts\n"); 7208bf50399SGeorge Zhang return -EINVAL; 7218bf50399SGeorge Zhang } 7228bf50399SGeorge Zhang 7238bf50399SGeorge Zhang if (copy_from_user(&get_info, uptr, sizeof(get_info))) 7248bf50399SGeorge Zhang return -EFAULT; 7258bf50399SGeorge Zhang 7268bf50399SGeorge Zhang cid = vmci_ctx_get_id(vmci_host_dev->context); 7278bf50399SGeorge Zhang get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type, 7288bf50399SGeorge Zhang &get_info.buf_size, &cpt_buf); 7298bf50399SGeorge Zhang if (get_info.result == VMCI_SUCCESS && get_info.buf_size) { 7308bf50399SGeorge Zhang void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf; 7318bf50399SGeorge Zhang retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size); 7328bf50399SGeorge Zhang kfree(cpt_buf); 7338bf50399SGeorge Zhang 7348bf50399SGeorge Zhang if (retval) 7358bf50399SGeorge Zhang return -EFAULT; 7368bf50399SGeorge Zhang } 7378bf50399SGeorge Zhang 7388bf50399SGeorge Zhang return copy_to_user(uptr, &get_info, sizeof(get_info)) ? 
-EFAULT : 0; 7398bf50399SGeorge Zhang } 7408bf50399SGeorge Zhang 7418bf50399SGeorge Zhang static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev, 7428bf50399SGeorge Zhang const char *ioctl_name, 7438bf50399SGeorge Zhang void __user *uptr) 7448bf50399SGeorge Zhang { 7458bf50399SGeorge Zhang struct vmci_ctx_chkpt_buf_info set_info; 7468bf50399SGeorge Zhang u32 cid; 7478bf50399SGeorge Zhang void *cpt_buf; 7488bf50399SGeorge Zhang int retval; 7498bf50399SGeorge Zhang 7508bf50399SGeorge Zhang if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 7518bf50399SGeorge Zhang vmci_ioctl_err("only valid for contexts\n"); 7528bf50399SGeorge Zhang return -EINVAL; 7538bf50399SGeorge Zhang } 7548bf50399SGeorge Zhang 7558bf50399SGeorge Zhang if (copy_from_user(&set_info, uptr, sizeof(set_info))) 7568bf50399SGeorge Zhang return -EFAULT; 7578bf50399SGeorge Zhang 7588bf50399SGeorge Zhang cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL); 7598bf50399SGeorge Zhang if (!cpt_buf) { 7608bf50399SGeorge Zhang vmci_ioctl_err( 7618bf50399SGeorge Zhang "cannot allocate memory to set cpt state (type=%d)\n", 7628bf50399SGeorge Zhang set_info.cpt_type); 7638bf50399SGeorge Zhang return -ENOMEM; 7648bf50399SGeorge Zhang } 7658bf50399SGeorge Zhang 7668bf50399SGeorge Zhang if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf, 7678bf50399SGeorge Zhang set_info.buf_size)) { 7688bf50399SGeorge Zhang retval = -EFAULT; 7698bf50399SGeorge Zhang goto out; 7708bf50399SGeorge Zhang } 7718bf50399SGeorge Zhang 7728bf50399SGeorge Zhang cid = vmci_ctx_get_id(vmci_host_dev->context); 7738bf50399SGeorge Zhang set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type, 7748bf50399SGeorge Zhang set_info.buf_size, cpt_buf); 7758bf50399SGeorge Zhang 7768bf50399SGeorge Zhang retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? 
-EFAULT : 0; 7778bf50399SGeorge Zhang 7788bf50399SGeorge Zhang out: 7798bf50399SGeorge Zhang kfree(cpt_buf); 7808bf50399SGeorge Zhang return retval; 7818bf50399SGeorge Zhang } 7828bf50399SGeorge Zhang 7838bf50399SGeorge Zhang static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev, 7848bf50399SGeorge Zhang const char *ioctl_name, 7858bf50399SGeorge Zhang void __user *uptr) 7868bf50399SGeorge Zhang { 7878bf50399SGeorge Zhang u32 __user *u32ptr = uptr; 7888bf50399SGeorge Zhang 7898bf50399SGeorge Zhang return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0; 7908bf50399SGeorge Zhang } 7918bf50399SGeorge Zhang 7928bf50399SGeorge Zhang static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev, 7938bf50399SGeorge Zhang const char *ioctl_name, 7948bf50399SGeorge Zhang void __user *uptr) 7958bf50399SGeorge Zhang { 7968bf50399SGeorge Zhang struct vmci_set_notify_info notify_info; 7978bf50399SGeorge Zhang 7988bf50399SGeorge Zhang if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 7998bf50399SGeorge Zhang vmci_ioctl_err("only valid for contexts\n"); 8008bf50399SGeorge Zhang return -EINVAL; 8018bf50399SGeorge Zhang } 8028bf50399SGeorge Zhang 8038bf50399SGeorge Zhang if (copy_from_user(¬ify_info, uptr, sizeof(notify_info))) 8048bf50399SGeorge Zhang return -EFAULT; 8058bf50399SGeorge Zhang 8068bf50399SGeorge Zhang if (notify_info.notify_uva) { 8078bf50399SGeorge Zhang notify_info.result = 8088bf50399SGeorge Zhang vmci_host_setup_notify(vmci_host_dev->context, 8098bf50399SGeorge Zhang notify_info.notify_uva); 8108bf50399SGeorge Zhang } else { 8118bf50399SGeorge Zhang vmci_ctx_unset_notify(vmci_host_dev->context); 8128bf50399SGeorge Zhang notify_info.result = VMCI_SUCCESS; 8138bf50399SGeorge Zhang } 8148bf50399SGeorge Zhang 8158bf50399SGeorge Zhang return copy_to_user(uptr, ¬ify_info, sizeof(notify_info)) ? 
8168bf50399SGeorge Zhang -EFAULT : 0; 8178bf50399SGeorge Zhang } 8188bf50399SGeorge Zhang 8198bf50399SGeorge Zhang static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev, 8208bf50399SGeorge Zhang const char *ioctl_name, 8218bf50399SGeorge Zhang void __user *uptr) 8228bf50399SGeorge Zhang { 8238bf50399SGeorge Zhang struct vmci_dbell_notify_resource_info info; 8248bf50399SGeorge Zhang u32 cid; 8258bf50399SGeorge Zhang 8268bf50399SGeorge Zhang if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { 8278bf50399SGeorge Zhang vmci_ioctl_err("invalid for current VMX versions\n"); 8288bf50399SGeorge Zhang return -EINVAL; 8298bf50399SGeorge Zhang } 8308bf50399SGeorge Zhang 8318bf50399SGeorge Zhang if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 8328bf50399SGeorge Zhang vmci_ioctl_err("only valid for contexts\n"); 8338bf50399SGeorge Zhang return -EINVAL; 8348bf50399SGeorge Zhang } 8358bf50399SGeorge Zhang 8368bf50399SGeorge Zhang if (copy_from_user(&info, uptr, sizeof(info))) 8378bf50399SGeorge Zhang return -EFAULT; 8388bf50399SGeorge Zhang 8398bf50399SGeorge Zhang cid = vmci_ctx_get_id(vmci_host_dev->context); 8408bf50399SGeorge Zhang 8418bf50399SGeorge Zhang switch (info.action) { 8428bf50399SGeorge Zhang case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY: 8438bf50399SGeorge Zhang if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) { 8448bf50399SGeorge Zhang u32 flags = VMCI_NO_PRIVILEGE_FLAGS; 8458bf50399SGeorge Zhang info.result = vmci_ctx_notify_dbell(cid, info.handle, 8468bf50399SGeorge Zhang flags); 8478bf50399SGeorge Zhang } else { 8488bf50399SGeorge Zhang info.result = VMCI_ERROR_UNAVAILABLE; 8498bf50399SGeorge Zhang } 8508bf50399SGeorge Zhang break; 8518bf50399SGeorge Zhang 8528bf50399SGeorge Zhang case VMCI_NOTIFY_RESOURCE_ACTION_CREATE: 8538bf50399SGeorge Zhang info.result = vmci_ctx_dbell_create(cid, info.handle); 8548bf50399SGeorge Zhang break; 8558bf50399SGeorge Zhang 8568bf50399SGeorge Zhang case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY: 
8578bf50399SGeorge Zhang info.result = vmci_ctx_dbell_destroy(cid, info.handle); 8588bf50399SGeorge Zhang break; 8598bf50399SGeorge Zhang 8608bf50399SGeorge Zhang default: 8618bf50399SGeorge Zhang vmci_ioctl_err("got unknown action (action=%d)\n", 8628bf50399SGeorge Zhang info.action); 8638bf50399SGeorge Zhang info.result = VMCI_ERROR_INVALID_ARGS; 8648bf50399SGeorge Zhang } 8658bf50399SGeorge Zhang 8668bf50399SGeorge Zhang return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; 8678bf50399SGeorge Zhang } 8688bf50399SGeorge Zhang 8698bf50399SGeorge Zhang static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev, 8708bf50399SGeorge Zhang const char *ioctl_name, 8718bf50399SGeorge Zhang void __user *uptr) 8728bf50399SGeorge Zhang { 8738bf50399SGeorge Zhang struct vmci_ctx_notify_recv_info info; 8748bf50399SGeorge Zhang struct vmci_handle_arr *db_handle_array; 8758bf50399SGeorge Zhang struct vmci_handle_arr *qp_handle_array; 8768bf50399SGeorge Zhang void __user *ubuf; 8778bf50399SGeorge Zhang u32 cid; 8788bf50399SGeorge Zhang int retval = 0; 8798bf50399SGeorge Zhang 8808bf50399SGeorge Zhang if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) { 8818bf50399SGeorge Zhang vmci_ioctl_err("only valid for contexts\n"); 8828bf50399SGeorge Zhang return -EINVAL; 8838bf50399SGeorge Zhang } 8848bf50399SGeorge Zhang 8858bf50399SGeorge Zhang if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) { 8868bf50399SGeorge Zhang vmci_ioctl_err("not supported for the current vmx version\n"); 8878bf50399SGeorge Zhang return -EINVAL; 8888bf50399SGeorge Zhang } 8898bf50399SGeorge Zhang 8908bf50399SGeorge Zhang if (copy_from_user(&info, uptr, sizeof(info))) 8918bf50399SGeorge Zhang return -EFAULT; 8928bf50399SGeorge Zhang 8938bf50399SGeorge Zhang if ((info.db_handle_buf_size && !info.db_handle_buf_uva) || 8948bf50399SGeorge Zhang (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) { 8958bf50399SGeorge Zhang return -EINVAL; 8968bf50399SGeorge Zhang } 8978bf50399SGeorge 
Zhang 8988bf50399SGeorge Zhang cid = vmci_ctx_get_id(vmci_host_dev->context); 8998bf50399SGeorge Zhang 9008bf50399SGeorge Zhang info.result = vmci_ctx_rcv_notifications_get(cid, 9018bf50399SGeorge Zhang &db_handle_array, &qp_handle_array); 9028bf50399SGeorge Zhang if (info.result != VMCI_SUCCESS) 9038bf50399SGeorge Zhang return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0; 9048bf50399SGeorge Zhang 9058bf50399SGeorge Zhang ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva; 9068bf50399SGeorge Zhang info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size, 9078bf50399SGeorge Zhang db_handle_array, &retval); 9088bf50399SGeorge Zhang if (info.result == VMCI_SUCCESS && !retval) { 9098bf50399SGeorge Zhang ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva; 9108bf50399SGeorge Zhang info.result = drv_cp_harray_to_user(ubuf, 9118bf50399SGeorge Zhang &info.qp_handle_buf_size, 9128bf50399SGeorge Zhang qp_handle_array, &retval); 9138bf50399SGeorge Zhang } 9148bf50399SGeorge Zhang 9158bf50399SGeorge Zhang if (!retval && copy_to_user(uptr, &info, sizeof(info))) 9168bf50399SGeorge Zhang retval = -EFAULT; 9178bf50399SGeorge Zhang 9188bf50399SGeorge Zhang vmci_ctx_rcv_notifications_release(cid, 9198bf50399SGeorge Zhang db_handle_array, qp_handle_array, 9208bf50399SGeorge Zhang info.result == VMCI_SUCCESS && !retval); 9218bf50399SGeorge Zhang 9228bf50399SGeorge Zhang return retval; 9238bf50399SGeorge Zhang } 9248bf50399SGeorge Zhang 9258bf50399SGeorge Zhang static long vmci_host_unlocked_ioctl(struct file *filp, 9268bf50399SGeorge Zhang unsigned int iocmd, unsigned long ioarg) 9278bf50399SGeorge Zhang { 9288bf50399SGeorge Zhang #define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do { \ 9298bf50399SGeorge Zhang char *name = __stringify(IOCTL_VMCI_ ## ioctl_name); \ 9308bf50399SGeorge Zhang return vmci_host_do_ ## ioctl_fn( \ 9318bf50399SGeorge Zhang vmci_host_dev, name, uptr); \ 9328bf50399SGeorge Zhang } while (0) 9338bf50399SGeorge Zhang 9348bf50399SGeorge 
Zhang struct vmci_host_dev *vmci_host_dev = filp->private_data; 9358bf50399SGeorge Zhang void __user *uptr = (void __user *)ioarg; 9368bf50399SGeorge Zhang 9378bf50399SGeorge Zhang switch (iocmd) { 9388bf50399SGeorge Zhang case IOCTL_VMCI_INIT_CONTEXT: 9398bf50399SGeorge Zhang VMCI_DO_IOCTL(INIT_CONTEXT, init_context); 9408bf50399SGeorge Zhang case IOCTL_VMCI_DATAGRAM_SEND: 9418bf50399SGeorge Zhang VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram); 9428bf50399SGeorge Zhang case IOCTL_VMCI_DATAGRAM_RECEIVE: 9438bf50399SGeorge Zhang VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram); 9448bf50399SGeorge Zhang case IOCTL_VMCI_QUEUEPAIR_ALLOC: 9458bf50399SGeorge Zhang VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair); 9468bf50399SGeorge Zhang case IOCTL_VMCI_QUEUEPAIR_SETVA: 9478bf50399SGeorge Zhang VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva); 9488bf50399SGeorge Zhang case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE: 9498bf50399SGeorge Zhang VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf); 9508bf50399SGeorge Zhang case IOCTL_VMCI_QUEUEPAIR_DETACH: 9518bf50399SGeorge Zhang VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach); 9528bf50399SGeorge Zhang case IOCTL_VMCI_CTX_ADD_NOTIFICATION: 9538bf50399SGeorge Zhang VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify); 9548bf50399SGeorge Zhang case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION: 9558bf50399SGeorge Zhang VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify); 9568bf50399SGeorge Zhang case IOCTL_VMCI_CTX_GET_CPT_STATE: 9578bf50399SGeorge Zhang VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state); 9588bf50399SGeorge Zhang case IOCTL_VMCI_CTX_SET_CPT_STATE: 9598bf50399SGeorge Zhang VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state); 9608bf50399SGeorge Zhang case IOCTL_VMCI_GET_CONTEXT_ID: 9618bf50399SGeorge Zhang VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id); 9628bf50399SGeorge Zhang case IOCTL_VMCI_SET_NOTIFY: 9638bf50399SGeorge Zhang VMCI_DO_IOCTL(SET_NOTIFY, set_notify); 9648bf50399SGeorge Zhang case IOCTL_VMCI_NOTIFY_RESOURCE: 
9658bf50399SGeorge Zhang VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource); 9668bf50399SGeorge Zhang case IOCTL_VMCI_NOTIFICATIONS_RECEIVE: 9678bf50399SGeorge Zhang VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications); 9688bf50399SGeorge Zhang 9698bf50399SGeorge Zhang case IOCTL_VMCI_VERSION: 9708bf50399SGeorge Zhang case IOCTL_VMCI_VERSION2: 9718bf50399SGeorge Zhang return vmci_host_get_version(vmci_host_dev, iocmd, uptr); 9728bf50399SGeorge Zhang 9738bf50399SGeorge Zhang default: 9748bf50399SGeorge Zhang pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd); 9758bf50399SGeorge Zhang return -EINVAL; 9768bf50399SGeorge Zhang } 9778bf50399SGeorge Zhang 9788bf50399SGeorge Zhang #undef VMCI_DO_IOCTL 9798bf50399SGeorge Zhang } 9808bf50399SGeorge Zhang 9818bf50399SGeorge Zhang static const struct file_operations vmuser_fops = { 9828bf50399SGeorge Zhang .owner = THIS_MODULE, 9838bf50399SGeorge Zhang .open = vmci_host_open, 9848bf50399SGeorge Zhang .release = vmci_host_close, 9858bf50399SGeorge Zhang .poll = vmci_host_poll, 9868bf50399SGeorge Zhang .unlocked_ioctl = vmci_host_unlocked_ioctl, 9878bf50399SGeorge Zhang .compat_ioctl = vmci_host_unlocked_ioctl, 9888bf50399SGeorge Zhang }; 9898bf50399SGeorge Zhang 9908bf50399SGeorge Zhang static struct miscdevice vmci_host_miscdev = { 9918bf50399SGeorge Zhang .name = "vmci", 9928bf50399SGeorge Zhang .minor = MISC_DYNAMIC_MINOR, 9938bf50399SGeorge Zhang .fops = &vmuser_fops, 9948bf50399SGeorge Zhang }; 9958bf50399SGeorge Zhang 9968bf50399SGeorge Zhang int __init vmci_host_init(void) 9978bf50399SGeorge Zhang { 9988bf50399SGeorge Zhang int error; 9998bf50399SGeorge Zhang 10008bf50399SGeorge Zhang host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID, 10018bf50399SGeorge Zhang VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, 10028bf50399SGeorge Zhang -1, VMCI_VERSION, NULL); 10038bf50399SGeorge Zhang if (IS_ERR(host_context)) { 10048bf50399SGeorge Zhang error = PTR_ERR(host_context); 10058bf50399SGeorge Zhang pr_warn("Failed to initialize 
VMCIContext (error%d)\n", 10068bf50399SGeorge Zhang error); 10078bf50399SGeorge Zhang return error; 10088bf50399SGeorge Zhang } 10098bf50399SGeorge Zhang 10108bf50399SGeorge Zhang error = misc_register(&vmci_host_miscdev); 10118bf50399SGeorge Zhang if (error) { 10128bf50399SGeorge Zhang pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n", 10138bf50399SGeorge Zhang vmci_host_miscdev.name, 10148bf50399SGeorge Zhang MISC_MAJOR, vmci_host_miscdev.minor, 10158bf50399SGeorge Zhang error); 10168bf50399SGeorge Zhang pr_warn("Unable to initialize host personality\n"); 10178bf50399SGeorge Zhang vmci_ctx_destroy(host_context); 10188bf50399SGeorge Zhang return error; 10198bf50399SGeorge Zhang } 10208bf50399SGeorge Zhang 10218bf50399SGeorge Zhang pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n", 10228bf50399SGeorge Zhang vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor); 10238bf50399SGeorge Zhang 10248bf50399SGeorge Zhang vmci_host_device_initialized = true; 10258bf50399SGeorge Zhang return 0; 10268bf50399SGeorge Zhang } 10278bf50399SGeorge Zhang 10288bf50399SGeorge Zhang void __exit vmci_host_exit(void) 10298bf50399SGeorge Zhang { 10308bf50399SGeorge Zhang int error; 10318bf50399SGeorge Zhang 10328bf50399SGeorge Zhang vmci_host_device_initialized = false; 10338bf50399SGeorge Zhang 10348bf50399SGeorge Zhang error = misc_deregister(&vmci_host_miscdev); 10358bf50399SGeorge Zhang if (error) 10368bf50399SGeorge Zhang pr_warn("Error unregistering character device: %d\n", error); 10378bf50399SGeorge Zhang 10388bf50399SGeorge Zhang vmci_ctx_destroy(host_context); 10398bf50399SGeorge Zhang vmci_qp_broker_exit(); 10408bf50399SGeorge Zhang 10418bf50399SGeorge Zhang pr_debug("VMCI host driver module unloaded\n"); 10428bf50399SGeorge Zhang } 1043