/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/io.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define VMCI_UTIL_NUM_RESOURCES 1

enum {
	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
};

enum {
	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
};

/*
 * VMCI driver initialization. This block can also be used to
 * pass initial group membership etc.
 */
struct vmci_init_blk {
	u32 cid;
	u32 flags;
};

/* VMCIqueue_pairAllocInfo_VMToVM */
struct vmci_qp_alloc_info_vmvm {
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u64 produce_page_file;	    /* User VA. */
	u64 consume_page_file;	    /* User VA. */
	u64 produce_page_file_size; /* Size of the file name array. */
	u64 consume_page_file_size; /* Size of the file name array. */
	s32 result;
	u32 _pad;
};

/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
struct vmci_set_notify_info {
	u64 notify_uva;
	s32 result;
	u32 _pad;
};

/*
 * Per-instance host state
 */
struct vmci_host_dev {
	struct vmci_ctx *context;
	int user_version;
	enum vmci_obj_type ct_type;
	struct mutex lock;  /* Mutex lock for vmci context access */
};

static struct vmci_ctx *host_context;
static bool vmci_host_device_initialized;
static atomic_t vmci_host_active_users = ATOMIC_INIT(0);

/*
 * Determines whether the VMCI host personality is
 * available. Since the core functionality of the host driver is
 * always present, all guests could possibly use the host
 * personality. However, to minimize the deviation from the
 * pre-unified driver state of affairs, we only consider the host
 * device active if there is no active guest device or if there
 * are VMX'en with active VMCI contexts using the host device.
 */
bool vmci_host_code_active(void)
{
	return vmci_host_device_initialized &&
	    (!vmci_guest_code_active() ||
	     atomic_read(&vmci_host_active_users) > 0);
}
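/*
 * A minimal sketch of the userspace side of this interface, assuming the
 * ioctl numbers and structure layouts from the VMCI public headers
 * (illustrative only; error handling omitted):
 *
 *	int fd = open("/dev/vmci", O_RDWR);
 *	struct vmci_init_blk blk = { .cid = VMCI_INVALID_ID, .flags = 0 };
 *
 *	ioctl(fd, IOCTL_VMCI_INIT_CONTEXT, &blk); // fd now backs a context
 *	...                                       // datagram/queue-pair ioctls
 *	close(fd);                                // destroys the context
 */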
/*
 * Called on open of /dev/vmci.
 */
static int vmci_host_open(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev;

	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
	if (vmci_host_dev == NULL)
		return -ENOMEM;

	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
	mutex_init(&vmci_host_dev->lock);
	filp->private_data = vmci_host_dev;

	return 0;
}

/*
 * Called on close of /dev/vmci, most often when the process
 * exits.
 */
static int vmci_host_close(struct inode *inode, struct file *filp)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;

		/*
		 * The number of active contexts is used to track whether any
		 * VMX'en are using the host personality. It is incremented
		 * when a context is created through the
		 * IOCTL_VMCI_INIT_CONTEXT ioctl.
		 */
		atomic_dec(&vmci_host_active_users);
	}
	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;

	kfree(vmci_host_dev);
	filp->private_data = NULL;
	return 0;
}

/*
 * This is used to wake up the VMX when a VMCI call arrives, or
 * to wake up select() or poll() at the next clock tick.
 */
static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	struct vmci_ctx *context = vmci_host_dev->context;
	__poll_t mask = 0;

	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
		/* Check for VMCI calls to this VM context. */
		if (wait)
			poll_wait(filp, &context->host_context.wait_queue,
				  wait);

		spin_lock(&context->lock);
		if (context->pending_datagrams > 0 ||
		    vmci_handle_arr_get_size(
				context->pending_doorbell_array) > 0) {
			mask = EPOLLIN;
		}
		spin_unlock(&context->lock);
	}
	return mask;
}

/*
 * Copies the handles of a handle array into a user buffer and
 * returns the new length in *user_buf_size. If the copy to the
 * user buffer fails, the function still returns VMCI_SUCCESS,
 * but retval != 0.
 */
static int drv_cp_harray_to_user(void __user *user_buf_uva,
				 u64 *user_buf_size,
				 struct vmci_handle_arr *handle_array,
				 int *retval)
{
	u32 array_size = 0;
	struct vmci_handle *handles;

	if (handle_array)
		array_size = vmci_handle_arr_get_size(handle_array);

	if (array_size * sizeof(*handles) > *user_buf_size)
		return VMCI_ERROR_MORE_DATA;

	*user_buf_size = array_size * sizeof(*handles);
	if (*user_buf_size)
		*retval = copy_to_user(user_buf_uva,
				       vmci_handle_arr_get_handles(handle_array),
				       *user_buf_size);

	return VMCI_SUCCESS;
}
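/*
 * The "notify" mechanism below shares a single byte between the VMX and
 * the host: the VMX hands down the user VA of a boolean flag, the host
 * pins and maps the backing page, and can then signal pending work by
 * writing through context->notify without a round trip into userspace.
 */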
/*
 * Sets up a given context for notify to work. Maps the notify
 * boolean in user VA into kernel space.
 */
static int vmci_host_setup_notify(struct vmci_ctx *context,
				  unsigned long uva)
{
	int retval;

	if (context->notify_page) {
		pr_devel("%s: Notify mechanism is already set up\n", __func__);
		return VMCI_ERROR_DUPLICATE_ENTRY;
	}

	/*
	 * We are using 'bool' internally, but let's make sure we are
	 * explicit about the size.
	 */
	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
	if (!access_ok((void __user *)uva, sizeof(u8)))
		return VMCI_ERROR_GENERIC;

	/*
	 * Lock the physical page backing the given user VA.
	 */
	retval = get_user_pages_fast(uva, 1, FOLL_WRITE, &context->notify_page);
	if (retval != 1) {
		context->notify_page = NULL;
		return VMCI_ERROR_GENERIC;
	}

	/*
	 * Map the locked page and set up the notify pointer.
	 */
	context->notify = kmap(context->notify_page) + (uva & (PAGE_SIZE - 1));
	vmci_ctx_check_signal_notify(context);

	return VMCI_SUCCESS;
}

static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
				 unsigned int cmd, void __user *uptr)
{
	if (cmd == IOCTL_VMCI_VERSION2) {
		int __user *vptr = uptr;

		if (get_user(vmci_host_dev->user_version, vptr))
			return -EFAULT;
	}

	/*
	 * The basic logic here is:
	 *
	 * If the user sends in a version of 0, tell it our version.
	 * If the user didn't send in a version, tell it our version.
	 * If the user sent in an old version, tell it -its- version.
	 * If the user sent in a newer version, tell it our version.
	 *
	 * The rationale behind telling the caller its version is that
	 * Workstation 6.5 required the VMX and the VMCI kernel module to
	 * be version sync'd. All new VMX users will be programmed to
	 * handle the VMCI kernel module version.
	 */

	if (vmci_host_dev->user_version > 0 &&
	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
		return vmci_host_dev->user_version;
	}

	return VMCI_VERSION;
}
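/*
 * The VMCI_VERSION_* constants referenced in the handlers below
 * (VMCI_VERSION_HOSTQP, VMCI_VERSION_NOVMVM, VMCI_VERSION_NOTIFY) are
 * feature thresholds from the VMCI headers; each ioctl compares the
 * user_version captured above against them to pick the ABI variant the
 * calling VMX expects.
 */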
#define vmci_ioctl_err(fmt, ...)	\
	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)

static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
				     const char *ioctl_name,
				     void __user *uptr)
{
	struct vmci_init_blk init_block;
	const struct cred *cred;
	int retval;

	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
		vmci_ioctl_err("error reading init block\n");
		return -EFAULT;
	}

	mutex_lock(&vmci_host_dev->lock);

	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
		vmci_ioctl_err("received VMCI init on initialized handle\n");
		retval = -EINVAL;
		goto out;
	}

	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		vmci_ioctl_err("unsupported VMCI restriction flag\n");
		retval = -EINVAL;
		goto out;
	}

	cred = get_current_cred();
	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 cred);
	put_cred(cred);
	if (IS_ERR(vmci_host_dev->context)) {
		retval = PTR_ERR(vmci_host_dev->context);
		vmci_ioctl_err("error initializing context\n");
		goto out;
	}

	/*
	 * Copy the cid to userlevel; we do this to allow the VMX
	 * to enforce its policy on cid generation.
	 */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
		vmci_ctx_destroy(vmci_host_dev->context);
		vmci_host_dev->context = NULL;
		vmci_ioctl_err("error writing init block\n");
		retval = -EFAULT;
		goto out;
	}

	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
	atomic_inc(&vmci_host_active_users);

	retval = 0;

out:
	mutex_unlock(&vmci_host_dev->lock);
	return retval;
}

static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
				      const char *ioctl_name,
				      void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info send_info;
	struct vmci_datagram *dg = NULL;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
		return -EFAULT;

	if (send_info.len > VMCI_MAX_DG_SIZE) {
		vmci_ioctl_err("datagram is too big (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	if (send_info.len < sizeof(*dg)) {
		vmci_ioctl_err("datagram is too small (size=%d)\n",
			       send_info.len);
		return -EINVAL;
	}

	dg = memdup_user((void __user *)(uintptr_t)send_info.addr,
			 send_info.len);
	if (IS_ERR(dg)) {
		vmci_ioctl_err("cannot allocate memory to dispatch datagram\n");
		return PTR_ERR(dg);
	}

	if (VMCI_DG_SIZE(dg) != send_info.len) {
		vmci_ioctl_err("datagram size mismatch\n");
		kfree(dg);
		return -EINVAL;
	}

	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
		 dg->dst.context, dg->dst.resource,
		 dg->src.context, dg->src.resource,
		 (unsigned long long)dg->payload_size);

	/* Get the source context id. */
	cid = vmci_ctx_get_id(vmci_host_dev->context);
	send_info.result = vmci_datagram_dispatch(cid, dg, true);
	kfree(dg);

	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
}

static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
					 const char *ioctl_name,
					 void __user *uptr)
{
	struct vmci_datagram_snd_rcv_info recv_info;
	struct vmci_datagram *dg = NULL;
	int retval;
	size_t size;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
		return -EFAULT;

	size = recv_info.len;
	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
						     &size, &dg);

	if (recv_info.result >= VMCI_SUCCESS) {
		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;

		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
		kfree(dg);
		if (retval != 0)
			return -EFAULT;
	}

	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ?
			-EFAULT : 0;
}
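/*
 * Queue pair allocation comes in two ABI flavors: VMX versions older
 * than VMCI_VERSION_NOVMVM pass a struct vmci_qp_alloc_info_vmvm with no
 * page store (the pages are supplied later via SETPAGEFILE), while newer
 * VMXen pass a struct vmci_qp_alloc_info that carries the PPN set up
 * front.
 */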
static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_handle handle;
	int vmci_status;
	int __user *retptr;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		struct vmci_qp_alloc_info_vmvm alloc_info;
		struct vmci_qp_alloc_info_vmvm __user *info = uptr;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   NULL,
						   vmci_host_dev->context);

		if (vmci_status == VMCI_SUCCESS)
			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
	} else {
		struct vmci_qp_alloc_info alloc_info;
		struct vmci_qp_alloc_info __user *info = uptr;
		struct vmci_qp_page_store page_store;

		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
			return -EFAULT;

		handle = alloc_info.handle;
		retptr = &info->result;

		page_store.pages = alloc_info.ppn_va;
		page_store.len = alloc_info.num_ppns;

		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
						   alloc_info.peer,
						   alloc_info.flags,
						   VMCI_NO_PRIVILEGE_FLAGS,
						   alloc_info.produce_size,
						   alloc_info.consume_size,
						   &page_store,
						   vmci_host_dev->context);
	}

	if (put_user(vmci_status, retptr)) {
		if (vmci_status >= VMCI_SUCCESS) {
			vmci_status = vmci_qp_broker_detach(handle,
							vmci_host_dev->context);
		}
		return -EFAULT;
	}

	return 0;
}

static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_set_va_info set_va_info;
	struct vmci_qp_set_va_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("is not allowed\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
		return -EFAULT;

	if (set_va_info.va) {
		/*
		 * VMX is passing down a new VA for the queue
		 * pair mapping.
		 */
		result = vmci_qp_broker_map(set_va_info.handle,
					    vmci_host_dev->context,
					    set_va_info.va);
	} else {
		/*
		 * The queue pair is about to be unmapped by
		 * the VMX.
		 */
		result = vmci_qp_broker_unmap(set_va_info.handle,
					      vmci_host_dev->context, 0);
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}
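/*
 * Note that QUEUEPAIR_SETVA (above) and QUEUEPAIR_SETPAGEFILE (below)
 * are mutually exclusive by version: SETVA is rejected for VMX versions
 * older than VMCI_VERSION_NOVMVM, while SETPAGEFILE is only accepted in
 * the [VMCI_VERSION_HOSTQP, VMCI_VERSION_NOVMVM) window.
 */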
static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_qp_page_file_info page_file_info;
	struct vmci_qp_page_file_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
			       vmci_host_dev->user_version);
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
		return -EFAULT;

	/*
	 * Communicate success pre-emptively to the caller. Note that the
	 * basic premise is that it is incumbent upon the caller not to look at
	 * the info.result field until after the ioctl() returns. And then,
	 * only if the ioctl() result indicates no error. We send up the
	 * SUCCESS status before calling SetPageStore() because failing
	 * to copy up the result code would mean unwinding the SetPageStore().
	 *
	 * It turns out the logic to unwind a SetPageStore() opens a can of
	 * worms. For example, if a host had created the queue_pair and a
	 * guest attaches and SetPageStore() is successful but writing success
	 * fails, then ... the host has to be stopped from writing (any more)
	 * data into the queue_pair. That means an additional test in the
	 * VMCI_Enqueue() code path. Ugh.
	 */

	if (put_user(VMCI_SUCCESS, &info->result)) {
		/*
		 * In this case, we can't write to the result field of the
		 * caller's info block. So, we don't even try to
		 * SetPageStore().
		 */
		return -EFAULT;
	}

	result = vmci_qp_broker_set_page_store(page_file_info.handle,
					       page_file_info.produce_va,
					       page_file_info.consume_va,
					       vmci_host_dev->context);
	if (result < VMCI_SUCCESS) {
		if (put_user(result, &info->result)) {
			/*
			 * Note that in this case the SetPageStore()
			 * call failed but we were unable to
			 * communicate that to the caller (because the
			 * copy_to_user() call failed). So, if we
			 * simply return an error (in this case
			 * -EFAULT) then the caller will know that the
			 * SetPageStore failed even though we couldn't
			 * put the result code in the result field and
			 * indicate exactly why it failed.
			 *
			 * That says nothing about the issue where we
			 * were once able to write to the caller's info
			 * memory and now can't. Something more
			 * serious is probably going on than the fact
			 * that SetPageStore() didn't work.
			 */
			return -EFAULT;
		}
	}

	return 0;
}

static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
				  const char *ioctl_name,
				  void __user *uptr)
{
	struct vmci_qp_dtch_info detach_info;
	struct vmci_qp_dtch_info __user *info = uptr;
	s32 result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
		return -EFAULT;

	result = vmci_qp_broker_detach(detach_info.handle,
				       vmci_host_dev->context);
	if (result == VMCI_SUCCESS &&
	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
		result = VMCI_SUCCESS_LAST_DETACH;
	}

	return put_user(result, &info->result) ? -EFAULT : 0;
}
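/*
 * The next pair of handlers maintains the context's notifier set: the
 * calling VMX registers (or unregisters) interest in events about the
 * given remote context ID, keyed by its own cid.
 */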
static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	s32 result;
	u32 cid;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_info ar_info;
	struct vmci_ctx_info __user *info = uptr;
	u32 cid;
	int result;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	result = vmci_ctx_remove_notification(cid,
					      ar_info.remote_cid);

	return put_user(result, &info->result) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info get_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
						   &get_info.buf_size,
						   &cpt_buf);
	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;

		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
		kfree(cpt_buf);

		if (retval)
			return -EFAULT;
	}

	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
}

static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
					  const char *ioctl_name,
					  void __user *uptr)
{
	struct vmci_ctx_chkpt_buf_info set_info;
	u32 cid;
	void *cpt_buf;
	int retval;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
		return -EFAULT;

	cpt_buf = memdup_user((void __user *)(uintptr_t)set_info.cpt_buf,
			      set_info.buf_size);
	if (IS_ERR(cpt_buf))
		return PTR_ERR(cpt_buf);

	cid = vmci_ctx_get_id(vmci_host_dev->context);
	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
						   set_info.buf_size, cpt_buf);

	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;

	kfree(cpt_buf);
	return retval;
}

static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
				       const char *ioctl_name,
				       void __user *uptr)
{
	u32 __user *u32ptr = uptr;

	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
}
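/*
 * IOCTL_VMCI_SET_NOTIFY is the userspace entry point for the notify
 * byte described above vmci_host_setup_notify(): a non-zero notify_uva
 * registers and maps the flag, while a zero notify_uva tears it down
 * via vmci_ctx_unset_notify().
 */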
static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
				   const char *ioctl_name,
				   void __user *uptr)
{
	struct vmci_set_notify_info notify_info;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
		return -EFAULT;

	if (notify_info.notify_uva) {
		notify_info.result =
			vmci_host_setup_notify(vmci_host_dev->context,
					       notify_info.notify_uva);
	} else {
		vmci_ctx_unset_notify(vmci_host_dev->context);
		notify_info.result = VMCI_SUCCESS;
	}

	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
			-EFAULT : 0;
}

static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
					const char *ioctl_name,
					void __user *uptr)
{
	struct vmci_dbell_notify_resource_info info;
	u32 cid;

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("invalid for current VMX versions\n");
		return -EINVAL;
	}

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	switch (info.action) {
	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;

			info.result = vmci_ctx_notify_dbell(cid, info.handle,
							    flags);
		} else {
			info.result = VMCI_ERROR_UNAVAILABLE;
		}
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
		info.result = vmci_ctx_dbell_create(cid, info.handle);
		break;

	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
		break;

	default:
		vmci_ioctl_err("got unknown action (action=%d)\n",
			       info.action);
		info.result = VMCI_ERROR_INVALID_ARGS;
	}

	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
}
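/*
 * NOTIFICATIONS_RECEIVE drains both pending doorbell and queue-pair
 * handle arrays into two user buffers. Per drv_cp_harray_to_user(), a
 * buffer that is too small yields VMCI_ERROR_MORE_DATA in info.result
 * rather than an ioctl error, and the release step below only consumes
 * the notifications when every copy-out succeeded.
 */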
static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
					   const char *ioctl_name,
					   void __user *uptr)
{
	struct vmci_ctx_notify_recv_info info;
	struct vmci_handle_arr *db_handle_array;
	struct vmci_handle_arr *qp_handle_array;
	void __user *ubuf;
	u32 cid;
	int retval = 0;

	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
		vmci_ioctl_err("only valid for contexts\n");
		return -EINVAL;
	}

	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
		vmci_ioctl_err("not supported for the current vmx version\n");
		return -EINVAL;
	}

	if (copy_from_user(&info, uptr, sizeof(info)))
		return -EFAULT;

	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
		return -EINVAL;
	}

	cid = vmci_ctx_get_id(vmci_host_dev->context);

	info.result = vmci_ctx_rcv_notifications_get(cid,
				&db_handle_array, &qp_handle_array);
	if (info.result != VMCI_SUCCESS)
		return copy_to_user(uptr, &info, sizeof(info)) ?
				-EFAULT : 0;

	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
					    db_handle_array, &retval);
	if (info.result == VMCI_SUCCESS && !retval) {
		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
		info.result = drv_cp_harray_to_user(ubuf,
						    &info.qp_handle_buf_size,
						    qp_handle_array, &retval);
	}

	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
		retval = -EFAULT;

	vmci_ctx_rcv_notifications_release(cid,
				db_handle_array, qp_handle_array,
				info.result == VMCI_SUCCESS && !retval);

	return retval;
}

static long vmci_host_unlocked_ioctl(struct file *filp,
				     unsigned int iocmd, unsigned long ioarg)
{
#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
	char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);		\
	return vmci_host_do_ ## ioctl_fn(				\
			vmci_host_dev, name, uptr);			\
	} while (0)

	struct vmci_host_dev *vmci_host_dev = filp->private_data;
	void __user *uptr = (void __user *)ioarg;

	switch (iocmd) {
	case IOCTL_VMCI_INIT_CONTEXT:
		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
	case IOCTL_VMCI_DATAGRAM_SEND:
		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
	case IOCTL_VMCI_DATAGRAM_RECEIVE:
		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
	case IOCTL_VMCI_QUEUEPAIR_SETVA:
		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
	case IOCTL_VMCI_QUEUEPAIR_DETACH:
		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
	case IOCTL_VMCI_CTX_GET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
	case IOCTL_VMCI_CTX_SET_CPT_STATE:
		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
	case IOCTL_VMCI_GET_CONTEXT_ID:
		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
	case IOCTL_VMCI_SET_NOTIFY:
		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
	case IOCTL_VMCI_NOTIFY_RESOURCE:
		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);

	case IOCTL_VMCI_VERSION:
	case IOCTL_VMCI_VERSION2:
		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);

	default:
		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
		return -EINVAL;
	}

#undef VMCI_DO_IOCTL
}

static const struct file_operations vmuser_fops = {
	.owner		= THIS_MODULE,
	.open		= vmci_host_open,
	.release	= vmci_host_close,
	.poll		= vmci_host_poll,
	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
	.compat_ioctl	= vmci_host_unlocked_ioctl,
};

static struct miscdevice vmci_host_miscdev = {
	.name = "vmci",
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &vmuser_fops,
};
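/*
 * Host personality bring-up: create the well-known host context
 * (VMCI_HOST_CONTEXT_ID) first, then register the misc device. Only
 * when both succeed is vmci_host_device_initialized set, which in turn
 * lets vmci_host_code_active() report the personality as usable.
 */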
int __init vmci_host_init(void)
{
	int error;

	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					-1, VMCI_VERSION, NULL);
	if (IS_ERR(host_context)) {
		error = PTR_ERR(host_context);
		pr_warn("Failed to initialize VMCIContext (error=%d)\n",
			error);
		return error;
	}

	error = misc_register(&vmci_host_miscdev);
	if (error) {
		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
			vmci_host_miscdev.name,
			MISC_MAJOR, vmci_host_miscdev.minor,
			error);
		pr_warn("Unable to initialize host personality\n");
		vmci_ctx_destroy(host_context);
		return error;
	}

	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);

	vmci_host_device_initialized = true;
	return 0;
}

void __exit vmci_host_exit(void)
{
	vmci_host_device_initialized = false;

	misc_deregister(&vmci_host_miscdev);
	vmci_ctx_destroy(host_context);
	vmci_qp_broker_exit();

	pr_debug("VMCI host driver module unloaded\n");
}