/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
 *
 * Copyright (C) 2007-2016 Oracle Corporation
 */

#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"

/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
	((struct vmmdev_hgcm_function_parameter *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
	((struct vmmdev_hgcm_function_parameter32 *)( \
		(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))

#define GUEST_MAPPINGS_TRIES	5

/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times.
 *
 * Failure to reserve the guest mappings is ignored.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	void *guest_mappings[GUEST_MAPPINGS_TRIES];
	struct page **pages = NULL;
	u32 size, hypervisor_size;
	int i, rc;

	/* Query the required space. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		goto out;

	/*
	 * The VMM will report back if there is nothing it wants to map, like
	 * for instance in VT-x and AMD-V mode.
	 */
	if (req->hypervisor_size == 0)
		goto out;

	hypervisor_size = req->hypervisor_size;
	/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
	size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;

	pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
	if (!pages)
		goto out;

	gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
	if (!gdev->guest_mappings_dummy_page)
		goto out;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = gdev->guest_mappings_dummy_page;

	/*
	 * Try several times, the VMM might not accept some addresses because
	 * of address clashes between the three contexts.
	 */
	for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
		guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
					 VM_MAP, PAGE_KERNEL_RO);
		if (!guest_mappings[i])
			break;

		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		req->hypervisor_start =
			(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);

		rc = vbg_req_perform(gdev, req);
		if (rc >= 0) {
			gdev->guest_mappings = guest_mappings[i];
			break;
		}
	}

	/* Free vmap's from failed attempts. */
	while (--i >= 0)
		vunmap(guest_mappings[i]);

	/* On failure free the dummy-page backing the vmap */
	if (!gdev->guest_mappings) {
		__free_page(gdev->guest_mappings_dummy_page);
		gdev->guest_mappings_dummy_page = NULL;
	}

out:
	vbg_req_free(req, sizeof(*req));
	kfree(pages);
}

/**
 * Undo what vbg_guest_mappings_init did.
 *
 * @gdev:		The Guest extension device.
 */
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
	struct vmmdev_hypervisorinfo *req;
	int rc;

	if (!gdev->guest_mappings)
		return;

	/*
	 * Tell the host that we're going to free the memory we reserved for
	 * it, then free it up. (Leak the memory if anything goes wrong here.)
	 */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;

	rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));

	if (rc < 0) {
		vbg_err("%s error: %d\n", __func__, rc);
		return;
	}

	vunmap(gdev->guest_mappings);
	gdev->guest_mappings = NULL;

	__free_page(gdev->guest_mappings_dummy_page);
	gdev->guest_mappings_dummy_page = NULL;
}

/**
 * Report the guest information to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
	/*
	 * Allocate and fill in the two guest info reports.
	 */
	struct vmmdev_guest_info *req1 = NULL;
	struct vmmdev_guest_info2 *req2 = NULL;
	int rc, ret = -ENOMEM;

	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
	if (!req1 || !req2)
		goto out_free;

	req1->interface_version = VMMDEV_VERSION;
	req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
	req1->os_type |= VMMDEV_OSTYPE_X64;
#endif

	req2->additions_major = VBG_VERSION_MAJOR;
	req2->additions_minor = VBG_VERSION_MINOR;
	req2->additions_build = VBG_VERSION_BUILD;
	req2->additions_revision = VBG_SVN_REV;
	/* (no features defined yet) */
	req2->additions_features = 0;
	strlcpy(req2->name, VBG_VERSION_STRING,
		sizeof(req2->name));

	/*
	 * There are two protocols here:
	 *	1. INFO2 + INFO1. Supported by >=3.2.51.
	 *	2. INFO1 and optionally INFO2. The old protocol.
	 *
	 * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
	 * if not supported by the VMMDev (message ordering requirement).
	 */
	rc = vbg_req_perform(gdev, req2);
	if (rc >= 0) {
		rc = vbg_req_perform(gdev, req1);
	} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
		rc = vbg_req_perform(gdev, req1);
		if (rc >= 0) {
			rc = vbg_req_perform(gdev, req2);
			if (rc == VERR_NOT_IMPLEMENTED)
				rc = VINF_SUCCESS;
		}
	}
	ret = vbg_status_code_to_errno(rc);

out_free:
	vbg_req_free(req2, sizeof(*req2));
	vbg_req_free(req1, sizeof(*req1));
	return ret;
}

/**
 * Report the guest driver status to the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @active:		Flag whether the driver is now active or not.
 */
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
	struct vmmdev_guest_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
	if (!req)
		return -ENOMEM;

	req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
	if (active)
		req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
	else
		req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
	req->flags = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
		rc = VINF_SUCCESS;

	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}

/**
 * Inflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages;
	int i, rc, ret;

	pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
			GFP_KERNEL | __GFP_NOWARN);
	if (!pages)
		return -ENOMEM;

	req->header.size = sizeof(*req);
	req->inflate = true;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
		if (!pages[i]) {
			ret = -ENOMEM;
			goto out_error;
		}

		req->phys_page[i] = page_to_phys(pages[i]);
	}

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		ret = vbg_status_code_to_errno(rc);
		goto out_error;
	}

	gdev->mem_balloon.pages[chunk_idx] = pages;

	return 0;

out_error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);

	return ret;
}

/**
 * Deflate the balloon by one chunk. The caller owns the balloon mutex.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @chunk_idx:		Index of the chunk.
 */
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	struct page **pages = gdev->mem_balloon.pages[chunk_idx];
	int i, rc;

	req->header.size = sizeof(*req);
	req->inflate = false;
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return vbg_status_code_to_errno(rc);
	}

	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		__free_page(pages[i]);
	kfree(pages);
	gdev->mem_balloon.pages[chunk_idx] = NULL;

	return 0;
}
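
/*
 * Worked example for the balloon worker below (illustrative numbers, not
 * from the original source): if the host asks for 2 chunks while
 * gdev->mem_balloon.chunks is 5, the worker calls vbg_balloon_deflate()
 * for chunks 4, 3 and 2, decrementing gdev->mem_balloon.chunks after each
 * successful call; growing the balloon works the same way with
 * vbg_balloon_inflate(), one chunk at a time.
 */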

/**
 * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
 * the host wants the balloon to be and adjust accordingly.
 */
static void vbg_balloon_work(struct work_struct *work)
{
	struct vbg_dev *gdev =
		container_of(work, struct vbg_dev, mem_balloon.work);
	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 i, chunks;
	int rc, ret;

	/*
	 * Setting this bit means that we request the value from the host and
	 * change the guest memory balloon according to the returned value.
	 */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("%s error, rc: %d\n", __func__, rc);
		return;
	}

	/*
	 * The host always returns the same maximum amount of chunks, so
	 * we do this once.
	 */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;

		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;
	if (chunks > gdev->mem_balloon.max_chunks) {
		vbg_err("%s: illegal balloon size %u (max=%u)\n",
			__func__, chunks, gdev->mem_balloon.max_chunks);
		return;
	}

	if (chunks > gdev->mem_balloon.chunks) {
		/* inflate */
		for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
			ret = vbg_balloon_inflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks++;
		}
	} else {
		/* deflate */
		for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
			ret = vbg_balloon_deflate(gdev, i);
			if (ret < 0)
				return;

			gdev->mem_balloon.chunks--;
		}
	}
}

/**
 * Callback for heartbeat timer.
 */
static void vbg_heartbeat_timer(struct timer_list *t)
{
	struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);

	vbg_req_perform(gdev, gdev->guest_heartbeat_req);
	mod_timer(&gdev->heartbeat_timer,
		  msecs_to_jiffies(gdev->heartbeat_interval_ms));
}

/**
 * Configure the host to check guest's heartbeat
 * and get heartbeat interval from the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @enabled:		Set true to enable guest heartbeat checks on host.
 */
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
	struct vmmdev_heartbeat *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
	if (!req)
		return -ENOMEM;

	req->enabled = enabled;
	req->interval_ns = 0;
	rc = vbg_req_perform(gdev, req);
	do_div(req->interval_ns, 1000000); /* ns -> ms */
	gdev->heartbeat_interval_ms = req->interval_ns;
	vbg_req_free(req, sizeof(*req));

	return vbg_status_code_to_errno(rc);
}

/**
 * Initializes the heartbeat timer. This feature may be disabled by the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
	int ret;

	/* Make sure that heartbeat checking is disabled if we fail. */
	ret = vbg_heartbeat_host_config(gdev, false);
	if (ret < 0)
		return ret;

	ret = vbg_heartbeat_host_config(gdev, true);
	if (ret < 0)
		return ret;

	gdev->guest_heartbeat_req = vbg_req_alloc(
					sizeof(*gdev->guest_heartbeat_req),
					VMMDEVREQ_GUEST_HEARTBEAT);
	if (!gdev->guest_heartbeat_req)
		return -ENOMEM;

	vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
		 __func__, gdev->heartbeat_interval_ms);
	mod_timer(&gdev->heartbeat_timer, 0);

	return 0;
}

/**
 * Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
 * @gdev:		The Guest extension device.
 */
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
	del_timer_sync(&gdev->heartbeat_timer);
	vbg_heartbeat_host_config(gdev, false);
	vbg_req_free(gdev->guest_heartbeat_req,
		     sizeof(*gdev->guest_heartbeat_req));
}
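
/*
 * Reference-counting example for the tracker below (illustrative): when two
 * sessions enable the same event bit, the first enable raises
 * per_bit_usage[bit] to 1 and sets the bit in tracker->mask (reported as a
 * global change); the second enable merely raises the count to 2. The bit
 * is cleared from tracker->mask again only once the last user drops it.
 */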

/**
 * Applies a change to the bit usage tracker.
 * Return: true if the mask changed, false if not.
 * @tracker:		The bit usage tracker.
 * @changed:		The bits to change.
 * @previous:		The previous value of the bits.
 */
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
				u32 changed, u32 previous)
{
	bool global_change = false;

	while (changed) {
		u32 bit = ffs(changed) - 1;
		u32 bitmask = BIT(bit);

		if (bitmask & previous) {
			tracker->per_bit_usage[bit] -= 1;
			if (tracker->per_bit_usage[bit] == 0) {
				global_change = true;
				tracker->mask &= ~bitmask;
			}
		} else {
			tracker->per_bit_usage[bit] += 1;
			if (tracker->per_bit_usage[bit] == 1) {
				global_change = true;
				tracker->mask |= bitmask;
			}
		}

		changed &= ~bitmask;
	}

	return global_change;
}

/**
 * Init and termination worker for resetting the (host) event filter on the host
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @fixed_events:	Fixed events (init time).
 */
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
				       u32 fixed_events)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX & ~fixed_events;
	req->or_mask = fixed_events;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
 * do session cleanup. Takes the session mutex.
 *
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The events to add.
 * @not_mask:			The events to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/* Allocate a request buffer before taking the mutex. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->event_filter;
	session->event_filter |= or_mask;
	session->event_filter &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->event_filter;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
	or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;

	if (gdev->event_filter_host == or_mask || !req)
		goto out;

	gdev->event_filter_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->event_filter_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
				    session->event_filter);
		session->event_filter = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * Init and termination worker for setting guest capabilities to zero on the host.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
	if (!req)
		return -ENOMEM;

	req->not_mask = U32_MAX;
	req->or_mask = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/**
 * Sets the guest capabilities for a session. Takes the session mutex.
 * Return: 0 or negative errno value.
 * @gdev:			The Guest extension device.
 * @session:			The session.
 * @or_mask:			The capabilities to add.
 * @not_mask:			The capabilities to remove.
 * @session_termination:	Set if we're called by the session cleanup code.
 *				This tweaks the error handling so we perform
 *				proper session cleanup even if the host
 *				misbehaves.
 */
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
					struct vbg_session *session,
					u32 or_mask, u32 not_mask,
					bool session_termination)
{
	struct vmmdev_mask *req;
	u32 changed, previous;
	int rc, ret = 0;

	/* Allocate a request buffer before taking the mutex. */
	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
	if (!req) {
		if (!session_termination)
			return -ENOMEM;
		/* Ignore allocation failure, we must do session cleanup. */
	}

	mutex_lock(&gdev->session_mutex);

	/* Apply the changes to the session mask. */
	previous = session->guest_caps;
	session->guest_caps |= or_mask;
	session->guest_caps &= ~not_mask;

	/* If anything actually changed, update the global usage counters. */
	changed = previous ^ session->guest_caps;
	if (!changed)
		goto out;

	vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
	or_mask = gdev->guest_caps_tracker.mask;

	if (gdev->guest_caps_host == or_mask || !req)
		goto out;

	gdev->guest_caps_host = or_mask;
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		ret = vbg_status_code_to_errno(rc);

		/* Failed, roll back (unless it's session termination time). */
		gdev->guest_caps_host = U32_MAX;
		if (session_termination)
			goto out;

		vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
				    session->guest_caps);
		session->guest_caps = previous;
	}

out:
	mutex_unlock(&gdev->session_mutex);
	vbg_req_free(req, sizeof(*req));

	return ret;
}

/**
 * vbg_query_host_version gets the host feature mask and version information.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 */
static int vbg_query_host_version(struct vbg_dev *gdev)
{
	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret) {
		vbg_err("%s error: %d\n", __func__, rc);
		goto out;
	}

	snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
		 req->major, req->minor, req->build, req->revision);
	gdev->host_features = req->features;

	vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
		 gdev->host_features);

	if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
		vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
		ret = -ENODEV;
	}

out:
	vbg_req_free(req, sizeof(*req));
	return ret;
}
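
/*
 * Rough usage sketch (an assumption about the bus-specific code, not a copy
 * of vboxguest_linux.c): the probe code locates the VMMDev PCI device, fills
 * in the I/O port and MMIO details of the vbg_dev structure, calls
 * vbg_core_init(), installs vbg_core_isr() as the interrupt handler once
 * that succeeds, and calls vbg_core_exit() again on driver removal.
 */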

/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * Return: 0 or negative errno value.
 *
 * @gdev:		The Guest extension device.
 * @fixed_events:	Events that will be enabled upon init and no client
 *			will ever be allowed to mask.
 */
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
	int ret = -ENOMEM;

	gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
	gdev->event_filter_host = U32_MAX;	/* forces a report */
	gdev->guest_caps_host = U32_MAX;	/* forces a report */

	init_waitqueue_head(&gdev->event_wq);
	init_waitqueue_head(&gdev->hgcm_wq);
	spin_lock_init(&gdev->event_spinlock);
	mutex_init(&gdev->session_mutex);
	mutex_init(&gdev->cancel_req_mutex);
	timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
	INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);

	gdev->mem_balloon.get_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
	gdev->mem_balloon.change_req =
		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
			      VMMDEVREQ_CHANGE_MEMBALLOON);
	gdev->cancel_req =
		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
			      VMMDEVREQ_HGCM_CANCEL2);
	gdev->ack_events_req =
		vbg_req_alloc(sizeof(*gdev->ack_events_req),
			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
	gdev->mouse_status_req =
		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
			      VMMDEVREQ_GET_MOUSE_STATUS);

	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
	    !gdev->cancel_req || !gdev->ack_events_req ||
	    !gdev->mouse_status_req)
		goto err_free_reqs;

	ret = vbg_query_host_version(gdev);
	if (ret)
		goto err_free_reqs;

	ret = vbg_report_guest_info(gdev);
	if (ret) {
		vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
	if (ret) {
		vbg_err("vboxguest: Error setting fixed event filter: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_reset_host_capabilities(gdev);
	if (ret) {
		vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
			ret);
		goto err_free_reqs;
	}

	ret = vbg_core_set_mouse_status(gdev, 0);
	if (ret) {
		vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
		goto err_free_reqs;
	}

	/* These may fail without requiring the driver init to fail. */
	vbg_guest_mappings_init(gdev);
	vbg_heartbeat_init(gdev);

	/* All Done! */
	ret = vbg_report_driver_status(gdev, true);
	if (ret < 0)
		vbg_err("vboxguest: Error reporting driver status: %d\n", ret);

	return 0;

err_free_reqs:
	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
	return ret;
}

/**
 * Call this on exit to clean-up vboxguest-core managed resources.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 * @gdev:		The Guest extension device.
 */
void vbg_core_exit(struct vbg_dev *gdev)
{
	vbg_heartbeat_exit(gdev);
	vbg_guest_mappings_exit(gdev);

	/* Clear the host flags (mouse status etc). */
	vbg_reset_host_event_filter(gdev, 0);
	vbg_reset_host_capabilities(gdev);
	vbg_core_set_mouse_status(gdev, 0);

	vbg_req_free(gdev->mouse_status_req,
		     sizeof(*gdev->mouse_status_req));
	vbg_req_free(gdev->ack_events_req,
		     sizeof(*gdev->ack_events_req));
	vbg_req_free(gdev->cancel_req,
		     sizeof(*gdev->cancel_req));
	vbg_req_free(gdev->mem_balloon.change_req,
		     sizeof(*gdev->mem_balloon.change_req));
	vbg_req_free(gdev->mem_balloon.get_req,
		     sizeof(*gdev->mem_balloon.get_req));
}

/**
 * Creates a VBoxGuest user session.
 *
 * vboxguest_linux.c calls this when userspace opens the char-device.
 * Return: A pointer to the new session or an ERR_PTR on error.
 * @gdev:		The Guest extension device.
 * @user:		Set if this is a session for the vboxuser device.
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->user_session = user;

	return session;
}

/**
 * Closes a VBoxGuest session.
 * @session:		The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}

static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}

static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1 = 0;
	info->u.out.reserved2 = 0;

	return 0;
}

static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}

/* Must be called with the event_spinlock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}

static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			   vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}

static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}

/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @session:		The calling session.
 * @req:		The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted users apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	if (trusted_apps_only && session->user_session) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}

static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}
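
/*
 * Note on the hgcm_client_ids[] bookkeeping used below: a free slot holds 0,
 * a slot that is being connected or disconnected temporarily holds U32_MAX
 * as a placeholder (so no other caller can claim or reuse it while the
 * session mutex is dropped for the host round-trip), and a fully connected
 * slot holds the real, non-zero client id returned by the host.
 */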

static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the session's clients array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
			       &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}
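
/*
 * The HGCM call ioctl buffer handled below is laid out as a single blob:
 * a struct vbg_ioctl_hgcm_call header immediately followed by parm_count
 * parameter structs, which is what the VBG_IOCTL_HGCM_CALL_PARMS() and
 * VBG_IOCTL_HGCM_CALL_PARMS32() macros at the top of this file assume
 * when computing the address of the first parameter.
 */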

static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (f32bit)
		ret = vbg_hgcm_call32(gdev, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}

static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->u.in.msg);

	return 0;
}

static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}

static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}

static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}

static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	vbg_req_free(req, sizeof(*req));
	return 0;
}
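
/*
 * Dispatch note for vbg_core_ioctl() below: fixed-size requests are matched
 * on the full ioctl number, while variable-sized ones (VMMDEV_REQUEST,
 * HGCM_CALL, LOG) are matched with the size bits masked out via
 * ~IOCSIZE_MASK, since their encoded size depends on the payload length.
 */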

/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session:	The client session.
 * @req:	The requested function.
 * @data:	The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
		/* Fall through */
#endif
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}

/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:		The Guest extension device.
 * @features:		The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
	return vbg_status_code_to_errno(rc);
}

/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}