// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>

#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

/* Use a wide upper bound for the maximum contexts. */
#define VMCI_MAX_CONTEXTS 2000

/*
 * List of current VMCI contexts. Contexts can be added by
 * vmci_ctx_create() and removed via vmci_ctx_destroy().
 * These, along with context lookup, are protected by the
 * list structure's lock.  Readers may also traverse the list
 * under RCU (see vmci_ctx_get()/vmci_ctx_exists()).
 */
static struct {
	struct list_head head;
	spinlock_t lock; /* Spinlock for context list operations */
} ctx_list = {
	.head = LIST_HEAD_INIT(ctx_list.head),
	.lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
};

/* Used by contexts that did not set up notify flag pointers */
static bool ctx_dummy_notify;

/*
 * Raise the guest-visible notify flag.  All callers in this file hold
 * context->lock when calling this.
 */
static void ctx_signal_notify(struct vmci_ctx *context)
{
	*context->notify = true;
}

/* Lower the guest-visible notify flag.  Called with context->lock held. */
static void ctx_clear_notify(struct vmci_ctx *context)
{
	*context->notify = false;
}

/*
 * If nothing requires the attention of the guest, clears both
 * notify flag and call.  Called with context->lock held.
 */
static void ctx_clear_notify_call(struct vmci_ctx *context)
{
	if (context->pending_datagrams == 0 &&
	    vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
		ctx_clear_notify(context);
}

/*
 * Sets the context's notify flag iff datagrams are pending for this
 * context. Called from vmci_setup_notify().
68 */ 69 void vmci_ctx_check_signal_notify(struct vmci_ctx *context) 70 { 71 spin_lock(&context->lock); 72 if (context->pending_datagrams) 73 ctx_signal_notify(context); 74 spin_unlock(&context->lock); 75 } 76 77 /* 78 * Allocates and initializes a VMCI context. 79 */ 80 struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags, 81 uintptr_t event_hnd, 82 int user_version, 83 const struct cred *cred) 84 { 85 struct vmci_ctx *context; 86 int error; 87 88 if (cid == VMCI_INVALID_ID) { 89 pr_devel("Invalid context ID for VMCI context\n"); 90 error = -EINVAL; 91 goto err_out; 92 } 93 94 if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) { 95 pr_devel("Invalid flag (flags=0x%x) for VMCI context\n", 96 priv_flags); 97 error = -EINVAL; 98 goto err_out; 99 } 100 101 if (user_version == 0) { 102 pr_devel("Invalid suer_version %d\n", user_version); 103 error = -EINVAL; 104 goto err_out; 105 } 106 107 context = kzalloc(sizeof(*context), GFP_KERNEL); 108 if (!context) { 109 pr_warn("Failed to allocate memory for VMCI context\n"); 110 error = -ENOMEM; 111 goto err_out; 112 } 113 114 kref_init(&context->kref); 115 spin_lock_init(&context->lock); 116 INIT_LIST_HEAD(&context->list_item); 117 INIT_LIST_HEAD(&context->datagram_queue); 118 INIT_LIST_HEAD(&context->notifier_list); 119 120 /* Initialize host-specific VMCI context. 
*/ 121 init_waitqueue_head(&context->host_context.wait_queue); 122 123 context->queue_pair_array = 124 vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT); 125 if (!context->queue_pair_array) { 126 error = -ENOMEM; 127 goto err_free_ctx; 128 } 129 130 context->doorbell_array = 131 vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); 132 if (!context->doorbell_array) { 133 error = -ENOMEM; 134 goto err_free_qp_array; 135 } 136 137 context->pending_doorbell_array = 138 vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); 139 if (!context->pending_doorbell_array) { 140 error = -ENOMEM; 141 goto err_free_db_array; 142 } 143 144 context->user_version = user_version; 145 146 context->priv_flags = priv_flags; 147 148 if (cred) 149 context->cred = get_cred(cred); 150 151 context->notify = &ctx_dummy_notify; 152 context->notify_page = NULL; 153 154 /* 155 * If we collide with an existing context we generate a new 156 * and use it instead. The VMX will determine if regeneration 157 * is okay. Since there isn't 4B - 16 VMs running on a given 158 * host, the below loop will terminate. 159 */ 160 spin_lock(&ctx_list.lock); 161 162 while (vmci_ctx_exists(cid)) { 163 /* We reserve the lowest 16 ids for fixed contexts. */ 164 cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1; 165 if (cid == VMCI_INVALID_ID) 166 cid = VMCI_RESERVED_CID_LIMIT; 167 } 168 context->cid = cid; 169 170 list_add_tail_rcu(&context->list_item, &ctx_list.head); 171 spin_unlock(&ctx_list.lock); 172 173 return context; 174 175 err_free_db_array: 176 vmci_handle_arr_destroy(context->doorbell_array); 177 err_free_qp_array: 178 vmci_handle_arr_destroy(context->queue_pair_array); 179 err_free_ctx: 180 kfree(context); 181 err_out: 182 return ERR_PTR(error); 183 } 184 185 /* 186 * Destroy VMCI context. 
 */
void vmci_ctx_destroy(struct vmci_ctx *context)
{
	/* Unlink from the global list, then wait out any RCU readers. */
	spin_lock(&ctx_list.lock);
	list_del_rcu(&context->list_item);
	spin_unlock(&ctx_list.lock);
	synchronize_rcu();

	/* Drop the creation reference; may free via ctx_free_ctx(). */
	vmci_ctx_put(context);
}

/*
 * Fire notification for all contexts interested in given cid.
 * Returns VMCI_SUCCESS, or VMCI_ERROR_NO_MEM if the scratch array
 * cannot be allocated; individual delivery failures are logged and
 * skipped.
 */
static int ctx_fire_notification(u32 context_id, u32 priv_flags)
{
	u32 i, array_size;
	struct vmci_ctx *sub_ctx;
	struct vmci_handle_arr *subscriber_array;
	struct vmci_handle context_handle =
		vmci_make_handle(context_id, VMCI_EVENT_HANDLER);

	/*
	 * We create an array to hold the subscribers we find when
	 * scanning through all contexts.
	 */
	subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
	if (subscriber_array == NULL)
		return VMCI_ERROR_NO_MEM;

	/*
	 * Scan all contexts to find who is interested in being
	 * notified about given contextID.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
		struct vmci_handle_list *node;

		/*
		 * We only deliver notifications of the removal of
		 * contexts, if the two contexts are allowed to
		 * interact.
		 */
		if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
			continue;

		list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
			if (!vmci_handle_is_equal(node->handle, context_handle))
				continue;

			vmci_handle_arr_append_entry(&subscriber_array,
					vmci_make_handle(sub_ctx->cid,
							 VMCI_EVENT_HANDLER));
		}
	}
	rcu_read_unlock();

	/* Fire event to all subscribers, outside the RCU read section. */
	array_size = vmci_handle_arr_get_size(subscriber_array);
	for (i = 0; i < array_size; i++) {
		int result;
		struct vmci_event_ctx ev;

		ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
		ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
						  VMCI_CONTEXT_RESOURCE_ID);
		ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
		ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
		ev.payload.context_id = context_id;

		result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
						&ev.msg.hdr, false);
		if (result < VMCI_SUCCESS) {
			pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
				 ev.msg.event_data.event,
				 ev.msg.hdr.dst.context);
			/* We continue to enqueue on next subscriber. */
		}
	}
	vmci_handle_arr_destroy(subscriber_array);

	return VMCI_SUCCESS;
}

/*
 * Returns the current number of pending datagrams. The call may
 * also serve as a synchronization point for the datagram queue,
 * as no enqueue operations can occur concurrently.
 */
int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
{
	struct vmci_ctx *context;

	context = vmci_ctx_get(cid);
	if (context == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	spin_lock(&context->lock);
	if (pending)
		*pending = context->pending_datagrams;
	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return VMCI_SUCCESS;
}

/*
 * Queues a VMCI datagram for the appropriate target VM context.
 */
int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
{
	struct vmci_datagram_queue_entry *dq_entry;
	struct vmci_ctx *context;
	struct vmci_handle dg_src;
	size_t vmci_dg_size;

	vmci_dg_size = VMCI_DG_SIZE(dg);
	if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
		pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size);
		return VMCI_ERROR_INVALID_ARGS;
	}

	/* Get the target VM's VMCI context. */
	context = vmci_ctx_get(cid);
	if (!context) {
		pr_devel("Invalid context (ID=0x%x)\n", cid);
		return VMCI_ERROR_INVALID_ARGS;
	}

	/* Allocate guest call entry and add it to the target VM's queue. */
	dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
	if (dq_entry == NULL) {
		pr_warn("Failed to allocate memory for datagram\n");
		vmci_ctx_put(context);
		return VMCI_ERROR_NO_MEM;
	}
	dq_entry->dg = dg;
	dq_entry->dg_size = vmci_dg_size;
	dg_src = dg->src;
	INIT_LIST_HEAD(&dq_entry->list_item);

	spin_lock(&context->lock);

	/*
	 * We put a higher limit on datagrams from the hypervisor.  If
	 * the pending datagram is not from hypervisor, then we check
	 * if enqueueing it would exceed the
	 * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination.  If
	 * the pending datagram is from hypervisor, we allow it to be
	 * queued at the destination side provided we don't reach the
	 * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
	 */
	if (context->datagram_queue_size + vmci_dg_size >=
		 VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
	    (!vmci_handle_is_equal(dg_src,
				vmci_make_handle
				(VMCI_HYPERVISOR_CONTEXT_ID,
				 VMCI_CONTEXT_RESOURCE_ID)) ||
	     context->datagram_queue_size + vmci_dg_size >=
		 VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
		spin_unlock(&context->lock);
		vmci_ctx_put(context);
		kfree(dq_entry);
		pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
		return VMCI_ERROR_NO_RESOURCES;
	}

	list_add(&dq_entry->list_item, &context->datagram_queue);
	context->pending_datagrams++;
	context->datagram_queue_size += vmci_dg_size;
	ctx_signal_notify(context);
	wake_up(&context->host_context.wait_queue);
	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	/* On success, the enqueued datagram's size is returned. */
	return vmci_dg_size;
}

/*
 * Verifies whether a context with the specified context ID exists.
 * FIXME: utility is dubious as no decisions can be reliably made
 * using this data as context can appear and disappear at any time.
 */
bool vmci_ctx_exists(u32 cid)
{
	struct vmci_ctx *context;
	bool exists = false;

	rcu_read_lock();

	list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
		if (context->cid == cid) {
			exists = true;
			break;
		}
	}

	rcu_read_unlock();
	return exists;
}

/*
 * Retrieves VMCI context corresponding to the given cid.
 * Returns the context with an extra reference held, or NULL if no
 * such context exists.  The caller must release the reference with
 * vmci_ctx_put().
 */
struct vmci_ctx *vmci_ctx_get(u32 cid)
{
	struct vmci_ctx *c, *context = NULL;

	if (cid == VMCI_INVALID_ID)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
		if (c->cid == cid) {
			/*
			 * The context owner drops its own reference to the
			 * context only after removing it from the list and
			 * waiting for RCU grace period to expire.  This
			 * means that we are not about to increase the
			 * reference count of something that is in the
			 * process of being destroyed.
			 */
			context = c;
			kref_get(&context->kref);
			break;
		}
	}
	rcu_read_unlock();

	return context;
}

/*
 * Deallocates all parts of a context data structure. This
 * function doesn't lock the context, because it assumes that
 * the caller was holding the last reference to context.
 */
static void ctx_free_ctx(struct kref *kref)
{
	struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
	struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
	struct vmci_handle temp_handle;
	struct vmci_handle_list *notifier, *tmp;

	/*
	 * Fire event to all contexts interested in knowing this
	 * context is dying.
	 */
	ctx_fire_notification(context->cid, context->priv_flags);

	/*
	 * Cleanup all queue pair resources attached to context.  If
	 * the VM dies without cleaning up, this code will make sure
	 * that no resources are leaked.
	 */
	temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
	while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
		if (vmci_qp_broker_detach(temp_handle,
					  context) < VMCI_SUCCESS) {
			/*
			 * When vmci_qp_broker_detach() succeeds it
			 * removes the handle from the array.  If
			 * detach fails, we must remove the handle
			 * ourselves.
			 */
			vmci_handle_arr_remove_entry(context->queue_pair_array,
						     temp_handle);
		}
		temp_handle =
		    vmci_handle_arr_get_entry(context->queue_pair_array, 0);
	}

	/*
	 * It is fine to destroy this without locking the callQueue, as
	 * this is the only thread having a reference to the context.
	 */
	list_for_each_entry_safe(dq_entry, dq_entry_tmp,
				 &context->datagram_queue, list_item) {
		WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
		list_del(&dq_entry->list_item);
		kfree(dq_entry->dg);
		kfree(dq_entry);
	}

	list_for_each_entry_safe(notifier, tmp,
				 &context->notifier_list, node) {
		list_del(&notifier->node);
		kfree(notifier);
	}

	vmci_handle_arr_destroy(context->queue_pair_array);
	vmci_handle_arr_destroy(context->doorbell_array);
	vmci_handle_arr_destroy(context->pending_doorbell_array);
	vmci_ctx_unset_notify(context);
	if (context->cred)
		put_cred(context->cred);
	kfree(context);
}

/*
 * Drops reference to VMCI context. If this is the last reference to
 * the context it will be deallocated. A context is created with
 * a reference count of one, and on destroy, it is removed from
 * the context list before its reference count is decremented. Thus,
 * if we reach zero, we are sure that nobody else are about to increment
 * it (they need the entry in the context list for that), and so there
 * is no need for locking.
 */
void vmci_ctx_put(struct vmci_ctx *context)
{
	kref_put(&context->kref, ctx_free_ctx);
}

/*
 * Dequeues the next datagram and returns it to caller.
 * The caller passes in a pointer to the max size datagram
 * it can handle and the datagram is only unqueued if the
 * size is less than max_size. If larger, max_size is set to
 * the size of the datagram to give the caller a chance to
 * set up a larger buffer for the guestcall.
 */
int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
			      size_t *max_size,
			      struct vmci_datagram **dg)
{
	struct vmci_datagram_queue_entry *dq_entry;
	struct list_head *list_item;
	int rv;

	/* Dequeue the next datagram entry. */
	spin_lock(&context->lock);
	if (context->pending_datagrams == 0) {
		ctx_clear_notify_call(context);
		spin_unlock(&context->lock);
		pr_devel("No datagrams pending\n");
		return VMCI_ERROR_NO_MORE_DATAGRAMS;
	}

	list_item = context->datagram_queue.next;

	dq_entry =
	    list_entry(list_item, struct vmci_datagram_queue_entry, list_item);

	/* Check size of caller's buffer. */
	if (*max_size < dq_entry->dg_size) {
		*max_size = dq_entry->dg_size;
		spin_unlock(&context->lock);
		pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
			 (u32) *max_size);
		return VMCI_ERROR_NO_MEM;
	}

	list_del(list_item);
	context->pending_datagrams--;
	context->datagram_queue_size -= dq_entry->dg_size;
	if (context->pending_datagrams == 0) {
		ctx_clear_notify_call(context);
		rv = VMCI_SUCCESS;
	} else {
		/*
		 * Return the size of the next datagram.
		 */
		struct vmci_datagram_queue_entry *next_entry;

		list_item = context->datagram_queue.next;
		next_entry =
		    list_entry(list_item, struct vmci_datagram_queue_entry,
			       list_item);

		/*
		 * The following size_t -> int truncation is fine as
		 * the maximum size of a (routable) datagram is 68KB.
		 */
		rv = (int)next_entry->dg_size;
	}
	spin_unlock(&context->lock);

	/* Caller must free datagram. */
	*dg = dq_entry->dg;
	dq_entry->dg = NULL;
	kfree(dq_entry);

	return rv;
}

/*
 * Reverts actions set up by vmci_setup_notify(). Unmaps and unlocks the
 * page mapped/locked by vmci_setup_notify.
 */
void vmci_ctx_unset_notify(struct vmci_ctx *context)
{
	struct page *notify_page;

	/* Swap in the dummy flag under the lock, unmap outside it. */
	spin_lock(&context->lock);

	notify_page = context->notify_page;
	context->notify = &ctx_dummy_notify;
	context->notify_page = NULL;

	spin_unlock(&context->lock);

	if (notify_page) {
		kunmap(notify_page);
		put_page(notify_page);
	}
}

/*
 * Add remote_cid to list of contexts current contexts wants
 * notifications from/about.
 */
int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
{
	struct vmci_ctx *context;
	struct vmci_handle_list *notifier, *n;
	int result;
	bool exists = false;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
		pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
			 context_id, remote_cid);
		result = VMCI_ERROR_DST_UNREACHABLE;
		goto out;
	}

	if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		result = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* Allocate before taking the lock; kmalloc may sleep. */
	notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
	if (!notifier) {
		result = VMCI_ERROR_NO_MEM;
		goto out;
	}

	INIT_LIST_HEAD(&notifier->node);
	notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);

	spin_lock(&context->lock);

	if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
		list_for_each_entry(n, &context->notifier_list, node) {
			if (vmci_handle_is_equal(n->handle, notifier->handle)) {
				exists = true;
				break;
			}
		}

		if (exists) {
			kfree(notifier);
			result = VMCI_ERROR_ALREADY_EXISTS;
		} else {
			list_add_tail_rcu(&notifier->node,
					  &context->notifier_list);
			context->n_notifiers++;
			result = VMCI_SUCCESS;
		}
	} else {
		kfree(notifier);
		result = VMCI_ERROR_NO_MEM;
	}

	spin_unlock(&context->lock);

 out:
	vmci_ctx_put(context);
	return result;
}

/*
 * Remove remote_cid from current context's list of contexts it is
 * interested in getting notifications from/about.
 */
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
{
	struct vmci_ctx *context;
	struct vmci_handle_list *notifier, *tmp;
	struct vmci_handle handle;
	bool found = false;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);

	spin_lock(&context->lock);
	list_for_each_entry_safe(notifier, tmp,
				 &context->notifier_list, node) {
		if (vmci_handle_is_equal(notifier->handle, handle)) {
			list_del_rcu(&notifier->node);
			context->n_notifiers--;
			found = true;
			break;
		}
	}
	spin_unlock(&context->lock);

	/* Free after a grace period; RCU readers may still see the node. */
	if (found)
		kvfree_rcu(notifier);

	vmci_ctx_put(context);

	return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
}

/*
 * Copy the context's notifier context IDs into a newly allocated
 * buffer for checkpointing.  Called with context->lock held (hence
 * the GFP_ATOMIC below).
 */
static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
					u32 *buf_size, void **pbuf)
{
	u32 *notifiers;
	size_t data_size;
	struct vmci_handle_list *entry;
	int i = 0;

	if (context->n_notifiers == 0) {
		*buf_size = 0;
		*pbuf = NULL;
		return VMCI_SUCCESS;
	}

	data_size = context->n_notifiers * sizeof(*notifiers);
	if (*buf_size < data_size) {
		*buf_size = data_size;
		return VMCI_ERROR_MORE_DATA;
	}

	notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
	if (!notifiers)
		return VMCI_ERROR_NO_MEM;

	list_for_each_entry(entry, &context->notifier_list, node)
		notifiers[i++] = entry->handle.context;

	*buf_size = data_size;
	*pbuf = notifiers;
	return VMCI_SUCCESS;
}

/*
 * Copy the context's registered doorbell handles into a newly
 * allocated buffer for checkpointing.  Called with context->lock
 * held.
 */
static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
					u32 *buf_size, void **pbuf)
{
	struct dbell_cpt_state *dbells;
	u32 i, n_doorbells;

	n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
	if (n_doorbells > 0) {
		size_t data_size = n_doorbells * sizeof(*dbells);
		if (*buf_size < data_size) {
			*buf_size = data_size;
			return VMCI_ERROR_MORE_DATA;
		}

		dbells = kzalloc(data_size, GFP_ATOMIC);
		if (!dbells)
			return VMCI_ERROR_NO_MEM;

		for (i = 0; i < n_doorbells; i++)
			dbells[i].handle = vmci_handle_arr_get_entry(
						context->doorbell_array, i);

		*buf_size = data_size;
		*pbuf = dbells;
	} else {
		*buf_size = 0;
		*pbuf = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Get current context's checkpoint state of given type.
 */
int vmci_ctx_get_chkpt_state(u32 context_id,
			     u32 cpt_type,
			     u32 *buf_size,
			     void **pbuf)
{
	struct vmci_ctx *context;
	int result;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);

	switch (cpt_type) {
	case VMCI_NOTIFICATION_CPT_STATE:
		result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
		break;

	case VMCI_WELLKNOWN_CPT_STATE:
		/*
		 * For compatibility with VMX'en with VM to VM communication, we
		 * always return zero wellknown handles.
		 */

		*buf_size = 0;
		*pbuf = NULL;
		result = VMCI_SUCCESS;
		break;

	case VMCI_DOORBELL_CPT_STATE:
		result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
		break;

	default:
		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
		result = VMCI_ERROR_INVALID_ARGS;
		break;
	}

	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return result;
}

/*
 * Set current context's checkpoint state of given type.
 * Only VMCI_NOTIFICATION_CPT_STATE is supported; cpt_buf is then an
 * array of context IDs to re-register notifications for.
 */
int vmci_ctx_set_chkpt_state(u32 context_id,
			     u32 cpt_type,
			     u32 buf_size,
			     void *cpt_buf)
{
	u32 i;
	u32 current_id;
	int result = VMCI_SUCCESS;
	u32 num_ids = buf_size / sizeof(u32);

	if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
		/*
		 * We would end up here if VMX with VM to VM communication
		 * attempts to restore a checkpoint with wellknown handles.
		 */
		pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
		return VMCI_ERROR_OBSOLETE;
	}

	if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
		return VMCI_ERROR_INVALID_ARGS;
	}

	/* Stop at the first failing registration. */
	for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
		current_id = ((u32 *)cpt_buf)[i];
		result = vmci_ctx_add_notification(context_id, current_id);
		if (result != VMCI_SUCCESS)
			break;
	}
	if (result != VMCI_SUCCESS)
		pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
			 cpt_type, result);

	return result;
}

/*
 * Retrieves the specified context's pending notifications in the
 * form of a handle array. The handle arrays returned are the
 * actual data - not a copy and should not be modified by the
 * caller. They must be released using
 * vmci_ctx_rcv_notifications_release.
 */
int vmci_ctx_rcv_notifications_get(u32 context_id,
				   struct vmci_handle_arr **db_handle_array,
				   struct vmci_handle_arr **qp_handle_array)
{
	struct vmci_ctx *context;
	int result = VMCI_SUCCESS;

	context = vmci_ctx_get(context_id);
	if (context == NULL)
		return VMCI_ERROR_NOT_FOUND;

	spin_lock(&context->lock);

	/* Hand the pending array to the caller and start a fresh one. */
	*db_handle_array = context->pending_doorbell_array;
	context->pending_doorbell_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!context->pending_doorbell_array) {
		/* Allocation failed; put the original array back. */
		context->pending_doorbell_array = *db_handle_array;
		*db_handle_array = NULL;
		result = VMCI_ERROR_NO_MEM;
	}
	*qp_handle_array = NULL;

	spin_unlock(&context->lock);
	vmci_ctx_put(context);

	return result;
}

/*
 * Releases handle arrays with pending notifications previously
 * retrieved using vmci_ctx_rcv_notifications_get. If the
 * notifications were not successfully handed over to the guest,
 * success must be false.
892 */ 893 void vmci_ctx_rcv_notifications_release(u32 context_id, 894 struct vmci_handle_arr *db_handle_array, 895 struct vmci_handle_arr *qp_handle_array, 896 bool success) 897 { 898 struct vmci_ctx *context = vmci_ctx_get(context_id); 899 900 spin_lock(&context->lock); 901 if (!success) { 902 struct vmci_handle handle; 903 904 /* 905 * New notifications may have been added while we were not 906 * holding the context lock, so we transfer any new pending 907 * doorbell notifications to the old array, and reinstate the 908 * old array. 909 */ 910 911 handle = vmci_handle_arr_remove_tail( 912 context->pending_doorbell_array); 913 while (!vmci_handle_is_invalid(handle)) { 914 if (!vmci_handle_arr_has_entry(db_handle_array, 915 handle)) { 916 vmci_handle_arr_append_entry( 917 &db_handle_array, handle); 918 } 919 handle = vmci_handle_arr_remove_tail( 920 context->pending_doorbell_array); 921 } 922 vmci_handle_arr_destroy(context->pending_doorbell_array); 923 context->pending_doorbell_array = db_handle_array; 924 db_handle_array = NULL; 925 } else { 926 ctx_clear_notify_call(context); 927 } 928 spin_unlock(&context->lock); 929 vmci_ctx_put(context); 930 931 if (db_handle_array) 932 vmci_handle_arr_destroy(db_handle_array); 933 934 if (qp_handle_array) 935 vmci_handle_arr_destroy(qp_handle_array); 936 } 937 938 /* 939 * Registers that a new doorbell handle has been allocated by the 940 * context. Only doorbell handles registered can be notified. 
941 */ 942 int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle) 943 { 944 struct vmci_ctx *context; 945 int result; 946 947 if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) 948 return VMCI_ERROR_INVALID_ARGS; 949 950 context = vmci_ctx_get(context_id); 951 if (context == NULL) 952 return VMCI_ERROR_NOT_FOUND; 953 954 spin_lock(&context->lock); 955 if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) 956 result = vmci_handle_arr_append_entry(&context->doorbell_array, 957 handle); 958 else 959 result = VMCI_ERROR_DUPLICATE_ENTRY; 960 961 spin_unlock(&context->lock); 962 vmci_ctx_put(context); 963 964 return result; 965 } 966 967 /* 968 * Unregisters a doorbell handle that was previously registered 969 * with vmci_ctx_dbell_create. 970 */ 971 int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle) 972 { 973 struct vmci_ctx *context; 974 struct vmci_handle removed_handle; 975 976 if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) 977 return VMCI_ERROR_INVALID_ARGS; 978 979 context = vmci_ctx_get(context_id); 980 if (context == NULL) 981 return VMCI_ERROR_NOT_FOUND; 982 983 spin_lock(&context->lock); 984 removed_handle = 985 vmci_handle_arr_remove_entry(context->doorbell_array, handle); 986 vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); 987 spin_unlock(&context->lock); 988 989 vmci_ctx_put(context); 990 991 return vmci_handle_is_invalid(removed_handle) ? 992 VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; 993 } 994 995 /* 996 * Unregisters all doorbell handles that were previously 997 * registered with vmci_ctx_dbell_create. 
 */
int vmci_ctx_dbell_destroy_all(u32 context_id)
{
	struct vmci_ctx *context;
	struct vmci_handle handle;

	if (context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	context = vmci_ctx_get(context_id);
	if (context == NULL)
		return VMCI_ERROR_NOT_FOUND;

	/* Drain both the registered and the pending doorbell arrays. */
	spin_lock(&context->lock);
	do {
		struct vmci_handle_arr *arr = context->doorbell_array;
		handle = vmci_handle_arr_remove_tail(arr);
	} while (!vmci_handle_is_invalid(handle));
	do {
		struct vmci_handle_arr *arr = context->pending_doorbell_array;
		handle = vmci_handle_arr_remove_tail(arr);
	} while (!vmci_handle_is_invalid(handle));
	spin_unlock(&context->lock);

	vmci_ctx_put(context);

	return VMCI_SUCCESS;
}

/*
 * Registers a notification of a doorbell handle initiated by the
 * specified source context. The notification of doorbells are
 * subject to the same isolation rules as datagram delivery. To
 * allow host side senders of notifications a finer granularity
 * of sender rights than those assigned to the sending context
 * itself, the host context is required to specify a different
 * set of privilege flags that will override the privileges of
 * the source context.
 */
int vmci_ctx_notify_dbell(u32 src_cid,
			  struct vmci_handle handle,
			  u32 src_priv_flags)
{
	struct vmci_ctx *dst_context;
	int result;

	if (vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	/* Get the target VM's VMCI context. */
	dst_context = vmci_ctx_get(handle.context);
	if (!dst_context) {
		pr_devel("Invalid context (ID=0x%x)\n", handle.context);
		return VMCI_ERROR_NOT_FOUND;
	}

	/* Self-notification skips the privilege checks below. */
	if (src_cid != handle.context) {
		u32 dst_priv_flags;

		if (VMCI_CONTEXT_IS_VM(src_cid) &&
		    VMCI_CONTEXT_IS_VM(handle.context)) {
			pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
				 src_cid, handle.context);
			result = VMCI_ERROR_DST_UNREACHABLE;
			goto out;
		}

		result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
		if (result < VMCI_SUCCESS) {
			pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
				handle.context, handle.resource);
			goto out;
		}

		/*
		 * Only the host context may supply override privileges;
		 * otherwise derive them from the source context itself.
		 */
		if (src_cid != VMCI_HOST_CONTEXT_ID ||
		    src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
			src_priv_flags = vmci_context_get_priv_flags(src_cid);
		}

		if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto out;
		}
	}

	if (handle.context == VMCI_HOST_CONTEXT_ID) {
		result = vmci_dbell_host_context_notify(src_cid, handle);
	} else {
		spin_lock(&dst_context->lock);

		if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
					       handle)) {
			result = VMCI_ERROR_NOT_FOUND;
		} else {
			if (!vmci_handle_arr_has_entry(
					dst_context->pending_doorbell_array,
					handle)) {
				result = vmci_handle_arr_append_entry(
					&dst_context->pending_doorbell_array,
					handle);
				if (result == VMCI_SUCCESS) {
					ctx_signal_notify(dst_context);
					wake_up(&dst_context->host_context.wait_queue);
				}
			} else {
				/* Already pending; nothing more to do. */
				result = VMCI_SUCCESS;
			}
		}
		spin_unlock(&dst_context->lock);
	}

 out:
	vmci_ctx_put(dst_context);

	return result;
}

/* Returns true iff the context's user version supports host queue pairs. */
bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
{
	return context && context->user_version >= VMCI_VERSION_HOSTQP;
}

/*
 * Registers that a new queue pair handle has been allocated by
 * the context.
 */
int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
{
	int result;

	if (context == NULL || vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
		result = vmci_handle_arr_append_entry(
			&context->queue_pair_array, handle);
	else
		result = VMCI_ERROR_DUPLICATE_ENTRY;

	return result;
}

/*
 * Unregisters a queue pair handle that was previously registered
 * with vmci_ctx_qp_create.
 */
int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
{
	struct vmci_handle hndl;

	if (context == NULL || vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);

	return vmci_handle_is_invalid(hndl) ?
		VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
}

/*
 * Determines whether a given queue pair handle is registered
 * with the given context.
 */
bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
{
	if (context == NULL || vmci_handle_is_invalid(handle))
		return false;

	return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
}

/*
 * vmci_context_get_priv_flags() - Retrieve privilege flags.
 * @context_id: The context ID of the VMCI context.
 *
 * Retrieves privilege flags of the given VMCI context ID.
1174 */ 1175 u32 vmci_context_get_priv_flags(u32 context_id) 1176 { 1177 if (vmci_host_code_active()) { 1178 u32 flags; 1179 struct vmci_ctx *context; 1180 1181 context = vmci_ctx_get(context_id); 1182 if (!context) 1183 return VMCI_LEAST_PRIVILEGE_FLAGS; 1184 1185 flags = context->priv_flags; 1186 vmci_ctx_put(context); 1187 return flags; 1188 } 1189 return VMCI_NO_PRIVILEGE_FLAGS; 1190 } 1191 EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags); 1192 1193 /* 1194 * vmci_is_context_owner() - Determimnes if user is the context owner 1195 * @context_id: The context ID of the VMCI context. 1196 * @uid: The host user id (real kernel value). 1197 * 1198 * Determines whether a given UID is the owner of given VMCI context. 1199 */ 1200 bool vmci_is_context_owner(u32 context_id, kuid_t uid) 1201 { 1202 bool is_owner = false; 1203 1204 if (vmci_host_code_active()) { 1205 struct vmci_ctx *context = vmci_ctx_get(context_id); 1206 if (context) { 1207 if (context->cred) 1208 is_owner = uid_eq(context->cred->uid, uid); 1209 vmci_ctx_put(context); 1210 } 1211 } 1212 1213 return is_owner; 1214 } 1215 EXPORT_SYMBOL_GPL(vmci_is_context_owner); 1216