/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and supporting VM to VM communication and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                             |  |
 *           \_/                           \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |                             |  |
 *            |     o----------------------o   |
 *            |     |                          |
 *           \_/   \_/                        \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail: when a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair
 *       at a later point in time. This state can be distinguished from the
 *       one above by the context ID of the creator. A host side is not
 *       allowed to attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again, two new states are possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host
 *       side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue
 *       pair already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: if the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
 * since the peer may have either attached or detached in the meantime. The
 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
 * *_MEM state, and vice versa.
 */

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
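
/*
 * Illustration only, not used by the driver: the enum above is laid out
 * so that each *_NO_MEM state is immediately followed by its *_MEM
 * sibling, which is what makes the ++/-- transitions described in the
 * state diagram comment work. A sketch of a toggle helper relying on
 * that layout:
 */
#if 0
static enum qp_broker_state qp_state_toggle_mem(enum qp_broker_state state)
{
	switch (state) {
	case VMCIQPB_CREATED_NO_MEM:
	case VMCIQPB_ATTACHED_NO_MEM:
	case VMCIQPB_SHUTDOWN_NO_MEM:
		return state + 1;	/* Guest memory was mapped. */
	case VMCIQPB_CREATED_MEM:
	case VMCIQPB_ATTACHED_MEM:
	case VMCIQPB_SHUTDOWN_MEM:
		return state - 1;	/* Guest memory was unmapped. */
	default:
		return state;		/* NEW and GONE have no sibling. */
	}
}
#endif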
/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))


/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}
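
/*
 * A usage sketch for the two helpers above (illustration only): the
 * size passed to qp_free_queue() is the data size originally given to
 * qp_alloc_queue(); both helpers account for the header page
 * internally.
 */
#if 0
static void qp_alloc_queue_example(void)
{
	const u64 size = 2 * PAGE_SIZE;	/* Data pages, excl. header. */
	void *q = qp_alloc_queue(size, 0);

	if (q)
		qp_free_queue(q, size);	/* Frees data pages + header. */
}
#endif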
/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				   u64 queue_offset,
				   struct iov_iter *from,
				   size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				     const struct vmci_queue *queue,
				     u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}
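
/*
 * Worked example of the offset -> page translation used by the two copy
 * helpers above (illustration only). With PAGE_SIZE == 4096, a queue
 * offset of 5000 lands 904 bytes into data page 1:
 *
 *   page_index  = 5000 / PAGE_SIZE        == 1
 *   page_offset = 5000 & (PAGE_SIZE - 1)  == 904
 *
 * On the host side the page is kmap()'d from u.h.page[1]; on the guest
 * side the virtual address is u.g.vas[1 + 1], since vas[0] holds the
 * queue header.
 */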
/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the list
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u32 *produce_ppns;
	u32 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
			  GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
			  GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++) {
		unsigned long pfn;

		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
		pfn = produce_ppns[i];

		/* Fail allocation if PFN isn't supported by hypervisor. */
		if (sizeof(pfn) > sizeof(*produce_ppns)
		    && pfn != produce_ppns[i])
			goto ppn_error;
	}

	for (i = 0; i < num_consume_pages; i++) {
		unsigned long pfn;

		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
		pfn = consume_ppns[i];

		/* Fail allocation if PFN isn't supported by hypervisor. */
		if (sizeof(pfn) > sizeof(*consume_ppns)
		    && pfn != consume_ppns[i])
			goto ppn_error;
	}

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;

 ppn_error:
	kfree(produce_ppns);
	kfree(consume_ppns);
	return VMCI_ERROR_INVALID_ARGS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	memcpy(call_buf, ppn_set->produce_ppns,
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
	memcpy(call_buf +
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
	       ppn_set->consume_ppns,
	       ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

	return VMCI_SUCCESS;
}
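
/*
 * Layout sketch for the buffer filled in by qp_populate_ppn_set() above
 * (illustration only): the produce PPNs come first, immediately followed
 * by the consume PPNs.
 *
 *   call_buf: [ produce_ppns[0 .. num_produce_pages - 1] |
 *               consume_ppns[0 .. num_consume_pages - 1] ]
 *
 * The caller must therefore size the buffer as
 * (num_produce_pages + num_consume_pages) * sizeof(u32); see
 * qp_alloc_hypercall() below for the one place this is done.
 */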
/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues. This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it's only any good if the mutexes
 * are actually acquired. Queue structure must lie in non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}

/*
 * Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex. So, only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex. So, only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}
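
/*
 * Typical usage of the mutex helpers above (sketch only, hypothetical
 * caller): any access to a mapped queue header on the host side is
 * bracketed by acquire/release, and passing either queue of the pair
 * locks both, since they share one mutex.
 */
#if 0
static void qp_mutex_example(struct vmci_queue *produce_q)
{
	qp_acquire_queue_mutex(produce_q);
	/* ... safely access produce_q->q_header here ... */
	qp_release_queue_mutex(produce_q);
}
#endif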
/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages, 1,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			 retval);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages, 1,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			 retval);
		qp_release_pages(consume_q->kernel_if->u.h.header_page,
				 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping only differs in
	 * that we either get a single or two UVAs, so we split the
	 * single UVA range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}

/*
 * Releases and removes the references to user pages stored in the attach
 * struct. Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}
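
/*
 * Lifecycle sketch for the host-side memory helpers above (illustration
 * only, error handling elided): register the user memory, map the
 * headers, use the queue pair, then tear everything down in reverse
 * order, as required by the comment on qp_host_map_queues().
 */
#if 0
static void qp_host_memory_example(struct vmci_qp_page_store *page_store,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	qp_host_register_user_memory(page_store, produce_q, consume_q);
	qp_host_map_queues(produce_q, consume_q);
	/* ... the queue pair headers are usable here ... */
	qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
			     produce_q, consume_q);
	qp_host_unregister_user_memory(produce_q, consume_q);
}
#endif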
/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles. Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}
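
/*
 * Locking sketch for the lookup helpers above (illustration only):
 * qp_list_find() and its two wrappers assume the relevant list mutex is
 * held, so a hypothetical standalone existence check would look like
 * this.
 */
#if 0
static bool qp_guest_handle_exists(struct vmci_handle handle)
{
	bool found;

	mutex_lock(&qp_guest_endpoints.mutex);
	found = qp_guest_handle_to_entry(handle) != NULL;
	mutex_unlock(&qp_guest_endpoints.mutex);

	return found;
}
#endif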
/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * sizeof(u32);
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}
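
/*
 * Wire-format sketch of the allocation hypercall built above
 * (illustration only): a fixed-size message header followed directly by
 * the PPN array produced by qp_populate_ppn_set().
 *
 *   [ struct vmci_qp_alloc_msg | u32 ppns[num_ppns] ]
 *
 * which is why msg_size is computed as
 * sizeof(*alloc_msg) + num_ppns * sizeof(u32).
 */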
/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate. We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet. It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here, we either failed to notify a local queuepair, or
	 * we succeeded in all cases.  Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones. The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create. We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create. For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
		 */
		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}
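
/*
 * Orientation example for the broker entry created above (illustration
 * only): the entry always stores the guest's view of the queues. If the
 * host creates a pair with produce_size = 64KB and consume_size = 32KB,
 * the entry records produce_size = 32KB and consume_size = 64KB, so a
 * guest attaching with (produce 32KB, consume 64KB) matches; compare
 * the size checks in qp_broker_attach() below.
 */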
/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about attach/detach event by the
 * given VM.  Returns Payload size of datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
	 * number of pending events from the hypervisor to a given VM
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */
		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */
		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */
		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attached to a queue pair, it will supply
		 * the backing memory. If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call. In all cases, the
		 * initially created queue pair must not have any
		 * memory associated with it already.
		 */
		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */
			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */
		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, entry->qp.handle, context_id,
				   entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}

/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
			   u32 peer,
			   u32 flags,
			   u32 priv_flags,
			   u64 produce_size,
			   u64 consume_size,
			   struct vmci_qp_page_store *page_store,
			   struct vmci_ctx *context,
			   vmci_event_release_cb wakeup_cb,
			   void *client_data,
			   struct qp_broker_entry **ent,
			   bool *swap)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool create;
	struct qp_broker_entry *entry = NULL;
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (vmci_handle_is_invalid(handle) ||
	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
	    !(produce_size || consume_size) ||
	    !context || context_id == VMCI_INVALID_ID ||
	    handle.context == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In the initial argument check, we ensure that non-vmkernel hosts
	 * are not allowed to create local queue pairs.
	 */

	mutex_lock(&qp_broker_list.mutex);

	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		mutex_unlock(&qp_broker_list.mutex);
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (handle.resource != VMCI_INVALID_ID)
		entry = qp_broker_handle_to_entry(handle);

	if (!entry) {
		create = true;
		result =
		    qp_broker_create(handle, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	} else {
		create = false;
		result =
		    qp_broker_attach(entry, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	}

	mutex_unlock(&qp_broker_list.mutex);

	if (swap)
		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
		    !(create && is_local);

	return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
			      struct vmci_queue **produce_q,
			      u64 produce_size,
			      struct vmci_queue **consume_q,
			      u64 consume_size,
			      u32 peer,
			      u32 flags,
			      u32 priv_flags,
			      vmci_event_release_cb wakeup_cb,
			      void *client_data)
{
	struct vmci_handle new_handle;
	struct vmci_ctx *context;
	struct qp_broker_entry *entry;
	int result;
	bool swap;

	if (vmci_handle_is_invalid(*handle)) {
		new_handle = vmci_make_handle(
			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
	} else
		new_handle = *handle;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
	entry = NULL;
	result =
	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
			    produce_size, consume_size, NULL, context,
			    wakeup_cb, client_data, &entry, &swap);
	if (result == VMCI_SUCCESS) {
		if (swap) {
			/*
			 * If this is a local queue pair, the attacher
			 * will swap around produce and consume
			 * queues.
			 */
			 */

			*produce_q = entry->consume_q;
			*consume_q = entry->produce_q;
		} else {
			*produce_q = entry->produce_q;
			*consume_q = entry->consume_q;
		}

		*handle = vmci_resource_handle(&entry->resource);
	} else {
		*handle = VMCI_INVALID_HANDLE;
		pr_devel("queue pair broker failed to alloc (result=%d)\n",
			 result);
	}
	vmci_ctx_put(context);
	return result;
}

/*
 * Allocates a VMCI queue_pair. Only checks validity of input
 * arguments. The real work is done in the host or guest
 * specific function.
 */
int vmci_qp_alloc(struct vmci_handle *handle,
		  struct vmci_queue **produce_q,
		  u64 produce_size,
		  struct vmci_queue **consume_q,
		  u64 consume_size,
		  u32 peer,
		  u32 flags,
		  u32 priv_flags,
		  bool guest_endpoint,
		  vmci_event_release_cb wakeup_cb,
		  void *client_data)
{
	if (!handle || !produce_q || !consume_q ||
	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
		return VMCI_ERROR_INVALID_ARGS;

	if (guest_endpoint) {
		return qp_alloc_guest_work(handle, produce_q,
					   produce_size, consume_q,
					   consume_size, peer,
					   flags, priv_flags);
	} else {
		return qp_alloc_host_work(handle, produce_q,
					  produce_size, consume_q,
					  consume_size, peer, flags,
					  priv_flags, wakeup_cb, client_data);
	}
}

/*
 * This function implements the host kernel API for detaching from
 * a queue pair.
 */
static int qp_detatch_host_work(struct vmci_handle handle)
{
	int result;
	struct vmci_ctx *context;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);

	result = vmci_qp_broker_detach(handle, context);

	vmci_ctx_put(context);
	return result;
}

/*
 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
 * Real work is done in the host or guest specific function.
 */
static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
{
	if (vmci_handle_is_invalid(handle))
		return VMCI_ERROR_INVALID_ARGS;

	if (guest_endpoint)
		return qp_detatch_guest_work(handle);
	else
		return qp_detatch_host_work(handle);
}

/*
 * Returns the entry from the head of the list. Assumes that the list is
 * locked.
 */
static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
{
	if (!list_empty(&qp_list->head)) {
		struct qp_entry *entry =
		    list_first_entry(&qp_list->head, struct qp_entry,
				     list_item);
		return entry;
	}

	return NULL;
}

void vmci_qp_broker_exit(void)
{
	struct qp_entry *entry;
	struct qp_broker_entry *be;

	mutex_lock(&qp_broker_list.mutex);

	while ((entry = qp_list_get_head(&qp_broker_list))) {
		be = (struct qp_broker_entry *)entry;

		qp_list_remove_entry(&qp_broker_list, entry);
		kfree(be);
	}

	mutex_unlock(&qp_broker_list.mutex);
}

/*
 * Requests that a queue pair be allocated with the VMCI queue
 * pair broker. Allocates a queue pair entry if one does not
 * exist. Attaches to one if it exists, and retrieves the page
 * files backing that queue_pair. The queue pair broker lock is
 * acquired and released internally.
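 *
 * A minimal calling sketch (the context, ctx_id and peer_id values
 * are illustrative only):
 *
 *	struct vmci_handle handle = vmci_make_handle(ctx_id, VMCI_INVALID_ID);
 *	int rv = vmci_qp_broker_alloc(handle, peer_id, 0,
 *				      VMCI_NO_PRIVILEGE_FLAGS,
 *				      PAGE_SIZE, PAGE_SIZE, NULL, context);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;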
 */
int vmci_qp_broker_alloc(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u32 priv_flags,
			 u64 produce_size,
			 u64 consume_size,
			 struct vmci_qp_page_store *page_store,
			 struct vmci_ctx *context)
{
	return qp_broker_alloc(handle, peer, flags, priv_flags,
			       produce_size, consume_size,
			       page_store, context, NULL, NULL, NULL, NULL);
}

/*
 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
 * step to add the UVAs of the VMX mapping of the queue pair. This function
 * provides backwards compatibility with such VMX'en, and takes care of
 * registering the page store for a queue pair previously allocated by the
 * VMX during create or attach. This function will move the queue pair state
 * from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM, or from
 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
 * attached state with memory, the queue pair is ready to be used by the
 * host peer, and an attached event will be generated.
 *
 * The queue pair broker lock is acquired and released internally.
 *
 * This function is only used by the hosted platform, since there is no
 * issue with backwards compatibility for vmkernel.
 */
int vmci_qp_broker_set_page_store(struct vmci_handle handle,
				  u64 produce_uva,
				  u64 consume_uva,
				  struct vmci_ctx *context)
{
	struct qp_broker_entry *entry;
	int result;
	const u32 context_id = vmci_ctx_get_id(context);

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * We only support guest to host queue pairs, so the VMX must
	 * supply UVAs for the mapped page files.
	 */

	if (produce_uva == 0 || consume_uva == 0)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	/*
	 * If I'm the owner then I can set the page store.
	 *
	 * Or, if a host created the queue_pair and I'm the attached peer
	 * then I can set the page store.
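	 *
	 * In other words, the check below allows the caller to proceed
	 * when either create_id == context_id (the owner), or when the
	 * pair was created by the host (create_id == VMCI_HOST_CONTEXT_ID)
	 * and the caller is the attached peer (attach_id == context_id).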
	 */
	if (entry->create_id != context_id &&
	    (entry->create_id != VMCI_HOST_CONTEXT_ID ||
	     entry->attach_id != context_id)) {
		result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
		goto out;
	}

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_ATTACHED_NO_MEM) {
		result = VMCI_ERROR_UNAVAILABLE;
		goto out;
	}

	result = qp_host_get_user_memory(produce_uva, consume_uva,
					 entry->produce_q, entry->consume_q);
	if (result < VMCI_SUCCESS)
		goto out;

	result = qp_host_map_queues(entry->produce_q, entry->consume_q);
	if (result < VMCI_SUCCESS) {
		qp_host_unregister_user_memory(entry->produce_q,
					       entry->consume_q);
		goto out;
	}

	if (entry->state == VMCIQPB_CREATED_NO_MEM)
		entry->state = VMCIQPB_CREATED_MEM;
	else
		entry->state = VMCIQPB_ATTACHED_MEM;

	entry->vmci_page_files = true;

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, handle, context_id, entry->create_id);
		if (result < VMCI_SUCCESS) {
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
		}
	}

	result = VMCI_SUCCESS;
 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}

/*
 * Resets saved queue headers for the given QP broker
 * entry. Should be used when guest memory becomes available
 * again, or the guest detaches.
 */
static void qp_reset_saved_headers(struct qp_broker_entry *entry)
{
	entry->produce_q->saved_header = NULL;
	entry->consume_q->saved_header = NULL;
}

/*
 * The main entry point for detaching from a queue pair registered with the
 * queue pair broker. If more than one endpoint is attached to the queue
 * pair, the first endpoint will mainly decrement a reference count and
 * generate a notification to its peer. The last endpoint will clean up
 * the queue pair state registered with the broker.
 *
 * When a guest endpoint detaches, it will unmap and unregister the guest
 * memory backing the queue pair. If the host is still attached, it will
 * no longer be able to access the queue pair content.
 *
 * If the queue pair is already in a state where there is no memory
 * registered for the queue pair (any *_NO_MEM state), it will transition to
 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen if a guest
 * endpoint is the first of two endpoints to detach. If the host endpoint is
 * the first of two endpoints to detach, the queue pair will move to the
 * VMCIQPB_SHUTDOWN_MEM state.
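 *
 * In summary, on the first of two detaches:
 *
 *	guest detaches first                      -> VMCIQPB_SHUTDOWN_NO_MEM
 *	host detaches first, memory registered    -> VMCIQPB_SHUTDOWN_MEM
 *	host detaches first, no memory registered -> VMCIQPB_SHUTDOWN_NO_MEM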
 */
int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	u32 peer_id;
	bool is_local = false;
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	if (context_id == entry->create_id) {
		peer_id = entry->attach_id;
		entry->create_id = VMCI_INVALID_ID;
	} else {
		peer_id = entry->create_id;
		entry->attach_id = VMCI_INVALID_ID;
	}
	entry->qp.ref_count--;

	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		bool headers_mapped;

		/*
		 * Pre NOVMVM vmx'en may detach from a queue pair
		 * before setting the page store, and in that case
		 * there is no user memory to detach from. Also, more
		 * recent VMX'en may detach from a queue pair in the
		 * quiesced state.
		 */

		qp_acquire_queue_mutex(entry->produce_q);
		headers_mapped = entry->produce_q->q_header ||
		    entry->consume_q->q_header;
		if (QPBROKERSTATE_HAS_MEM(entry)) {
			result =
			    qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
						 entry->produce_q,
						 entry->consume_q);
			if (result < VMCI_SUCCESS)
				pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
					handle.context, handle.resource,
					result);

			qp_host_unregister_user_memory(entry->produce_q,
						       entry->consume_q);

		}

		if (!headers_mapped)
			qp_reset_saved_headers(entry);

		qp_release_queue_mutex(entry->produce_q);

		if (!headers_mapped && entry->wakeup_cb)
			entry->wakeup_cb(entry->client_data);

	} else {
		if (entry->wakeup_cb) {
			entry->wakeup_cb = NULL;
			entry->client_data = NULL;
		}
	}

	if (entry->qp.ref_count == 0) {
		qp_list_remove_entry(&qp_broker_list, &entry->qp);

		if (is_local)
			kfree(entry->local_mem);

		qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
		qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
		qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
		/* Unlink from resource hash table and free callback */
		vmci_resource_remove(&entry->resource);

		kfree(entry);

		vmci_ctx_qp_destroy(context, handle);
	} else {
		qp_notify_peer(false, handle, context_id, peer_id);
		if (context_id == VMCI_HOST_CONTEXT_ID &&
		    QPBROKERSTATE_HAS_MEM(entry)) {
			entry->state = VMCIQPB_SHUTDOWN_MEM;
		} else {
			entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
		}

		if (!is_local)
			vmci_ctx_qp_destroy(context, handle);

	}
	result = VMCI_SUCCESS;
 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}

/*
 * Establishes the necessary mappings for a queue pair given a
 * reference to the queue pair guest memory. This is usually
 * called when a guest is unquiesced and the VMX is allowed to
 * map guest memory once again.
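 *
 * A sketch of the expected calling pattern (the mapped_pages value is
 * hypothetical and stands for the base of the VMX mapping of the
 * guest memory):
 *
 *	result = vmci_qp_broker_map(handle, context, mapped_pages);
 *	if (result < VMCI_SUCCESS)
 *		return result;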
 */
int vmci_qp_broker_map(struct vmci_handle handle,
		       struct vmci_ctx *context,
		       u64 guest_mem)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	result = VMCI_SUCCESS;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		struct vmci_qp_page_store page_store;

		page_store.pages = guest_mem;
		page_store.len = QPE_NUM_PAGES(entry->qp);

		qp_acquire_queue_mutex(entry->produce_q);
		qp_reset_saved_headers(entry);
		result =
		    qp_host_register_user_memory(&page_store,
						 entry->produce_q,
						 entry->consume_q);
		qp_release_queue_mutex(entry->produce_q);
		if (result == VMCI_SUCCESS) {
			/* Move state from *_NO_MEM to *_MEM */

			entry->state++;

			if (entry->wakeup_cb)
				entry->wakeup_cb(entry->client_data);
		}
	}

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}

/*
 * Saves a snapshot of the queue headers for the given QP broker
 * entry. Should be used when guest memory is unmapped.
 * Results:
 * VMCI_SUCCESS on success, appropriate error code if guest memory
 * can't be accessed.
 */
static int qp_save_headers(struct qp_broker_entry *entry)
{
	int result;

	if (entry->produce_q->saved_header != NULL &&
	    entry->consume_q->saved_header != NULL) {
		/*
		 * If the headers have already been saved, we don't need to do
		 * it again, and we don't want to map in the headers
		 * unnecessarily.
		 */

		return VMCI_SUCCESS;
	}

	if (NULL == entry->produce_q->q_header ||
	    NULL == entry->consume_q->q_header) {
		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
		if (result < VMCI_SUCCESS)
			return result;
	}

	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
	       sizeof(entry->saved_produce_q));
	entry->produce_q->saved_header = &entry->saved_produce_q;
	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
	       sizeof(entry->saved_consume_q));
	entry->consume_q->saved_header = &entry->saved_consume_q;

	return VMCI_SUCCESS;
}

/*
 * Removes all references to the guest memory of a given queue pair, and
 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
 * called when a VM is being quiesced, where access to guest memory should
 * be avoided.
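 *
 * Together with vmci_qp_broker_map(), the expected quiesce/unquiesce
 * sequence is roughly:
 *
 *	vmci_qp_broker_unmap(handle, context, gid);      *_MEM -> *_NO_MEM
 *	(VM quiesced; guest memory unavailable)
 *	vmci_qp_broker_map(handle, context, guest_mem);  *_NO_MEM -> *_MEM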
 */
int vmci_qp_broker_unmap(struct vmci_handle handle,
			 struct vmci_ctx *context,
			 u32 gid)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		qp_acquire_queue_mutex(entry->produce_q);
		result = qp_save_headers(entry);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
				handle.context, handle.resource, result);

		qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);

		/*
		 * On hosted, when we unmap queue pairs, the VMX will also
		 * unmap the guest memory, so we invalidate the previously
		 * registered memory. If the queue pair is mapped again at a
		 * later point in time, we will need to reregister the user
		 * memory with a possibly new user VA.
		 */
		qp_host_unregister_user_memory(entry->produce_q,
					       entry->consume_q);

		/*
		 * Move state from *_MEM to *_NO_MEM.
		 */
		entry->state--;

		qp_release_queue_mutex(entry->produce_q);
	}

	result = VMCI_SUCCESS;

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}

/*
 * Destroys all guest queue pair endpoints. If active guest queue
 * pairs still exist, hypercalls to attempt detach from these
 * queue pairs will be made. Any failure to detach is silently
 * ignored.
 */
void vmci_qp_guest_endpoints_exit(void)
{
	struct qp_entry *entry;
	struct qp_guest_endpoint *ep;

	mutex_lock(&qp_guest_endpoints.mutex);

	while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
		ep = (struct qp_guest_endpoint *)entry;

		/* Don't make a hypercall for local queue_pairs. */
		if (!(entry->flags & VMCI_QPFLAG_LOCAL))
			qp_detatch_hypercall(entry->handle);

		/* We cannot fail the exit, so let's reset ref_count. */
		entry->ref_count = 0;
		qp_list_remove_entry(&qp_guest_endpoints, entry);

		qp_guest_endpoint_destroy(ep);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);
}

/*
 * Helper routine that locks the queue pair before subsequent
 * operations.
 * Note: Non-blocking on the host side is currently only implemented in ESX.
 * Since non-blocking isn't yet implemented on the host personality, we
 * have no reason to acquire a spin lock; we simply acquire the mutex,
 * since we are always allowed to block.
 */
static void qp_lock(const struct vmci_qp *qpair)
{
	qp_acquire_queue_mutex(qpair->produce_q);
}

/*
 * Helper routine that unlocks the queue pair after calling
 * qp_lock.
 */
static void qp_unlock(const struct vmci_qp *qpair)
{
	qp_release_queue_mutex(qpair->produce_q);
}

/*
 * The queue headers may not be mapped at all times. If a queue is
 * currently not mapped, an attempt is made to map it.
 */
static int qp_map_queue_headers(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	int result;

	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
		result = qp_host_map_queues(produce_q, consume_q);
		if (result < VMCI_SUCCESS)
			return (produce_q->saved_header &&
				consume_q->saved_header) ?
			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
	}

	return VMCI_SUCCESS;
}

/*
 * Helper routine that will retrieve the produce and consume
 * headers of a given queue pair. If the guest memory of the
 * queue pair is currently not available, the saved queue headers
 * will be returned, if these are available.
 */
static int qp_get_queue_headers(const struct vmci_qp *qpair,
				struct vmci_queue_header **produce_q_header,
				struct vmci_queue_header **consume_q_header)
{
	int result;

	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
	if (result == VMCI_SUCCESS) {
		*produce_q_header = qpair->produce_q->q_header;
		*consume_q_header = qpair->consume_q->q_header;
	} else if (qpair->produce_q->saved_header &&
		   qpair->consume_q->saved_header) {
		*produce_q_header = qpair->produce_q->saved_header;
		*consume_q_header = qpair->consume_q->saved_header;
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Callback from VMCI queue pair broker indicating that a queue
 * pair that was previously not ready, now either is ready or
 * gone forever.
 */
static int qp_wakeup_cb(void *client_data)
{
	struct vmci_qp *qpair = (struct vmci_qp *)client_data;

	qp_lock(qpair);
	while (qpair->blocked > 0) {
		qpair->blocked--;
		qpair->generation++;
		wake_up(&qpair->event);
	}
	qp_unlock(qpair);

	return VMCI_SUCCESS;
}

/*
 * Makes the calling thread wait for the queue pair to become
 * ready for host side access. Returns true when thread is
 * woken up after queue pair state change, false otherwise.
 */
static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
	unsigned int generation;

	qpair->blocked++;
	generation = qpair->generation;
	qp_unlock(qpair);
	wait_event(qpair->event, generation != qpair->generation);
	qp_lock(qpair);

	return true;
}

/*
 * Enqueues a given buffer to the produce queue using the provided
 * function. As many bytes as possible (space available in the queue)
 * are enqueued. Assumes the queue->mutex has been acquired. Returns
 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
 * an error occurred when accessing the buffer,
 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
 * available. Otherwise, the number of bytes written to the queue is
 * returned. Updates the tail pointer of the produce queue.
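 *
 * Wrap-around example with illustrative numbers: for
 * produce_q_size = 16, tail = 12 and written = 8, the first copy
 * covers queue bytes 12..15 (tmp = 4) and the second copy wraps to
 * queue bytes 0..3 (written - tmp = 4).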
 */
static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 produce_q_size,
				 struct iov_iter *from)
{
	s64 free_space;
	u64 tail;
	size_t buf_size = iov_iter_count(from);
	size_t written;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	free_space = vmci_q_header_free_space(produce_q->q_header,
					      consume_q->q_header,
					      produce_q_size);
	if (free_space == 0)
		return VMCI_ERROR_QUEUEPAIR_NOSPACE;

	if (free_space < VMCI_SUCCESS)
		return (ssize_t) free_space;

	written = (size_t) (free_space > buf_size ? buf_size : free_space);
	tail = vmci_q_header_producer_tail(produce_q->q_header);
	if (likely(tail + written < produce_q_size)) {
		result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
	} else {
		/* Tail pointer wraps around. */

		const size_t tmp = (size_t) (produce_q_size - tail);

		result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_to_queue_iter(produce_q, 0, from,
							 written - tmp);
	}

	if (result < VMCI_SUCCESS)
		return result;

	vmci_q_header_add_producer_tail(produce_q->q_header, written,
					produce_q_size);
	return written;
}

/*
 * Dequeues data (if available) from the given consume queue. Writes data
 * to the user provided buffer using the provided function.
 * Assumes the queue->mutex has been acquired.
 * Results:
 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
 * (as defined by the queue size).
 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
 * Otherwise the number of bytes dequeued is returned.
 * Side effects:
 * Updates the head pointer of the consume queue.
 */
static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 consume_q_size,
				 struct iov_iter *to,
				 bool update_consumer)
{
	size_t buf_size = iov_iter_count(to);
	s64 buf_ready;
	u64 head;
	size_t read;
	ssize_t result;

	result = qp_map_queue_headers(produce_q, consume_q);
	if (unlikely(result != VMCI_SUCCESS))
		return result;

	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
					    produce_q->q_header,
					    consume_q_size);
	if (buf_ready == 0)
		return VMCI_ERROR_QUEUEPAIR_NODATA;

	if (buf_ready < VMCI_SUCCESS)
		return (ssize_t) buf_ready;

	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
	head = vmci_q_header_consumer_head(produce_q->q_header);
	if (likely(head + read < consume_q_size)) {
		result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
	} else {
		/* Head pointer wraps around. */

		const size_t tmp = (size_t) (consume_q_size - head);

		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
							   read - tmp);

	}

	if (result < VMCI_SUCCESS)
		return result;

	if (update_consumer)
		vmci_q_header_add_consumer_head(produce_q->q_header,
						read, consume_q_size);

	return read;
}

/*
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair: Pointer for the new vmci_qp struct.
 * @handle: Handle to track the resource.
 * @produce_qsize: Desired size of the producer queue.
 * @consume_qsize: Desired size of the consumer queue.
 * @peer: ContextID of the peer.
 * @flags: VMCI flags.
 * @priv_flags: VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a
 * vmci_qp structure and then attaching to the underlying
 * queue. If an error occurs allocating the memory for the
 * vmci_qp structure no attempt is made to attach. If an
 * error occurs attaching, then the structure is freed.
 */
int vmci_qpair_alloc(struct vmci_qp **qpair,
		     struct vmci_handle *handle,
		     u64 produce_qsize,
		     u64 consume_qsize,
		     u32 peer,
		     u32 flags,
		     u32 priv_flags)
{
	struct vmci_qp *my_qpair;
	int retval;
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
	enum vmci_route route;
	vmci_event_release_cb wakeup_cb;
	void *client_data;

	/*
	 * Restrict the size of a queuepair. The device already
	 * enforces a limit on the total amount of memory that can be
	 * allocated to queuepairs for a guest. However, we try to
	 * allocate this memory before we make the queuepair
	 * allocation hypercall. On Linux, we allocate each page
	 * separately, which means rather than fail, the guest will
	 * thrash while it tries to allocate, and will become
	 * increasingly unresponsive to the point where it appears to
	 * be hung. So we place a limit on the size of an individual
	 * queuepair here, and leave the device to enforce the
	 * restriction on total queuepair memory. (Note that this
	 * doesn't prevent all cases; a user with only this much
	 * physical memory could still get into trouble.) The error
	 * used by the device is NO_RESOURCES, so use that here too.
	 */

	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
		return VMCI_ERROR_NO_RESOURCES;

	retval = vmci_route(&src, &dst, false, &route);
	if (retval < VMCI_SUCCESS)
		route = vmci_guest_code_active() ?
		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
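
	/*
	 * VMCI_QPFLAG_NONBLOCK and VMCI_QPFLAG_PINNED are not supported
	 * by this driver, so reject them up front.
	 */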
	if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
		pr_devel("NONBLOCK OR PINNED set\n");
		return VMCI_ERROR_INVALID_ARGS;
	}

	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
	if (!my_qpair)
		return VMCI_ERROR_NO_MEM;

	my_qpair->produce_q_size = produce_qsize;
	my_qpair->consume_q_size = consume_qsize;
	my_qpair->peer = peer;
	my_qpair->flags = flags;
	my_qpair->priv_flags = priv_flags;

	wakeup_cb = NULL;
	client_data = NULL;

	if (VMCI_ROUTE_AS_HOST == route) {
		my_qpair->guest_endpoint = false;
		if (!(flags & VMCI_QPFLAG_LOCAL)) {
			my_qpair->blocked = 0;
			my_qpair->generation = 0;
			init_waitqueue_head(&my_qpair->event);
			wakeup_cb = qp_wakeup_cb;
			client_data = (void *)my_qpair;
		}
	} else {
		my_qpair->guest_endpoint = true;
	}

	retval = vmci_qp_alloc(handle,
			       &my_qpair->produce_q,
			       my_qpair->produce_q_size,
			       &my_qpair->consume_q,
			       my_qpair->consume_q_size,
			       my_qpair->peer,
			       my_qpair->flags,
			       my_qpair->priv_flags,
			       my_qpair->guest_endpoint,
			       wakeup_cb, client_data);

	if (retval < VMCI_SUCCESS) {
		kfree(my_qpair);
		return retval;
	}

	*qpair = my_qpair;
	my_qpair->handle = *handle;

	return retval;
}
EXPORT_SYMBOL_GPL(vmci_qpair_alloc);

/*
 * vmci_qpair_detach() - Detaches the client from a queue pair.
 * @qpair: Reference of a pointer to the qpair struct.
 *
 * This is the client interface for detaching from a VMCIQPair.
 * Note that this routine will free the memory allocated for the
 * vmci_qp structure too.
 */
int vmci_qpair_detach(struct vmci_qp **qpair)
{
	int result;
	struct vmci_qp *old_qpair;

	if (!qpair || !(*qpair))
		return VMCI_ERROR_INVALID_ARGS;

	old_qpair = *qpair;
	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);

	/*
	 * The guest can fail to detach for a number of reasons, and
	 * if it does so, it will cleanup the entry (if there is one).
	 * The host can fail too, but it won't cleanup the entry
	 * immediately, it will do that later when the context is
	 * freed. Either way, we need to release the qpair struct
	 * here; there isn't much the caller can do, and we don't want
	 * to leak.
	 */

	memset(old_qpair, 0, sizeof(*old_qpair));
	old_qpair->handle = VMCI_INVALID_HANDLE;
	old_qpair->peer = VMCI_INVALID_ID;
	kfree(old_qpair);
	*qpair = NULL;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_detach);

/*
 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
 * @qpair: Pointer to the queue pair struct.
 * @producer_tail: Reference used for storing producer tail index.
 * @consumer_head: Reference used for storing the consumer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the producer.
 */
int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
				   u64 *producer_tail,
				   u64 *consumer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
					   producer_tail, consumer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);

/*
 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
 * @qpair: Pointer to the queue pair struct.
 * @consumer_tail: Reference used for storing consumer tail index.
 * @producer_head: Reference used for storing the producer head index.
 *
 * This is the client interface for getting the current indexes of the
 * QPair from the point of view of the caller as the consumer.
 */
int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
				   u64 *consumer_tail,
				   u64 *producer_head)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	int result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
					   consumer_tail, producer_head);
	qp_unlock(qpair);

	if (result == VMCI_SUCCESS &&
	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
	     (producer_head && *producer_head >= qpair->consume_q_size)))
		return VMCI_ERROR_INVALID_SIZE;

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);

/*
 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
 * @qpair: Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free
 * space in the QPair from the point of view of the caller as
 * the producer, which is the common case. Returns < 0 on error;
 * otherwise the number of bytes into which data can be enqueued.
 */
s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(produce_q_header,
						  consume_q_header,
						  qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);

/*
 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
 * @qpair: Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of free
 * space in the QPair from the point of view of the caller as
 * the consumer, which is not the common case. Returns < 0 on error;
 * otherwise the number of bytes into which data can be enqueued.
 */
s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_free_space(consume_q_header,
						  produce_q_header,
						  qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);

/*
 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
 * producer queue.
 * @qpair: Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of
 * enqueued data in the QPair from the point of view of the
 * caller as the producer, which is not the common case. Returns < 0
 * on error; otherwise the number of bytes that may be read.
 */
s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(produce_q_header,
						 consume_q_header,
						 qpair->produce_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);

/*
 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
 * consumer queue.
 * @qpair: Pointer to the queue pair struct.
 *
 * This is the client interface for getting the amount of
 * enqueued data in the QPair from the point of view of the
 * caller as the consumer, which is the normal case. Returns < 0
 * on error; otherwise the number of bytes that may be read.
 */
s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
{
	struct vmci_queue_header *produce_q_header;
	struct vmci_queue_header *consume_q_header;
	s64 result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);
	result =
	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
	if (result == VMCI_SUCCESS)
		result = vmci_q_header_buf_ready(consume_q_header,
						 produce_q_header,
						 qpair->consume_q_size);
	else
		result = 0;

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);

/*
 * vmci_qpair_enqueue() - Throw data on the queue.
 * @qpair: Pointer to the queue pair struct.
 * @buf: Pointer to buffer containing data
 * @buf_size: Length of buffer.
 * @buf_type: Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * Returns number of bytes enqueued or < 0 on error.
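 *
 * A minimal usage sketch (the qpair is assumed to have been set up
 * with vmci_qpair_alloc() and to be attached):
 *
 *	char data[] = "payload";
 *	ssize_t written = vmci_qpair_enqueue(qpair, data, sizeof(data), 0);
 *	if (written < 0)
 *		return written;	(a VMCI_ERROR_* value)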
 */
ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
			   const void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;
	struct iov_iter from;
	struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&from, WRITE, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   &from);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);

/*
 * vmci_qpair_dequeue() - Get data from the queue.
 * @qpair: Pointer to the queue pair struct.
 * @buf: Pointer to buffer for the data
 * @buf_size: Length of buffer.
 * @buf_type: Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
			   void *buf,
			   size_t buf_size,
			   int buf_type)
{
	ssize_t result;
	struct iov_iter to;
	struct kvec v = {.iov_base = buf, .iov_len = buf_size};

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&to, READ, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &to, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);

/*
 * vmci_qpair_peek() - Peek at the data in the queue.
 * @qpair: Pointer to the queue pair struct.
 * @buf: Pointer to buffer for the data
 * @buf_size: Length of buffer.
 * @buf_type: Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue. (I.e.,
 * copy data from the queue without updating the head pointer.)
 * Returns number of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
			void *buf,
			size_t buf_size,
			int buf_type)
{
	struct iov_iter to;
	struct kvec v = {.iov_base = buf, .iov_len = buf_size};
	ssize_t result;

	if (!qpair || !buf)
		return VMCI_ERROR_INVALID_ARGS;

	iov_iter_kvec(&to, READ, &v, 1, buf_size);

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &to, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peek);

/*
 * vmci_qpair_enquev() - Throw data on the queue using iov.
 * @qpair: Pointer to the queue pair struct.
 * @msg: Pointer to the msghdr containing the data
 * @iov_size: Length of buffer.
 * @buf_type: Buffer type (Unused).
 *
 * This is the client interface for enqueueing data into the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes enqueued or < 0 on error.
 */
ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_enqueue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->produce_q_size,
					   &msg->msg_iter);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_enquev);

/*
 * vmci_qpair_dequev() - Get data from the queue using iov.
 * @qpair: Pointer to the queue pair struct.
 * @msg: Pointer to the msghdr that will receive the data
 * @iov_size: Length of buffer.
 * @buf_type: Buffer type (Unused).
 *
 * This is the client interface for dequeueing data from the queue.
 * This function uses IO vectors to handle the work. Returns number
 * of bytes dequeued or < 0 on error.
 */
ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
			  struct msghdr *msg,
			  size_t iov_size,
			  int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &msg->msg_iter, true);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_dequev);

/*
 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
 * @qpair: Pointer to the queue pair struct.
 * @msg: Pointer to the msghdr that will receive the data
 * @iov_size: Length of buffer.
 * @buf_type: Buffer type (Unused on Linux).
 *
 * This is the client interface for peeking into a queue. (I.e.,
 * copy data from the queue without updating the head pointer.)
 * This function uses IO vectors to handle the work. Returns number
 * of bytes peeked or < 0 on error.
 */
ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
			 struct msghdr *msg,
			 size_t iov_size,
			 int buf_type)
{
	ssize_t result;

	if (!qpair)
		return VMCI_ERROR_INVALID_ARGS;

	qp_lock(qpair);

	do {
		result = qp_dequeue_locked(qpair->produce_q,
					   qpair->consume_q,
					   qpair->consume_q_size,
					   &msg->msg_iter, false);

		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
		    !qp_wait_for_ready_queue(qpair))
			result = VMCI_ERROR_WOULD_BLOCK;

	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);

	qp_unlock(qpair);
	return result;
}
EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
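
/*
 * End-to-end usage sketch for the vmci_qpair_* client interface
 * (illustrative only; error handling is abbreviated, and peer_id,
 * the queue sizes and the payload are hypothetical):
 *
 *	struct vmci_qp *qpair = NULL;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	char out[] = "ping", in[sizeof(out)];
 *	ssize_t n;
 *	int rv;
 *
 *	rv = vmci_qpair_alloc(&qpair, &handle, PAGE_SIZE, PAGE_SIZE,
 *			      peer_id, 0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *
 *	n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
 *	if (n >= 0)
 *		n = vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
 *
 *	vmci_qpair_detach(&qpair);
 */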