/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *   --------------  NEW  -------------
 *   |                                |
 *  \_/                              \_/
 *   CREATED_NO_MEM <-----------------> CREATED_MEM
 *   |    |                           |
 *   |    o-----------------------o   |
 *   |                            |   |
 *  \_/                          \_/ \_/
 *   ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *   |    |                           |
 *   |    o----------------------o    |
 *   |                           |    |
 *  \_/                         \_/  \_/
 *   SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *   |                                |
 *   |                                |
 *   -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *     paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair was already in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
 * in which case the queue pair will transition from the *_NO_MEM state at that
 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
 * since the peer may have either attached or detached in the meantime. The
 * values are laid out such that ++ on a state will move from a *_NO_MEM to a
 * *_MEM state, and vice versa.
 */

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};
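/*
 * Note on the two halves of the union above: a guest endpoint backs its
 * queues with pages obtained from dma_alloc_coherent() and keeps the
 * per-page DMA addresses in 'pas' and the kernel VAs in 'vas' (see
 * qp_alloc_queue() below), while a host endpoint keeps arrays of pinned
 * struct page pointers for guest-supplied memory: 'header_page' points
 * at the queue header page and 'page' at the data pages that follow it
 * (see qp_host_alloc_queue() and qp_host_register_user_memory()).
 */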
/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))


/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}
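/*
 * Queue sizes in this file refer to the data area only; the allocators
 * and qp_free_queue() add one extra page for the queue header. For
 * example, a size of 3 * PAGE_SIZE results in num_pages == 4, per
 * DIV_ROUND_UP(size, PAGE_SIZE) + 1.
 */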
/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}

/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				   u64 queue_offset,
				   struct iov_iter *from,
				   size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}
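/*
 * In the copy helpers above and below, note the asymmetry in addressing:
 * a host endpoint kmap()s each data page on demand from the pinned
 * u.h.page[] array, while a guest endpoint indexes its coherent
 * u.g.vas[] array directly, using page_index + 1 because vas[0] is the
 * queue header page.
 */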
/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				     const struct vmci_queue *queue,
				     u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs: one for the pages in the produce queue,
 * and one for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u32 *produce_ppns;
	u32 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++) {
		unsigned long pfn;

		/* Compute the PFN first so truncation can be detected. */
		pfn = produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
		produce_ppns[i] = pfn;

		/* Fail allocation if PFN isn't supported by hypervisor. */
		if (sizeof(pfn) > sizeof(*produce_ppns)
		    && pfn != produce_ppns[i])
			goto ppn_error;
	}

	for (i = 0; i < num_consume_pages; i++) {
		unsigned long pfn;

		/* Compute the PFN first so truncation can be detected. */
		pfn = consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
		consume_ppns[i] = pfn;

		/* Fail allocation if PFN isn't supported by hypervisor. */
		if (sizeof(pfn) > sizeof(*consume_ppns)
		    && pfn != consume_ppns[i])
			goto ppn_error;
	}

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;

 ppn_error:
	kfree(produce_ppns);
	kfree(consume_ppns);
	return VMCI_ERROR_INVALID_ARGS;
}
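/*
 * The PPNs handed to the hypervisor by qp_alloc_ppn_set() are 32-bit
 * values, so the checks above reject queue pages whose physical frame
 * number does not fit in 32 bits; such pages cannot be described in
 * this hypercall format.
 */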
/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	memcpy(call_buf, ppn_set->produce_ppns,
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
	memcpy(call_buf +
	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
	       ppn_set->consume_ppns,
	       ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

	return VMCI_SUCCESS;
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface.  This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues.  This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue.  Of course, it's only any good if the mutexes
 * are actually acquired.  Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}
/*
 * Acquire the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex.  So, only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue.  Note that the produce_q and
 * the consume_q share a mutex.  So, only one of the two needs to
 * be passed in to this routine.  Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages, 1,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			 retval);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages, 1,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			 retval);
		qp_release_pages(consume_q->kernel_if->u.h.header_page,
				 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping only differ in
	 * that we either get a single or two UVAs, so we split the
	 * single UVA range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}
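/*
 * Layout of the single UVA range described by a page store, as assumed
 * by qp_host_register_user_memory() above:
 *
 *   page_store->pages
 *   |
 *   v
 *   [ produce header | produce data ... | consume header | consume data ... ]
 *
 * The produce queue occupies the first produce_q->kernel_if->num_pages
 * pages (header included); the consume queue pages follow immediately.
 */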
/*
 * Releases and removes the references to user pages stored in the attach
 * struct.  Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}
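/*
 * Both handle-to-entry lookups go through qp_list_find() and therefore,
 * like it, expect the caller to hold the mutex of the corresponding list
 * (qp_guest_endpoints.mutex here, qp_broker_list.mutex below).
 */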
/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle.  0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles.  Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}
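/*
 * Wire format of the alloc hypercall built by qp_alloc_hypercall() below:
 * a struct vmci_qp_alloc_msg header is followed immediately by num_ppns
 * 32-bit PPNs, produce queue pages first and consume queue pages second,
 * as laid out by qp_populate_ppn_set().
 */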
/*
 * Helper to make a queue_pair alloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * sizeof(u32);
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pair detach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate.  We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet.  It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair, or
	 * we succeeded in all cases.  Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones.  The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create.  We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create.  For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
		 */

		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about attach/detach event by the
 * given VM.  Returns payload size of datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
	 * number of pending events from the hypervisor to a given VM
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
	 * control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */

		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */

		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */

		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attached to a queue pair, it will supply
		 * the backing memory.  If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call.  In all cases, the
		 * initially created queue pair must not have any
		 * memory associated with it already.
		 */

		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */

			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */

		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, entry->qp.handle, context_id,
				   entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}
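/*
 * Summary of the attach outcomes above: a guest attach that supplies a
 * page store moves the pair to VMCIQPB_ATTACHED_MEM, a guest attach
 * without one (old-style VMX) moves it to VMCIQPB_ATTACHED_NO_MEM, and
 * a host attach requires VMCIQPB_CREATED_MEM and results in
 * VMCIQPB_ATTACHED_MEM.  The attach event is sent to the creator only
 * once the pair reaches VMCIQPB_ATTACHED_MEM.
 */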
/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
			   u32 peer,
			   u32 flags,
			   u32 priv_flags,
			   u64 produce_size,
			   u64 consume_size,
			   struct vmci_qp_page_store *page_store,
			   struct vmci_ctx *context,
			   vmci_event_release_cb wakeup_cb,
			   void *client_data,
			   struct qp_broker_entry **ent,
			   bool *swap)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool create;
	struct qp_broker_entry *entry = NULL;
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (vmci_handle_is_invalid(handle) ||
	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
	    !(produce_size || consume_size) ||
	    !context || context_id == VMCI_INVALID_ID ||
	    handle.context == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In the initial argument check, we ensure that non-vmkernel hosts
	 * are not allowed to create local queue pairs.
	 */

	mutex_lock(&qp_broker_list.mutex);

	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		mutex_unlock(&qp_broker_list.mutex);
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (handle.resource != VMCI_INVALID_ID)
		entry = qp_broker_handle_to_entry(handle);

	if (!entry) {
		create = true;
		result =
		    qp_broker_create(handle, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	} else {
		create = false;
		result =
		    qp_broker_attach(entry, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	}

	mutex_unlock(&qp_broker_list.mutex);

	if (swap)
		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
		    !(create && is_local);

	return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
			      struct vmci_queue **produce_q,
			      u64 produce_size,
			      struct vmci_queue **consume_q,
			      u64 consume_size,
			      u32 peer,
			      u32 flags,
			      u32 priv_flags,
			      vmci_event_release_cb wakeup_cb,
			      void *client_data)
{
	struct vmci_handle new_handle;
	struct vmci_ctx *context;
	struct qp_broker_entry *entry;
	int result;
	bool swap;

	if (vmci_handle_is_invalid(*handle)) {
		new_handle = vmci_make_handle(
			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
	} else
		new_handle = *handle;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
	entry = NULL;
	result =
	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
			    produce_size, consume_size, NULL, context,
			    wakeup_cb, client_data, &entry, &swap);
	if (result == VMCI_SUCCESS) {
		if (swap) {
			/*
			 * If this is a local queue pair, the attacher
			 * will swap around produce and consume
			 * queues.
1805 */ 1806 1807 *produce_q = entry->consume_q; 1808 *consume_q = entry->produce_q; 1809 } else { 1810 *produce_q = entry->produce_q; 1811 *consume_q = entry->consume_q; 1812 } 1813 1814 *handle = vmci_resource_handle(&entry->resource); 1815 } else { 1816 *handle = VMCI_INVALID_HANDLE; 1817 pr_devel("queue pair broker failed to alloc (result=%d)\n", 1818 result); 1819 } 1820 vmci_ctx_put(context); 1821 return result; 1822 } 1823 1824 /* 1825 * Allocates a VMCI queue_pair. Only checks validity of input 1826 * arguments. The real work is done in the host or guest 1827 * specific function. 1828 */ 1829 int vmci_qp_alloc(struct vmci_handle *handle, 1830 struct vmci_queue **produce_q, 1831 u64 produce_size, 1832 struct vmci_queue **consume_q, 1833 u64 consume_size, 1834 u32 peer, 1835 u32 flags, 1836 u32 priv_flags, 1837 bool guest_endpoint, 1838 vmci_event_release_cb wakeup_cb, 1839 void *client_data) 1840 { 1841 if (!handle || !produce_q || !consume_q || 1842 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) 1843 return VMCI_ERROR_INVALID_ARGS; 1844 1845 if (guest_endpoint) { 1846 return qp_alloc_guest_work(handle, produce_q, 1847 produce_size, consume_q, 1848 consume_size, peer, 1849 flags, priv_flags); 1850 } else { 1851 return qp_alloc_host_work(handle, produce_q, 1852 produce_size, consume_q, 1853 consume_size, peer, flags, 1854 priv_flags, wakeup_cb, client_data); 1855 } 1856 } 1857 1858 /* 1859 * This function implements the host kernel API for detaching from 1860 * a queue pair. 1861 */ 1862 static int qp_detatch_host_work(struct vmci_handle handle) 1863 { 1864 int result; 1865 struct vmci_ctx *context; 1866 1867 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1868 1869 result = vmci_qp_broker_detach(handle, context); 1870 1871 vmci_ctx_put(context); 1872 return result; 1873 } 1874 1875 /* 1876 * Detaches from a VMCI queue_pair. Only checks validity of input argument. 1877 * Real work is done in the host or guest specific function. 1878 */ 1879 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) 1880 { 1881 if (vmci_handle_is_invalid(handle)) 1882 return VMCI_ERROR_INVALID_ARGS; 1883 1884 if (guest_endpoint) 1885 return qp_detatch_guest_work(handle); 1886 else 1887 return qp_detatch_host_work(handle); 1888 } 1889 1890 /* 1891 * Returns the entry from the head of the list. Assumes that the list is 1892 * locked. 1893 */ 1894 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) 1895 { 1896 if (!list_empty(&qp_list->head)) { 1897 struct qp_entry *entry = 1898 list_first_entry(&qp_list->head, struct qp_entry, 1899 list_item); 1900 return entry; 1901 } 1902 1903 return NULL; 1904 } 1905 1906 void vmci_qp_broker_exit(void) 1907 { 1908 struct qp_entry *entry; 1909 struct qp_broker_entry *be; 1910 1911 mutex_lock(&qp_broker_list.mutex); 1912 1913 while ((entry = qp_list_get_head(&qp_broker_list))) { 1914 be = (struct qp_broker_entry *)entry; 1915 1916 qp_list_remove_entry(&qp_broker_list, entry); 1917 kfree(be); 1918 } 1919 1920 mutex_unlock(&qp_broker_list.mutex); 1921 } 1922 1923 /* 1924 * Requests that a queue pair be allocated with the VMCI queue 1925 * pair broker. Allocates a queue pair entry if one does not 1926 * exist. Attaches to one if it exists, and retrieves the page 1927 * files backing that queue_pair. Assumes that the queue pair 1928 * broker lock is held. 
1929 */ 1930 int vmci_qp_broker_alloc(struct vmci_handle handle, 1931 u32 peer, 1932 u32 flags, 1933 u32 priv_flags, 1934 u64 produce_size, 1935 u64 consume_size, 1936 struct vmci_qp_page_store *page_store, 1937 struct vmci_ctx *context) 1938 { 1939 return qp_broker_alloc(handle, peer, flags, priv_flags, 1940 produce_size, consume_size, 1941 page_store, context, NULL, NULL, NULL, NULL); 1942 } 1943 1944 /* 1945 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate 1946 * step to add the UVAs of the VMX mapping of the queue pair. This function 1947 * provides backwards compatibility with such VMX'en, and takes care of 1948 * registering the page store for a queue pair previously allocated by the 1949 * VMX during create or attach. This function will move the queue pair state 1950 * to either from VMCIQBP_CREATED_NO_MEM to VMCIQBP_CREATED_MEM or 1951 * VMCIQBP_ATTACHED_NO_MEM to VMCIQBP_ATTACHED_MEM. If moving to the 1952 * attached state with memory, the queue pair is ready to be used by the 1953 * host peer, and an attached event will be generated. 1954 * 1955 * Assumes that the queue pair broker lock is held. 1956 * 1957 * This function is only used by the hosted platform, since there is no 1958 * issue with backwards compatibility for vmkernel. 1959 */ 1960 int vmci_qp_broker_set_page_store(struct vmci_handle handle, 1961 u64 produce_uva, 1962 u64 consume_uva, 1963 struct vmci_ctx *context) 1964 { 1965 struct qp_broker_entry *entry; 1966 int result; 1967 const u32 context_id = vmci_ctx_get_id(context); 1968 1969 if (vmci_handle_is_invalid(handle) || !context || 1970 context_id == VMCI_INVALID_ID) 1971 return VMCI_ERROR_INVALID_ARGS; 1972 1973 /* 1974 * We only support guest to host queue pairs, so the VMX must 1975 * supply UVAs for the mapped page files. 1976 */ 1977 1978 if (produce_uva == 0 || consume_uva == 0) 1979 return VMCI_ERROR_INVALID_ARGS; 1980 1981 mutex_lock(&qp_broker_list.mutex); 1982 1983 if (!vmci_ctx_qp_exists(context, handle)) { 1984 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 1985 context_id, handle.context, handle.resource); 1986 result = VMCI_ERROR_NOT_FOUND; 1987 goto out; 1988 } 1989 1990 entry = qp_broker_handle_to_entry(handle); 1991 if (!entry) { 1992 result = VMCI_ERROR_NOT_FOUND; 1993 goto out; 1994 } 1995 1996 /* 1997 * If I'm the owner then I can set the page store. 1998 * 1999 * Or, if a host created the queue_pair and I'm the attached peer 2000 * then I can set the page store. 
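	 *
	 * Concretely, the check below lets the call through when
	 * context_id matches create_id, or when the creator is
	 * VMCI_HOST_CONTEXT_ID and context_id matches attach_id;
	 * everything else fails with VMCI_ERROR_QUEUEPAIR_NOTOWNER.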
2001 */ 2002 if (entry->create_id != context_id && 2003 (entry->create_id != VMCI_HOST_CONTEXT_ID || 2004 entry->attach_id != context_id)) { 2005 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; 2006 goto out; 2007 } 2008 2009 if (entry->state != VMCIQPB_CREATED_NO_MEM && 2010 entry->state != VMCIQPB_ATTACHED_NO_MEM) { 2011 result = VMCI_ERROR_UNAVAILABLE; 2012 goto out; 2013 } 2014 2015 result = qp_host_get_user_memory(produce_uva, consume_uva, 2016 entry->produce_q, entry->consume_q); 2017 if (result < VMCI_SUCCESS) 2018 goto out; 2019 2020 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2021 if (result < VMCI_SUCCESS) { 2022 qp_host_unregister_user_memory(entry->produce_q, 2023 entry->consume_q); 2024 goto out; 2025 } 2026 2027 if (entry->state == VMCIQPB_CREATED_NO_MEM) 2028 entry->state = VMCIQPB_CREATED_MEM; 2029 else 2030 entry->state = VMCIQPB_ATTACHED_MEM; 2031 2032 entry->vmci_page_files = true; 2033 2034 if (entry->state == VMCIQPB_ATTACHED_MEM) { 2035 result = 2036 qp_notify_peer(true, handle, context_id, entry->create_id); 2037 if (result < VMCI_SUCCESS) { 2038 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 2039 entry->create_id, entry->qp.handle.context, 2040 entry->qp.handle.resource); 2041 } 2042 } 2043 2044 result = VMCI_SUCCESS; 2045 out: 2046 mutex_unlock(&qp_broker_list.mutex); 2047 return result; 2048 } 2049 2050 /* 2051 * Resets saved queue headers for the given QP broker 2052 * entry. Should be used when guest memory becomes available 2053 * again, or the guest detaches. 2054 */ 2055 static void qp_reset_saved_headers(struct qp_broker_entry *entry) 2056 { 2057 entry->produce_q->saved_header = NULL; 2058 entry->consume_q->saved_header = NULL; 2059 } 2060 2061 /* 2062 * The main entry point for detaching from a queue pair registered with the 2063 * queue pair broker. If more than one endpoint is attached to the queue 2064 * pair, the first endpoint will mainly decrement a reference count and 2065 * generate a notification to its peer. The last endpoint will clean up 2066 * the queue pair state registered with the broker. 2067 * 2068 * When a guest endpoint detaches, it will unmap and unregister the guest 2069 * memory backing the queue pair. If the host is still attached, it will 2070 * no longer be able to access the queue pair content. 2071 * 2072 * If the queue pair is already in a state where there is no memory 2073 * registered for the queue pair (any *_NO_MEM state), it will transition to 2074 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest 2075 * endpoint is the first of two endpoints to detach. If the host endpoint is 2076 * the first out of two to detach, the queue pair will move to the 2077 * VMCIQPB_SHUTDOWN_MEM state. 
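 *
 * Summarizing the resulting state when the first of two endpoints
 * detaches:
 *
 *   detaching side   guest memory registered   new state
 *   guest            (any)                     VMCIQPB_SHUTDOWN_NO_MEM
 *   host             yes                       VMCIQPB_SHUTDOWN_MEM
 *   host             no                        VMCIQPB_SHUTDOWN_NO_MEM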
2078 */ 2079 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) 2080 { 2081 struct qp_broker_entry *entry; 2082 const u32 context_id = vmci_ctx_get_id(context); 2083 u32 peer_id; 2084 bool is_local = false; 2085 int result; 2086 2087 if (vmci_handle_is_invalid(handle) || !context || 2088 context_id == VMCI_INVALID_ID) { 2089 return VMCI_ERROR_INVALID_ARGS; 2090 } 2091 2092 mutex_lock(&qp_broker_list.mutex); 2093 2094 if (!vmci_ctx_qp_exists(context, handle)) { 2095 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2096 context_id, handle.context, handle.resource); 2097 result = VMCI_ERROR_NOT_FOUND; 2098 goto out; 2099 } 2100 2101 entry = qp_broker_handle_to_entry(handle); 2102 if (!entry) { 2103 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", 2104 context_id, handle.context, handle.resource); 2105 result = VMCI_ERROR_NOT_FOUND; 2106 goto out; 2107 } 2108 2109 if (context_id != entry->create_id && context_id != entry->attach_id) { 2110 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2111 goto out; 2112 } 2113 2114 if (context_id == entry->create_id) { 2115 peer_id = entry->attach_id; 2116 entry->create_id = VMCI_INVALID_ID; 2117 } else { 2118 peer_id = entry->create_id; 2119 entry->attach_id = VMCI_INVALID_ID; 2120 } 2121 entry->qp.ref_count--; 2122 2123 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2124 2125 if (context_id != VMCI_HOST_CONTEXT_ID) { 2126 bool headers_mapped; 2127 2128 /* 2129 * Pre NOVMVM vmx'en may detach from a queue pair 2130 * before setting the page store, and in that case 2131 * there is no user memory to detach from. Also, more 2132 * recent VMX'en may detach from a queue pair in the 2133 * quiesced state. 
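		 *
		 * In both of those cases no guest memory is registered,
		 * which is why the unmap and unregister below are guarded
		 * by QPBROKERSTATE_HAS_MEM(entry).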
2134 */ 2135 2136 qp_acquire_queue_mutex(entry->produce_q); 2137 headers_mapped = entry->produce_q->q_header || 2138 entry->consume_q->q_header; 2139 if (QPBROKERSTATE_HAS_MEM(entry)) { 2140 result = 2141 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, 2142 entry->produce_q, 2143 entry->consume_q); 2144 if (result < VMCI_SUCCESS) 2145 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2146 handle.context, handle.resource, 2147 result); 2148 2149 qp_host_unregister_user_memory(entry->produce_q, 2150 entry->consume_q); 2151 2152 } 2153 2154 if (!headers_mapped) 2155 qp_reset_saved_headers(entry); 2156 2157 qp_release_queue_mutex(entry->produce_q); 2158 2159 if (!headers_mapped && entry->wakeup_cb) 2160 entry->wakeup_cb(entry->client_data); 2161 2162 } else { 2163 if (entry->wakeup_cb) { 2164 entry->wakeup_cb = NULL; 2165 entry->client_data = NULL; 2166 } 2167 } 2168 2169 if (entry->qp.ref_count == 0) { 2170 qp_list_remove_entry(&qp_broker_list, &entry->qp); 2171 2172 if (is_local) 2173 kfree(entry->local_mem); 2174 2175 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 2176 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); 2177 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); 2178 /* Unlink from resource hash table and free callback */ 2179 vmci_resource_remove(&entry->resource); 2180 2181 kfree(entry); 2182 2183 vmci_ctx_qp_destroy(context, handle); 2184 } else { 2185 qp_notify_peer(false, handle, context_id, peer_id); 2186 if (context_id == VMCI_HOST_CONTEXT_ID && 2187 QPBROKERSTATE_HAS_MEM(entry)) { 2188 entry->state = VMCIQPB_SHUTDOWN_MEM; 2189 } else { 2190 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; 2191 } 2192 2193 if (!is_local) 2194 vmci_ctx_qp_destroy(context, handle); 2195 2196 } 2197 result = VMCI_SUCCESS; 2198 out: 2199 mutex_unlock(&qp_broker_list.mutex); 2200 return result; 2201 } 2202 2203 /* 2204 * Establishes the necessary mappings for a queue pair given a 2205 * reference to the queue pair guest memory. This is usually 2206 * called when a guest is unquiesced and the VMX is allowed to 2207 * map guest memory once again. 
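 *
 * A minimal sketch of how an unquiesce path might drive this (the
 * identifiers vmx_cid, qp_handle and guest_mem_va are illustrative
 * assumptions, not taken from an actual caller):
 *
 *	struct vmci_ctx *ctx = vmci_ctx_get(vmx_cid);
 *
 *	if (vmci_qp_broker_map(qp_handle, ctx, guest_mem_va) < VMCI_SUCCESS)
 *		pr_devel("remapping guest memory failed\n");
 *	vmci_ctx_put(ctx);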
 */
int vmci_qp_broker_map(struct vmci_handle handle,
		       struct vmci_ctx *context,
		       u64 guest_mem)
{
	struct qp_broker_entry *entry;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = false;
	int result;

	if (vmci_handle_is_invalid(handle) || !context ||
	    context_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	mutex_lock(&qp_broker_list.mutex);

	if (!vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	entry = qp_broker_handle_to_entry(handle);
	if (!entry) {
		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
			 context_id, handle.context, handle.resource);
		result = VMCI_ERROR_NOT_FOUND;
		goto out;
	}

	if (context_id != entry->create_id && context_id != entry->attach_id) {
		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
		goto out;
	}

	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
	result = VMCI_SUCCESS;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		struct vmci_qp_page_store page_store;

		page_store.pages = guest_mem;
		page_store.len = QPE_NUM_PAGES(entry->qp);

		qp_acquire_queue_mutex(entry->produce_q);
		qp_reset_saved_headers(entry);
		result = qp_host_register_user_memory(&page_store,
						      entry->produce_q,
						      entry->consume_q);
		qp_release_queue_mutex(entry->produce_q);
		if (result == VMCI_SUCCESS) {
			/* Move state from *_NO_MEM to *_MEM */

			entry->state++;

			if (entry->wakeup_cb)
				entry->wakeup_cb(entry->client_data);
		}
	}

 out:
	mutex_unlock(&qp_broker_list.mutex);
	return result;
}

/*
 * Saves a snapshot of the queue headers for the given QP broker
 * entry. Should be used when guest memory is unmapped.
 * Results:
 * VMCI_SUCCESS on success, appropriate error code if guest memory
 * can't be accessed.
 */
static int qp_save_headers(struct qp_broker_entry *entry)
{
	int result;

	if (entry->produce_q->saved_header != NULL &&
	    entry->consume_q->saved_header != NULL) {
		/*
		 * If the headers have already been saved, we don't need to do
		 * it again, and we don't want to map in the headers
		 * unnecessarily.
		 */

		return VMCI_SUCCESS;
	}

	if (NULL == entry->produce_q->q_header ||
	    NULL == entry->consume_q->q_header) {
		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
		if (result < VMCI_SUCCESS)
			return result;
	}

	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
	       sizeof(entry->saved_produce_q));
	entry->produce_q->saved_header = &entry->saved_produce_q;
	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
	       sizeof(entry->saved_consume_q));
	entry->consume_q->saved_header = &entry->saved_consume_q;

	return VMCI_SUCCESS;
}

/*
 * Removes all references to the guest memory of a given queue pair, and
 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
 * called when a VM is being quiesced where access to guest memory should
 * be avoided.
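 *
 * A sketch of the matching quiesce path (again illustrative only;
 * vmx_cid, qp_handle and mem_id are assumed names):
 *
 *	struct vmci_ctx *ctx = vmci_ctx_get(vmx_cid);
 *
 *	if (vmci_qp_broker_unmap(qp_handle, ctx, mem_id) < VMCI_SUCCESS)
 *		pr_devel("unmapping guest memory failed\n");
 *	vmci_ctx_put(ctx);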
2319 */ 2320 int vmci_qp_broker_unmap(struct vmci_handle handle, 2321 struct vmci_ctx *context, 2322 u32 gid) 2323 { 2324 struct qp_broker_entry *entry; 2325 const u32 context_id = vmci_ctx_get_id(context); 2326 bool is_local = false; 2327 int result; 2328 2329 if (vmci_handle_is_invalid(handle) || !context || 2330 context_id == VMCI_INVALID_ID) 2331 return VMCI_ERROR_INVALID_ARGS; 2332 2333 mutex_lock(&qp_broker_list.mutex); 2334 2335 if (!vmci_ctx_qp_exists(context, handle)) { 2336 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2337 context_id, handle.context, handle.resource); 2338 result = VMCI_ERROR_NOT_FOUND; 2339 goto out; 2340 } 2341 2342 entry = qp_broker_handle_to_entry(handle); 2343 if (!entry) { 2344 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2345 context_id, handle.context, handle.resource); 2346 result = VMCI_ERROR_NOT_FOUND; 2347 goto out; 2348 } 2349 2350 if (context_id != entry->create_id && context_id != entry->attach_id) { 2351 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2352 goto out; 2353 } 2354 2355 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2356 2357 if (context_id != VMCI_HOST_CONTEXT_ID) { 2358 qp_acquire_queue_mutex(entry->produce_q); 2359 result = qp_save_headers(entry); 2360 if (result < VMCI_SUCCESS) 2361 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2362 handle.context, handle.resource, result); 2363 2364 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); 2365 2366 /* 2367 * On hosted, when we unmap queue pairs, the VMX will also 2368 * unmap the guest memory, so we invalidate the previously 2369 * registered memory. If the queue pair is mapped again at a 2370 * later point in time, we will need to reregister the user 2371 * memory with a possibly new user VA. 2372 */ 2373 qp_host_unregister_user_memory(entry->produce_q, 2374 entry->consume_q); 2375 2376 /* 2377 * Move state from *_MEM to *_NO_MEM. 2378 */ 2379 entry->state--; 2380 2381 qp_release_queue_mutex(entry->produce_q); 2382 } 2383 2384 result = VMCI_SUCCESS; 2385 2386 out: 2387 mutex_unlock(&qp_broker_list.mutex); 2388 return result; 2389 } 2390 2391 /* 2392 * Destroys all guest queue pair endpoints. If active guest queue 2393 * pairs still exist, hypercalls to attempt detach from these 2394 * queue pairs will be made. Any failure to detach is silently 2395 * ignored. 2396 */ 2397 void vmci_qp_guest_endpoints_exit(void) 2398 { 2399 struct qp_entry *entry; 2400 struct qp_guest_endpoint *ep; 2401 2402 mutex_lock(&qp_guest_endpoints.mutex); 2403 2404 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { 2405 ep = (struct qp_guest_endpoint *)entry; 2406 2407 /* Don't make a hypercall for local queue_pairs. */ 2408 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) 2409 qp_detatch_hypercall(entry->handle); 2410 2411 /* We cannot fail the exit, so let's reset ref_count. */ 2412 entry->ref_count = 0; 2413 qp_list_remove_entry(&qp_guest_endpoints, entry); 2414 2415 qp_guest_endpoint_destroy(ep); 2416 } 2417 2418 mutex_unlock(&qp_guest_endpoints.mutex); 2419 } 2420 2421 /* 2422 * Helper routine that will lock the queue pair before subsequent 2423 * operations. 2424 * Note: Non-blocking on the host side is currently only implemented in ESX. 2425 * Since non-blocking isn't yet implemented on the host personality we 2426 * have no reason to acquire a spin lock. So to avoid the use of an 2427 * unnecessary lock only acquire the mutex if we can block. 
2428 */ 2429 static void qp_lock(const struct vmci_qp *qpair) 2430 { 2431 qp_acquire_queue_mutex(qpair->produce_q); 2432 } 2433 2434 /* 2435 * Helper routine that unlocks the queue pair after calling 2436 * qp_lock. 2437 */ 2438 static void qp_unlock(const struct vmci_qp *qpair) 2439 { 2440 qp_release_queue_mutex(qpair->produce_q); 2441 } 2442 2443 /* 2444 * The queue headers may not be mapped at all times. If a queue is 2445 * currently not mapped, it will be attempted to do so. 2446 */ 2447 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2448 struct vmci_queue *consume_q) 2449 { 2450 int result; 2451 2452 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2453 result = qp_host_map_queues(produce_q, consume_q); 2454 if (result < VMCI_SUCCESS) 2455 return (produce_q->saved_header && 2456 consume_q->saved_header) ? 2457 VMCI_ERROR_QUEUEPAIR_NOT_READY : 2458 VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2459 } 2460 2461 return VMCI_SUCCESS; 2462 } 2463 2464 /* 2465 * Helper routine that will retrieve the produce and consume 2466 * headers of a given queue pair. If the guest memory of the 2467 * queue pair is currently not available, the saved queue headers 2468 * will be returned, if these are available. 2469 */ 2470 static int qp_get_queue_headers(const struct vmci_qp *qpair, 2471 struct vmci_queue_header **produce_q_header, 2472 struct vmci_queue_header **consume_q_header) 2473 { 2474 int result; 2475 2476 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2477 if (result == VMCI_SUCCESS) { 2478 *produce_q_header = qpair->produce_q->q_header; 2479 *consume_q_header = qpair->consume_q->q_header; 2480 } else if (qpair->produce_q->saved_header && 2481 qpair->consume_q->saved_header) { 2482 *produce_q_header = qpair->produce_q->saved_header; 2483 *consume_q_header = qpair->consume_q->saved_header; 2484 result = VMCI_SUCCESS; 2485 } 2486 2487 return result; 2488 } 2489 2490 /* 2491 * Callback from VMCI queue pair broker indicating that a queue 2492 * pair that was previously not ready, now either is ready or 2493 * gone forever. 2494 */ 2495 static int qp_wakeup_cb(void *client_data) 2496 { 2497 struct vmci_qp *qpair = (struct vmci_qp *)client_data; 2498 2499 qp_lock(qpair); 2500 while (qpair->blocked > 0) { 2501 qpair->blocked--; 2502 qpair->generation++; 2503 wake_up(&qpair->event); 2504 } 2505 qp_unlock(qpair); 2506 2507 return VMCI_SUCCESS; 2508 } 2509 2510 /* 2511 * Makes the calling thread wait for the queue pair to become 2512 * ready for host side access. Returns true when thread is 2513 * woken up after queue pair state change, false otherwise. 2514 */ 2515 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) 2516 { 2517 unsigned int generation; 2518 2519 qpair->blocked++; 2520 generation = qpair->generation; 2521 qp_unlock(qpair); 2522 wait_event(qpair->event, generation != qpair->generation); 2523 qp_lock(qpair); 2524 2525 return true; 2526 } 2527 2528 /* 2529 * Enqueues a given buffer to the produce queue using the provided 2530 * function. As many bytes as possible (space available in the queue) 2531 * are enqueued. Assumes the queue->mutex has been acquired. Returns 2532 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue 2533 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the 2534 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if 2535 * an error occured when accessing the buffer, 2536 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't 2537 * available. 
Otherwise, the number of bytes written to the queue is 2538 * returned. Updates the tail pointer of the produce queue. 2539 */ 2540 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2541 struct vmci_queue *consume_q, 2542 const u64 produce_q_size, 2543 struct iov_iter *from) 2544 { 2545 s64 free_space; 2546 u64 tail; 2547 size_t buf_size = iov_iter_count(from); 2548 size_t written; 2549 ssize_t result; 2550 2551 result = qp_map_queue_headers(produce_q, consume_q); 2552 if (unlikely(result != VMCI_SUCCESS)) 2553 return result; 2554 2555 free_space = vmci_q_header_free_space(produce_q->q_header, 2556 consume_q->q_header, 2557 produce_q_size); 2558 if (free_space == 0) 2559 return VMCI_ERROR_QUEUEPAIR_NOSPACE; 2560 2561 if (free_space < VMCI_SUCCESS) 2562 return (ssize_t) free_space; 2563 2564 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2565 tail = vmci_q_header_producer_tail(produce_q->q_header); 2566 if (likely(tail + written < produce_q_size)) { 2567 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written); 2568 } else { 2569 /* Tail pointer wraps around. */ 2570 2571 const size_t tmp = (size_t) (produce_q_size - tail); 2572 2573 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp); 2574 if (result >= VMCI_SUCCESS) 2575 result = qp_memcpy_to_queue_iter(produce_q, 0, from, 2576 written - tmp); 2577 } 2578 2579 if (result < VMCI_SUCCESS) 2580 return result; 2581 2582 vmci_q_header_add_producer_tail(produce_q->q_header, written, 2583 produce_q_size); 2584 return written; 2585 } 2586 2587 /* 2588 * Dequeues data (if available) from the given consume queue. Writes data 2589 * to the user provided buffer using the provided function. 2590 * Assumes the queue->mutex has been acquired. 2591 * Results: 2592 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. 2593 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue 2594 * (as defined by the queue size). 2595 * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer. 2596 * Otherwise the number of bytes dequeued is returned. 2597 * Side effects: 2598 * Updates the head pointer of the consume queue. 2599 */ 2600 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2601 struct vmci_queue *consume_q, 2602 const u64 consume_q_size, 2603 struct iov_iter *to, 2604 bool update_consumer) 2605 { 2606 size_t buf_size = iov_iter_count(to); 2607 s64 buf_ready; 2608 u64 head; 2609 size_t read; 2610 ssize_t result; 2611 2612 result = qp_map_queue_headers(produce_q, consume_q); 2613 if (unlikely(result != VMCI_SUCCESS)) 2614 return result; 2615 2616 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, 2617 produce_q->q_header, 2618 consume_q_size); 2619 if (buf_ready == 0) 2620 return VMCI_ERROR_QUEUEPAIR_NODATA; 2621 2622 if (buf_ready < VMCI_SUCCESS) 2623 return (ssize_t) buf_ready; 2624 2625 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2626 head = vmci_q_header_consumer_head(produce_q->q_header); 2627 if (likely(head + read < consume_q_size)) { 2628 result = qp_memcpy_from_queue_iter(to, consume_q, head, read); 2629 } else { 2630 /* Head pointer wraps around. 
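		 * For example, with consume_q_size 4096, head 4000 and
		 * read 196, the first copy takes the 96 bytes up to the
		 * end of the queue and the second takes the remaining
		 * 100 bytes from offset 0.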
		 */

		const size_t tmp = (size_t) (consume_q_size - head);

		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
							   read - tmp);

	}

	if (result < VMCI_SUCCESS)
		return result;

	if (update_consumer)
		vmci_q_header_add_consumer_head(produce_q->q_header,
						read, consume_q_size);

	return read;
}

/*
 * vmci_qpair_alloc() - Allocates a queue pair.
 * @qpair: Pointer for the new vmci_qp struct.
 * @handle: Handle to track the resource.
 * @produce_qsize: Desired size of the producer queue.
 * @consume_qsize: Desired size of the consumer queue.
 * @peer: ContextID of the peer.
 * @flags: VMCI flags.
 * @priv_flags: VMCI privilege flags.
 *
 * This is the client interface for allocating the memory for a
 * vmci_qp structure and then attaching to the underlying
 * queue. If an error occurs allocating the memory for the
 * vmci_qp structure no attempt is made to attach. If an
 * error occurs attaching, then the structure is freed.
 */
int vmci_qpair_alloc(struct vmci_qp **qpair,
		     struct vmci_handle *handle,
		     u64 produce_qsize,
		     u64 consume_qsize,
		     u32 peer,
		     u32 flags,
		     u32 priv_flags)
{
	struct vmci_qp *my_qpair;
	int retval;
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
	enum vmci_route route;
	vmci_event_release_cb wakeup_cb;
	void *client_data;

	/*
	 * Restrict the size of a queuepair. The device already
	 * enforces a limit on the total amount of memory that can be
	 * allocated to queuepairs for a guest. However, we try to
	 * allocate this memory before we make the queuepair
	 * allocation hypercall. On Linux, we allocate each page
	 * separately, which means rather than fail, the guest will
	 * thrash while it tries to allocate, and will become
	 * increasingly unresponsive to the point where it appears to
	 * be hung. So we place a limit on the size of an individual
	 * queuepair here, and leave the device to enforce the
	 * restriction on total queuepair memory. (Note that this
	 * doesn't prevent all cases; a user with only this much
	 * physical memory could still get into trouble.) The error
	 * used by the device is NO_RESOURCES, so use that here too.
	 */

	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
		return VMCI_ERROR_NO_RESOURCES;

	retval = vmci_route(&src, &dst, false, &route);
	if (retval < VMCI_SUCCESS)
		route = vmci_guest_code_active() ?
2707 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2708 2709 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2710 pr_devel("NONBLOCK OR PINNED set"); 2711 return VMCI_ERROR_INVALID_ARGS; 2712 } 2713 2714 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); 2715 if (!my_qpair) 2716 return VMCI_ERROR_NO_MEM; 2717 2718 my_qpair->produce_q_size = produce_qsize; 2719 my_qpair->consume_q_size = consume_qsize; 2720 my_qpair->peer = peer; 2721 my_qpair->flags = flags; 2722 my_qpair->priv_flags = priv_flags; 2723 2724 wakeup_cb = NULL; 2725 client_data = NULL; 2726 2727 if (VMCI_ROUTE_AS_HOST == route) { 2728 my_qpair->guest_endpoint = false; 2729 if (!(flags & VMCI_QPFLAG_LOCAL)) { 2730 my_qpair->blocked = 0; 2731 my_qpair->generation = 0; 2732 init_waitqueue_head(&my_qpair->event); 2733 wakeup_cb = qp_wakeup_cb; 2734 client_data = (void *)my_qpair; 2735 } 2736 } else { 2737 my_qpair->guest_endpoint = true; 2738 } 2739 2740 retval = vmci_qp_alloc(handle, 2741 &my_qpair->produce_q, 2742 my_qpair->produce_q_size, 2743 &my_qpair->consume_q, 2744 my_qpair->consume_q_size, 2745 my_qpair->peer, 2746 my_qpair->flags, 2747 my_qpair->priv_flags, 2748 my_qpair->guest_endpoint, 2749 wakeup_cb, client_data); 2750 2751 if (retval < VMCI_SUCCESS) { 2752 kfree(my_qpair); 2753 return retval; 2754 } 2755 2756 *qpair = my_qpair; 2757 my_qpair->handle = *handle; 2758 2759 return retval; 2760 } 2761 EXPORT_SYMBOL_GPL(vmci_qpair_alloc); 2762 2763 /* 2764 * vmci_qpair_detach() - Detatches the client from a queue pair. 2765 * @qpair: Reference of a pointer to the qpair struct. 2766 * 2767 * This is the client interface for detaching from a VMCIQPair. 2768 * Note that this routine will free the memory allocated for the 2769 * vmci_qp structure too. 2770 */ 2771 int vmci_qpair_detach(struct vmci_qp **qpair) 2772 { 2773 int result; 2774 struct vmci_qp *old_qpair; 2775 2776 if (!qpair || !(*qpair)) 2777 return VMCI_ERROR_INVALID_ARGS; 2778 2779 old_qpair = *qpair; 2780 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); 2781 2782 /* 2783 * The guest can fail to detach for a number of reasons, and 2784 * if it does so, it will cleanup the entry (if there is one). 2785 * The host can fail too, but it won't cleanup the entry 2786 * immediately, it will do that later when the context is 2787 * freed. Either way, we need to release the qpair struct 2788 * here; there isn't much the caller can do, and we don't want 2789 * to leak. 2790 */ 2791 2792 memset(old_qpair, 0, sizeof(*old_qpair)); 2793 old_qpair->handle = VMCI_INVALID_HANDLE; 2794 old_qpair->peer = VMCI_INVALID_ID; 2795 kfree(old_qpair); 2796 *qpair = NULL; 2797 2798 return result; 2799 } 2800 EXPORT_SYMBOL_GPL(vmci_qpair_detach); 2801 2802 /* 2803 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. 2804 * @qpair: Pointer to the queue pair struct. 2805 * @producer_tail: Reference used for storing producer tail index. 2806 * @consumer_head: Reference used for storing the consumer head index. 2807 * 2808 * This is the client interface for getting the current indexes of the 2809 * QPair from the point of the view of the caller as the producer. 
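 *
 * As an example of the usual ring-buffer arithmetic, a producer_tail of
 * 100 and a consumer_head of 4000 on a 4096-byte produce queue mean the
 * producer has wrapped around, with (100 - 4000 + 4096) = 196 bytes
 * enqueued and not yet consumed.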
2810 */ 2811 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, 2812 u64 *producer_tail, 2813 u64 *consumer_head) 2814 { 2815 struct vmci_queue_header *produce_q_header; 2816 struct vmci_queue_header *consume_q_header; 2817 int result; 2818 2819 if (!qpair) 2820 return VMCI_ERROR_INVALID_ARGS; 2821 2822 qp_lock(qpair); 2823 result = 2824 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2825 if (result == VMCI_SUCCESS) 2826 vmci_q_header_get_pointers(produce_q_header, consume_q_header, 2827 producer_tail, consumer_head); 2828 qp_unlock(qpair); 2829 2830 if (result == VMCI_SUCCESS && 2831 ((producer_tail && *producer_tail >= qpair->produce_q_size) || 2832 (consumer_head && *consumer_head >= qpair->produce_q_size))) 2833 return VMCI_ERROR_INVALID_SIZE; 2834 2835 return result; 2836 } 2837 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); 2838 2839 /* 2840 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. 2841 * @qpair: Pointer to the queue pair struct. 2842 * @consumer_tail: Reference used for storing consumer tail index. 2843 * @producer_head: Reference used for storing the producer head index. 2844 * 2845 * This is the client interface for getting the current indexes of the 2846 * QPair from the point of the view of the caller as the consumer. 2847 */ 2848 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, 2849 u64 *consumer_tail, 2850 u64 *producer_head) 2851 { 2852 struct vmci_queue_header *produce_q_header; 2853 struct vmci_queue_header *consume_q_header; 2854 int result; 2855 2856 if (!qpair) 2857 return VMCI_ERROR_INVALID_ARGS; 2858 2859 qp_lock(qpair); 2860 result = 2861 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2862 if (result == VMCI_SUCCESS) 2863 vmci_q_header_get_pointers(consume_q_header, produce_q_header, 2864 consumer_tail, producer_head); 2865 qp_unlock(qpair); 2866 2867 if (result == VMCI_SUCCESS && 2868 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || 2869 (producer_head && *producer_head >= qpair->consume_q_size))) 2870 return VMCI_ERROR_INVALID_SIZE; 2871 2872 return result; 2873 } 2874 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); 2875 2876 /* 2877 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. 2878 * @qpair: Pointer to the queue pair struct. 2879 * 2880 * This is the client interface for getting the amount of free 2881 * space in the QPair from the point of the view of the caller as 2882 * the producer which is the common case. Returns < 0 if err, else 2883 * available bytes into which data can be enqueued if > 0. 2884 */ 2885 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) 2886 { 2887 struct vmci_queue_header *produce_q_header; 2888 struct vmci_queue_header *consume_q_header; 2889 s64 result; 2890 2891 if (!qpair) 2892 return VMCI_ERROR_INVALID_ARGS; 2893 2894 qp_lock(qpair); 2895 result = 2896 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2897 if (result == VMCI_SUCCESS) 2898 result = vmci_q_header_free_space(produce_q_header, 2899 consume_q_header, 2900 qpair->produce_q_size); 2901 else 2902 result = 0; 2903 2904 qp_unlock(qpair); 2905 2906 return result; 2907 } 2908 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); 2909 2910 /* 2911 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. 2912 * @qpair: Pointer to the queue pair struct. 
2913 * 2914 * This is the client interface for getting the amount of free 2915 * space in the QPair from the point of the view of the caller as 2916 * the consumer which is not the common case. Returns < 0 if err, else 2917 * available bytes into which data can be enqueued if > 0. 2918 */ 2919 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) 2920 { 2921 struct vmci_queue_header *produce_q_header; 2922 struct vmci_queue_header *consume_q_header; 2923 s64 result; 2924 2925 if (!qpair) 2926 return VMCI_ERROR_INVALID_ARGS; 2927 2928 qp_lock(qpair); 2929 result = 2930 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2931 if (result == VMCI_SUCCESS) 2932 result = vmci_q_header_free_space(consume_q_header, 2933 produce_q_header, 2934 qpair->consume_q_size); 2935 else 2936 result = 0; 2937 2938 qp_unlock(qpair); 2939 2940 return result; 2941 } 2942 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); 2943 2944 /* 2945 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from 2946 * producer queue. 2947 * @qpair: Pointer to the queue pair struct. 2948 * 2949 * This is the client interface for getting the amount of 2950 * enqueued data in the QPair from the point of the view of the 2951 * caller as the producer which is not the common case. Returns < 0 if err, 2952 * else available bytes that may be read. 2953 */ 2954 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) 2955 { 2956 struct vmci_queue_header *produce_q_header; 2957 struct vmci_queue_header *consume_q_header; 2958 s64 result; 2959 2960 if (!qpair) 2961 return VMCI_ERROR_INVALID_ARGS; 2962 2963 qp_lock(qpair); 2964 result = 2965 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2966 if (result == VMCI_SUCCESS) 2967 result = vmci_q_header_buf_ready(produce_q_header, 2968 consume_q_header, 2969 qpair->produce_q_size); 2970 else 2971 result = 0; 2972 2973 qp_unlock(qpair); 2974 2975 return result; 2976 } 2977 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); 2978 2979 /* 2980 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from 2981 * consumer queue. 2982 * @qpair: Pointer to the queue pair struct. 2983 * 2984 * This is the client interface for getting the amount of 2985 * enqueued data in the QPair from the point of the view of the 2986 * caller as the consumer which is the normal case. Returns < 0 if err, 2987 * else available bytes that may be read. 2988 */ 2989 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) 2990 { 2991 struct vmci_queue_header *produce_q_header; 2992 struct vmci_queue_header *consume_q_header; 2993 s64 result; 2994 2995 if (!qpair) 2996 return VMCI_ERROR_INVALID_ARGS; 2997 2998 qp_lock(qpair); 2999 result = 3000 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3001 if (result == VMCI_SUCCESS) 3002 result = vmci_q_header_buf_ready(consume_q_header, 3003 produce_q_header, 3004 qpair->consume_q_size); 3005 else 3006 result = 0; 3007 3008 qp_unlock(qpair); 3009 3010 return result; 3011 } 3012 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); 3013 3014 /* 3015 * vmci_qpair_enqueue() - Throw data on the queue. 3016 * @qpair: Pointer to the queue pair struct. 3017 * @buf: Pointer to buffer containing data 3018 * @buf_size: Length of buffer. 3019 * @buf_type: Buffer type (Unused). 3020 * 3021 * This is the client interface for enqueueing data into the queue. 3022 * Returns number of bytes enqueued or < 0 on error. 
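 *
 * A minimal usage sketch (error handling trimmed; the qpair is assumed
 * to have been set up with vmci_qpair_alloc()):
 *
 *	char msg[] = "ping";
 *	ssize_t sent = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
 *
 *	if (sent < VMCI_SUCCESS)
 *		pr_devel("enqueue failed: %zd\n", sent);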
3023 */ 3024 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, 3025 const void *buf, 3026 size_t buf_size, 3027 int buf_type) 3028 { 3029 ssize_t result; 3030 struct iov_iter from; 3031 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size}; 3032 3033 if (!qpair || !buf) 3034 return VMCI_ERROR_INVALID_ARGS; 3035 3036 iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size); 3037 3038 qp_lock(qpair); 3039 3040 do { 3041 result = qp_enqueue_locked(qpair->produce_q, 3042 qpair->consume_q, 3043 qpair->produce_q_size, 3044 &from); 3045 3046 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3047 !qp_wait_for_ready_queue(qpair)) 3048 result = VMCI_ERROR_WOULD_BLOCK; 3049 3050 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3051 3052 qp_unlock(qpair); 3053 3054 return result; 3055 } 3056 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); 3057 3058 /* 3059 * vmci_qpair_dequeue() - Get data from the queue. 3060 * @qpair: Pointer to the queue pair struct. 3061 * @buf: Pointer to buffer for the data 3062 * @buf_size: Length of buffer. 3063 * @buf_type: Buffer type (Unused). 3064 * 3065 * This is the client interface for dequeueing data from the queue. 3066 * Returns number of bytes dequeued or < 0 on error. 3067 */ 3068 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, 3069 void *buf, 3070 size_t buf_size, 3071 int buf_type) 3072 { 3073 ssize_t result; 3074 struct iov_iter to; 3075 struct kvec v = {.iov_base = buf, .iov_len = buf_size}; 3076 3077 if (!qpair || !buf) 3078 return VMCI_ERROR_INVALID_ARGS; 3079 3080 iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size); 3081 3082 qp_lock(qpair); 3083 3084 do { 3085 result = qp_dequeue_locked(qpair->produce_q, 3086 qpair->consume_q, 3087 qpair->consume_q_size, 3088 &to, true); 3089 3090 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3091 !qp_wait_for_ready_queue(qpair)) 3092 result = VMCI_ERROR_WOULD_BLOCK; 3093 3094 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3095 3096 qp_unlock(qpair); 3097 3098 return result; 3099 } 3100 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); 3101 3102 /* 3103 * vmci_qpair_peek() - Peek at the data in the queue. 3104 * @qpair: Pointer to the queue pair struct. 3105 * @buf: Pointer to buffer for the data 3106 * @buf_size: Length of buffer. 3107 * @buf_type: Buffer type (Unused on Linux). 3108 * 3109 * This is the client interface for peeking into a queue. (I.e., 3110 * copy data from the queue without updating the head pointer.) 3111 * Returns number of bytes dequeued or < 0 on error. 3112 */ 3113 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, 3114 void *buf, 3115 size_t buf_size, 3116 int buf_type) 3117 { 3118 struct iov_iter to; 3119 struct kvec v = {.iov_base = buf, .iov_len = buf_size}; 3120 ssize_t result; 3121 3122 if (!qpair || !buf) 3123 return VMCI_ERROR_INVALID_ARGS; 3124 3125 iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size); 3126 3127 qp_lock(qpair); 3128 3129 do { 3130 result = qp_dequeue_locked(qpair->produce_q, 3131 qpair->consume_q, 3132 qpair->consume_q_size, 3133 &to, false); 3134 3135 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3136 !qp_wait_for_ready_queue(qpair)) 3137 result = VMCI_ERROR_WOULD_BLOCK; 3138 3139 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3140 3141 qp_unlock(qpair); 3142 3143 return result; 3144 } 3145 EXPORT_SYMBOL_GPL(vmci_qpair_peek); 3146 3147 /* 3148 * vmci_qpair_enquev() - Throw data on the queue using iov. 3149 * @qpair: Pointer to the queue pair struct. 3150 * @iov: Pointer to buffer containing data 3151 * @iov_size: Length of buffer. 
3152 * @buf_type: Buffer type (Unused). 3153 * 3154 * This is the client interface for enqueueing data into the queue. 3155 * This function uses IO vectors to handle the work. Returns number 3156 * of bytes enqueued or < 0 on error. 3157 */ 3158 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 3159 struct msghdr *msg, 3160 size_t iov_size, 3161 int buf_type) 3162 { 3163 ssize_t result; 3164 3165 if (!qpair) 3166 return VMCI_ERROR_INVALID_ARGS; 3167 3168 qp_lock(qpair); 3169 3170 do { 3171 result = qp_enqueue_locked(qpair->produce_q, 3172 qpair->consume_q, 3173 qpair->produce_q_size, 3174 &msg->msg_iter); 3175 3176 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3177 !qp_wait_for_ready_queue(qpair)) 3178 result = VMCI_ERROR_WOULD_BLOCK; 3179 3180 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3181 3182 qp_unlock(qpair); 3183 3184 return result; 3185 } 3186 EXPORT_SYMBOL_GPL(vmci_qpair_enquev); 3187 3188 /* 3189 * vmci_qpair_dequev() - Get data from the queue using iov. 3190 * @qpair: Pointer to the queue pair struct. 3191 * @iov: Pointer to buffer for the data 3192 * @iov_size: Length of buffer. 3193 * @buf_type: Buffer type (Unused). 3194 * 3195 * This is the client interface for dequeueing data from the queue. 3196 * This function uses IO vectors to handle the work. Returns number 3197 * of bytes dequeued or < 0 on error. 3198 */ 3199 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 3200 struct msghdr *msg, 3201 size_t iov_size, 3202 int buf_type) 3203 { 3204 ssize_t result; 3205 3206 if (!qpair) 3207 return VMCI_ERROR_INVALID_ARGS; 3208 3209 qp_lock(qpair); 3210 3211 do { 3212 result = qp_dequeue_locked(qpair->produce_q, 3213 qpair->consume_q, 3214 qpair->consume_q_size, 3215 &msg->msg_iter, true); 3216 3217 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3218 !qp_wait_for_ready_queue(qpair)) 3219 result = VMCI_ERROR_WOULD_BLOCK; 3220 3221 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3222 3223 qp_unlock(qpair); 3224 3225 return result; 3226 } 3227 EXPORT_SYMBOL_GPL(vmci_qpair_dequev); 3228 3229 /* 3230 * vmci_qpair_peekv() - Peek at the data in the queue using iov. 3231 * @qpair: Pointer to the queue pair struct. 3232 * @iov: Pointer to buffer for the data 3233 * @iov_size: Length of buffer. 3234 * @buf_type: Buffer type (Unused on Linux). 3235 * 3236 * This is the client interface for peeking into a queue. (I.e., 3237 * copy data from the queue without updating the head pointer.) 3238 * This function uses IO vectors to handle the work. Returns number 3239 * of bytes peeked or < 0 on error. 3240 */ 3241 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, 3242 struct msghdr *msg, 3243 size_t iov_size, 3244 int buf_type) 3245 { 3246 ssize_t result; 3247 3248 if (!qpair) 3249 return VMCI_ERROR_INVALID_ARGS; 3250 3251 qp_lock(qpair); 3252 3253 do { 3254 result = qp_dequeue_locked(qpair->produce_q, 3255 qpair->consume_q, 3256 qpair->consume_q_size, 3257 &msg->msg_iter, false); 3258 3259 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3260 !qp_wait_for_ready_queue(qpair)) 3261 result = VMCI_ERROR_WOULD_BLOCK; 3262 3263 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3264 3265 qp_unlock(qpair); 3266 return result; 3267 } 3268 EXPORT_SYMBOL_GPL(vmci_qpair_peekv); 3269
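
/*
 * An end-to-end sketch of the exported vmci_qpair_* interface, for
 * illustration only: the peer context ID (peer_cid), the queue sizes
 * and the flag values are assumptions, and error handling beyond the
 * basic checks is omitted.
 *
 *	struct vmci_qp *qpair = NULL;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	char out[] = "hello", in[16];
 *	ssize_t n;
 *
 *	if (vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
 *			     0, VMCI_NO_PRIVILEGE_FLAGS) < VMCI_SUCCESS)
 *		return;
 *
 *	Send something to the peer:
 *		n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
 *
 *	Pick up whatever the peer has produced for us:
 *		n = vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
 *
 *	vmci_qpair_detach(&qpair);
 */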