/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM, which use specialized
 * VMCI page files in the VMX and support VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                             |  |
 *           \_/                           \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |    |                           |
 *            |    o----------------------o    |
 *            |                            |   |
 *           \_/                          \_/ \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *     paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM state, if the guest memory is currently mapped, or
 * the VMCIQPB_SHUTDOWN_NO_MEM state, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * a *_MEM state to a *_NO_MEM state. The VMX may later map the memory once
 * more, in which case the queue pair will transition from the *_NO_MEM state
 * at that point back to the *_MEM state. Note that the *_NO_MEM state may have
 * changed, since the peer may have either attached or detached in the
 * meantime. The values are laid out such that ++ on a state will move from a
 * *_NO_MEM to a *_MEM state, and vice versa.
 */
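
/*
 * For illustration only (a hypothetical sketch, not code used by this
 * driver): because the qp_broker_state enum below places each *_NO_MEM
 * value directly before its *_MEM counterpart, code that maps or unmaps
 * guest memory can in principle step between the two variants
 * arithmetically, e.g.
 *
 *	if (entry->state == VMCIQPB_ATTACHED_NO_MEM)
 *		entry->state++;		becomes VMCIQPB_ATTACHED_MEM
 *
 * This only illustrates the layout property described above; the real
 * transitions are made explicitly in the create/attach/set_page_store
 * paths in this file.
 */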

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
	struct mutex __mutex;	/* Protects the queue. */
	struct mutex *mutex;	/* Shared by producer and consumer queues. */
	size_t num_pages;	/* Number of pages incl. header. */
	bool host;		/* Host or guest? */
	union {
		struct {
			dma_addr_t *pas;
			void **vas;
		} g;		/* Used by the guest. */
		struct {
			struct page **page;
			struct page **header_page;
		} h;		/* Used by the host. */
	} u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
	struct vmci_handle handle;
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	u64 produce_q_size;
	u64 consume_q_size;
	u32 peer;
	u32 flags;
	u32 priv_flags;
	bool guest_endpoint;
	unsigned int blocked;
	unsigned int generation;
	wait_queue_head_t event;
};

enum qp_broker_state {
	VMCIQPB_NEW,
	VMCIQPB_CREATED_NO_MEM,
	VMCIQPB_CREATED_MEM,
	VMCIQPB_ATTACHED_NO_MEM,
	VMCIQPB_ATTACHED_MEM,
	VMCIQPB_SHUTDOWN_NO_MEM,
	VMCIQPB_SHUTDOWN_MEM,
	VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
	struct list_head list_item;
	struct vmci_handle handle;
	u32 peer;
	u32 flags;
	u64 produce_size;
	u64 consume_size;
	u32 ref_count;
};

struct qp_broker_entry {
	struct vmci_resource resource;
	struct qp_entry qp;
	u32 create_id;
	u32 attach_id;
	enum qp_broker_state state;
	bool require_trusted_attach;
	bool created_by_trusted;
	bool vmci_page_files;	/* Created by VMX using VMCI page files */
	struct vmci_queue *produce_q;
	struct vmci_queue *consume_q;
	struct vmci_queue_header saved_produce_q;
	struct vmci_queue_header saved_consume_q;
	vmci_event_release_cb wakeup_cb;
	void *client_data;
	void *local_mem;	/* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
	struct vmci_resource resource;
	struct qp_entry qp;
	u64 num_ppns;
	void *produce_q;
	void *consume_q;
	struct ppn_set ppn_set;
};

struct qp_list {
	struct list_head head;
	struct mutex mutex;	/* Protect queue list. */
};

static struct qp_list qp_broker_list = {
	.head = LIST_HEAD_INIT(qp_broker_list.head),
	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
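
/*
 * A worked example of the page accounting above (illustrative numbers, not
 * taken from this file): with PAGE_SIZE = 4 KiB, an entry with
 * produce_size = 256 KiB and consume_size = 64 KiB needs
 *
 *	DIV_ROUND_UP(256 KiB, 4 KiB) = 64 data pages for the produce queue,
 *	DIV_ROUND_UP(64 KiB, 4 KiB)  = 16 data pages for the consume queue,
 *	plus 2 header pages (one per queue),
 *
 * so QPE_NUM_PAGES() evaluates to 64 + 16 + 2 = 82.
 */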

/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		u64 i;

		/* Given size does not include header, so add in a page here. */
		for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
			dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
					  queue->kernel_if->u.g.vas[i],
					  queue->kernel_if->u.g.pas[i]);
		}

		vfree(queue);
	}
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
	u64 i;
	struct vmci_queue *queue;
	size_t pas_size;
	size_t vas_size;
	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
	u64 num_pages;

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages >
		 (SIZE_MAX - queue_size) /
		 (sizeof(*queue->kernel_if->u.g.pas) +
		  sizeof(*queue->kernel_if->u.g.vas)))
		return NULL;

	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
	queue_size += pas_size + vas_size;

	queue = vmalloc(queue_size);
	if (!queue)
		return NULL;

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
	queue->kernel_if->mutex = NULL;
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
	queue->kernel_if->u.g.vas =
		(void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
	queue->kernel_if->host = false;

	for (i = 0; i < num_pages; i++) {
		queue->kernel_if->u.g.vas[i] =
			dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
					   &queue->kernel_if->u.g.pas[i],
					   GFP_KERNEL);
		if (!queue->kernel_if->u.g.vas[i]) {
			/* Size excl. the header. */
			qp_free_queue(queue, i * PAGE_SIZE);
			return NULL;
		}
	}

	/* Queue header is the first page. */
	queue->q_header = queue->kernel_if->u.g.vas[0];

	return queue;
}

/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
				   u64 queue_offset,
				   struct iov_iter *from,
				   size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up from this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
					 from)) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}
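
/*
 * Illustrative only: a caller with data in a kernel buffer could feed it to
 * qp_memcpy_to_queue_iter() through an iov_iter built over a kvec, roughly
 * as sketched below. The buffer name and length are made up, and the
 * iov_iter_kvec() argument list varies between kernel versions, so treat
 * this as a shape rather than copy-paste code:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter from;
 *
 *	iov_iter_kvec(&from, WRITE, &kv, 1, len);
 *	result = qp_memcpy_to_queue_iter(produce_q, write_offset, &from, len);
 *
 * The qpair enqueue/dequeue wrappers later in this file build their
 * iterators in a similar way.
 */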

/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
				     const struct vmci_queue *queue,
				     u64 queue_offset, size_t size)
{
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const u64 page_index =
			(queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
		int err;

		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
		else
			va = kernel_if->u.g.vas[page_index + 1];
			/* Skip header. */

		if (size - bytes_copied > PAGE_SIZE - page_offset)
			/* Enough payload to fill up this page. */
			to_copy = PAGE_SIZE - page_offset;
		else
			to_copy = size - bytes_copied;

		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
		if (err != to_copy) {
			if (kernel_if->host)
				kunmap(kernel_if->u.h.page[page_index]);
			return VMCI_ERROR_INVALID_ARGS;
		}
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
			    u64 num_produce_pages,
			    void *cons_q,
			    u64 num_consume_pages, struct ppn_set *ppn_set)
{
	u64 *produce_ppns;
	u64 *consume_ppns;
	struct vmci_queue *produce_q = prod_q;
	struct vmci_queue *consume_q = cons_q;
	u64 i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return VMCI_ERROR_INVALID_ARGS;

	if (ppn_set->initialized)
		return VMCI_ERROR_ALREADY_EXISTS;

	produce_ppns =
	    kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
			  GFP_KERNEL);
	if (!produce_ppns)
		return VMCI_ERROR_NO_MEM;

	consume_ppns =
	    kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
			  GFP_KERNEL);
	if (!consume_ppns) {
		kfree(produce_ppns);
		return VMCI_ERROR_NO_MEM;
	}

	for (i = 0; i < num_produce_pages; i++)
		produce_ppns[i] =
			produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	for (i = 0; i < num_consume_pages; i++)
		consume_ppns[i] =
			consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return VMCI_SUCCESS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		kfree(ppn_set->produce_ppns);
		kfree(ppn_set->consume_ppns);
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}
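
/*
 * For illustration (made-up address, assuming 4 KiB pages, PAGE_SHIFT = 12):
 * a queue data page whose DMA address is 0x0000000123456000 yields
 *
 *	ppn = 0x0000000123456000 >> 12 = 0x123456
 *
 * which is the page frame number handed to the hypervisor in the PPN lists
 * built by qp_alloc_ppn_set() above.
 */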

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
	if (vmci_use_ppn64()) {
		memcpy(call_buf, ppn_set->produce_ppns,
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns));
		memcpy(call_buf +
		       ppn_set->num_produce_pages *
		       sizeof(*ppn_set->produce_ppns),
		       ppn_set->consume_ppns,
		       ppn_set->num_consume_pages *
		       sizeof(*ppn_set->consume_ppns));
	} else {
		int i;
		u32 *ppns = (u32 *) call_buf;

		for (i = 0; i < ppn_set->num_produce_pages; i++)
			ppns[i] = (u32) ppn_set->produce_ppns[i];

		ppns = &ppns[ppn_set->num_produce_pages];

		for (i = 0; i < ppn_set->num_consume_pages; i++)
			ppns[i] = (u32) ppn_set->consume_ppns[i];
	}

	return VMCI_SUCCESS;
}

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
	struct vmci_queue *queue;
	size_t queue_page_size;
	u64 num_pages;
	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

	if (size > SIZE_MAX - PAGE_SIZE)
		return NULL;
	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
	if (num_pages > (SIZE_MAX - queue_size) /
		 sizeof(*queue->kernel_if->u.h.page))
		return NULL;

	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
	if (queue) {
		queue->q_header = NULL;
		queue->saved_header = NULL;
		queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
		queue->kernel_if->host = true;
		queue->kernel_if->mutex = NULL;
		queue->kernel_if->num_pages = num_pages;
		queue->kernel_if->u.h.header_page =
		    (struct page **)((u8 *)queue + queue_size);
		queue->kernel_if->u.h.page =
			&queue->kernel_if->u.h.header_page[1];
	}

	return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
	kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues. This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it's only any good if the mutexes
 * are actually acquired. Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	/*
	 * Only the host queue has shared state - the guest queues do not
	 * need to synchronize access using a queue mutex.
	 */

	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
		mutex_init(produce_q->kernel_if->mutex);
	}
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	if (produce_q->kernel_if->host) {
		produce_q->kernel_if->mutex = NULL;
		consume_q->kernel_if->mutex = NULL;
	}
}
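
/*
 * Illustrative usage of the shared mutex (hypothetical caller, not code
 * from this file): since both queues point at the producer's __mutex, a
 * host-side path that touches either header typically brackets the access
 * like
 *
 *	qp_acquire_queue_mutex(produce_q);
 *	... read or update q_header fields of either queue ...
 *	qp_release_queue_mutex(produce_q);
 *
 * Passing consume_q instead would lock the same underlying mutex.
 */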

/*
 * Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex. So, only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex. So, only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
	if (queue->kernel_if->host)
		mutex_unlock(queue->kernel_if->mutex);
}

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
			     u64 num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		put_page(pages[i]);
		pages[i] = NULL;
	}
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
				   u64 consume_uva,
				   struct vmci_queue *produce_q,
				   struct vmci_queue *consume_q)
{
	int retval;
	int err = VMCI_SUCCESS;

	retval = get_user_pages_fast((uintptr_t) produce_uva,
				     produce_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     produce_q->kernel_if->u.h.header_page);
	if (retval < (int)produce_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
			 retval);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 retval, false);
		err = VMCI_ERROR_NO_MEM;
		goto out;
	}

	retval = get_user_pages_fast((uintptr_t) consume_uva,
				     consume_q->kernel_if->num_pages,
				     FOLL_WRITE,
				     consume_q->kernel_if->u.h.header_page);
	if (retval < (int)consume_q->kernel_if->num_pages) {
		pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
			 retval);
		qp_release_pages(consume_q->kernel_if->u.h.header_page,
				 retval, false);
		qp_release_pages(produce_q->kernel_if->u.h.header_page,
				 produce_q->kernel_if->num_pages, false);
		err = VMCI_ERROR_NO_MEM;
	}

 out:
	return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
					struct vmci_queue *produce_q,
					struct vmci_queue *consume_q)
{
	u64 produce_uva;
	u64 consume_uva;

	/*
	 * The new style and the old style mapping only differ in
	 * that we either get a single or two UVAs, so we split the
	 * single UVA range at the appropriate spot.
	 */
	produce_uva = page_store->pages;
	consume_uva = page_store->pages +
	    produce_q->kernel_if->num_pages * PAGE_SIZE;
	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
				       consume_q);
}
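
/*
 * A worked example of the UVA split above (made-up numbers, 4 KiB pages):
 * if the VMX hands us a single page_store->pages base of 0x7f0000000000 and
 * the produce queue spans 65 pages (64 data pages + 1 header page), then
 *
 *	produce_uva = 0x7f0000000000
 *	consume_uva = 0x7f0000000000 + 65 * 4096 = 0x7f0000041000
 *
 * i.e. the consume queue's pages simply follow the produce queue's pages in
 * one contiguous user mapping.
 */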

/*
 * Releases and removes the references to user pages stored in the attach
 * struct. Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
					   struct vmci_queue *consume_q)
{
	qp_release_pages(produce_q->kernel_if->u.h.header_page,
			 produce_q->kernel_if->num_pages, true);
	memset(produce_q->kernel_if->u.h.header_page, 0,
	       sizeof(*produce_q->kernel_if->u.h.header_page) *
	       produce_q->kernel_if->num_pages);
	qp_release_pages(consume_q->kernel_if->u.h.header_page,
			 consume_q->kernel_if->num_pages, true);
	memset(consume_q->kernel_if->u.h.header_page, 0,
	       sizeof(*consume_q->kernel_if->u.h.header_page) *
	       consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
			      struct vmci_queue *consume_q)
{
	int result;

	if (!produce_q->q_header || !consume_q->q_header) {
		struct page *headers[2];

		if (produce_q->q_header != consume_q->q_header)
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;

		if (produce_q->kernel_if->u.h.header_page == NULL ||
		    *produce_q->kernel_if->u.h.header_page == NULL)
			return VMCI_ERROR_UNAVAILABLE;

		headers[0] = *produce_q->kernel_if->u.h.header_page;
		headers[1] = *consume_q->kernel_if->u.h.header_page;

		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
		if (produce_q->q_header != NULL) {
			consume_q->q_header =
			    (struct vmci_queue_header *)((u8 *)
							 produce_q->q_header +
							 PAGE_SIZE);
			result = VMCI_SUCCESS;
		} else {
			pr_warn("vmap failed\n");
			result = VMCI_ERROR_NO_MEM;
		}
	} else {
		result = VMCI_SUCCESS;
	}

	return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
				struct vmci_queue *produce_q,
				struct vmci_queue *consume_q)
{
	if (produce_q->q_header) {
		if (produce_q->q_header < consume_q->q_header)
			vunmap(produce_q->q_header);
		else
			vunmap(consume_q->q_header);

		produce_q->q_header = NULL;
		consume_q->q_header = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
				     struct vmci_handle handle)
{
	struct qp_entry *entry;

	if (vmci_handle_is_invalid(handle))
		return NULL;

	list_for_each_entry(entry, &qp_list->head, list_item) {
		if (vmci_handle_is_equal(entry->handle, handle))
			return entry;
	}

	return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

	entry = qp ? container_of(
		qp, struct qp_guest_endpoint, qp) : NULL;
	return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
	struct qp_broker_entry *entry;
	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

	entry = qp ? container_of(
		qp, struct qp_broker_entry, qp) : NULL;
	return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
	u32 context_id = vmci_get_context_id();
	struct vmci_event_qp ev;

	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event =
	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.peer_id = context_id;
	ev.payload.handle = handle;

	return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles. Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
			 u32 peer,
			 u32 flags,
			 u64 produce_size,
			 u64 consume_size,
			 void *produce_q,
			 void *consume_q)
{
	int result;
	struct qp_guest_endpoint *entry;
	/* One page each for the queue headers. */
	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

	if (vmci_handle_is_invalid(handle)) {
		u32 context_id = vmci_get_context_id();

		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry) {
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
		INIT_LIST_HEAD(&entry->qp.list_item);

		/* Add resource obj */
		result = vmci_resource_add(&entry->resource,
					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
					   handle);
		entry->qp.handle = vmci_resource_handle(&entry->resource);
		if ((result != VMCI_SUCCESS) ||
		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
				handle.context, handle.resource, result);
			kfree(entry);
			entry = NULL;
		}
	}
	return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
	qp_free_ppn_set(&entry->ppn_set);
	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
	qp_free_queue(entry->produce_q, entry->qp.produce_size);
	qp_free_queue(entry->consume_q, entry->qp.consume_size);
	/* Unlink from resource hash table and free callback */
	vmci_resource_remove(&entry->resource);

	kfree(entry);
}

/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_qp_alloc_msg *alloc_msg;
	size_t msg_size;
	size_t ppn_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return VMCI_ERROR_INVALID_ARGS;

	ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
	msg_size = sizeof(*alloc_msg) +
	    (size_t) entry->num_ppns * ppn_size;
	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
	if (!alloc_msg)
		return VMCI_ERROR_NO_MEM;

	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;

	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
				     &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram(&alloc_msg->hdr);

	kfree(alloc_msg);

	return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
	struct vmci_qp_detach_msg detach_msg;

	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					      VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
	if (entry)
		list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
				 struct qp_entry *entry)
{
	if (entry)
		list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
	int result;
	struct qp_guest_endpoint *entry;
	u32 ref_count = ~0;	/* To avoid compiler warning below */

	mutex_lock(&qp_guest_endpoints.mutex);

	entry = qp_guest_handle_to_entry(handle);
	if (!entry) {
		mutex_unlock(&qp_guest_endpoints.mutex);
		return VMCI_ERROR_NOT_FOUND;
	}

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = qp_notify_peer_local(false, handle);
			/*
			 * We can fail to notify a local queuepair
			 * because we can't allocate. We still want
			 * to release the entry if that happens, so
			 * don't bail out yet.
			 */
		}
	} else {
		result = qp_detatch_hypercall(handle);
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair.
			 * That other queuepair might still be
			 * accessing the shared memory, so don't
			 * release the entry yet. It will get cleaned
			 * up by VMCIqueue_pair_Exit() if necessary
			 * (assuming we are going away, otherwise why
			 * did this fail?).
			 */

			mutex_unlock(&qp_guest_endpoints.mutex);
			return result;
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair, or
	 * we succeeded in all cases. Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	if (entry)
		ref_count = entry->qp.ref_count;

	mutex_unlock(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);

	return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
			       struct vmci_queue **produce_q,
			       u64 produce_size,
			       struct vmci_queue **consume_q,
			       u64 consume_size,
			       u32 peer,
			       u32 flags,
			       u32 priv_flags)
{
	const u64 num_produce_pages =
	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
	const u64 num_consume_pages =
	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
	void *my_produce_q = NULL;
	void *my_consume_q = NULL;
	int result;
	struct qp_guest_endpoint *queue_pair_entry = NULL;

	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return VMCI_ERROR_NO_ACCESS;

	mutex_lock(&qp_guest_endpoints.mutex);

	queue_pair_entry = qp_guest_handle_to_entry(*handle);
	if (queue_pair_entry) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				pr_devel("Error attempting to attach more than once\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size !=
			    produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				pr_devel("Error mismatched queue pair in local attach\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and
			 * produce queues for the attacher and deliver
			 * an attach event.
			 */
			result = qp_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;

			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}

		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = qp_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		pr_warn("Error allocating pages for produce queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = qp_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		pr_warn("Error allocating pages for consume queue\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
						    produce_size, consume_size,
						    my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		pr_warn("Error allocating memory in %s\n", __func__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
				  num_consume_pages,
				  &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		pr_warn("qp_alloc_ppn_set failed\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		u32 context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we
		 * do for regular ones. The handle's context must
		 * match the creator or attacher context id (here they
		 * are both the current context id) and the
		 * attach-only flag cannot exist during create. We
		 * also ensure specified peer is this context or an
		 * invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		     queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = qp_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			pr_warn("qp_alloc_hypercall result = %d\n", result);
			goto error;
		}
	}

	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
			    (struct vmci_queue *)my_consume_q);

	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local
	 * queue pair create. For non-local queue pairs, the
	 * hypervisor initializes the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_q_header_init((*produce_q)->q_header, *handle);
		vmci_q_header_init((*consume_q)->q_header, *handle);
	}

	mutex_unlock(&qp_guest_endpoints.mutex);

	return VMCI_SUCCESS;

 error:
	mutex_unlock(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		qp_free_queue(my_produce_q, produce_size);
		qp_free_queue(my_consume_q, consume_size);
	}
	return result;

 error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	mutex_unlock(&qp_guest_endpoints.mutex);
	return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en that used a separate step to set the VMX virtual address
 * range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data, struct qp_broker_entry **ent)
{
	struct qp_broker_entry *entry = NULL;
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;
	u64 guest_produce_size;
	u64 guest_consume_size;

	/* Do not create if the caller asked not to. */
	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
		return VMCI_ERROR_NOT_FOUND;

	/*
	 * Creator's context ID should match handle's context ID or the creator
	 * must allow the context in handle's context ID as the "peer".
	 */
	if (handle.context != context_id && handle.context != peer)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * Creator's context ID for local queue pairs should match the
	 * peer, if a peer is specified.
	 */
	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
		return VMCI_ERROR_NO_ACCESS;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return VMCI_ERROR_NO_MEM;

	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so a creating host side endpoint should swap
		 * produce and consume values -- unless it is a local queue
		 * pair, in which case no swapping is necessary, since the local
		 * attacher will swap queues.
		 */

		guest_produce_size = consume_size;
		guest_consume_size = produce_size;
	} else {
		guest_produce_size = produce_size;
		guest_consume_size = consume_size;
	}

	entry->qp.handle = handle;
	entry->qp.peer = peer;
	entry->qp.flags = flags;
	entry->qp.produce_size = guest_produce_size;
	entry->qp.consume_size = guest_consume_size;
	entry->qp.ref_count = 1;
	entry->create_id = context_id;
	entry->attach_id = VMCI_INVALID_ID;
	entry->state = VMCIQPB_NEW;
	entry->require_trusted_attach =
	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
	entry->created_by_trusted =
	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
	entry->vmci_page_files = false;
	entry->wakeup_cb = wakeup_cb;
	entry->client_data = client_data;
	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
	if (entry->produce_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}
	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
	if (entry->consume_q == NULL) {
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	qp_init_queue_mutex(entry->produce_q, entry->consume_q);

	INIT_LIST_HEAD(&entry->qp.list_item);

	if (is_local) {
		u8 *tmp;

		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
					   PAGE_SIZE, GFP_KERNEL);
		if (entry->local_mem == NULL) {
			result = VMCI_ERROR_NO_MEM;
			goto error;
		}
		entry->state = VMCIQPB_CREATED_MEM;
		entry->produce_q->q_header = entry->local_mem;
		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
	} else if (page_store) {
		/*
		 * The VMX already initialized the queue pair headers, so no
		 * need for the kernel side to do that.
		 */
		result = qp_host_register_user_memory(page_store,
						      entry->produce_q,
						      entry->consume_q);
		if (result < VMCI_SUCCESS)
			goto error;

		entry->state = VMCIQPB_CREATED_MEM;
	} else {
		/*
		 * A create without a page_store may be either a host
		 * side create (in which case we are waiting for the
		 * guest side to supply the memory) or an old style
		 * queue pair create (in which case we will expect a
		 * set page store call as the next step).
		 */
		entry->state = VMCIQPB_CREATED_NO_MEM;
	}

	qp_list_add_entry(&qp_broker_list, &entry->qp);
	if (ent != NULL)
		*ent = entry;

	/* Add to resource obj */
	result = vmci_resource_add(&entry->resource,
				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
				   handle);
	if (result != VMCI_SUCCESS) {
		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
			handle.context, handle.resource, result);
		goto error;
	}

	entry->qp.handle = vmci_resource_handle(&entry->resource);
	if (is_local) {
		vmci_q_header_init(entry->produce_q->q_header,
				   entry->qp.handle);
		vmci_q_header_init(entry->consume_q->q_header,
				   entry->qp.handle);
	}

	vmci_ctx_qp_create(context, entry->qp.handle);

	return VMCI_SUCCESS;

 error:
	if (entry != NULL) {
		qp_host_free_queue(entry->produce_q, guest_produce_size);
		qp_host_free_queue(entry->consume_q, guest_consume_size);
		kfree(entry);
	}

	return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about an attach/detach event by the
 * given VM. Returns the payload size of the datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
			  struct vmci_handle handle,
			  u32 my_id,
			  u32 peer_id)
{
	int rv;
	struct vmci_event_qp ev;

	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
	    peer_id == VMCI_INVALID_ID)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on the
	 * number of pending events from the hypervisor to a given VM;
	 * otherwise a rogue VM could do an arbitrary number of attach
	 * and detach operations causing memory pressure in the host
	 * kernel.
	 */

	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_CONTEXT_RESOURCE_ID);
	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
	ev.msg.event_data.event = attach ?
	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
	ev.payload.handle = handle;
	ev.payload.peer_id = my_id;

	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
				    &ev.msg.hdr, false);
	if (rv < VMCI_SUCCESS)
		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
			attach ? "ATTACH" : "DETACH", peer_id);

	return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
			    u32 peer,
			    u32 flags,
			    u32 priv_flags,
			    u64 produce_size,
			    u64 consume_size,
			    struct vmci_qp_page_store *page_store,
			    struct vmci_ctx *context,
			    vmci_event_release_cb wakeup_cb,
			    void *client_data,
			    struct qp_broker_entry **ent)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
	    entry->state != VMCIQPB_CREATED_MEM)
		return VMCI_ERROR_UNAVAILABLE;

	if (is_local) {
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
		    context_id != entry->create_id) {
			return VMCI_ERROR_INVALID_ARGS;
		}
	} else if (context_id == entry->create_id ||
		   context_id == entry->attach_id) {
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (VMCI_CONTEXT_IS_VM(context_id) &&
	    VMCI_CONTEXT_IS_VM(entry->create_id))
		return VMCI_ERROR_DST_UNREACHABLE;

	/*
	 * If we are attaching from a restricted context then the queuepair
	 * must have been created by a trusted endpoint.
	 */
	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
	    !entry->created_by_trusted)
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If we are attaching to a queuepair that was created by a restricted
	 * context then we must be trusted.
	 */
	if (entry->require_trusted_attach &&
	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
		return VMCI_ERROR_NO_ACCESS;

	/*
	 * If the creator specifies VMCI_INVALID_ID in the "peer" field, the
	 * access control check is not performed.
	 */
	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
		return VMCI_ERROR_NO_ACCESS;

	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
		/*
		 * Do not attach if the caller doesn't support Host Queue Pairs
		 * and a host created this queue pair.
		 */
		if (!vmci_ctx_supports_host_qp(context))
			return VMCI_ERROR_INVALID_RESOURCE;

	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
		struct vmci_ctx *create_context;
		bool supports_host_qp;

		/*
		 * Do not attach a host to a user created queue pair if that
		 * user doesn't support host queue pair end points.
		 */
		create_context = vmci_ctx_get(entry->create_id);
		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
		vmci_ctx_put(create_context);

		if (!supports_host_qp)
			return VMCI_ERROR_INVALID_RESOURCE;
	}

	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * The queue pair broker entry stores values from the guest
		 * point of view, so an attaching guest should match the values
		 * stored in the entry.
		 */

		if (entry->qp.produce_size != produce_size ||
		    entry->qp.consume_size != consume_size) {
			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
		}
	} else if (entry->qp.produce_size != consume_size ||
		   entry->qp.consume_size != produce_size) {
		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
	}

	if (context_id != VMCI_HOST_CONTEXT_ID) {
		/*
		 * If a guest attaches to a queue pair, it will supply
		 * the backing memory. If this is a pre NOVMVM vmx,
		 * the backing memory will be supplied by calling
		 * vmci_qp_broker_set_page_store() following the
		 * return of the vmci_qp_broker_alloc() call. If it is
		 * a vmx of version NOVMVM or later, the page store
		 * must be supplied as part of the
		 * vmci_qp_broker_alloc call. Under all circumstances,
		 * the initially created queue pair must not have any
		 * memory associated with it already.
		 */

		if (entry->state != VMCIQPB_CREATED_NO_MEM)
			return VMCI_ERROR_INVALID_ARGS;

		if (page_store != NULL) {
			/*
			 * Patch up host state to point to guest
			 * supplied memory. The VMX already
			 * initialized the queue pair headers, so no
			 * need for the kernel side to do that.
			 */

			result = qp_host_register_user_memory(page_store,
							      entry->produce_q,
							      entry->consume_q);
			if (result < VMCI_SUCCESS)
				return result;

			entry->state = VMCIQPB_ATTACHED_MEM;
		} else {
			entry->state = VMCIQPB_ATTACHED_NO_MEM;
		}
	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
		/*
		 * The host side is attempting to attach to a queue
		 * pair that doesn't have any memory associated with
		 * it. This must be a pre NOVMVM vmx that hasn't set
		 * the page store information yet, or a quiesced VM.
		 */

		return VMCI_ERROR_UNAVAILABLE;
	} else {
		/* The host side has successfully attached to a queue pair. */
		entry->state = VMCIQPB_ATTACHED_MEM;
	}

	if (entry->state == VMCIQPB_ATTACHED_MEM) {
		result =
		    qp_notify_peer(true, entry->qp.handle, context_id,
				   entry->create_id);
		if (result < VMCI_SUCCESS)
			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
				entry->create_id, entry->qp.handle.context,
				entry->qp.handle.resource);
	}

	entry->attach_id = context_id;
	entry->qp.ref_count++;
	if (wakeup_cb) {
		entry->wakeup_cb = wakeup_cb;
		entry->client_data = client_data;
	}

	/*
	 * When attaching to local queue pairs, the context already has
	 * an entry tracking the queue pair, so don't add another one.
	 */
	if (!is_local)
		vmci_ctx_qp_create(context, entry->qp.handle);

	if (ent != NULL)
		*ent = entry;

	return VMCI_SUCCESS;
}
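
/*
 * A concrete (made-up) example of the size checks above: suppose a guest
 * created the pair with produce_size = 64 KiB and consume_size = 256 KiB.
 * The broker entry stores those values in the guest's orientation, so
 *
 *	- another guest-side attacher must pass produce_size = 64 KiB and
 *	  consume_size = 256 KiB (same view), while
 *	- a host-side attacher must pass produce_size = 256 KiB and
 *	  consume_size = 64 KiB, since what the guest produces is what the
 *	  host consumes, and vice versa.
 *
 * Anything else fails with VMCI_ERROR_QUEUEPAIR_MISMATCH.
 */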

/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
			   u32 peer,
			   u32 flags,
			   u32 priv_flags,
			   u64 produce_size,
			   u64 consume_size,
			   struct vmci_qp_page_store *page_store,
			   struct vmci_ctx *context,
			   vmci_event_release_cb wakeup_cb,
			   void *client_data,
			   struct qp_broker_entry **ent,
			   bool *swap)
{
	const u32 context_id = vmci_ctx_get_id(context);
	bool create;
	struct qp_broker_entry *entry = NULL;
	bool is_local = flags & VMCI_QPFLAG_LOCAL;
	int result;

	if (vmci_handle_is_invalid(handle) ||
	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
	    !(produce_size || consume_size) ||
	    !context || context_id == VMCI_INVALID_ID ||
	    handle.context == VMCI_INVALID_ID) {
		return VMCI_ERROR_INVALID_ARGS;
	}

	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * In the initial argument check, we ensure that non-vmkernel hosts
	 * are not allowed to create local queue pairs.
	 */

	mutex_lock(&qp_broker_list.mutex);

	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
			 context_id, handle.context, handle.resource);
		mutex_unlock(&qp_broker_list.mutex);
		return VMCI_ERROR_ALREADY_EXISTS;
	}

	if (handle.resource != VMCI_INVALID_ID)
		entry = qp_broker_handle_to_entry(handle);

	if (!entry) {
		create = true;
		result =
		    qp_broker_create(handle, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	} else {
		create = false;
		result =
		    qp_broker_attach(entry, peer, flags, priv_flags,
				     produce_size, consume_size, page_store,
				     context, wakeup_cb, client_data, ent);
	}

	mutex_unlock(&qp_broker_list.mutex);

	if (swap)
		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
		    !(create && is_local);

	return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
			      struct vmci_queue **produce_q,
			      u64 produce_size,
			      struct vmci_queue **consume_q,
			      u64 consume_size,
			      u32 peer,
			      u32 flags,
			      u32 priv_flags,
			      vmci_event_release_cb wakeup_cb,
			      void *client_data)
{
	struct vmci_handle new_handle;
	struct vmci_ctx *context;
	struct qp_broker_entry *entry;
	int result;
	bool swap;

	if (vmci_handle_is_invalid(*handle)) {
		new_handle = vmci_make_handle(
			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
	} else
		new_handle = *handle;

	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
	entry = NULL;
	result =
	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
			    produce_size, consume_size, NULL, context,
			    wakeup_cb, client_data, &entry, &swap);
	if (result == VMCI_SUCCESS) {
		if (swap) {
			/*
			 * If this is a local queue pair, the attacher
			 * will swap around produce and consume
			 * queues.
1804 */ 1805 1806 *produce_q = entry->consume_q; 1807 *consume_q = entry->produce_q; 1808 } else { 1809 *produce_q = entry->produce_q; 1810 *consume_q = entry->consume_q; 1811 } 1812 1813 *handle = vmci_resource_handle(&entry->resource); 1814 } else { 1815 *handle = VMCI_INVALID_HANDLE; 1816 pr_devel("queue pair broker failed to alloc (result=%d)\n", 1817 result); 1818 } 1819 vmci_ctx_put(context); 1820 return result; 1821 } 1822 1823 /* 1824 * Allocates a VMCI queue_pair. Only checks validity of input 1825 * arguments. The real work is done in the host or guest 1826 * specific function. 1827 */ 1828 int vmci_qp_alloc(struct vmci_handle *handle, 1829 struct vmci_queue **produce_q, 1830 u64 produce_size, 1831 struct vmci_queue **consume_q, 1832 u64 consume_size, 1833 u32 peer, 1834 u32 flags, 1835 u32 priv_flags, 1836 bool guest_endpoint, 1837 vmci_event_release_cb wakeup_cb, 1838 void *client_data) 1839 { 1840 if (!handle || !produce_q || !consume_q || 1841 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) 1842 return VMCI_ERROR_INVALID_ARGS; 1843 1844 if (guest_endpoint) { 1845 return qp_alloc_guest_work(handle, produce_q, 1846 produce_size, consume_q, 1847 consume_size, peer, 1848 flags, priv_flags); 1849 } else { 1850 return qp_alloc_host_work(handle, produce_q, 1851 produce_size, consume_q, 1852 consume_size, peer, flags, 1853 priv_flags, wakeup_cb, client_data); 1854 } 1855 } 1856 1857 /* 1858 * This function implements the host kernel API for detaching from 1859 * a queue pair. 1860 */ 1861 static int qp_detatch_host_work(struct vmci_handle handle) 1862 { 1863 int result; 1864 struct vmci_ctx *context; 1865 1866 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1867 1868 result = vmci_qp_broker_detach(handle, context); 1869 1870 vmci_ctx_put(context); 1871 return result; 1872 } 1873 1874 /* 1875 * Detaches from a VMCI queue_pair. Only checks validity of input argument. 1876 * Real work is done in the host or guest specific function. 1877 */ 1878 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) 1879 { 1880 if (vmci_handle_is_invalid(handle)) 1881 return VMCI_ERROR_INVALID_ARGS; 1882 1883 if (guest_endpoint) 1884 return qp_detatch_guest_work(handle); 1885 else 1886 return qp_detatch_host_work(handle); 1887 } 1888 1889 /* 1890 * Returns the entry from the head of the list. Assumes that the list is 1891 * locked. 1892 */ 1893 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) 1894 { 1895 if (!list_empty(&qp_list->head)) { 1896 struct qp_entry *entry = 1897 list_first_entry(&qp_list->head, struct qp_entry, 1898 list_item); 1899 return entry; 1900 } 1901 1902 return NULL; 1903 } 1904 1905 void vmci_qp_broker_exit(void) 1906 { 1907 struct qp_entry *entry; 1908 struct qp_broker_entry *be; 1909 1910 mutex_lock(&qp_broker_list.mutex); 1911 1912 while ((entry = qp_list_get_head(&qp_broker_list))) { 1913 be = (struct qp_broker_entry *)entry; 1914 1915 qp_list_remove_entry(&qp_broker_list, entry); 1916 kfree(be); 1917 } 1918 1919 mutex_unlock(&qp_broker_list.mutex); 1920 } 1921 1922 /* 1923 * Requests that a queue pair be allocated with the VMCI queue 1924 * pair broker. Allocates a queue pair entry if one does not 1925 * exist. Attaches to one if it exists, and retrieves the page 1926 * files backing that queue_pair. Assumes that the queue pair 1927 * broker lock is held. 
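 *
 * A minimal calling sketch for the old-style (pre NOVMVM) VMX path, in
 * which the page store is supplied in a separate second step;
 * vmx_context, produce_uva and consume_uva are illustrative values
 * provided by the caller:
 *
 *	result = vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
 *				      produce_size, consume_size,
 *				      NULL, vmx_context);
 *	if (result >= VMCI_SUCCESS)
 *		result = vmci_qp_broker_set_page_store(handle, produce_uva,
 *						       consume_uva,
 *						       vmx_context);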
1928 */ 1929 int vmci_qp_broker_alloc(struct vmci_handle handle, 1930 u32 peer, 1931 u32 flags, 1932 u32 priv_flags, 1933 u64 produce_size, 1934 u64 consume_size, 1935 struct vmci_qp_page_store *page_store, 1936 struct vmci_ctx *context) 1937 { 1938 return qp_broker_alloc(handle, peer, flags, priv_flags, 1939 produce_size, consume_size, 1940 page_store, context, NULL, NULL, NULL, NULL); 1941 } 1942 1943 /* 1944 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate 1945 * step to add the UVAs of the VMX mapping of the queue pair. This function 1946 * provides backwards compatibility with such VMX'en, and takes care of 1947 * registering the page store for a queue pair previously allocated by the 1948 * VMX during create or attach. This function will move the queue pair state 1949 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from 1950 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the 1951 * attached state with memory, the queue pair is ready to be used by the 1952 * host peer, and an attached event will be generated. 1953 * 1954 * Takes and releases the queue pair broker lock. 1955 * 1956 * This function is only used by the hosted platform, since there is no 1957 * issue with backwards compatibility for vmkernel. 1958 */ 1959 int vmci_qp_broker_set_page_store(struct vmci_handle handle, 1960 u64 produce_uva, 1961 u64 consume_uva, 1962 struct vmci_ctx *context) 1963 { 1964 struct qp_broker_entry *entry; 1965 int result; 1966 const u32 context_id = vmci_ctx_get_id(context); 1967 1968 if (vmci_handle_is_invalid(handle) || !context || 1969 context_id == VMCI_INVALID_ID) 1970 return VMCI_ERROR_INVALID_ARGS; 1971 1972 /* 1973 * We only support guest to host queue pairs, so the VMX must 1974 * supply UVAs for the mapped page files. 1975 */ 1976 1977 if (produce_uva == 0 || consume_uva == 0) 1978 return VMCI_ERROR_INVALID_ARGS; 1979 1980 mutex_lock(&qp_broker_list.mutex); 1981 1982 if (!vmci_ctx_qp_exists(context, handle)) { 1983 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 1984 context_id, handle.context, handle.resource); 1985 result = VMCI_ERROR_NOT_FOUND; 1986 goto out; 1987 } 1988 1989 entry = qp_broker_handle_to_entry(handle); 1990 if (!entry) { 1991 result = VMCI_ERROR_NOT_FOUND; 1992 goto out; 1993 } 1994 1995 /* 1996 * If I'm the owner then I can set the page store. 1997 * 1998 * Or, if a host created the queue_pair and I'm the attached peer 1999 * then I can set the page store.
2000 */ 2001 if (entry->create_id != context_id && 2002 (entry->create_id != VMCI_HOST_CONTEXT_ID || 2003 entry->attach_id != context_id)) { 2004 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; 2005 goto out; 2006 } 2007 2008 if (entry->state != VMCIQPB_CREATED_NO_MEM && 2009 entry->state != VMCIQPB_ATTACHED_NO_MEM) { 2010 result = VMCI_ERROR_UNAVAILABLE; 2011 goto out; 2012 } 2013 2014 result = qp_host_get_user_memory(produce_uva, consume_uva, 2015 entry->produce_q, entry->consume_q); 2016 if (result < VMCI_SUCCESS) 2017 goto out; 2018 2019 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2020 if (result < VMCI_SUCCESS) { 2021 qp_host_unregister_user_memory(entry->produce_q, 2022 entry->consume_q); 2023 goto out; 2024 } 2025 2026 if (entry->state == VMCIQPB_CREATED_NO_MEM) 2027 entry->state = VMCIQPB_CREATED_MEM; 2028 else 2029 entry->state = VMCIQPB_ATTACHED_MEM; 2030 2031 entry->vmci_page_files = true; 2032 2033 if (entry->state == VMCIQPB_ATTACHED_MEM) { 2034 result = 2035 qp_notify_peer(true, handle, context_id, entry->create_id); 2036 if (result < VMCI_SUCCESS) { 2037 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 2038 entry->create_id, entry->qp.handle.context, 2039 entry->qp.handle.resource); 2040 } 2041 } 2042 2043 result = VMCI_SUCCESS; 2044 out: 2045 mutex_unlock(&qp_broker_list.mutex); 2046 return result; 2047 } 2048 2049 /* 2050 * Resets saved queue headers for the given QP broker 2051 * entry. Should be used when guest memory becomes available 2052 * again, or the guest detaches. 2053 */ 2054 static void qp_reset_saved_headers(struct qp_broker_entry *entry) 2055 { 2056 entry->produce_q->saved_header = NULL; 2057 entry->consume_q->saved_header = NULL; 2058 } 2059 2060 /* 2061 * The main entry point for detaching from a queue pair registered with the 2062 * queue pair broker. If more than one endpoint is attached to the queue 2063 * pair, the first endpoint will mainly decrement a reference count and 2064 * generate a notification to its peer. The last endpoint will clean up 2065 * the queue pair state registered with the broker. 2066 * 2067 * When a guest endpoint detaches, it will unmap and unregister the guest 2068 * memory backing the queue pair. If the host is still attached, it will 2069 * no longer be able to access the queue pair content. 2070 * 2071 * If the queue pair is already in a state where there is no memory 2072 * registered for the queue pair (any *_NO_MEM state), it will transition to 2073 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest 2074 * endpoint is the first of two endpoints to detach. If the host endpoint is 2075 * the first out of two to detach, the queue pair will move to the 2076 * VMCIQPB_SHUTDOWN_MEM state. 
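 *
 * In summary (derived from the state diagram at the top of this file):
 *
 *	detaching endpoint      guest memory        resulting state
 *	--------------------------------------------------------------
 *	guest, peer remains     unregistered here   VMCIQPB_SHUTDOWN_NO_MEM
 *	host, peer remains      currently mapped    VMCIQPB_SHUTDOWN_MEM
 *	host, peer remains      not mapped          VMCIQPB_SHUTDOWN_NO_MEM
 *	last endpoint           n/a                 entry removed and freed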
2077 */ 2078 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) 2079 { 2080 struct qp_broker_entry *entry; 2081 const u32 context_id = vmci_ctx_get_id(context); 2082 u32 peer_id; 2083 bool is_local = false; 2084 int result; 2085 2086 if (vmci_handle_is_invalid(handle) || !context || 2087 context_id == VMCI_INVALID_ID) { 2088 return VMCI_ERROR_INVALID_ARGS; 2089 } 2090 2091 mutex_lock(&qp_broker_list.mutex); 2092 2093 if (!vmci_ctx_qp_exists(context, handle)) { 2094 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2095 context_id, handle.context, handle.resource); 2096 result = VMCI_ERROR_NOT_FOUND; 2097 goto out; 2098 } 2099 2100 entry = qp_broker_handle_to_entry(handle); 2101 if (!entry) { 2102 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", 2103 context_id, handle.context, handle.resource); 2104 result = VMCI_ERROR_NOT_FOUND; 2105 goto out; 2106 } 2107 2108 if (context_id != entry->create_id && context_id != entry->attach_id) { 2109 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2110 goto out; 2111 } 2112 2113 if (context_id == entry->create_id) { 2114 peer_id = entry->attach_id; 2115 entry->create_id = VMCI_INVALID_ID; 2116 } else { 2117 peer_id = entry->create_id; 2118 entry->attach_id = VMCI_INVALID_ID; 2119 } 2120 entry->qp.ref_count--; 2121 2122 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2123 2124 if (context_id != VMCI_HOST_CONTEXT_ID) { 2125 bool headers_mapped; 2126 2127 /* 2128 * Pre NOVMVM vmx'en may detach from a queue pair 2129 * before setting the page store, and in that case 2130 * there is no user memory to detach from. Also, more 2131 * recent VMX'en may detach from a queue pair in the 2132 * quiesced state. 
2133 */ 2134 2135 qp_acquire_queue_mutex(entry->produce_q); 2136 headers_mapped = entry->produce_q->q_header || 2137 entry->consume_q->q_header; 2138 if (QPBROKERSTATE_HAS_MEM(entry)) { 2139 result = 2140 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, 2141 entry->produce_q, 2142 entry->consume_q); 2143 if (result < VMCI_SUCCESS) 2144 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2145 handle.context, handle.resource, 2146 result); 2147 2148 qp_host_unregister_user_memory(entry->produce_q, 2149 entry->consume_q); 2150 2151 } 2152 2153 if (!headers_mapped) 2154 qp_reset_saved_headers(entry); 2155 2156 qp_release_queue_mutex(entry->produce_q); 2157 2158 if (!headers_mapped && entry->wakeup_cb) 2159 entry->wakeup_cb(entry->client_data); 2160 2161 } else { 2162 if (entry->wakeup_cb) { 2163 entry->wakeup_cb = NULL; 2164 entry->client_data = NULL; 2165 } 2166 } 2167 2168 if (entry->qp.ref_count == 0) { 2169 qp_list_remove_entry(&qp_broker_list, &entry->qp); 2170 2171 if (is_local) 2172 kfree(entry->local_mem); 2173 2174 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 2175 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); 2176 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); 2177 /* Unlink from resource hash table and free callback */ 2178 vmci_resource_remove(&entry->resource); 2179 2180 kfree(entry); 2181 2182 vmci_ctx_qp_destroy(context, handle); 2183 } else { 2184 qp_notify_peer(false, handle, context_id, peer_id); 2185 if (context_id == VMCI_HOST_CONTEXT_ID && 2186 QPBROKERSTATE_HAS_MEM(entry)) { 2187 entry->state = VMCIQPB_SHUTDOWN_MEM; 2188 } else { 2189 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; 2190 } 2191 2192 if (!is_local) 2193 vmci_ctx_qp_destroy(context, handle); 2194 2195 } 2196 result = VMCI_SUCCESS; 2197 out: 2198 mutex_unlock(&qp_broker_list.mutex); 2199 return result; 2200 } 2201 2202 /* 2203 * Establishes the necessary mappings for a queue pair given a 2204 * reference to the queue pair guest memory. This is usually 2205 * called when a guest is unquiesced and the VMX is allowed to 2206 * map guest memory once again. 
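 *
 * A minimal sketch of the expected pairing with vmci_qp_broker_unmap()
 * around a guest quiesce; vmx_context, gid and guest_mem stand in for
 * values supplied by the VMX-facing caller:
 *
 *	vmci_qp_broker_unmap(handle, vmx_context, gid);
 *		... guest memory is inaccessible while quiesced ...
 *	vmci_qp_broker_map(handle, vmx_context, guest_mem);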
2207 */ 2208 int vmci_qp_broker_map(struct vmci_handle handle, 2209 struct vmci_ctx *context, 2210 u64 guest_mem) 2211 { 2212 struct qp_broker_entry *entry; 2213 const u32 context_id = vmci_ctx_get_id(context); 2214 int result; 2215 2216 if (vmci_handle_is_invalid(handle) || !context || 2217 context_id == VMCI_INVALID_ID) 2218 return VMCI_ERROR_INVALID_ARGS; 2219 2220 mutex_lock(&qp_broker_list.mutex); 2221 2222 if (!vmci_ctx_qp_exists(context, handle)) { 2223 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2224 context_id, handle.context, handle.resource); 2225 result = VMCI_ERROR_NOT_FOUND; 2226 goto out; 2227 } 2228 2229 entry = qp_broker_handle_to_entry(handle); 2230 if (!entry) { 2231 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2232 context_id, handle.context, handle.resource); 2233 result = VMCI_ERROR_NOT_FOUND; 2234 goto out; 2235 } 2236 2237 if (context_id != entry->create_id && context_id != entry->attach_id) { 2238 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2239 goto out; 2240 } 2241 2242 result = VMCI_SUCCESS; 2243 2244 if (context_id != VMCI_HOST_CONTEXT_ID) { 2245 struct vmci_qp_page_store page_store; 2246 2247 page_store.pages = guest_mem; 2248 page_store.len = QPE_NUM_PAGES(entry->qp); 2249 2250 qp_acquire_queue_mutex(entry->produce_q); 2251 qp_reset_saved_headers(entry); 2252 result = 2253 qp_host_register_user_memory(&page_store, 2254 entry->produce_q, 2255 entry->consume_q); 2256 qp_release_queue_mutex(entry->produce_q); 2257 if (result == VMCI_SUCCESS) { 2258 /* Move state from *_NO_MEM to *_MEM */ 2259 2260 entry->state++; 2261 2262 if (entry->wakeup_cb) 2263 entry->wakeup_cb(entry->client_data); 2264 } 2265 } 2266 2267 out: 2268 mutex_unlock(&qp_broker_list.mutex); 2269 return result; 2270 } 2271 2272 /* 2273 * Saves a snapshot of the queue headers for the given QP broker 2274 * entry. Should be used when guest memory is unmapped. 2275 * Results: 2276 * VMCI_SUCCESS on success, appropriate error code if guest memory 2277 * can't be accessed.. 2278 */ 2279 static int qp_save_headers(struct qp_broker_entry *entry) 2280 { 2281 int result; 2282 2283 if (entry->produce_q->saved_header != NULL && 2284 entry->consume_q->saved_header != NULL) { 2285 /* 2286 * If the headers have already been saved, we don't need to do 2287 * it again, and we don't want to map in the headers 2288 * unnecessarily. 2289 */ 2290 2291 return VMCI_SUCCESS; 2292 } 2293 2294 if (NULL == entry->produce_q->q_header || 2295 NULL == entry->consume_q->q_header) { 2296 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2297 if (result < VMCI_SUCCESS) 2298 return result; 2299 } 2300 2301 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, 2302 sizeof(entry->saved_produce_q)); 2303 entry->produce_q->saved_header = &entry->saved_produce_q; 2304 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, 2305 sizeof(entry->saved_consume_q)); 2306 entry->consume_q->saved_header = &entry->saved_consume_q; 2307 2308 return VMCI_SUCCESS; 2309 } 2310 2311 /* 2312 * Removes all references to the guest memory of a given queue pair, and 2313 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually 2314 * called when a VM is being quiesced where access to guest memory should 2315 * avoided. 
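 *
 * While the guest memory is unmapped, only the header snapshots taken
 * by qp_save_headers() remain available: callers can still query the
 * last known producer/consumer indexes, but the enqueue/dequeue paths
 * will see VMCI_ERROR_QUEUEPAIR_NOT_READY until vmci_qp_broker_map()
 * registers the memory again.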
2316 */ 2317 int vmci_qp_broker_unmap(struct vmci_handle handle, 2318 struct vmci_ctx *context, 2319 u32 gid) 2320 { 2321 struct qp_broker_entry *entry; 2322 const u32 context_id = vmci_ctx_get_id(context); 2323 int result; 2324 2325 if (vmci_handle_is_invalid(handle) || !context || 2326 context_id == VMCI_INVALID_ID) 2327 return VMCI_ERROR_INVALID_ARGS; 2328 2329 mutex_lock(&qp_broker_list.mutex); 2330 2331 if (!vmci_ctx_qp_exists(context, handle)) { 2332 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2333 context_id, handle.context, handle.resource); 2334 result = VMCI_ERROR_NOT_FOUND; 2335 goto out; 2336 } 2337 2338 entry = qp_broker_handle_to_entry(handle); 2339 if (!entry) { 2340 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2341 context_id, handle.context, handle.resource); 2342 result = VMCI_ERROR_NOT_FOUND; 2343 goto out; 2344 } 2345 2346 if (context_id != entry->create_id && context_id != entry->attach_id) { 2347 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2348 goto out; 2349 } 2350 2351 if (context_id != VMCI_HOST_CONTEXT_ID) { 2352 qp_acquire_queue_mutex(entry->produce_q); 2353 result = qp_save_headers(entry); 2354 if (result < VMCI_SUCCESS) 2355 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2356 handle.context, handle.resource, result); 2357 2358 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); 2359 2360 /* 2361 * On hosted, when we unmap queue pairs, the VMX will also 2362 * unmap the guest memory, so we invalidate the previously 2363 * registered memory. If the queue pair is mapped again at a 2364 * later point in time, we will need to reregister the user 2365 * memory with a possibly new user VA. 2366 */ 2367 qp_host_unregister_user_memory(entry->produce_q, 2368 entry->consume_q); 2369 2370 /* 2371 * Move state from *_MEM to *_NO_MEM. 2372 */ 2373 entry->state--; 2374 2375 qp_release_queue_mutex(entry->produce_q); 2376 } 2377 2378 result = VMCI_SUCCESS; 2379 2380 out: 2381 mutex_unlock(&qp_broker_list.mutex); 2382 return result; 2383 } 2384 2385 /* 2386 * Destroys all guest queue pair endpoints. If active guest queue 2387 * pairs still exist, hypercalls to attempt detach from these 2388 * queue pairs will be made. Any failure to detach is silently 2389 * ignored. 2390 */ 2391 void vmci_qp_guest_endpoints_exit(void) 2392 { 2393 struct qp_entry *entry; 2394 struct qp_guest_endpoint *ep; 2395 2396 mutex_lock(&qp_guest_endpoints.mutex); 2397 2398 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { 2399 ep = (struct qp_guest_endpoint *)entry; 2400 2401 /* Don't make a hypercall for local queue_pairs. */ 2402 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) 2403 qp_detatch_hypercall(entry->handle); 2404 2405 /* We cannot fail the exit, so let's reset ref_count. */ 2406 entry->ref_count = 0; 2407 qp_list_remove_entry(&qp_guest_endpoints, entry); 2408 2409 qp_guest_endpoint_destroy(ep); 2410 } 2411 2412 mutex_unlock(&qp_guest_endpoints.mutex); 2413 } 2414 2415 /* 2416 * Helper routine that will lock the queue pair before subsequent 2417 * operations. 2418 * Note: Non-blocking on the host side is currently only implemented in ESX. 2419 * Since non-blocking isn't yet implemented on the host personality we 2420 * have no reason to acquire a spin lock. So to avoid the use of an 2421 * unnecessary lock only acquire the mutex if we can block. 
2422 */ 2423 static void qp_lock(const struct vmci_qp *qpair) 2424 { 2425 qp_acquire_queue_mutex(qpair->produce_q); 2426 } 2427 2428 /* 2429 * Helper routine that unlocks the queue pair after calling 2430 * qp_lock. 2431 */ 2432 static void qp_unlock(const struct vmci_qp *qpair) 2433 { 2434 qp_release_queue_mutex(qpair->produce_q); 2435 } 2436 2437 /* 2438 * The queue headers may not be mapped at all times. If a queue is 2439 * currently not mapped, it will be attempted to do so. 2440 */ 2441 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2442 struct vmci_queue *consume_q) 2443 { 2444 int result; 2445 2446 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2447 result = qp_host_map_queues(produce_q, consume_q); 2448 if (result < VMCI_SUCCESS) 2449 return (produce_q->saved_header && 2450 consume_q->saved_header) ? 2451 VMCI_ERROR_QUEUEPAIR_NOT_READY : 2452 VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2453 } 2454 2455 return VMCI_SUCCESS; 2456 } 2457 2458 /* 2459 * Helper routine that will retrieve the produce and consume 2460 * headers of a given queue pair. If the guest memory of the 2461 * queue pair is currently not available, the saved queue headers 2462 * will be returned, if these are available. 2463 */ 2464 static int qp_get_queue_headers(const struct vmci_qp *qpair, 2465 struct vmci_queue_header **produce_q_header, 2466 struct vmci_queue_header **consume_q_header) 2467 { 2468 int result; 2469 2470 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2471 if (result == VMCI_SUCCESS) { 2472 *produce_q_header = qpair->produce_q->q_header; 2473 *consume_q_header = qpair->consume_q->q_header; 2474 } else if (qpair->produce_q->saved_header && 2475 qpair->consume_q->saved_header) { 2476 *produce_q_header = qpair->produce_q->saved_header; 2477 *consume_q_header = qpair->consume_q->saved_header; 2478 result = VMCI_SUCCESS; 2479 } 2480 2481 return result; 2482 } 2483 2484 /* 2485 * Callback from VMCI queue pair broker indicating that a queue 2486 * pair that was previously not ready, now either is ready or 2487 * gone forever. 2488 */ 2489 static int qp_wakeup_cb(void *client_data) 2490 { 2491 struct vmci_qp *qpair = (struct vmci_qp *)client_data; 2492 2493 qp_lock(qpair); 2494 while (qpair->blocked > 0) { 2495 qpair->blocked--; 2496 qpair->generation++; 2497 wake_up(&qpair->event); 2498 } 2499 qp_unlock(qpair); 2500 2501 return VMCI_SUCCESS; 2502 } 2503 2504 /* 2505 * Makes the calling thread wait for the queue pair to become 2506 * ready for host side access. Returns true when thread is 2507 * woken up after queue pair state change, false otherwise. 2508 */ 2509 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) 2510 { 2511 unsigned int generation; 2512 2513 qpair->blocked++; 2514 generation = qpair->generation; 2515 qp_unlock(qpair); 2516 wait_event(qpair->event, generation != qpair->generation); 2517 qp_lock(qpair); 2518 2519 return true; 2520 } 2521 2522 /* 2523 * Enqueues a given buffer to the produce queue using the provided 2524 * function. As many bytes as possible (space available in the queue) 2525 * are enqueued. Assumes the queue->mutex has been acquired. Returns 2526 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue 2527 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the 2528 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if 2529 * an error occured when accessing the buffer, 2530 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't 2531 * available. 
Otherwise, the number of bytes written to the queue is 2532 * returned. Updates the tail pointer of the produce queue. 2533 */ 2534 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2535 struct vmci_queue *consume_q, 2536 const u64 produce_q_size, 2537 struct iov_iter *from) 2538 { 2539 s64 free_space; 2540 u64 tail; 2541 size_t buf_size = iov_iter_count(from); 2542 size_t written; 2543 ssize_t result; 2544 2545 result = qp_map_queue_headers(produce_q, consume_q); 2546 if (unlikely(result != VMCI_SUCCESS)) 2547 return result; 2548 2549 free_space = vmci_q_header_free_space(produce_q->q_header, 2550 consume_q->q_header, 2551 produce_q_size); 2552 if (free_space == 0) 2553 return VMCI_ERROR_QUEUEPAIR_NOSPACE; 2554 2555 if (free_space < VMCI_SUCCESS) 2556 return (ssize_t) free_space; 2557 2558 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2559 tail = vmci_q_header_producer_tail(produce_q->q_header); 2560 if (likely(tail + written < produce_q_size)) { 2561 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written); 2562 } else { 2563 /* Tail pointer wraps around. */ 2564 2565 const size_t tmp = (size_t) (produce_q_size - tail); 2566 2567 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp); 2568 if (result >= VMCI_SUCCESS) 2569 result = qp_memcpy_to_queue_iter(produce_q, 0, from, 2570 written - tmp); 2571 } 2572 2573 if (result < VMCI_SUCCESS) 2574 return result; 2575 2576 vmci_q_header_add_producer_tail(produce_q->q_header, written, 2577 produce_q_size); 2578 return written; 2579 } 2580 2581 /* 2582 * Dequeues data (if available) from the given consume queue. Writes data 2583 * to the user provided buffer using the provided function. 2584 * Assumes the queue->mutex has been acquired. 2585 * Results: 2586 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. 2587 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue 2588 * (as defined by the queue size). 2589 * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer. 2590 * Otherwise the number of bytes dequeued is returned. 2591 * Side effects: 2592 * Updates the head pointer of the consume queue. 2593 */ 2594 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2595 struct vmci_queue *consume_q, 2596 const u64 consume_q_size, 2597 struct iov_iter *to, 2598 bool update_consumer) 2599 { 2600 size_t buf_size = iov_iter_count(to); 2601 s64 buf_ready; 2602 u64 head; 2603 size_t read; 2604 ssize_t result; 2605 2606 result = qp_map_queue_headers(produce_q, consume_q); 2607 if (unlikely(result != VMCI_SUCCESS)) 2608 return result; 2609 2610 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, 2611 produce_q->q_header, 2612 consume_q_size); 2613 if (buf_ready == 0) 2614 return VMCI_ERROR_QUEUEPAIR_NODATA; 2615 2616 if (buf_ready < VMCI_SUCCESS) 2617 return (ssize_t) buf_ready; 2618 2619 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2620 head = vmci_q_header_consumer_head(produce_q->q_header); 2621 if (likely(head + read < consume_q_size)) { 2622 result = qp_memcpy_from_queue_iter(to, consume_q, head, read); 2623 } else { 2624 /* Head pointer wraps around. 
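			 *
			 * For example, with illustrative values
			 * consume_q_size = 4096, head = 4000 and
			 * read = 200, the first copy takes
			 * tmp = 96 bytes starting at offset 4000 and
			 * the second copy takes the remaining
			 * 104 bytes from offset 0.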
*/ 2625 2626 const size_t tmp = (size_t) (consume_q_size - head); 2627 2628 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp); 2629 if (result >= VMCI_SUCCESS) 2630 result = qp_memcpy_from_queue_iter(to, consume_q, 0, 2631 read - tmp); 2632 2633 } 2634 2635 if (result < VMCI_SUCCESS) 2636 return result; 2637 2638 if (update_consumer) 2639 vmci_q_header_add_consumer_head(produce_q->q_header, 2640 read, consume_q_size); 2641 2642 return read; 2643 } 2644 2645 /* 2646 * vmci_qpair_alloc() - Allocates a queue pair. 2647 * @qpair: Pointer for the new vmci_qp struct. 2648 * @handle: Handle to track the resource. 2649 * @produce_qsize: Desired size of the producer queue. 2650 * @consume_qsize: Desired size of the consumer queue. 2651 * @peer: ContextID of the peer. 2652 * @flags: VMCI flags. 2653 * @priv_flags: VMCI priviledge flags. 2654 * 2655 * This is the client interface for allocating the memory for a 2656 * vmci_qp structure and then attaching to the underlying 2657 * queue. If an error occurs allocating the memory for the 2658 * vmci_qp structure no attempt is made to attach. If an 2659 * error occurs attaching, then the structure is freed. 2660 */ 2661 int vmci_qpair_alloc(struct vmci_qp **qpair, 2662 struct vmci_handle *handle, 2663 u64 produce_qsize, 2664 u64 consume_qsize, 2665 u32 peer, 2666 u32 flags, 2667 u32 priv_flags) 2668 { 2669 struct vmci_qp *my_qpair; 2670 int retval; 2671 struct vmci_handle src = VMCI_INVALID_HANDLE; 2672 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID); 2673 enum vmci_route route; 2674 vmci_event_release_cb wakeup_cb; 2675 void *client_data; 2676 2677 /* 2678 * Restrict the size of a queuepair. The device already 2679 * enforces a limit on the total amount of memory that can be 2680 * allocated to queuepairs for a guest. However, we try to 2681 * allocate this memory before we make the queuepair 2682 * allocation hypercall. On Linux, we allocate each page 2683 * separately, which means rather than fail, the guest will 2684 * thrash while it tries to allocate, and will become 2685 * increasingly unresponsive to the point where it appears to 2686 * be hung. So we place a limit on the size of an individual 2687 * queuepair here, and leave the device to enforce the 2688 * restriction on total queuepair memory. (Note that this 2689 * doesn't prevent all cases; a user with only this much 2690 * physical memory could still get into trouble.) The error 2691 * used by the device is NO_RESOURCES, so use that here too. 2692 */ 2693 2694 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) || 2695 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY) 2696 return VMCI_ERROR_NO_RESOURCES; 2697 2698 retval = vmci_route(&src, &dst, false, &route); 2699 if (retval < VMCI_SUCCESS) 2700 route = vmci_guest_code_active() ? 
2701 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2702 2703 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2704 pr_devel("NONBLOCK OR PINNED set"); 2705 return VMCI_ERROR_INVALID_ARGS; 2706 } 2707 2708 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); 2709 if (!my_qpair) 2710 return VMCI_ERROR_NO_MEM; 2711 2712 my_qpair->produce_q_size = produce_qsize; 2713 my_qpair->consume_q_size = consume_qsize; 2714 my_qpair->peer = peer; 2715 my_qpair->flags = flags; 2716 my_qpair->priv_flags = priv_flags; 2717 2718 wakeup_cb = NULL; 2719 client_data = NULL; 2720 2721 if (VMCI_ROUTE_AS_HOST == route) { 2722 my_qpair->guest_endpoint = false; 2723 if (!(flags & VMCI_QPFLAG_LOCAL)) { 2724 my_qpair->blocked = 0; 2725 my_qpair->generation = 0; 2726 init_waitqueue_head(&my_qpair->event); 2727 wakeup_cb = qp_wakeup_cb; 2728 client_data = (void *)my_qpair; 2729 } 2730 } else { 2731 my_qpair->guest_endpoint = true; 2732 } 2733 2734 retval = vmci_qp_alloc(handle, 2735 &my_qpair->produce_q, 2736 my_qpair->produce_q_size, 2737 &my_qpair->consume_q, 2738 my_qpair->consume_q_size, 2739 my_qpair->peer, 2740 my_qpair->flags, 2741 my_qpair->priv_flags, 2742 my_qpair->guest_endpoint, 2743 wakeup_cb, client_data); 2744 2745 if (retval < VMCI_SUCCESS) { 2746 kfree(my_qpair); 2747 return retval; 2748 } 2749 2750 *qpair = my_qpair; 2751 my_qpair->handle = *handle; 2752 2753 return retval; 2754 } 2755 EXPORT_SYMBOL_GPL(vmci_qpair_alloc); 2756 2757 /* 2758 * vmci_qpair_detach() - Detatches the client from a queue pair. 2759 * @qpair: Reference of a pointer to the qpair struct. 2760 * 2761 * This is the client interface for detaching from a VMCIQPair. 2762 * Note that this routine will free the memory allocated for the 2763 * vmci_qp structure too. 2764 */ 2765 int vmci_qpair_detach(struct vmci_qp **qpair) 2766 { 2767 int result; 2768 struct vmci_qp *old_qpair; 2769 2770 if (!qpair || !(*qpair)) 2771 return VMCI_ERROR_INVALID_ARGS; 2772 2773 old_qpair = *qpair; 2774 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); 2775 2776 /* 2777 * The guest can fail to detach for a number of reasons, and 2778 * if it does so, it will cleanup the entry (if there is one). 2779 * The host can fail too, but it won't cleanup the entry 2780 * immediately, it will do that later when the context is 2781 * freed. Either way, we need to release the qpair struct 2782 * here; there isn't much the caller can do, and we don't want 2783 * to leak. 2784 */ 2785 2786 memset(old_qpair, 0, sizeof(*old_qpair)); 2787 old_qpair->handle = VMCI_INVALID_HANDLE; 2788 old_qpair->peer = VMCI_INVALID_ID; 2789 kfree(old_qpair); 2790 *qpair = NULL; 2791 2792 return result; 2793 } 2794 EXPORT_SYMBOL_GPL(vmci_qpair_detach); 2795 2796 /* 2797 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. 2798 * @qpair: Pointer to the queue pair struct. 2799 * @producer_tail: Reference used for storing producer tail index. 2800 * @consumer_head: Reference used for storing the consumer head index. 2801 * 2802 * This is the client interface for getting the current indexes of the 2803 * QPair from the point of the view of the caller as the producer. 
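 *
 * A short usage sketch; produce_q_size stands for the producer queue
 * size the caller passed to vmci_qpair_alloc():
 *
 *	u64 tail, head;
 *
 *	if (vmci_qpair_get_produce_indexes(qpair, &tail, &head) ==
 *	    VMCI_SUCCESS) {
 *		u64 used = (tail >= head) ?
 *			tail - head : produce_q_size - (head - tail);
 *	}
 *
 * Most callers can use vmci_qpair_produce_free_space() below instead of
 * computing this by hand.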
2804 */ 2805 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, 2806 u64 *producer_tail, 2807 u64 *consumer_head) 2808 { 2809 struct vmci_queue_header *produce_q_header; 2810 struct vmci_queue_header *consume_q_header; 2811 int result; 2812 2813 if (!qpair) 2814 return VMCI_ERROR_INVALID_ARGS; 2815 2816 qp_lock(qpair); 2817 result = 2818 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2819 if (result == VMCI_SUCCESS) 2820 vmci_q_header_get_pointers(produce_q_header, consume_q_header, 2821 producer_tail, consumer_head); 2822 qp_unlock(qpair); 2823 2824 if (result == VMCI_SUCCESS && 2825 ((producer_tail && *producer_tail >= qpair->produce_q_size) || 2826 (consumer_head && *consumer_head >= qpair->produce_q_size))) 2827 return VMCI_ERROR_INVALID_SIZE; 2828 2829 return result; 2830 } 2831 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); 2832 2833 /* 2834 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. 2835 * @qpair: Pointer to the queue pair struct. 2836 * @consumer_tail: Reference used for storing consumer tail index. 2837 * @producer_head: Reference used for storing the producer head index. 2838 * 2839 * This is the client interface for getting the current indexes of the 2840 * QPair from the point of the view of the caller as the consumer. 2841 */ 2842 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, 2843 u64 *consumer_tail, 2844 u64 *producer_head) 2845 { 2846 struct vmci_queue_header *produce_q_header; 2847 struct vmci_queue_header *consume_q_header; 2848 int result; 2849 2850 if (!qpair) 2851 return VMCI_ERROR_INVALID_ARGS; 2852 2853 qp_lock(qpair); 2854 result = 2855 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2856 if (result == VMCI_SUCCESS) 2857 vmci_q_header_get_pointers(consume_q_header, produce_q_header, 2858 consumer_tail, producer_head); 2859 qp_unlock(qpair); 2860 2861 if (result == VMCI_SUCCESS && 2862 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || 2863 (producer_head && *producer_head >= qpair->consume_q_size))) 2864 return VMCI_ERROR_INVALID_SIZE; 2865 2866 return result; 2867 } 2868 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); 2869 2870 /* 2871 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. 2872 * @qpair: Pointer to the queue pair struct. 2873 * 2874 * This is the client interface for getting the amount of free 2875 * space in the QPair from the point of the view of the caller as 2876 * the producer which is the common case. Returns < 0 if err, else 2877 * available bytes into which data can be enqueued if > 0. 2878 */ 2879 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) 2880 { 2881 struct vmci_queue_header *produce_q_header; 2882 struct vmci_queue_header *consume_q_header; 2883 s64 result; 2884 2885 if (!qpair) 2886 return VMCI_ERROR_INVALID_ARGS; 2887 2888 qp_lock(qpair); 2889 result = 2890 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2891 if (result == VMCI_SUCCESS) 2892 result = vmci_q_header_free_space(produce_q_header, 2893 consume_q_header, 2894 qpair->produce_q_size); 2895 else 2896 result = 0; 2897 2898 qp_unlock(qpair); 2899 2900 return result; 2901 } 2902 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); 2903 2904 /* 2905 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. 2906 * @qpair: Pointer to the queue pair struct. 
2907 * 2908 * This is the client interface for getting the amount of free 2909 * space in the QPair from the point of the view of the caller as 2910 * the consumer which is not the common case. Returns < 0 if err, else 2911 * available bytes into which data can be enqueued if > 0. 2912 */ 2913 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) 2914 { 2915 struct vmci_queue_header *produce_q_header; 2916 struct vmci_queue_header *consume_q_header; 2917 s64 result; 2918 2919 if (!qpair) 2920 return VMCI_ERROR_INVALID_ARGS; 2921 2922 qp_lock(qpair); 2923 result = 2924 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2925 if (result == VMCI_SUCCESS) 2926 result = vmci_q_header_free_space(consume_q_header, 2927 produce_q_header, 2928 qpair->consume_q_size); 2929 else 2930 result = 0; 2931 2932 qp_unlock(qpair); 2933 2934 return result; 2935 } 2936 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); 2937 2938 /* 2939 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from 2940 * producer queue. 2941 * @qpair: Pointer to the queue pair struct. 2942 * 2943 * This is the client interface for getting the amount of 2944 * enqueued data in the QPair from the point of the view of the 2945 * caller as the producer which is not the common case. Returns < 0 if err, 2946 * else available bytes that may be read. 2947 */ 2948 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) 2949 { 2950 struct vmci_queue_header *produce_q_header; 2951 struct vmci_queue_header *consume_q_header; 2952 s64 result; 2953 2954 if (!qpair) 2955 return VMCI_ERROR_INVALID_ARGS; 2956 2957 qp_lock(qpair); 2958 result = 2959 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2960 if (result == VMCI_SUCCESS) 2961 result = vmci_q_header_buf_ready(produce_q_header, 2962 consume_q_header, 2963 qpair->produce_q_size); 2964 else 2965 result = 0; 2966 2967 qp_unlock(qpair); 2968 2969 return result; 2970 } 2971 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); 2972 2973 /* 2974 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from 2975 * consumer queue. 2976 * @qpair: Pointer to the queue pair struct. 2977 * 2978 * This is the client interface for getting the amount of 2979 * enqueued data in the QPair from the point of the view of the 2980 * caller as the consumer which is the normal case. Returns < 0 if err, 2981 * else available bytes that may be read. 2982 */ 2983 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) 2984 { 2985 struct vmci_queue_header *produce_q_header; 2986 struct vmci_queue_header *consume_q_header; 2987 s64 result; 2988 2989 if (!qpair) 2990 return VMCI_ERROR_INVALID_ARGS; 2991 2992 qp_lock(qpair); 2993 result = 2994 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2995 if (result == VMCI_SUCCESS) 2996 result = vmci_q_header_buf_ready(consume_q_header, 2997 produce_q_header, 2998 qpair->consume_q_size); 2999 else 3000 result = 0; 3001 3002 qp_unlock(qpair); 3003 3004 return result; 3005 } 3006 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); 3007 3008 /* 3009 * vmci_qpair_enqueue() - Throw data on the queue. 3010 * @qpair: Pointer to the queue pair struct. 3011 * @buf: Pointer to buffer containing data 3012 * @buf_size: Length of buffer. 3013 * @buf_type: Buffer type (Unused). 3014 * 3015 * This is the client interface for enqueueing data into the queue. 3016 * Returns number of bytes enqueued or < 0 on error. 
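 *
 * A minimal usage sketch, with error handling trimmed; MY_PEER,
 * PRODUCE_SZ and CONSUME_SZ are placeholders chosen by the caller:
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	char msg[] = "hello";
 *	ssize_t sent;
 *
 *	if (vmci_qpair_alloc(&qpair, &handle, PRODUCE_SZ, CONSUME_SZ,
 *			     MY_PEER, 0, VMCI_NO_PRIVILEGE_FLAGS) < VMCI_SUCCESS)
 *		return;
 *	sent = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
 *	...
 *	vmci_qpair_detach(&qpair);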
3017 */ 3018 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, 3019 const void *buf, 3020 size_t buf_size, 3021 int buf_type) 3022 { 3023 ssize_t result; 3024 struct iov_iter from; 3025 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size}; 3026 3027 if (!qpair || !buf) 3028 return VMCI_ERROR_INVALID_ARGS; 3029 3030 iov_iter_kvec(&from, WRITE, &v, 1, buf_size); 3031 3032 qp_lock(qpair); 3033 3034 do { 3035 result = qp_enqueue_locked(qpair->produce_q, 3036 qpair->consume_q, 3037 qpair->produce_q_size, 3038 &from); 3039 3040 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3041 !qp_wait_for_ready_queue(qpair)) 3042 result = VMCI_ERROR_WOULD_BLOCK; 3043 3044 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3045 3046 qp_unlock(qpair); 3047 3048 return result; 3049 } 3050 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); 3051 3052 /* 3053 * vmci_qpair_dequeue() - Get data from the queue. 3054 * @qpair: Pointer to the queue pair struct. 3055 * @buf: Pointer to buffer for the data 3056 * @buf_size: Length of buffer. 3057 * @buf_type: Buffer type (Unused). 3058 * 3059 * This is the client interface for dequeueing data from the queue. 3060 * Returns number of bytes dequeued or < 0 on error. 3061 */ 3062 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, 3063 void *buf, 3064 size_t buf_size, 3065 int buf_type) 3066 { 3067 ssize_t result; 3068 struct iov_iter to; 3069 struct kvec v = {.iov_base = buf, .iov_len = buf_size}; 3070 3071 if (!qpair || !buf) 3072 return VMCI_ERROR_INVALID_ARGS; 3073 3074 iov_iter_kvec(&to, READ, &v, 1, buf_size); 3075 3076 qp_lock(qpair); 3077 3078 do { 3079 result = qp_dequeue_locked(qpair->produce_q, 3080 qpair->consume_q, 3081 qpair->consume_q_size, 3082 &to, true); 3083 3084 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3085 !qp_wait_for_ready_queue(qpair)) 3086 result = VMCI_ERROR_WOULD_BLOCK; 3087 3088 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3089 3090 qp_unlock(qpair); 3091 3092 return result; 3093 } 3094 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); 3095 3096 /* 3097 * vmci_qpair_peek() - Peek at the data in the queue. 3098 * @qpair: Pointer to the queue pair struct. 3099 * @buf: Pointer to buffer for the data 3100 * @buf_size: Length of buffer. 3101 * @buf_type: Buffer type (Unused on Linux). 3102 * 3103 * This is the client interface for peeking into a queue. (I.e., 3104 * copy data from the queue without updating the head pointer.) 3105 * Returns number of bytes dequeued or < 0 on error. 3106 */ 3107 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, 3108 void *buf, 3109 size_t buf_size, 3110 int buf_type) 3111 { 3112 struct iov_iter to; 3113 struct kvec v = {.iov_base = buf, .iov_len = buf_size}; 3114 ssize_t result; 3115 3116 if (!qpair || !buf) 3117 return VMCI_ERROR_INVALID_ARGS; 3118 3119 iov_iter_kvec(&to, READ, &v, 1, buf_size); 3120 3121 qp_lock(qpair); 3122 3123 do { 3124 result = qp_dequeue_locked(qpair->produce_q, 3125 qpair->consume_q, 3126 qpair->consume_q_size, 3127 &to, false); 3128 3129 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3130 !qp_wait_for_ready_queue(qpair)) 3131 result = VMCI_ERROR_WOULD_BLOCK; 3132 3133 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3134 3135 qp_unlock(qpair); 3136 3137 return result; 3138 } 3139 EXPORT_SYMBOL_GPL(vmci_qpair_peek); 3140 3141 /* 3142 * vmci_qpair_enquev() - Throw data on the queue using iov. 3143 * @qpair: Pointer to the queue pair struct. 3144 * @iov: Pointer to buffer containing data 3145 * @iov_size: Length of buffer. 3146 * @buf_type: Buffer type (Unused). 
3147 * 3148 * This is the client interface for enqueueing data into the queue. 3149 * This function uses IO vectors to handle the work. Returns number 3150 * of bytes enqueued or < 0 on error. 3151 */ 3152 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 3153 struct msghdr *msg, 3154 size_t iov_size, 3155 int buf_type) 3156 { 3157 ssize_t result; 3158 3159 if (!qpair) 3160 return VMCI_ERROR_INVALID_ARGS; 3161 3162 qp_lock(qpair); 3163 3164 do { 3165 result = qp_enqueue_locked(qpair->produce_q, 3166 qpair->consume_q, 3167 qpair->produce_q_size, 3168 &msg->msg_iter); 3169 3170 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3171 !qp_wait_for_ready_queue(qpair)) 3172 result = VMCI_ERROR_WOULD_BLOCK; 3173 3174 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3175 3176 qp_unlock(qpair); 3177 3178 return result; 3179 } 3180 EXPORT_SYMBOL_GPL(vmci_qpair_enquev); 3181 3182 /* 3183 * vmci_qpair_dequev() - Get data from the queue using iov. 3184 * @qpair: Pointer to the queue pair struct. 3185 * @iov: Pointer to buffer for the data 3186 * @iov_size: Length of buffer. 3187 * @buf_type: Buffer type (Unused). 3188 * 3189 * This is the client interface for dequeueing data from the queue. 3190 * This function uses IO vectors to handle the work. Returns number 3191 * of bytes dequeued or < 0 on error. 3192 */ 3193 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 3194 struct msghdr *msg, 3195 size_t iov_size, 3196 int buf_type) 3197 { 3198 ssize_t result; 3199 3200 if (!qpair) 3201 return VMCI_ERROR_INVALID_ARGS; 3202 3203 qp_lock(qpair); 3204 3205 do { 3206 result = qp_dequeue_locked(qpair->produce_q, 3207 qpair->consume_q, 3208 qpair->consume_q_size, 3209 &msg->msg_iter, true); 3210 3211 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3212 !qp_wait_for_ready_queue(qpair)) 3213 result = VMCI_ERROR_WOULD_BLOCK; 3214 3215 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3216 3217 qp_unlock(qpair); 3218 3219 return result; 3220 } 3221 EXPORT_SYMBOL_GPL(vmci_qpair_dequev); 3222 3223 /* 3224 * vmci_qpair_peekv() - Peek at the data in the queue using iov. 3225 * @qpair: Pointer to the queue pair struct. 3226 * @iov: Pointer to buffer for the data 3227 * @iov_size: Length of buffer. 3228 * @buf_type: Buffer type (Unused on Linux). 3229 * 3230 * This is the client interface for peeking into a queue. (I.e., 3231 * copy data from the queue without updating the head pointer.) 3232 * This function uses IO vectors to handle the work. Returns number 3233 * of bytes peeked or < 0 on error. 3234 */ 3235 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, 3236 struct msghdr *msg, 3237 size_t iov_size, 3238 int buf_type) 3239 { 3240 ssize_t result; 3241 3242 if (!qpair) 3243 return VMCI_ERROR_INVALID_ARGS; 3244 3245 qp_lock(qpair); 3246 3247 do { 3248 result = qp_dequeue_locked(qpair->produce_q, 3249 qpair->consume_q, 3250 qpair->consume_q_size, 3251 &msg->msg_iter, false); 3252 3253 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3254 !qp_wait_for_ready_queue(qpair)) 3255 result = VMCI_ERROR_WOULD_BLOCK; 3256 3257 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3258 3259 qp_unlock(qpair); 3260 return result; 3261 } 3262 EXPORT_SYMBOL_GPL(vmci_qpair_peekv); 3263
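/*
 * A minimal sketch of driving the iov-based variants above from kernel
 * code with a kvec-backed msghdr; buf, len and received are
 * illustrative locals:
 *
 *	struct kvec v = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { };
 *	ssize_t received;
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &v, 1, len);
 *	received = vmci_qpair_dequev(qpair, &msg, len, 0);
 */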