1 /* 2 * VMware VMCI Driver 3 * 4 * Copyright (C) 2012 VMware, Inc. All rights reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the 8 * Free Software Foundation version 2 and no later version. 9 * 10 * This program is distributed in the hope that it will be useful, but 11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * for more details. 14 */ 15 16 #include <linux/vmw_vmci_defs.h> 17 #include <linux/vmw_vmci_api.h> 18 #include <linux/highmem.h> 19 #include <linux/kernel.h> 20 #include <linux/mm.h> 21 #include <linux/module.h> 22 #include <linux/mutex.h> 23 #include <linux/pagemap.h> 24 #include <linux/pci.h> 25 #include <linux/sched.h> 26 #include <linux/slab.h> 27 #include <linux/uio.h> 28 #include <linux/wait.h> 29 #include <linux/vmalloc.h> 30 31 #include "vmci_handle_array.h" 32 #include "vmci_queue_pair.h" 33 #include "vmci_datagram.h" 34 #include "vmci_resource.h" 35 #include "vmci_context.h" 36 #include "vmci_driver.h" 37 #include "vmci_event.h" 38 #include "vmci_route.h" 39 40 /* 41 * In the following, we will distinguish between two kinds of VMX processes - 42 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized 43 * VMCI page files in the VMX and supporting VM to VM communication and the 44 * newer ones that use the guest memory directly. We will in the following 45 * refer to the older VMX versions as old-style VMX'en, and the newer ones as 46 * new-style VMX'en. 47 * 48 * The state transition datagram is as follows (the VMCIQPB_ prefix has been 49 * removed for readability) - see below for more details on the transtions: 50 * 51 * -------------- NEW ------------- 52 * | | 53 * \_/ \_/ 54 * CREATED_NO_MEM <-----------------> CREATED_MEM 55 * | | | 56 * | o-----------------------o | 57 * | | | 58 * \_/ \_/ \_/ 59 * ATTACHED_NO_MEM <----------------> ATTACHED_MEM 60 * | | | 61 * | o----------------------o | 62 * | | | 63 * \_/ \_/ \_/ 64 * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM 65 * | | 66 * | | 67 * -------------> gone <------------- 68 * 69 * In more detail. When a VMCI queue pair is first created, it will be in the 70 * VMCIQPB_NEW state. It will then move into one of the following states: 71 * 72 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either: 73 * 74 * - the created was performed by a host endpoint, in which case there is 75 * no backing memory yet. 76 * 77 * - the create was initiated by an old-style VMX, that uses 78 * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at 79 * a later point in time. This state can be distinguished from the one 80 * above by the context ID of the creator. A host side is not allowed to 81 * attach until the page store has been set. 82 * 83 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair 84 * is created by a VMX using the queue pair device backend that 85 * sets the UVAs of the queue pair immediately and stores the 86 * information for later attachers. At this point, it is ready for 87 * the host side to attach to it. 88 * 89 * Once the queue pair is in one of the created states (with the exception of 90 * the case mentioned for older VMX'en above), it is possible to attach to the 91 * queue pair. 
Again we have two new states possible: 92 * 93 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following 94 * paths: 95 * 96 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue 97 * pair, and attaches to a queue pair previously created by the host side. 98 * 99 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair 100 * already created by a guest. 101 * 102 * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls 103 * vmci_qp_broker_set_page_store (see below). 104 * 105 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the 106 * VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will 107 * bring the queue pair into this state. Once vmci_qp_broker_set_page_store 108 * is called to register the user memory, the VMCIQPB_ATTACH_MEM state 109 * will be entered. 110 * 111 * From the attached queue pair, the queue pair can enter the shutdown states 112 * when either side of the queue pair detaches. If the guest side detaches 113 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where 114 * the content of the queue pair will no longer be available. If the host 115 * side detaches first, the queue pair will either enter the 116 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or 117 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped 118 * (e.g., the host detaches while a guest is stunned). 119 * 120 * New-style VMX'en will also unmap guest memory, if the guest is 121 * quiesced, e.g., during a snapshot operation. In that case, the guest 122 * memory will no longer be available, and the queue pair will transition from 123 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more, 124 * in which case the queue pair will transition from the *_NO_MEM state at that 125 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed, 126 * since the peer may have either attached or detached in the meantime. The 127 * values are laid out such that ++ on a state will move from a *_NO_MEM to a 128 * *_MEM state, and vice versa. 129 */ 130 131 /* 132 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these 133 * types are passed around to enqueue and dequeue routines. Note that 134 * often the functions passed are simply wrappers around memcpy 135 * itself. 136 * 137 * Note: In order for the memcpy typedefs to be compatible with the VMKernel, 138 * there's an unused last parameter for the hosted side. In 139 * ESX, that parameter holds a buffer type. 140 */ 141 typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue, 142 u64 queue_offset, const void *src, 143 size_t src_offset, size_t size); 144 typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset, 145 const struct vmci_queue *queue, 146 u64 queue_offset, size_t size); 147 148 /* The Kernel specific component of the struct vmci_queue structure. */ 149 struct vmci_queue_kern_if { 150 struct mutex __mutex; /* Protects the queue. */ 151 struct mutex *mutex; /* Shared by producer and consumer queues. */ 152 size_t num_pages; /* Number of pages incl. header. */ 153 bool host; /* Host or guest? */ 154 union { 155 struct { 156 dma_addr_t *pas; 157 void **vas; 158 } g; /* Used by the guest. */ 159 struct { 160 struct page **page; 161 struct page **header_page; 162 } h; /* Used by the host. */ 163 } u; 164 }; 165 166 /* 167 * This structure is opaque to the clients. 
168 */ 169 struct vmci_qp { 170 struct vmci_handle handle; 171 struct vmci_queue *produce_q; 172 struct vmci_queue *consume_q; 173 u64 produce_q_size; 174 u64 consume_q_size; 175 u32 peer; 176 u32 flags; 177 u32 priv_flags; 178 bool guest_endpoint; 179 unsigned int blocked; 180 unsigned int generation; 181 wait_queue_head_t event; 182 }; 183 184 enum qp_broker_state { 185 VMCIQPB_NEW, 186 VMCIQPB_CREATED_NO_MEM, 187 VMCIQPB_CREATED_MEM, 188 VMCIQPB_ATTACHED_NO_MEM, 189 VMCIQPB_ATTACHED_MEM, 190 VMCIQPB_SHUTDOWN_NO_MEM, 191 VMCIQPB_SHUTDOWN_MEM, 192 VMCIQPB_GONE 193 }; 194 195 #define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \ 196 _qpb->state == VMCIQPB_ATTACHED_MEM || \ 197 _qpb->state == VMCIQPB_SHUTDOWN_MEM) 198 199 /* 200 * In the queue pair broker, we always use the guest point of view for 201 * the produce and consume queue values and references, e.g., the 202 * produce queue size stored is the guests produce queue size. The 203 * host endpoint will need to swap these around. The only exception is 204 * the local queue pairs on the host, in which case the host endpoint 205 * that creates the queue pair will have the right orientation, and 206 * the attaching host endpoint will need to swap. 207 */ 208 struct qp_entry { 209 struct list_head list_item; 210 struct vmci_handle handle; 211 u32 peer; 212 u32 flags; 213 u64 produce_size; 214 u64 consume_size; 215 u32 ref_count; 216 }; 217 218 struct qp_broker_entry { 219 struct vmci_resource resource; 220 struct qp_entry qp; 221 u32 create_id; 222 u32 attach_id; 223 enum qp_broker_state state; 224 bool require_trusted_attach; 225 bool created_by_trusted; 226 bool vmci_page_files; /* Created by VMX using VMCI page files */ 227 struct vmci_queue *produce_q; 228 struct vmci_queue *consume_q; 229 struct vmci_queue_header saved_produce_q; 230 struct vmci_queue_header saved_consume_q; 231 vmci_event_release_cb wakeup_cb; 232 void *client_data; 233 void *local_mem; /* Kernel memory for local queue pair */ 234 }; 235 236 struct qp_guest_endpoint { 237 struct vmci_resource resource; 238 struct qp_entry qp; 239 u64 num_ppns; 240 void *produce_q; 241 void *consume_q; 242 struct ppn_set ppn_set; 243 }; 244 245 struct qp_list { 246 struct list_head head; 247 struct mutex mutex; /* Protect queue list. */ 248 }; 249 250 static struct qp_list qp_broker_list = { 251 .head = LIST_HEAD_INIT(qp_broker_list.head), 252 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex), 253 }; 254 255 static struct qp_list qp_guest_endpoints = { 256 .head = LIST_HEAD_INIT(qp_guest_endpoints.head), 257 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex), 258 }; 259 260 #define INVALID_VMCI_GUEST_MEM_ID 0 261 #define QPE_NUM_PAGES(_QPE) ((u32) \ 262 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ 263 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) 264 265 266 /* 267 * Frees kernel VA space for a given queue and its queue header, and 268 * frees physical data pages. 269 */ 270 static void qp_free_queue(void *q, u64 size) 271 { 272 struct vmci_queue *queue = q; 273 274 if (queue) { 275 u64 i; 276 277 /* Given size does not include header, so add in a page here. */ 278 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) { 279 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE, 280 queue->kernel_if->u.g.vas[i], 281 queue->kernel_if->u.g.pas[i]); 282 } 283 284 vfree(queue); 285 } 286 } 287 288 /* 289 * Allocates kernel queue pages of specified size with IOMMU mappings, 290 * plus space for the queue structure/kernel interface and the queue 291 * header. 
292 */ 293 static void *qp_alloc_queue(u64 size, u32 flags) 294 { 295 u64 i; 296 struct vmci_queue *queue; 297 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 298 const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); 299 const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); 300 const size_t queue_size = 301 sizeof(*queue) + sizeof(*queue->kernel_if) + 302 pas_size + vas_size; 303 304 queue = vmalloc(queue_size); 305 if (!queue) 306 return NULL; 307 308 queue->q_header = NULL; 309 queue->saved_header = NULL; 310 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); 311 queue->kernel_if->mutex = NULL; 312 queue->kernel_if->num_pages = num_pages; 313 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); 314 queue->kernel_if->u.g.vas = 315 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); 316 queue->kernel_if->host = false; 317 318 for (i = 0; i < num_pages; i++) { 319 queue->kernel_if->u.g.vas[i] = 320 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, 321 &queue->kernel_if->u.g.pas[i], 322 GFP_KERNEL); 323 if (!queue->kernel_if->u.g.vas[i]) { 324 /* Size excl. the header. */ 325 qp_free_queue(queue, i * PAGE_SIZE); 326 return NULL; 327 } 328 } 329 330 /* Queue header is the first page. */ 331 queue->q_header = queue->kernel_if->u.g.vas[0]; 332 333 return queue; 334 } 335 336 /* 337 * Copies from a given buffer or iovector to a VMCI Queue. Uses 338 * kmap()/kunmap() to dynamically map/unmap required portions of the queue 339 * by traversing the offset -> page translation structure for the queue. 340 * Assumes that offset + size does not wrap around in the queue. 341 */ 342 static int __qp_memcpy_to_queue(struct vmci_queue *queue, 343 u64 queue_offset, 344 const void *src, 345 size_t size, 346 bool is_iovec) 347 { 348 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 349 size_t bytes_copied = 0; 350 351 while (bytes_copied < size) { 352 const u64 page_index = 353 (queue_offset + bytes_copied) / PAGE_SIZE; 354 const size_t page_offset = 355 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 356 void *va; 357 size_t to_copy; 358 359 if (kernel_if->host) 360 va = kmap(kernel_if->u.h.page[page_index]); 361 else 362 va = kernel_if->u.g.vas[page_index + 1]; 363 /* Skip header. */ 364 365 if (size - bytes_copied > PAGE_SIZE - page_offset) 366 /* Enough payload to fill up from this page. */ 367 to_copy = PAGE_SIZE - page_offset; 368 else 369 to_copy = size - bytes_copied; 370 371 if (is_iovec) { 372 struct iovec *iov = (struct iovec *)src; 373 int err; 374 375 /* The iovec will track bytes_copied internally. */ 376 err = memcpy_fromiovec((u8 *)va + page_offset, 377 iov, to_copy); 378 if (err != 0) { 379 if (kernel_if->host) 380 kunmap(kernel_if->u.h.page[page_index]); 381 return VMCI_ERROR_INVALID_ARGS; 382 } 383 } else { 384 memcpy((u8 *)va + page_offset, 385 (u8 *)src + bytes_copied, to_copy); 386 } 387 388 bytes_copied += to_copy; 389 if (kernel_if->host) 390 kunmap(kernel_if->u.h.page[page_index]); 391 } 392 393 return VMCI_SUCCESS; 394 } 395 396 /* 397 * Copies to a given buffer or iovector from a VMCI Queue. Uses 398 * kmap()/kunmap() to dynamically map/unmap required portions of the queue 399 * by traversing the offset -> page translation structure for the queue. 400 * Assumes that offset + size does not wrap around in the queue. 
401 */ 402 static int __qp_memcpy_from_queue(void *dest, 403 const struct vmci_queue *queue, 404 u64 queue_offset, 405 size_t size, 406 bool is_iovec) 407 { 408 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 409 size_t bytes_copied = 0; 410 411 while (bytes_copied < size) { 412 const u64 page_index = 413 (queue_offset + bytes_copied) / PAGE_SIZE; 414 const size_t page_offset = 415 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 416 void *va; 417 size_t to_copy; 418 419 if (kernel_if->host) 420 va = kmap(kernel_if->u.h.page[page_index]); 421 else 422 va = kernel_if->u.g.vas[page_index + 1]; 423 /* Skip header. */ 424 425 if (size - bytes_copied > PAGE_SIZE - page_offset) 426 /* Enough payload to fill up this page. */ 427 to_copy = PAGE_SIZE - page_offset; 428 else 429 to_copy = size - bytes_copied; 430 431 if (is_iovec) { 432 struct iovec *iov = (struct iovec *)dest; 433 int err; 434 435 /* The iovec will track bytes_copied internally. */ 436 err = memcpy_toiovec(iov, (u8 *)va + page_offset, 437 to_copy); 438 if (err != 0) { 439 if (kernel_if->host) 440 kunmap(kernel_if->u.h.page[page_index]); 441 return VMCI_ERROR_INVALID_ARGS; 442 } 443 } else { 444 memcpy((u8 *)dest + bytes_copied, 445 (u8 *)va + page_offset, to_copy); 446 } 447 448 bytes_copied += to_copy; 449 if (kernel_if->host) 450 kunmap(kernel_if->u.h.page[page_index]); 451 } 452 453 return VMCI_SUCCESS; 454 } 455 456 /* 457 * Allocates two list of PPNs --- one for the pages in the produce queue, 458 * and the other for the pages in the consume queue. Intializes the list 459 * of PPNs with the page frame numbers of the KVA for the two queues (and 460 * the queue headers). 461 */ 462 static int qp_alloc_ppn_set(void *prod_q, 463 u64 num_produce_pages, 464 void *cons_q, 465 u64 num_consume_pages, struct ppn_set *ppn_set) 466 { 467 u32 *produce_ppns; 468 u32 *consume_ppns; 469 struct vmci_queue *produce_q = prod_q; 470 struct vmci_queue *consume_q = cons_q; 471 u64 i; 472 473 if (!produce_q || !num_produce_pages || !consume_q || 474 !num_consume_pages || !ppn_set) 475 return VMCI_ERROR_INVALID_ARGS; 476 477 if (ppn_set->initialized) 478 return VMCI_ERROR_ALREADY_EXISTS; 479 480 produce_ppns = 481 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL); 482 if (!produce_ppns) 483 return VMCI_ERROR_NO_MEM; 484 485 consume_ppns = 486 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL); 487 if (!consume_ppns) { 488 kfree(produce_ppns); 489 return VMCI_ERROR_NO_MEM; 490 } 491 492 for (i = 0; i < num_produce_pages; i++) { 493 unsigned long pfn; 494 495 produce_ppns[i] = 496 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 497 pfn = produce_ppns[i]; 498 499 /* Fail allocation if PFN isn't supported by hypervisor. */ 500 if (sizeof(pfn) > sizeof(*produce_ppns) 501 && pfn != produce_ppns[i]) 502 goto ppn_error; 503 } 504 505 for (i = 0; i < num_consume_pages; i++) { 506 unsigned long pfn; 507 508 consume_ppns[i] = 509 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 510 pfn = consume_ppns[i]; 511 512 /* Fail allocation if PFN isn't supported by hypervisor. 
*/ 513 if (sizeof(pfn) > sizeof(*consume_ppns) 514 && pfn != consume_ppns[i]) 515 goto ppn_error; 516 } 517 518 ppn_set->num_produce_pages = num_produce_pages; 519 ppn_set->num_consume_pages = num_consume_pages; 520 ppn_set->produce_ppns = produce_ppns; 521 ppn_set->consume_ppns = consume_ppns; 522 ppn_set->initialized = true; 523 return VMCI_SUCCESS; 524 525 ppn_error: 526 kfree(produce_ppns); 527 kfree(consume_ppns); 528 return VMCI_ERROR_INVALID_ARGS; 529 } 530 531 /* 532 * Frees the two list of PPNs for a queue pair. 533 */ 534 static void qp_free_ppn_set(struct ppn_set *ppn_set) 535 { 536 if (ppn_set->initialized) { 537 /* Do not call these functions on NULL inputs. */ 538 kfree(ppn_set->produce_ppns); 539 kfree(ppn_set->consume_ppns); 540 } 541 memset(ppn_set, 0, sizeof(*ppn_set)); 542 } 543 544 /* 545 * Populates the list of PPNs in the hypercall structure with the PPNS 546 * of the produce queue and the consume queue. 547 */ 548 static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) 549 { 550 memcpy(call_buf, ppn_set->produce_ppns, 551 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns)); 552 memcpy(call_buf + 553 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns), 554 ppn_set->consume_ppns, 555 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns)); 556 557 return VMCI_SUCCESS; 558 } 559 560 static int qp_memcpy_to_queue(struct vmci_queue *queue, 561 u64 queue_offset, 562 const void *src, size_t src_offset, size_t size) 563 { 564 return __qp_memcpy_to_queue(queue, queue_offset, 565 (u8 *)src + src_offset, size, false); 566 } 567 568 static int qp_memcpy_from_queue(void *dest, 569 size_t dest_offset, 570 const struct vmci_queue *queue, 571 u64 queue_offset, size_t size) 572 { 573 return __qp_memcpy_from_queue((u8 *)dest + dest_offset, 574 queue, queue_offset, size, false); 575 } 576 577 /* 578 * Copies from a given iovec from a VMCI Queue. 579 */ 580 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, 581 u64 queue_offset, 582 const void *src, 583 size_t src_offset, size_t size) 584 { 585 586 /* 587 * We ignore src_offset because src is really a struct iovec * and will 588 * maintain offset internally. 589 */ 590 return __qp_memcpy_to_queue(queue, queue_offset, src, size, true); 591 } 592 593 /* 594 * Copies to a given iovec from a VMCI Queue. 595 */ 596 static int qp_memcpy_from_queue_iov(void *dest, 597 size_t dest_offset, 598 const struct vmci_queue *queue, 599 u64 queue_offset, size_t size) 600 { 601 /* 602 * We ignore dest_offset because dest is really a struct iovec * and 603 * will maintain offset internally. 604 */ 605 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true); 606 } 607 608 /* 609 * Allocates kernel VA space of specified size plus space for the queue 610 * and kernel interface. This is different from the guest queue allocator, 611 * because we do not allocate our own queue header/data pages here but 612 * share those of the guest. 
613 */ 614 static struct vmci_queue *qp_host_alloc_queue(u64 size) 615 { 616 struct vmci_queue *queue; 617 const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 618 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 619 const size_t queue_page_size = 620 num_pages * sizeof(*queue->kernel_if->u.h.page); 621 622 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); 623 if (queue) { 624 queue->q_header = NULL; 625 queue->saved_header = NULL; 626 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); 627 queue->kernel_if->host = true; 628 queue->kernel_if->mutex = NULL; 629 queue->kernel_if->num_pages = num_pages; 630 queue->kernel_if->u.h.header_page = 631 (struct page **)((u8 *)queue + queue_size); 632 queue->kernel_if->u.h.page = 633 &queue->kernel_if->u.h.header_page[1]; 634 } 635 636 return queue; 637 } 638 639 /* 640 * Frees kernel memory for a given queue (header plus translation 641 * structure). 642 */ 643 static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) 644 { 645 kfree(queue); 646 } 647 648 /* 649 * Initialize the mutex for the pair of queues. This mutex is used to 650 * protect the q_header and the buffer from changing out from under any 651 * users of either queue. Of course, it's only any good if the mutexes 652 * are actually acquired. Queue structure must lie on non-paged memory 653 * or we cannot guarantee access to the mutex. 654 */ 655 static void qp_init_queue_mutex(struct vmci_queue *produce_q, 656 struct vmci_queue *consume_q) 657 { 658 /* 659 * Only the host queue has shared state - the guest queues do not 660 * need to synchronize access using a queue mutex. 661 */ 662 663 if (produce_q->kernel_if->host) { 664 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; 665 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; 666 mutex_init(produce_q->kernel_if->mutex); 667 } 668 } 669 670 /* 671 * Cleans up the mutex for the pair of queues. 672 */ 673 static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, 674 struct vmci_queue *consume_q) 675 { 676 if (produce_q->kernel_if->host) { 677 produce_q->kernel_if->mutex = NULL; 678 consume_q->kernel_if->mutex = NULL; 679 } 680 } 681 682 /* 683 * Acquire the mutex for the queue. Note that the produce_q and 684 * the consume_q share a mutex. So, only one of the two need to 685 * be passed in to this routine. Either will work just fine. 686 */ 687 static void qp_acquire_queue_mutex(struct vmci_queue *queue) 688 { 689 if (queue->kernel_if->host) 690 mutex_lock(queue->kernel_if->mutex); 691 } 692 693 /* 694 * Release the mutex for the queue. Note that the produce_q and 695 * the consume_q share a mutex. So, only one of the two need to 696 * be passed in to this routine. Either will work just fine. 697 */ 698 static void qp_release_queue_mutex(struct vmci_queue *queue) 699 { 700 if (queue->kernel_if->host) 701 mutex_unlock(queue->kernel_if->mutex); 702 } 703 704 /* 705 * Helper function to release pages in the PageStoreAttachInfo 706 * previously obtained using get_user_pages. 707 */ 708 static void qp_release_pages(struct page **pages, 709 u64 num_pages, bool dirty) 710 { 711 int i; 712 713 for (i = 0; i < num_pages; i++) { 714 if (dirty) 715 set_page_dirty(pages[i]); 716 717 page_cache_release(pages[i]); 718 pages[i] = NULL; 719 } 720 } 721 722 /* 723 * Lock the user pages referenced by the {produce,consume}Buffer 724 * struct into memory and populate the {produce,consume}Pages 725 * arrays in the attach structure with them. 
726 */ 727 static int qp_host_get_user_memory(u64 produce_uva, 728 u64 consume_uva, 729 struct vmci_queue *produce_q, 730 struct vmci_queue *consume_q) 731 { 732 int retval; 733 int err = VMCI_SUCCESS; 734 735 down_write(¤t->mm->mmap_sem); 736 retval = get_user_pages(current, 737 current->mm, 738 (uintptr_t) produce_uva, 739 produce_q->kernel_if->num_pages, 740 1, 0, 741 produce_q->kernel_if->u.h.header_page, NULL); 742 if (retval < produce_q->kernel_if->num_pages) { 743 pr_warn("get_user_pages(produce) failed (retval=%d)", retval); 744 qp_release_pages(produce_q->kernel_if->u.h.header_page, 745 retval, false); 746 err = VMCI_ERROR_NO_MEM; 747 goto out; 748 } 749 750 retval = get_user_pages(current, 751 current->mm, 752 (uintptr_t) consume_uva, 753 consume_q->kernel_if->num_pages, 754 1, 0, 755 consume_q->kernel_if->u.h.header_page, NULL); 756 if (retval < consume_q->kernel_if->num_pages) { 757 pr_warn("get_user_pages(consume) failed (retval=%d)", retval); 758 qp_release_pages(consume_q->kernel_if->u.h.header_page, 759 retval, false); 760 qp_release_pages(produce_q->kernel_if->u.h.header_page, 761 produce_q->kernel_if->num_pages, false); 762 err = VMCI_ERROR_NO_MEM; 763 } 764 765 out: 766 up_write(¤t->mm->mmap_sem); 767 768 return err; 769 } 770 771 /* 772 * Registers the specification of the user pages used for backing a queue 773 * pair. Enough information to map in pages is stored in the OS specific 774 * part of the struct vmci_queue structure. 775 */ 776 static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store, 777 struct vmci_queue *produce_q, 778 struct vmci_queue *consume_q) 779 { 780 u64 produce_uva; 781 u64 consume_uva; 782 783 /* 784 * The new style and the old style mapping only differs in 785 * that we either get a single or two UVAs, so we split the 786 * single UVA range at the appropriate spot. 787 */ 788 produce_uva = page_store->pages; 789 consume_uva = page_store->pages + 790 produce_q->kernel_if->num_pages * PAGE_SIZE; 791 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, 792 consume_q); 793 } 794 795 /* 796 * Releases and removes the references to user pages stored in the attach 797 * struct. Pages are released from the page cache and may become 798 * swappable again. 799 */ 800 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, 801 struct vmci_queue *consume_q) 802 { 803 qp_release_pages(produce_q->kernel_if->u.h.header_page, 804 produce_q->kernel_if->num_pages, true); 805 memset(produce_q->kernel_if->u.h.header_page, 0, 806 sizeof(*produce_q->kernel_if->u.h.header_page) * 807 produce_q->kernel_if->num_pages); 808 qp_release_pages(consume_q->kernel_if->u.h.header_page, 809 consume_q->kernel_if->num_pages, true); 810 memset(consume_q->kernel_if->u.h.header_page, 0, 811 sizeof(*consume_q->kernel_if->u.h.header_page) * 812 consume_q->kernel_if->num_pages); 813 } 814 815 /* 816 * Once qp_host_register_user_memory has been performed on a 817 * queue, the queue pair headers can be mapped into the 818 * kernel. Once mapped, they must be unmapped with 819 * qp_host_unmap_queues prior to calling 820 * qp_host_unregister_user_memory. 821 * Pages are pinned. 
822 */ 823 static int qp_host_map_queues(struct vmci_queue *produce_q, 824 struct vmci_queue *consume_q) 825 { 826 int result; 827 828 if (!produce_q->q_header || !consume_q->q_header) { 829 struct page *headers[2]; 830 831 if (produce_q->q_header != consume_q->q_header) 832 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 833 834 if (produce_q->kernel_if->u.h.header_page == NULL || 835 *produce_q->kernel_if->u.h.header_page == NULL) 836 return VMCI_ERROR_UNAVAILABLE; 837 838 headers[0] = *produce_q->kernel_if->u.h.header_page; 839 headers[1] = *consume_q->kernel_if->u.h.header_page; 840 841 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); 842 if (produce_q->q_header != NULL) { 843 consume_q->q_header = 844 (struct vmci_queue_header *)((u8 *) 845 produce_q->q_header + 846 PAGE_SIZE); 847 result = VMCI_SUCCESS; 848 } else { 849 pr_warn("vmap failed\n"); 850 result = VMCI_ERROR_NO_MEM; 851 } 852 } else { 853 result = VMCI_SUCCESS; 854 } 855 856 return result; 857 } 858 859 /* 860 * Unmaps previously mapped queue pair headers from the kernel. 861 * Pages are unpinned. 862 */ 863 static int qp_host_unmap_queues(u32 gid, 864 struct vmci_queue *produce_q, 865 struct vmci_queue *consume_q) 866 { 867 if (produce_q->q_header) { 868 if (produce_q->q_header < consume_q->q_header) 869 vunmap(produce_q->q_header); 870 else 871 vunmap(consume_q->q_header); 872 873 produce_q->q_header = NULL; 874 consume_q->q_header = NULL; 875 } 876 877 return VMCI_SUCCESS; 878 } 879 880 /* 881 * Finds the entry in the list corresponding to a given handle. Assumes 882 * that the list is locked. 883 */ 884 static struct qp_entry *qp_list_find(struct qp_list *qp_list, 885 struct vmci_handle handle) 886 { 887 struct qp_entry *entry; 888 889 if (vmci_handle_is_invalid(handle)) 890 return NULL; 891 892 list_for_each_entry(entry, &qp_list->head, list_item) { 893 if (vmci_handle_is_equal(entry->handle, handle)) 894 return entry; 895 } 896 897 return NULL; 898 } 899 900 /* 901 * Finds the entry in the list corresponding to a given handle. 902 */ 903 static struct qp_guest_endpoint * 904 qp_guest_handle_to_entry(struct vmci_handle handle) 905 { 906 struct qp_guest_endpoint *entry; 907 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); 908 909 entry = qp ? container_of( 910 qp, struct qp_guest_endpoint, qp) : NULL; 911 return entry; 912 } 913 914 /* 915 * Finds the entry in the list corresponding to a given handle. 916 */ 917 static struct qp_broker_entry * 918 qp_broker_handle_to_entry(struct vmci_handle handle) 919 { 920 struct qp_broker_entry *entry; 921 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); 922 923 entry = qp ? container_of( 924 qp, struct qp_broker_entry, qp) : NULL; 925 return entry; 926 } 927 928 /* 929 * Dispatches a queue pair event message directly into the local event 930 * queue. 931 */ 932 static int qp_notify_peer_local(bool attach, struct vmci_handle handle) 933 { 934 u32 context_id = vmci_get_context_id(); 935 struct vmci_event_qp ev; 936 937 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); 938 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 939 VMCI_CONTEXT_RESOURCE_ID); 940 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); 941 ev.msg.event_data.event = 942 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; 943 ev.payload.peer_id = context_id; 944 ev.payload.handle = handle; 945 946 return vmci_event_dispatch(&ev.msg.hdr); 947 } 948 949 /* 950 * Allocates and initializes a qp_guest_endpoint structure. 
951 * Allocates a queue_pair rid (and handle) iff the given entry has 952 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX 953 * are reserved handles. Assumes that the QP list mutex is held 954 * by the caller. 955 */ 956 static struct qp_guest_endpoint * 957 qp_guest_endpoint_create(struct vmci_handle handle, 958 u32 peer, 959 u32 flags, 960 u64 produce_size, 961 u64 consume_size, 962 void *produce_q, 963 void *consume_q) 964 { 965 int result; 966 struct qp_guest_endpoint *entry; 967 /* One page each for the queue headers. */ 968 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + 969 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; 970 971 if (vmci_handle_is_invalid(handle)) { 972 u32 context_id = vmci_get_context_id(); 973 974 handle = vmci_make_handle(context_id, VMCI_INVALID_ID); 975 } 976 977 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 978 if (entry) { 979 entry->qp.peer = peer; 980 entry->qp.flags = flags; 981 entry->qp.produce_size = produce_size; 982 entry->qp.consume_size = consume_size; 983 entry->qp.ref_count = 0; 984 entry->num_ppns = num_ppns; 985 entry->produce_q = produce_q; 986 entry->consume_q = consume_q; 987 INIT_LIST_HEAD(&entry->qp.list_item); 988 989 /* Add resource obj */ 990 result = vmci_resource_add(&entry->resource, 991 VMCI_RESOURCE_TYPE_QPAIR_GUEST, 992 handle); 993 entry->qp.handle = vmci_resource_handle(&entry->resource); 994 if ((result != VMCI_SUCCESS) || 995 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { 996 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", 997 handle.context, handle.resource, result); 998 kfree(entry); 999 entry = NULL; 1000 } 1001 } 1002 return entry; 1003 } 1004 1005 /* 1006 * Frees a qp_guest_endpoint structure. 1007 */ 1008 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) 1009 { 1010 qp_free_ppn_set(&entry->ppn_set); 1011 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 1012 qp_free_queue(entry->produce_q, entry->qp.produce_size); 1013 qp_free_queue(entry->consume_q, entry->qp.consume_size); 1014 /* Unlink from resource hash table and free callback */ 1015 vmci_resource_remove(&entry->resource); 1016 1017 kfree(entry); 1018 } 1019 1020 /* 1021 * Helper to make a queue_pairAlloc hypercall when the driver is 1022 * supporting a guest device. 
1023 */ 1024 static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) 1025 { 1026 struct vmci_qp_alloc_msg *alloc_msg; 1027 size_t msg_size; 1028 int result; 1029 1030 if (!entry || entry->num_ppns <= 2) 1031 return VMCI_ERROR_INVALID_ARGS; 1032 1033 msg_size = sizeof(*alloc_msg) + 1034 (size_t) entry->num_ppns * sizeof(u32); 1035 alloc_msg = kmalloc(msg_size, GFP_KERNEL); 1036 if (!alloc_msg) 1037 return VMCI_ERROR_NO_MEM; 1038 1039 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1040 VMCI_QUEUEPAIR_ALLOC); 1041 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; 1042 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; 1043 alloc_msg->handle = entry->qp.handle; 1044 alloc_msg->peer = entry->qp.peer; 1045 alloc_msg->flags = entry->qp.flags; 1046 alloc_msg->produce_size = entry->qp.produce_size; 1047 alloc_msg->consume_size = entry->qp.consume_size; 1048 alloc_msg->num_ppns = entry->num_ppns; 1049 1050 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg), 1051 &entry->ppn_set); 1052 if (result == VMCI_SUCCESS) 1053 result = vmci_send_datagram(&alloc_msg->hdr); 1054 1055 kfree(alloc_msg); 1056 1057 return result; 1058 } 1059 1060 /* 1061 * Helper to make a queue_pairDetach hypercall when the driver is 1062 * supporting a guest device. 1063 */ 1064 static int qp_detatch_hypercall(struct vmci_handle handle) 1065 { 1066 struct vmci_qp_detach_msg detach_msg; 1067 1068 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1069 VMCI_QUEUEPAIR_DETACH); 1070 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE; 1071 detach_msg.hdr.payload_size = sizeof(handle); 1072 detach_msg.handle = handle; 1073 1074 return vmci_send_datagram(&detach_msg.hdr); 1075 } 1076 1077 /* 1078 * Adds the given entry to the list. Assumes that the list is locked. 1079 */ 1080 static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) 1081 { 1082 if (entry) 1083 list_add(&entry->list_item, &qp_list->head); 1084 } 1085 1086 /* 1087 * Removes the given entry from the list. Assumes that the list is locked. 1088 */ 1089 static void qp_list_remove_entry(struct qp_list *qp_list, 1090 struct qp_entry *entry) 1091 { 1092 if (entry) 1093 list_del(&entry->list_item); 1094 } 1095 1096 /* 1097 * Helper for VMCI queue_pair detach interface. Frees the physical 1098 * pages for the queue pair. 1099 */ 1100 static int qp_detatch_guest_work(struct vmci_handle handle) 1101 { 1102 int result; 1103 struct qp_guest_endpoint *entry; 1104 u32 ref_count = ~0; /* To avoid compiler warning below */ 1105 1106 mutex_lock(&qp_guest_endpoints.mutex); 1107 1108 entry = qp_guest_handle_to_entry(handle); 1109 if (!entry) { 1110 mutex_unlock(&qp_guest_endpoints.mutex); 1111 return VMCI_ERROR_NOT_FOUND; 1112 } 1113 1114 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1115 result = VMCI_SUCCESS; 1116 1117 if (entry->qp.ref_count > 1) { 1118 result = qp_notify_peer_local(false, handle); 1119 /* 1120 * We can fail to notify a local queuepair 1121 * because we can't allocate. We still want 1122 * to release the entry if that happens, so 1123 * don't bail out yet. 1124 */ 1125 } 1126 } else { 1127 result = qp_detatch_hypercall(handle); 1128 if (result < VMCI_SUCCESS) { 1129 /* 1130 * We failed to notify a non-local queuepair. 1131 * That other queuepair might still be 1132 * accessing the shared memory, so don't 1133 * release the entry yet. It will get cleaned 1134 * up by VMCIqueue_pair_Exit() if necessary 1135 * (assuming we are going away, otherwise why 1136 * did this fail?). 
1137 */ 1138 1139 mutex_unlock(&qp_guest_endpoints.mutex); 1140 return result; 1141 } 1142 } 1143 1144 /* 1145 * If we get here then we either failed to notify a local queuepair, or 1146 * we succeeded in all cases. Release the entry if required. 1147 */ 1148 1149 entry->qp.ref_count--; 1150 if (entry->qp.ref_count == 0) 1151 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); 1152 1153 /* If we didn't remove the entry, this could change once we unlock. */ 1154 if (entry) 1155 ref_count = entry->qp.ref_count; 1156 1157 mutex_unlock(&qp_guest_endpoints.mutex); 1158 1159 if (ref_count == 0) 1160 qp_guest_endpoint_destroy(entry); 1161 1162 return result; 1163 } 1164 1165 /* 1166 * This functions handles the actual allocation of a VMCI queue 1167 * pair guest endpoint. Allocates physical pages for the queue 1168 * pair. It makes OS dependent calls through generic wrappers. 1169 */ 1170 static int qp_alloc_guest_work(struct vmci_handle *handle, 1171 struct vmci_queue **produce_q, 1172 u64 produce_size, 1173 struct vmci_queue **consume_q, 1174 u64 consume_size, 1175 u32 peer, 1176 u32 flags, 1177 u32 priv_flags) 1178 { 1179 const u64 num_produce_pages = 1180 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1; 1181 const u64 num_consume_pages = 1182 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1; 1183 void *my_produce_q = NULL; 1184 void *my_consume_q = NULL; 1185 int result; 1186 struct qp_guest_endpoint *queue_pair_entry = NULL; 1187 1188 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS) 1189 return VMCI_ERROR_NO_ACCESS; 1190 1191 mutex_lock(&qp_guest_endpoints.mutex); 1192 1193 queue_pair_entry = qp_guest_handle_to_entry(*handle); 1194 if (queue_pair_entry) { 1195 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1196 /* Local attach case. */ 1197 if (queue_pair_entry->qp.ref_count > 1) { 1198 pr_devel("Error attempting to attach more than once\n"); 1199 result = VMCI_ERROR_UNAVAILABLE; 1200 goto error_keep_entry; 1201 } 1202 1203 if (queue_pair_entry->qp.produce_size != consume_size || 1204 queue_pair_entry->qp.consume_size != 1205 produce_size || 1206 queue_pair_entry->qp.flags != 1207 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) { 1208 pr_devel("Error mismatched queue pair in local attach\n"); 1209 result = VMCI_ERROR_QUEUEPAIR_MISMATCH; 1210 goto error_keep_entry; 1211 } 1212 1213 /* 1214 * Do a local attach. We swap the consume and 1215 * produce queues for the attacher and deliver 1216 * an attach event. 
1217 */ 1218 result = qp_notify_peer_local(true, *handle); 1219 if (result < VMCI_SUCCESS) 1220 goto error_keep_entry; 1221 1222 my_produce_q = queue_pair_entry->consume_q; 1223 my_consume_q = queue_pair_entry->produce_q; 1224 goto out; 1225 } 1226 1227 result = VMCI_ERROR_ALREADY_EXISTS; 1228 goto error_keep_entry; 1229 } 1230 1231 my_produce_q = qp_alloc_queue(produce_size, flags); 1232 if (!my_produce_q) { 1233 pr_warn("Error allocating pages for produce queue\n"); 1234 result = VMCI_ERROR_NO_MEM; 1235 goto error; 1236 } 1237 1238 my_consume_q = qp_alloc_queue(consume_size, flags); 1239 if (!my_consume_q) { 1240 pr_warn("Error allocating pages for consume queue\n"); 1241 result = VMCI_ERROR_NO_MEM; 1242 goto error; 1243 } 1244 1245 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags, 1246 produce_size, consume_size, 1247 my_produce_q, my_consume_q); 1248 if (!queue_pair_entry) { 1249 pr_warn("Error allocating memory in %s\n", __func__); 1250 result = VMCI_ERROR_NO_MEM; 1251 goto error; 1252 } 1253 1254 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q, 1255 num_consume_pages, 1256 &queue_pair_entry->ppn_set); 1257 if (result < VMCI_SUCCESS) { 1258 pr_warn("qp_alloc_ppn_set failed\n"); 1259 goto error; 1260 } 1261 1262 /* 1263 * It's only necessary to notify the host if this queue pair will be 1264 * attached to from another context. 1265 */ 1266 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1267 /* Local create case. */ 1268 u32 context_id = vmci_get_context_id(); 1269 1270 /* 1271 * Enforce similar checks on local queue pairs as we 1272 * do for regular ones. The handle's context must 1273 * match the creator or attacher context id (here they 1274 * are both the current context id) and the 1275 * attach-only flag cannot exist during create. We 1276 * also ensure specified peer is this context or an 1277 * invalid one. 1278 */ 1279 if (queue_pair_entry->qp.handle.context != context_id || 1280 (queue_pair_entry->qp.peer != VMCI_INVALID_ID && 1281 queue_pair_entry->qp.peer != context_id)) { 1282 result = VMCI_ERROR_NO_ACCESS; 1283 goto error; 1284 } 1285 1286 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { 1287 result = VMCI_ERROR_NOT_FOUND; 1288 goto error; 1289 } 1290 } else { 1291 result = qp_alloc_hypercall(queue_pair_entry); 1292 if (result < VMCI_SUCCESS) { 1293 pr_warn("qp_alloc_hypercall result = %d\n", result); 1294 goto error; 1295 } 1296 } 1297 1298 qp_init_queue_mutex((struct vmci_queue *)my_produce_q, 1299 (struct vmci_queue *)my_consume_q); 1300 1301 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); 1302 1303 out: 1304 queue_pair_entry->qp.ref_count++; 1305 *handle = queue_pair_entry->qp.handle; 1306 *produce_q = (struct vmci_queue *)my_produce_q; 1307 *consume_q = (struct vmci_queue *)my_consume_q; 1308 1309 /* 1310 * We should initialize the queue pair header pages on a local 1311 * queue pair create. For non-local queue pairs, the 1312 * hypervisor initializes the header pages in the create step. 1313 */ 1314 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && 1315 queue_pair_entry->qp.ref_count == 1) { 1316 vmci_q_header_init((*produce_q)->q_header, *handle); 1317 vmci_q_header_init((*consume_q)->q_header, *handle); 1318 } 1319 1320 mutex_unlock(&qp_guest_endpoints.mutex); 1321 1322 return VMCI_SUCCESS; 1323 1324 error: 1325 mutex_unlock(&qp_guest_endpoints.mutex); 1326 if (queue_pair_entry) { 1327 /* The queues will be freed inside the destroy routine. 
*/ 1328 qp_guest_endpoint_destroy(queue_pair_entry); 1329 } else { 1330 qp_free_queue(my_produce_q, produce_size); 1331 qp_free_queue(my_consume_q, consume_size); 1332 } 1333 return result; 1334 1335 error_keep_entry: 1336 /* This path should only be used when an existing entry was found. */ 1337 mutex_unlock(&qp_guest_endpoints.mutex); 1338 return result; 1339 } 1340 1341 /* 1342 * The first endpoint issuing a queue pair allocation will create the state 1343 * of the queue pair in the queue pair broker. 1344 * 1345 * If the creator is a guest, it will associate a VMX virtual address range 1346 * with the queue pair as specified by the page_store. For compatibility with 1347 * older VMX'en, that would use a separate step to set the VMX virtual 1348 * address range, the virtual address range can be registered later using 1349 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be 1350 * used. 1351 * 1352 * If the creator is the host, a page_store of NULL should be used as well, 1353 * since the host is not able to supply a page store for the queue pair. 1354 * 1355 * For older VMX and host callers, the queue pair will be created in the 1356 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be 1357 * created in VMCOQPB_CREATED_MEM state. 1358 */ 1359 static int qp_broker_create(struct vmci_handle handle, 1360 u32 peer, 1361 u32 flags, 1362 u32 priv_flags, 1363 u64 produce_size, 1364 u64 consume_size, 1365 struct vmci_qp_page_store *page_store, 1366 struct vmci_ctx *context, 1367 vmci_event_release_cb wakeup_cb, 1368 void *client_data, struct qp_broker_entry **ent) 1369 { 1370 struct qp_broker_entry *entry = NULL; 1371 const u32 context_id = vmci_ctx_get_id(context); 1372 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1373 int result; 1374 u64 guest_produce_size; 1375 u64 guest_consume_size; 1376 1377 /* Do not create if the caller asked not to. */ 1378 if (flags & VMCI_QPFLAG_ATTACH_ONLY) 1379 return VMCI_ERROR_NOT_FOUND; 1380 1381 /* 1382 * Creator's context ID should match handle's context ID or the creator 1383 * must allow the context in handle's context ID as the "peer". 1384 */ 1385 if (handle.context != context_id && handle.context != peer) 1386 return VMCI_ERROR_NO_ACCESS; 1387 1388 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer)) 1389 return VMCI_ERROR_DST_UNREACHABLE; 1390 1391 /* 1392 * Creator's context ID for local queue pairs should match the 1393 * peer, if a peer is specified. 1394 */ 1395 if (is_local && peer != VMCI_INVALID_ID && context_id != peer) 1396 return VMCI_ERROR_NO_ACCESS; 1397 1398 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 1399 if (!entry) 1400 return VMCI_ERROR_NO_MEM; 1401 1402 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) { 1403 /* 1404 * The queue pair broker entry stores values from the guest 1405 * point of view, so a creating host side endpoint should swap 1406 * produce and consume values -- unless it is a local queue 1407 * pair, in which case no swapping is necessary, since the local 1408 * attacher will swap queues. 
1409 */ 1410 1411 guest_produce_size = consume_size; 1412 guest_consume_size = produce_size; 1413 } else { 1414 guest_produce_size = produce_size; 1415 guest_consume_size = consume_size; 1416 } 1417 1418 entry->qp.handle = handle; 1419 entry->qp.peer = peer; 1420 entry->qp.flags = flags; 1421 entry->qp.produce_size = guest_produce_size; 1422 entry->qp.consume_size = guest_consume_size; 1423 entry->qp.ref_count = 1; 1424 entry->create_id = context_id; 1425 entry->attach_id = VMCI_INVALID_ID; 1426 entry->state = VMCIQPB_NEW; 1427 entry->require_trusted_attach = 1428 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); 1429 entry->created_by_trusted = 1430 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED); 1431 entry->vmci_page_files = false; 1432 entry->wakeup_cb = wakeup_cb; 1433 entry->client_data = client_data; 1434 entry->produce_q = qp_host_alloc_queue(guest_produce_size); 1435 if (entry->produce_q == NULL) { 1436 result = VMCI_ERROR_NO_MEM; 1437 goto error; 1438 } 1439 entry->consume_q = qp_host_alloc_queue(guest_consume_size); 1440 if (entry->consume_q == NULL) { 1441 result = VMCI_ERROR_NO_MEM; 1442 goto error; 1443 } 1444 1445 qp_init_queue_mutex(entry->produce_q, entry->consume_q); 1446 1447 INIT_LIST_HEAD(&entry->qp.list_item); 1448 1449 if (is_local) { 1450 u8 *tmp; 1451 1452 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), 1453 PAGE_SIZE, GFP_KERNEL); 1454 if (entry->local_mem == NULL) { 1455 result = VMCI_ERROR_NO_MEM; 1456 goto error; 1457 } 1458 entry->state = VMCIQPB_CREATED_MEM; 1459 entry->produce_q->q_header = entry->local_mem; 1460 tmp = (u8 *)entry->local_mem + PAGE_SIZE * 1461 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); 1462 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; 1463 } else if (page_store) { 1464 /* 1465 * The VMX already initialized the queue pair headers, so no 1466 * need for the kernel side to do that. 1467 */ 1468 result = qp_host_register_user_memory(page_store, 1469 entry->produce_q, 1470 entry->consume_q); 1471 if (result < VMCI_SUCCESS) 1472 goto error; 1473 1474 entry->state = VMCIQPB_CREATED_MEM; 1475 } else { 1476 /* 1477 * A create without a page_store may be either a host 1478 * side create (in which case we are waiting for the 1479 * guest side to supply the memory) or an old style 1480 * queue pair create (in which case we will expect a 1481 * set page store call as the next step). 
1482 */ 1483 entry->state = VMCIQPB_CREATED_NO_MEM; 1484 } 1485 1486 qp_list_add_entry(&qp_broker_list, &entry->qp); 1487 if (ent != NULL) 1488 *ent = entry; 1489 1490 /* Add to resource obj */ 1491 result = vmci_resource_add(&entry->resource, 1492 VMCI_RESOURCE_TYPE_QPAIR_HOST, 1493 handle); 1494 if (result != VMCI_SUCCESS) { 1495 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", 1496 handle.context, handle.resource, result); 1497 goto error; 1498 } 1499 1500 entry->qp.handle = vmci_resource_handle(&entry->resource); 1501 if (is_local) { 1502 vmci_q_header_init(entry->produce_q->q_header, 1503 entry->qp.handle); 1504 vmci_q_header_init(entry->consume_q->q_header, 1505 entry->qp.handle); 1506 } 1507 1508 vmci_ctx_qp_create(context, entry->qp.handle); 1509 1510 return VMCI_SUCCESS; 1511 1512 error: 1513 if (entry != NULL) { 1514 qp_host_free_queue(entry->produce_q, guest_produce_size); 1515 qp_host_free_queue(entry->consume_q, guest_consume_size); 1516 kfree(entry); 1517 } 1518 1519 return result; 1520 } 1521 1522 /* 1523 * Enqueues an event datagram to notify the peer VM attached to 1524 * the given queue pair handle about attach/detach event by the 1525 * given VM. Returns Payload size of datagram enqueued on 1526 * success, error code otherwise. 1527 */ 1528 static int qp_notify_peer(bool attach, 1529 struct vmci_handle handle, 1530 u32 my_id, 1531 u32 peer_id) 1532 { 1533 int rv; 1534 struct vmci_event_qp ev; 1535 1536 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID || 1537 peer_id == VMCI_INVALID_ID) 1538 return VMCI_ERROR_INVALID_ARGS; 1539 1540 /* 1541 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on 1542 * number of pending events from the hypervisor to a given VM 1543 * otherwise a rogue VM could do an arbitrary number of attach 1544 * and detach operations causing memory pressure in the host 1545 * kernel. 1546 */ 1547 1548 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); 1549 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1550 VMCI_CONTEXT_RESOURCE_ID); 1551 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); 1552 ev.msg.event_data.event = attach ? 1553 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; 1554 ev.payload.handle = handle; 1555 ev.payload.peer_id = my_id; 1556 1557 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, 1558 &ev.msg.hdr, false); 1559 if (rv < VMCI_SUCCESS) 1560 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n", 1561 attach ? "ATTACH" : "DETACH", peer_id); 1562 1563 return rv; 1564 } 1565 1566 /* 1567 * The second endpoint issuing a queue pair allocation will attach to 1568 * the queue pair registered with the queue pair broker. 1569 * 1570 * If the attacher is a guest, it will associate a VMX virtual address 1571 * range with the queue pair as specified by the page_store. At this 1572 * point, the already attach host endpoint may start using the queue 1573 * pair, and an attach event is sent to it. For compatibility with 1574 * older VMX'en, that used a separate step to set the VMX virtual 1575 * address range, the virtual address range can be registered later 1576 * using vmci_qp_broker_set_page_store. In that case, a page_store of 1577 * NULL should be used, and the attach event will be generated once 1578 * the actual page store has been set. 1579 * 1580 * If the attacher is the host, a page_store of NULL should be used as 1581 * well, since the page store information is already set by the guest. 
1582 * 1583 * For new VMX and host callers, the queue pair will be moved to the 1584 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be 1585 * moved to the VMCOQPB_ATTACHED_NO_MEM state. 1586 */ 1587 static int qp_broker_attach(struct qp_broker_entry *entry, 1588 u32 peer, 1589 u32 flags, 1590 u32 priv_flags, 1591 u64 produce_size, 1592 u64 consume_size, 1593 struct vmci_qp_page_store *page_store, 1594 struct vmci_ctx *context, 1595 vmci_event_release_cb wakeup_cb, 1596 void *client_data, 1597 struct qp_broker_entry **ent) 1598 { 1599 const u32 context_id = vmci_ctx_get_id(context); 1600 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1601 int result; 1602 1603 if (entry->state != VMCIQPB_CREATED_NO_MEM && 1604 entry->state != VMCIQPB_CREATED_MEM) 1605 return VMCI_ERROR_UNAVAILABLE; 1606 1607 if (is_local) { 1608 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || 1609 context_id != entry->create_id) { 1610 return VMCI_ERROR_INVALID_ARGS; 1611 } 1612 } else if (context_id == entry->create_id || 1613 context_id == entry->attach_id) { 1614 return VMCI_ERROR_ALREADY_EXISTS; 1615 } 1616 1617 if (VMCI_CONTEXT_IS_VM(context_id) && 1618 VMCI_CONTEXT_IS_VM(entry->create_id)) 1619 return VMCI_ERROR_DST_UNREACHABLE; 1620 1621 /* 1622 * If we are attaching from a restricted context then the queuepair 1623 * must have been created by a trusted endpoint. 1624 */ 1625 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) && 1626 !entry->created_by_trusted) 1627 return VMCI_ERROR_NO_ACCESS; 1628 1629 /* 1630 * If we are attaching to a queuepair that was created by a restricted 1631 * context then we must be trusted. 1632 */ 1633 if (entry->require_trusted_attach && 1634 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED))) 1635 return VMCI_ERROR_NO_ACCESS; 1636 1637 /* 1638 * If the creator specifies VMCI_INVALID_ID in "peer" field, access 1639 * control check is not performed. 1640 */ 1641 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) 1642 return VMCI_ERROR_NO_ACCESS; 1643 1644 if (entry->create_id == VMCI_HOST_CONTEXT_ID) { 1645 /* 1646 * Do not attach if the caller doesn't support Host Queue Pairs 1647 * and a host created this queue pair. 1648 */ 1649 1650 if (!vmci_ctx_supports_host_qp(context)) 1651 return VMCI_ERROR_INVALID_RESOURCE; 1652 1653 } else if (context_id == VMCI_HOST_CONTEXT_ID) { 1654 struct vmci_ctx *create_context; 1655 bool supports_host_qp; 1656 1657 /* 1658 * Do not attach a host to a user created queue pair if that 1659 * user doesn't support host queue pair end points. 1660 */ 1661 1662 create_context = vmci_ctx_get(entry->create_id); 1663 supports_host_qp = vmci_ctx_supports_host_qp(create_context); 1664 vmci_ctx_put(create_context); 1665 1666 if (!supports_host_qp) 1667 return VMCI_ERROR_INVALID_RESOURCE; 1668 } 1669 1670 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) 1671 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1672 1673 if (context_id != VMCI_HOST_CONTEXT_ID) { 1674 /* 1675 * The queue pair broker entry stores values from the guest 1676 * point of view, so an attaching guest should match the values 1677 * stored in the entry. 
1678 */ 1679 1680 if (entry->qp.produce_size != produce_size || 1681 entry->qp.consume_size != consume_size) { 1682 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1683 } 1684 } else if (entry->qp.produce_size != consume_size || 1685 entry->qp.consume_size != produce_size) { 1686 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1687 } 1688 1689 if (context_id != VMCI_HOST_CONTEXT_ID) { 1690 /* 1691 * If a guest attached to a queue pair, it will supply 1692 * the backing memory. If this is a pre NOVMVM vmx, 1693 * the backing memory will be supplied by calling 1694 * vmci_qp_broker_set_page_store() following the 1695 * return of the vmci_qp_broker_alloc() call. If it is 1696 * a vmx of version NOVMVM or later, the page store 1697 * must be supplied as part of the 1698 * vmci_qp_broker_alloc call. Under all circumstances 1699 * must the initially created queue pair not have any 1700 * memory associated with it already. 1701 */ 1702 1703 if (entry->state != VMCIQPB_CREATED_NO_MEM) 1704 return VMCI_ERROR_INVALID_ARGS; 1705 1706 if (page_store != NULL) { 1707 /* 1708 * Patch up host state to point to guest 1709 * supplied memory. The VMX already 1710 * initialized the queue pair headers, so no 1711 * need for the kernel side to do that. 1712 */ 1713 1714 result = qp_host_register_user_memory(page_store, 1715 entry->produce_q, 1716 entry->consume_q); 1717 if (result < VMCI_SUCCESS) 1718 return result; 1719 1720 entry->state = VMCIQPB_ATTACHED_MEM; 1721 } else { 1722 entry->state = VMCIQPB_ATTACHED_NO_MEM; 1723 } 1724 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { 1725 /* 1726 * The host side is attempting to attach to a queue 1727 * pair that doesn't have any memory associated with 1728 * it. This must be a pre NOVMVM vmx that hasn't set 1729 * the page store information yet, or a quiesced VM. 1730 */ 1731 1732 return VMCI_ERROR_UNAVAILABLE; 1733 } else { 1734 /* The host side has successfully attached to a queue pair. */ 1735 entry->state = VMCIQPB_ATTACHED_MEM; 1736 } 1737 1738 if (entry->state == VMCIQPB_ATTACHED_MEM) { 1739 result = 1740 qp_notify_peer(true, entry->qp.handle, context_id, 1741 entry->create_id); 1742 if (result < VMCI_SUCCESS) 1743 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 1744 entry->create_id, entry->qp.handle.context, 1745 entry->qp.handle.resource); 1746 } 1747 1748 entry->attach_id = context_id; 1749 entry->qp.ref_count++; 1750 if (wakeup_cb) { 1751 entry->wakeup_cb = wakeup_cb; 1752 entry->client_data = client_data; 1753 } 1754 1755 /* 1756 * When attaching to local queue pairs, the context already has 1757 * an entry tracking the queue pair, so don't add another one. 1758 */ 1759 if (!is_local) 1760 vmci_ctx_qp_create(context, entry->qp.handle); 1761 1762 if (ent != NULL) 1763 *ent = entry; 1764 1765 return VMCI_SUCCESS; 1766 } 1767 1768 /* 1769 * queue_pair_Alloc for use when setting up queue pair endpoints 1770 * on the host. 
1771 */ 1772 static int qp_broker_alloc(struct vmci_handle handle, 1773 u32 peer, 1774 u32 flags, 1775 u32 priv_flags, 1776 u64 produce_size, 1777 u64 consume_size, 1778 struct vmci_qp_page_store *page_store, 1779 struct vmci_ctx *context, 1780 vmci_event_release_cb wakeup_cb, 1781 void *client_data, 1782 struct qp_broker_entry **ent, 1783 bool *swap) 1784 { 1785 const u32 context_id = vmci_ctx_get_id(context); 1786 bool create; 1787 struct qp_broker_entry *entry = NULL; 1788 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1789 int result; 1790 1791 if (vmci_handle_is_invalid(handle) || 1792 (flags & ~VMCI_QP_ALL_FLAGS) || is_local || 1793 !(produce_size || consume_size) || 1794 !context || context_id == VMCI_INVALID_ID || 1795 handle.context == VMCI_INVALID_ID) { 1796 return VMCI_ERROR_INVALID_ARGS; 1797 } 1798 1799 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store)) 1800 return VMCI_ERROR_INVALID_ARGS; 1801 1802 /* 1803 * In the initial argument check, we ensure that non-vmkernel hosts 1804 * are not allowed to create local queue pairs. 1805 */ 1806 1807 mutex_lock(&qp_broker_list.mutex); 1808 1809 if (!is_local && vmci_ctx_qp_exists(context, handle)) { 1810 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", 1811 context_id, handle.context, handle.resource); 1812 mutex_unlock(&qp_broker_list.mutex); 1813 return VMCI_ERROR_ALREADY_EXISTS; 1814 } 1815 1816 if (handle.resource != VMCI_INVALID_ID) 1817 entry = qp_broker_handle_to_entry(handle); 1818 1819 if (!entry) { 1820 create = true; 1821 result = 1822 qp_broker_create(handle, peer, flags, priv_flags, 1823 produce_size, consume_size, page_store, 1824 context, wakeup_cb, client_data, ent); 1825 } else { 1826 create = false; 1827 result = 1828 qp_broker_attach(entry, peer, flags, priv_flags, 1829 produce_size, consume_size, page_store, 1830 context, wakeup_cb, client_data, ent); 1831 } 1832 1833 mutex_unlock(&qp_broker_list.mutex); 1834 1835 if (swap) 1836 *swap = (context_id == VMCI_HOST_CONTEXT_ID) && 1837 !(create && is_local); 1838 1839 return result; 1840 } 1841 1842 /* 1843 * This function implements the kernel API for allocating a queue 1844 * pair. 1845 */ 1846 static int qp_alloc_host_work(struct vmci_handle *handle, 1847 struct vmci_queue **produce_q, 1848 u64 produce_size, 1849 struct vmci_queue **consume_q, 1850 u64 consume_size, 1851 u32 peer, 1852 u32 flags, 1853 u32 priv_flags, 1854 vmci_event_release_cb wakeup_cb, 1855 void *client_data) 1856 { 1857 struct vmci_handle new_handle; 1858 struct vmci_ctx *context; 1859 struct qp_broker_entry *entry; 1860 int result; 1861 bool swap; 1862 1863 if (vmci_handle_is_invalid(*handle)) { 1864 new_handle = vmci_make_handle( 1865 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID); 1866 } else 1867 new_handle = *handle; 1868 1869 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1870 entry = NULL; 1871 result = 1872 qp_broker_alloc(new_handle, peer, flags, priv_flags, 1873 produce_size, consume_size, NULL, context, 1874 wakeup_cb, client_data, &entry, &swap); 1875 if (result == VMCI_SUCCESS) { 1876 if (swap) { 1877 /* 1878 * If this is a local queue pair, the attacher 1879 * will swap around produce and consume 1880 * queues. 
1881 */ 1882 1883 *produce_q = entry->consume_q; 1884 *consume_q = entry->produce_q; 1885 } else { 1886 *produce_q = entry->produce_q; 1887 *consume_q = entry->consume_q; 1888 } 1889 1890 *handle = vmci_resource_handle(&entry->resource); 1891 } else { 1892 *handle = VMCI_INVALID_HANDLE; 1893 pr_devel("queue pair broker failed to alloc (result=%d)\n", 1894 result); 1895 } 1896 vmci_ctx_put(context); 1897 return result; 1898 } 1899 1900 /* 1901 * Allocates a VMCI queue_pair. Only checks validity of input 1902 * arguments. The real work is done in the host or guest 1903 * specific function. 1904 */ 1905 int vmci_qp_alloc(struct vmci_handle *handle, 1906 struct vmci_queue **produce_q, 1907 u64 produce_size, 1908 struct vmci_queue **consume_q, 1909 u64 consume_size, 1910 u32 peer, 1911 u32 flags, 1912 u32 priv_flags, 1913 bool guest_endpoint, 1914 vmci_event_release_cb wakeup_cb, 1915 void *client_data) 1916 { 1917 if (!handle || !produce_q || !consume_q || 1918 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) 1919 return VMCI_ERROR_INVALID_ARGS; 1920 1921 if (guest_endpoint) { 1922 return qp_alloc_guest_work(handle, produce_q, 1923 produce_size, consume_q, 1924 consume_size, peer, 1925 flags, priv_flags); 1926 } else { 1927 return qp_alloc_host_work(handle, produce_q, 1928 produce_size, consume_q, 1929 consume_size, peer, flags, 1930 priv_flags, wakeup_cb, client_data); 1931 } 1932 } 1933 1934 /* 1935 * This function implements the host kernel API for detaching from 1936 * a queue pair. 1937 */ 1938 static int qp_detatch_host_work(struct vmci_handle handle) 1939 { 1940 int result; 1941 struct vmci_ctx *context; 1942 1943 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1944 1945 result = vmci_qp_broker_detach(handle, context); 1946 1947 vmci_ctx_put(context); 1948 return result; 1949 } 1950 1951 /* 1952 * Detaches from a VMCI queue_pair. Only checks validity of input argument. 1953 * Real work is done in the host or guest specific function. 1954 */ 1955 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) 1956 { 1957 if (vmci_handle_is_invalid(handle)) 1958 return VMCI_ERROR_INVALID_ARGS; 1959 1960 if (guest_endpoint) 1961 return qp_detatch_guest_work(handle); 1962 else 1963 return qp_detatch_host_work(handle); 1964 } 1965 1966 /* 1967 * Returns the entry from the head of the list. Assumes that the list is 1968 * locked. 1969 */ 1970 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) 1971 { 1972 if (!list_empty(&qp_list->head)) { 1973 struct qp_entry *entry = 1974 list_first_entry(&qp_list->head, struct qp_entry, 1975 list_item); 1976 return entry; 1977 } 1978 1979 return NULL; 1980 } 1981 1982 void vmci_qp_broker_exit(void) 1983 { 1984 struct qp_entry *entry; 1985 struct qp_broker_entry *be; 1986 1987 mutex_lock(&qp_broker_list.mutex); 1988 1989 while ((entry = qp_list_get_head(&qp_broker_list))) { 1990 be = (struct qp_broker_entry *)entry; 1991 1992 qp_list_remove_entry(&qp_broker_list, entry); 1993 kfree(be); 1994 } 1995 1996 mutex_unlock(&qp_broker_list.mutex); 1997 } 1998 1999 /* 2000 * Requests that a queue pair be allocated with the VMCI queue 2001 * pair broker. Allocates a queue pair entry if one does not 2002 * exist. Attaches to one if it exists, and retrieves the page 2003 * files backing that queue_pair. Assumes that the queue pair 2004 * broker lock is held. 
2005 */ 2006 int vmci_qp_broker_alloc(struct vmci_handle handle, 2007 u32 peer, 2008 u32 flags, 2009 u32 priv_flags, 2010 u64 produce_size, 2011 u64 consume_size, 2012 struct vmci_qp_page_store *page_store, 2013 struct vmci_ctx *context) 2014 { 2015 return qp_broker_alloc(handle, peer, flags, priv_flags, 2016 produce_size, consume_size, 2017 page_store, context, NULL, NULL, NULL, NULL); 2018 } 2019 2020 /* 2021 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate 2022 * step to add the UVAs of the VMX mapping of the queue pair. This function 2023 * provides backwards compatibility with such VMX'en, and takes care of 2024 * registering the page store for a queue pair previously allocated by the 2025 * VMX during create or attach. This function will move the queue pair state 2026 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from 2027 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the 2028 * attached state with memory, the queue pair is ready to be used by the 2029 * host peer, and an attached event will be generated. 2030 * 2031 * Assumes that the queue pair broker lock is held. 2032 * 2033 * This function is only used by the hosted platform, since there is no 2034 * issue with backwards compatibility for vmkernel. 2035 */ 2036 int vmci_qp_broker_set_page_store(struct vmci_handle handle, 2037 u64 produce_uva, 2038 u64 consume_uva, 2039 struct vmci_ctx *context) 2040 { 2041 struct qp_broker_entry *entry; 2042 int result; 2043 const u32 context_id = vmci_ctx_get_id(context); 2044 2045 if (vmci_handle_is_invalid(handle) || !context || 2046 context_id == VMCI_INVALID_ID) 2047 return VMCI_ERROR_INVALID_ARGS; 2048 2049 /* 2050 * We only support guest to host queue pairs, so the VMX must 2051 * supply UVAs for the mapped page files. 2052 */ 2053 2054 if (produce_uva == 0 || consume_uva == 0) 2055 return VMCI_ERROR_INVALID_ARGS; 2056 2057 mutex_lock(&qp_broker_list.mutex); 2058 2059 if (!vmci_ctx_qp_exists(context, handle)) { 2060 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2061 context_id, handle.context, handle.resource); 2062 result = VMCI_ERROR_NOT_FOUND; 2063 goto out; 2064 } 2065 2066 entry = qp_broker_handle_to_entry(handle); 2067 if (!entry) { 2068 result = VMCI_ERROR_NOT_FOUND; 2069 goto out; 2070 } 2071 2072 /* 2073 * If I'm the owner then I can set the page store. 2074 * 2075 * Or, if a host created the queue_pair and I'm the attached peer 2076 * then I can set the page store.
2077 */ 2078 if (entry->create_id != context_id && 2079 (entry->create_id != VMCI_HOST_CONTEXT_ID || 2080 entry->attach_id != context_id)) { 2081 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; 2082 goto out; 2083 } 2084 2085 if (entry->state != VMCIQPB_CREATED_NO_MEM && 2086 entry->state != VMCIQPB_ATTACHED_NO_MEM) { 2087 result = VMCI_ERROR_UNAVAILABLE; 2088 goto out; 2089 } 2090 2091 result = qp_host_get_user_memory(produce_uva, consume_uva, 2092 entry->produce_q, entry->consume_q); 2093 if (result < VMCI_SUCCESS) 2094 goto out; 2095 2096 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2097 if (result < VMCI_SUCCESS) { 2098 qp_host_unregister_user_memory(entry->produce_q, 2099 entry->consume_q); 2100 goto out; 2101 } 2102 2103 if (entry->state == VMCIQPB_CREATED_NO_MEM) 2104 entry->state = VMCIQPB_CREATED_MEM; 2105 else 2106 entry->state = VMCIQPB_ATTACHED_MEM; 2107 2108 entry->vmci_page_files = true; 2109 2110 if (entry->state == VMCIQPB_ATTACHED_MEM) { 2111 result = 2112 qp_notify_peer(true, handle, context_id, entry->create_id); 2113 if (result < VMCI_SUCCESS) { 2114 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 2115 entry->create_id, entry->qp.handle.context, 2116 entry->qp.handle.resource); 2117 } 2118 } 2119 2120 result = VMCI_SUCCESS; 2121 out: 2122 mutex_unlock(&qp_broker_list.mutex); 2123 return result; 2124 } 2125 2126 /* 2127 * Resets saved queue headers for the given QP broker 2128 * entry. Should be used when guest memory becomes available 2129 * again, or the guest detaches. 2130 */ 2131 static void qp_reset_saved_headers(struct qp_broker_entry *entry) 2132 { 2133 entry->produce_q->saved_header = NULL; 2134 entry->consume_q->saved_header = NULL; 2135 } 2136 2137 /* 2138 * The main entry point for detaching from a queue pair registered with the 2139 * queue pair broker. If more than one endpoint is attached to the queue 2140 * pair, the first endpoint will mainly decrement a reference count and 2141 * generate a notification to its peer. The last endpoint will clean up 2142 * the queue pair state registered with the broker. 2143 * 2144 * When a guest endpoint detaches, it will unmap and unregister the guest 2145 * memory backing the queue pair. If the host is still attached, it will 2146 * no longer be able to access the queue pair content. 2147 * 2148 * If the queue pair is already in a state where there is no memory 2149 * registered for the queue pair (any *_NO_MEM state), it will transition to 2150 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest 2151 * endpoint is the first of two endpoints to detach. If the host endpoint is 2152 * the first out of two to detach, the queue pair will move to the 2153 * VMCIQPB_SHUTDOWN_MEM state. 
2154 */ 2155 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) 2156 { 2157 struct qp_broker_entry *entry; 2158 const u32 context_id = vmci_ctx_get_id(context); 2159 u32 peer_id; 2160 bool is_local = false; 2161 int result; 2162 2163 if (vmci_handle_is_invalid(handle) || !context || 2164 context_id == VMCI_INVALID_ID) { 2165 return VMCI_ERROR_INVALID_ARGS; 2166 } 2167 2168 mutex_lock(&qp_broker_list.mutex); 2169 2170 if (!vmci_ctx_qp_exists(context, handle)) { 2171 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2172 context_id, handle.context, handle.resource); 2173 result = VMCI_ERROR_NOT_FOUND; 2174 goto out; 2175 } 2176 2177 entry = qp_broker_handle_to_entry(handle); 2178 if (!entry) { 2179 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", 2180 context_id, handle.context, handle.resource); 2181 result = VMCI_ERROR_NOT_FOUND; 2182 goto out; 2183 } 2184 2185 if (context_id != entry->create_id && context_id != entry->attach_id) { 2186 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2187 goto out; 2188 } 2189 2190 if (context_id == entry->create_id) { 2191 peer_id = entry->attach_id; 2192 entry->create_id = VMCI_INVALID_ID; 2193 } else { 2194 peer_id = entry->create_id; 2195 entry->attach_id = VMCI_INVALID_ID; 2196 } 2197 entry->qp.ref_count--; 2198 2199 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2200 2201 if (context_id != VMCI_HOST_CONTEXT_ID) { 2202 bool headers_mapped; 2203 2204 /* 2205 * Pre NOVMVM vmx'en may detach from a queue pair 2206 * before setting the page store, and in that case 2207 * there is no user memory to detach from. Also, more 2208 * recent VMX'en may detach from a queue pair in the 2209 * quiesced state. 
2210 */ 2211 2212 qp_acquire_queue_mutex(entry->produce_q); 2213 headers_mapped = entry->produce_q->q_header || 2214 entry->consume_q->q_header; 2215 if (QPBROKERSTATE_HAS_MEM(entry)) { 2216 result = 2217 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, 2218 entry->produce_q, 2219 entry->consume_q); 2220 if (result < VMCI_SUCCESS) 2221 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2222 handle.context, handle.resource, 2223 result); 2224 2225 qp_host_unregister_user_memory(entry->produce_q, 2226 entry->consume_q); 2233 2234 } 2235 2236 if (!headers_mapped) 2237 qp_reset_saved_headers(entry); 2238 2239 qp_release_queue_mutex(entry->produce_q); 2240 2241 if (!headers_mapped && entry->wakeup_cb) 2242 entry->wakeup_cb(entry->client_data); 2243 2244 } else { 2245 if (entry->wakeup_cb) { 2246 entry->wakeup_cb = NULL; 2247 entry->client_data = NULL; 2248 } 2249 } 2250 2251 if (entry->qp.ref_count == 0) { 2252 qp_list_remove_entry(&qp_broker_list, &entry->qp); 2253 2254 if (is_local) 2255 kfree(entry->local_mem); 2256 2257 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 2258 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); 2259 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); 2260 /* Unlink from resource hash table and free callback */ 2261 vmci_resource_remove(&entry->resource); 2262 2263 kfree(entry); 2264 2265 vmci_ctx_qp_destroy(context, handle); 2266 } else { 2267 qp_notify_peer(false, handle, context_id, peer_id); 2268 if (context_id == VMCI_HOST_CONTEXT_ID && 2269 QPBROKERSTATE_HAS_MEM(entry)) { 2270 entry->state = VMCIQPB_SHUTDOWN_MEM; 2271 } else { 2272 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; 2273 } 2274 2275 if (!is_local) 2276 vmci_ctx_qp_destroy(context, handle); 2277 2278 } 2279 result = VMCI_SUCCESS; 2280 out: 2281 mutex_unlock(&qp_broker_list.mutex); 2282 return result; 2283 } 2284 2285 /* 2286 * Establishes the necessary mappings for a queue pair given a 2287 * reference to the queue pair guest memory. This is usually 2288 * called when a guest is unquiesced and the VMX is allowed to 2289 * map guest memory once again.
2290 */ 2291 int vmci_qp_broker_map(struct vmci_handle handle, 2292 struct vmci_ctx *context, 2293 u64 guest_mem) 2294 { 2295 struct qp_broker_entry *entry; 2296 const u32 context_id = vmci_ctx_get_id(context); 2297 bool is_local = false; 2298 int result; 2299 2300 if (vmci_handle_is_invalid(handle) || !context || 2301 context_id == VMCI_INVALID_ID) 2302 return VMCI_ERROR_INVALID_ARGS; 2303 2304 mutex_lock(&qp_broker_list.mutex); 2305 2306 if (!vmci_ctx_qp_exists(context, handle)) { 2307 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2308 context_id, handle.context, handle.resource); 2309 result = VMCI_ERROR_NOT_FOUND; 2310 goto out; 2311 } 2312 2313 entry = qp_broker_handle_to_entry(handle); 2314 if (!entry) { 2315 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2316 context_id, handle.context, handle.resource); 2317 result = VMCI_ERROR_NOT_FOUND; 2318 goto out; 2319 } 2320 2321 if (context_id != entry->create_id && context_id != entry->attach_id) { 2322 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2323 goto out; 2324 } 2325 2326 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2327 result = VMCI_SUCCESS; 2328 2329 if (context_id != VMCI_HOST_CONTEXT_ID) { 2330 struct vmci_qp_page_store page_store; 2331 2332 page_store.pages = guest_mem; 2333 page_store.len = QPE_NUM_PAGES(entry->qp); 2334 2335 qp_acquire_queue_mutex(entry->produce_q); 2336 qp_reset_saved_headers(entry); 2337 result = 2338 qp_host_register_user_memory(&page_store, 2339 entry->produce_q, 2340 entry->consume_q); 2341 qp_release_queue_mutex(entry->produce_q); 2342 if (result == VMCI_SUCCESS) { 2343 /* Move state from *_NO_MEM to *_MEM */ 2344 2345 entry->state++; 2346 2347 if (entry->wakeup_cb) 2348 entry->wakeup_cb(entry->client_data); 2349 } 2350 } 2351 2352 out: 2353 mutex_unlock(&qp_broker_list.mutex); 2354 return result; 2355 } 2356 2357 /* 2358 * Saves a snapshot of the queue headers for the given QP broker 2359 * entry. Should be used when guest memory is unmapped. 2360 * Results: 2361 * VMCI_SUCCESS on success, appropriate error code if guest memory 2362 * can't be accessed.. 2363 */ 2364 static int qp_save_headers(struct qp_broker_entry *entry) 2365 { 2366 int result; 2367 2368 if (entry->produce_q->saved_header != NULL && 2369 entry->consume_q->saved_header != NULL) { 2370 /* 2371 * If the headers have already been saved, we don't need to do 2372 * it again, and we don't want to map in the headers 2373 * unnecessarily. 2374 */ 2375 2376 return VMCI_SUCCESS; 2377 } 2378 2379 if (NULL == entry->produce_q->q_header || 2380 NULL == entry->consume_q->q_header) { 2381 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2382 if (result < VMCI_SUCCESS) 2383 return result; 2384 } 2385 2386 memcpy(&entry->saved_produce_q, entry->produce_q->q_header, 2387 sizeof(entry->saved_produce_q)); 2388 entry->produce_q->saved_header = &entry->saved_produce_q; 2389 memcpy(&entry->saved_consume_q, entry->consume_q->q_header, 2390 sizeof(entry->saved_consume_q)); 2391 entry->consume_q->saved_header = &entry->saved_consume_q; 2392 2393 return VMCI_SUCCESS; 2394 } 2395 2396 /* 2397 * Removes all references to the guest memory of a given queue pair, and 2398 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually 2399 * called when a VM is being quiesced where access to guest memory should 2400 * avoided. 
2401 */ 2402 int vmci_qp_broker_unmap(struct vmci_handle handle, 2403 struct vmci_ctx *context, 2404 u32 gid) 2405 { 2406 struct qp_broker_entry *entry; 2407 const u32 context_id = vmci_ctx_get_id(context); 2408 bool is_local = false; 2409 int result; 2410 2411 if (vmci_handle_is_invalid(handle) || !context || 2412 context_id == VMCI_INVALID_ID) 2413 return VMCI_ERROR_INVALID_ARGS; 2414 2415 mutex_lock(&qp_broker_list.mutex); 2416 2417 if (!vmci_ctx_qp_exists(context, handle)) { 2418 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2419 context_id, handle.context, handle.resource); 2420 result = VMCI_ERROR_NOT_FOUND; 2421 goto out; 2422 } 2423 2424 entry = qp_broker_handle_to_entry(handle); 2425 if (!entry) { 2426 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2427 context_id, handle.context, handle.resource); 2428 result = VMCI_ERROR_NOT_FOUND; 2429 goto out; 2430 } 2431 2432 if (context_id != entry->create_id && context_id != entry->attach_id) { 2433 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2434 goto out; 2435 } 2436 2437 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2438 2439 if (context_id != VMCI_HOST_CONTEXT_ID) { 2440 qp_acquire_queue_mutex(entry->produce_q); 2441 result = qp_save_headers(entry); 2442 if (result < VMCI_SUCCESS) 2443 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2444 handle.context, handle.resource, result); 2445 2446 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); 2447 2448 /* 2449 * On hosted, when we unmap queue pairs, the VMX will also 2450 * unmap the guest memory, so we invalidate the previously 2451 * registered memory. If the queue pair is mapped again at a 2452 * later point in time, we will need to reregister the user 2453 * memory with a possibly new user VA. 2454 */ 2455 qp_host_unregister_user_memory(entry->produce_q, 2456 entry->consume_q); 2457 2458 /* 2459 * Move state from *_MEM to *_NO_MEM. 2460 */ 2461 entry->state--; 2462 2463 qp_release_queue_mutex(entry->produce_q); 2464 } 2465 2466 result = VMCI_SUCCESS; 2467 2468 out: 2469 mutex_unlock(&qp_broker_list.mutex); 2470 return result; 2471 } 2472 2473 /* 2474 * Destroys all guest queue pair endpoints. If active guest queue 2475 * pairs still exist, hypercalls to attempt detach from these 2476 * queue pairs will be made. Any failure to detach is silently 2477 * ignored. 2478 */ 2479 void vmci_qp_guest_endpoints_exit(void) 2480 { 2481 struct qp_entry *entry; 2482 struct qp_guest_endpoint *ep; 2483 2484 mutex_lock(&qp_guest_endpoints.mutex); 2485 2486 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { 2487 ep = (struct qp_guest_endpoint *)entry; 2488 2489 /* Don't make a hypercall for local queue_pairs. */ 2490 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) 2491 qp_detatch_hypercall(entry->handle); 2492 2493 /* We cannot fail the exit, so let's reset ref_count. */ 2494 entry->ref_count = 0; 2495 qp_list_remove_entry(&qp_guest_endpoints, entry); 2496 2497 qp_guest_endpoint_destroy(ep); 2498 } 2499 2500 mutex_unlock(&qp_guest_endpoints.mutex); 2501 } 2502 2503 /* 2504 * Helper routine that will lock the queue pair before subsequent 2505 * operations. 2506 * Note: Non-blocking on the host side is currently only implemented in ESX. 2507 * Since non-blocking isn't yet implemented on the host personality we 2508 * have no reason to acquire a spin lock. So to avoid the use of an 2509 * unnecessary lock only acquire the mutex if we can block. 
2510 */ 2511 static void qp_lock(const struct vmci_qp *qpair) 2512 { 2513 qp_acquire_queue_mutex(qpair->produce_q); 2514 } 2515 2516 /* 2517 * Helper routine that unlocks the queue pair after calling 2518 * qp_lock. 2519 */ 2520 static void qp_unlock(const struct vmci_qp *qpair) 2521 { 2522 qp_release_queue_mutex(qpair->produce_q); 2523 } 2524 2525 /* 2526 * The queue headers may not be mapped at all times. If a queue is 2527 * currently not mapped, it will be attempted to do so. 2528 */ 2529 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2530 struct vmci_queue *consume_q) 2531 { 2532 int result; 2533 2534 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2535 result = qp_host_map_queues(produce_q, consume_q); 2536 if (result < VMCI_SUCCESS) 2537 return (produce_q->saved_header && 2538 consume_q->saved_header) ? 2539 VMCI_ERROR_QUEUEPAIR_NOT_READY : 2540 VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2541 } 2542 2543 return VMCI_SUCCESS; 2544 } 2545 2546 /* 2547 * Helper routine that will retrieve the produce and consume 2548 * headers of a given queue pair. If the guest memory of the 2549 * queue pair is currently not available, the saved queue headers 2550 * will be returned, if these are available. 2551 */ 2552 static int qp_get_queue_headers(const struct vmci_qp *qpair, 2553 struct vmci_queue_header **produce_q_header, 2554 struct vmci_queue_header **consume_q_header) 2555 { 2556 int result; 2557 2558 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2559 if (result == VMCI_SUCCESS) { 2560 *produce_q_header = qpair->produce_q->q_header; 2561 *consume_q_header = qpair->consume_q->q_header; 2562 } else if (qpair->produce_q->saved_header && 2563 qpair->consume_q->saved_header) { 2564 *produce_q_header = qpair->produce_q->saved_header; 2565 *consume_q_header = qpair->consume_q->saved_header; 2566 result = VMCI_SUCCESS; 2567 } 2568 2569 return result; 2570 } 2571 2572 /* 2573 * Callback from VMCI queue pair broker indicating that a queue 2574 * pair that was previously not ready, now either is ready or 2575 * gone forever. 2576 */ 2577 static int qp_wakeup_cb(void *client_data) 2578 { 2579 struct vmci_qp *qpair = (struct vmci_qp *)client_data; 2580 2581 qp_lock(qpair); 2582 while (qpair->blocked > 0) { 2583 qpair->blocked--; 2584 qpair->generation++; 2585 wake_up(&qpair->event); 2586 } 2587 qp_unlock(qpair); 2588 2589 return VMCI_SUCCESS; 2590 } 2591 2592 /* 2593 * Makes the calling thread wait for the queue pair to become 2594 * ready for host side access. Returns true when thread is 2595 * woken up after queue pair state change, false otherwise. 2596 */ 2597 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) 2598 { 2599 unsigned int generation; 2600 2601 qpair->blocked++; 2602 generation = qpair->generation; 2603 qp_unlock(qpair); 2604 wait_event(qpair->event, generation != qpair->generation); 2605 qp_lock(qpair); 2606 2607 return true; 2608 } 2609 2610 /* 2611 * Enqueues a given buffer to the produce queue using the provided 2612 * function. As many bytes as possible (space available in the queue) 2613 * are enqueued. Assumes the queue->mutex has been acquired. Returns 2614 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue 2615 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the 2616 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if 2617 * an error occured when accessing the buffer, 2618 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't 2619 * available. 
Otherwise, the number of bytes written to the queue is 2620 * returned. Updates the tail pointer of the produce queue. 2621 */ 2622 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2623 struct vmci_queue *consume_q, 2624 const u64 produce_q_size, 2625 const void *buf, 2626 size_t buf_size, 2627 vmci_memcpy_to_queue_func memcpy_to_queue) 2628 { 2629 s64 free_space; 2630 u64 tail; 2631 size_t written; 2632 ssize_t result; 2633 2634 result = qp_map_queue_headers(produce_q, consume_q); 2635 if (unlikely(result != VMCI_SUCCESS)) 2636 return result; 2637 2638 free_space = vmci_q_header_free_space(produce_q->q_header, 2639 consume_q->q_header, 2640 produce_q_size); 2641 if (free_space == 0) 2642 return VMCI_ERROR_QUEUEPAIR_NOSPACE; 2643 2644 if (free_space < VMCI_SUCCESS) 2645 return (ssize_t) free_space; 2646 2647 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2648 tail = vmci_q_header_producer_tail(produce_q->q_header); 2649 if (likely(tail + written < produce_q_size)) { 2650 result = memcpy_to_queue(produce_q, tail, buf, 0, written); 2651 } else { 2652 /* Tail pointer wraps around. */ 2653 2654 const size_t tmp = (size_t) (produce_q_size - tail); 2655 2656 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp); 2657 if (result >= VMCI_SUCCESS) 2658 result = memcpy_to_queue(produce_q, 0, buf, tmp, 2659 written - tmp); 2660 } 2661 2662 if (result < VMCI_SUCCESS) 2663 return result; 2664 2665 vmci_q_header_add_producer_tail(produce_q->q_header, written, 2666 produce_q_size); 2667 return written; 2668 } 2669 2670 /* 2671 * Dequeues data (if available) from the given consume queue. Writes data 2672 * to the user provided buffer using the provided function. 2673 * Assumes the queue->mutex has been acquired. 2674 * Results: 2675 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. 2676 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue 2677 * (as defined by the queue size). 2678 * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer. 2679 * Otherwise the number of bytes dequeued is returned. 2680 * Side effects: 2681 * Updates the head pointer of the consume queue. 2682 */ 2683 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2684 struct vmci_queue *consume_q, 2685 const u64 consume_q_size, 2686 void *buf, 2687 size_t buf_size, 2688 vmci_memcpy_from_queue_func memcpy_from_queue, 2689 bool update_consumer) 2690 { 2691 s64 buf_ready; 2692 u64 head; 2693 size_t read; 2694 ssize_t result; 2695 2696 result = qp_map_queue_headers(produce_q, consume_q); 2697 if (unlikely(result != VMCI_SUCCESS)) 2698 return result; 2699 2700 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, 2701 produce_q->q_header, 2702 consume_q_size); 2703 if (buf_ready == 0) 2704 return VMCI_ERROR_QUEUEPAIR_NODATA; 2705 2706 if (buf_ready < VMCI_SUCCESS) 2707 return (ssize_t) buf_ready; 2708 2709 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2710 head = vmci_q_header_consumer_head(produce_q->q_header); 2711 if (likely(head + read < consume_q_size)) { 2712 result = memcpy_from_queue(buf, 0, consume_q, head, read); 2713 } else { 2714 /* Head pointer wraps around. 
*/ 2715 2716 const size_t tmp = (size_t) (consume_q_size - head); 2717 2718 result = memcpy_from_queue(buf, 0, consume_q, head, tmp); 2719 if (result >= VMCI_SUCCESS) 2720 result = memcpy_from_queue(buf, tmp, consume_q, 0, 2721 read - tmp); 2722 2723 } 2724 2725 if (result < VMCI_SUCCESS) 2726 return result; 2727 2728 if (update_consumer) 2729 vmci_q_header_add_consumer_head(produce_q->q_header, 2730 read, consume_q_size); 2731 2732 return read; 2733 } 2734 2735 /* 2736 * vmci_qpair_alloc() - Allocates a queue pair. 2737 * @qpair: Pointer for the new vmci_qp struct. 2738 * @handle: Handle to track the resource. 2739 * @produce_qsize: Desired size of the producer queue. 2740 * @consume_qsize: Desired size of the consumer queue. 2741 * @peer: ContextID of the peer. 2742 * @flags: VMCI flags. 2743 * @priv_flags: VMCI priviledge flags. 2744 * 2745 * This is the client interface for allocating the memory for a 2746 * vmci_qp structure and then attaching to the underlying 2747 * queue. If an error occurs allocating the memory for the 2748 * vmci_qp structure no attempt is made to attach. If an 2749 * error occurs attaching, then the structure is freed. 2750 */ 2751 int vmci_qpair_alloc(struct vmci_qp **qpair, 2752 struct vmci_handle *handle, 2753 u64 produce_qsize, 2754 u64 consume_qsize, 2755 u32 peer, 2756 u32 flags, 2757 u32 priv_flags) 2758 { 2759 struct vmci_qp *my_qpair; 2760 int retval; 2761 struct vmci_handle src = VMCI_INVALID_HANDLE; 2762 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID); 2763 enum vmci_route route; 2764 vmci_event_release_cb wakeup_cb; 2765 void *client_data; 2766 2767 /* 2768 * Restrict the size of a queuepair. The device already 2769 * enforces a limit on the total amount of memory that can be 2770 * allocated to queuepairs for a guest. However, we try to 2771 * allocate this memory before we make the queuepair 2772 * allocation hypercall. On Linux, we allocate each page 2773 * separately, which means rather than fail, the guest will 2774 * thrash while it tries to allocate, and will become 2775 * increasingly unresponsive to the point where it appears to 2776 * be hung. So we place a limit on the size of an individual 2777 * queuepair here, and leave the device to enforce the 2778 * restriction on total queuepair memory. (Note that this 2779 * doesn't prevent all cases; a user with only this much 2780 * physical memory could still get into trouble.) The error 2781 * used by the device is NO_RESOURCES, so use that here too. 2782 */ 2783 2784 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) || 2785 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY) 2786 return VMCI_ERROR_NO_RESOURCES; 2787 2788 retval = vmci_route(&src, &dst, false, &route); 2789 if (retval < VMCI_SUCCESS) 2790 route = vmci_guest_code_active() ? 
2791 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2792 2793 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2794 pr_devel("NONBLOCK OR PINNED set"); 2795 return VMCI_ERROR_INVALID_ARGS; 2796 } 2797 2798 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); 2799 if (!my_qpair) 2800 return VMCI_ERROR_NO_MEM; 2801 2802 my_qpair->produce_q_size = produce_qsize; 2803 my_qpair->consume_q_size = consume_qsize; 2804 my_qpair->peer = peer; 2805 my_qpair->flags = flags; 2806 my_qpair->priv_flags = priv_flags; 2807 2808 wakeup_cb = NULL; 2809 client_data = NULL; 2810 2811 if (VMCI_ROUTE_AS_HOST == route) { 2812 my_qpair->guest_endpoint = false; 2813 if (!(flags & VMCI_QPFLAG_LOCAL)) { 2814 my_qpair->blocked = 0; 2815 my_qpair->generation = 0; 2816 init_waitqueue_head(&my_qpair->event); 2817 wakeup_cb = qp_wakeup_cb; 2818 client_data = (void *)my_qpair; 2819 } 2820 } else { 2821 my_qpair->guest_endpoint = true; 2822 } 2823 2824 retval = vmci_qp_alloc(handle, 2825 &my_qpair->produce_q, 2826 my_qpair->produce_q_size, 2827 &my_qpair->consume_q, 2828 my_qpair->consume_q_size, 2829 my_qpair->peer, 2830 my_qpair->flags, 2831 my_qpair->priv_flags, 2832 my_qpair->guest_endpoint, 2833 wakeup_cb, client_data); 2834 2835 if (retval < VMCI_SUCCESS) { 2836 kfree(my_qpair); 2837 return retval; 2838 } 2839 2840 *qpair = my_qpair; 2841 my_qpair->handle = *handle; 2842 2843 return retval; 2844 } 2845 EXPORT_SYMBOL_GPL(vmci_qpair_alloc); 2846 2847 /* 2848 * vmci_qpair_detach() - Detaches the client from a queue pair. 2849 * @qpair: Reference of a pointer to the qpair struct. 2850 * 2851 * This is the client interface for detaching from a VMCIQPair. 2852 * Note that this routine will free the memory allocated for the 2853 * vmci_qp structure too. 2854 */ 2855 int vmci_qpair_detach(struct vmci_qp **qpair) 2856 { 2857 int result; 2858 struct vmci_qp *old_qpair; 2859 2860 if (!qpair || !(*qpair)) 2861 return VMCI_ERROR_INVALID_ARGS; 2862 2863 old_qpair = *qpair; 2864 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); 2865 2866 /* 2867 * The guest can fail to detach for a number of reasons, and 2868 * if it does so, it will clean up the entry (if there is one). 2869 * The host can fail too, but it won't clean up the entry 2870 * immediately, it will do that later when the context is 2871 * freed. Either way, we need to release the qpair struct 2872 * here; there isn't much the caller can do, and we don't want 2873 * to leak. 2874 */ 2875 2876 memset(old_qpair, 0, sizeof(*old_qpair)); 2877 old_qpair->handle = VMCI_INVALID_HANDLE; 2878 old_qpair->peer = VMCI_INVALID_ID; 2879 kfree(old_qpair); 2880 *qpair = NULL; 2881 2882 return result; 2883 } 2884 EXPORT_SYMBOL_GPL(vmci_qpair_detach); 2885 2886 /* 2887 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. 2888 * @qpair: Pointer to the queue pair struct. 2889 * @producer_tail: Reference used for storing producer tail index. 2890 * @consumer_head: Reference used for storing the consumer head index. 2891 * 2892 * This is the client interface for getting the current indexes of the 2893 * QPair from the point of view of the caller as the producer.
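 *
 * A minimal usage sketch (illustrative only; assumes @qpair was
 * previously allocated with vmci_qpair_alloc()):
 *
 *	u64 tail, head;
 *
 *	if (vmci_qpair_get_produce_indexes(qpair, &tail, &head) ==
 *	    VMCI_SUCCESS)
 *		pr_debug("producer tail %llu, consumer head %llu\n",
 *			 tail, head);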
2894 */ 2895 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, 2896 u64 *producer_tail, 2897 u64 *consumer_head) 2898 { 2899 struct vmci_queue_header *produce_q_header; 2900 struct vmci_queue_header *consume_q_header; 2901 int result; 2902 2903 if (!qpair) 2904 return VMCI_ERROR_INVALID_ARGS; 2905 2906 qp_lock(qpair); 2907 result = 2908 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2909 if (result == VMCI_SUCCESS) 2910 vmci_q_header_get_pointers(produce_q_header, consume_q_header, 2911 producer_tail, consumer_head); 2912 qp_unlock(qpair); 2913 2914 if (result == VMCI_SUCCESS && 2915 ((producer_tail && *producer_tail >= qpair->produce_q_size) || 2916 (consumer_head && *consumer_head >= qpair->produce_q_size))) 2917 return VMCI_ERROR_INVALID_SIZE; 2918 2919 return result; 2920 } 2921 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); 2922 2923 /* 2924 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. 2925 * @qpair: Pointer to the queue pair struct. 2926 * @consumer_tail: Reference used for storing consumer tail index. 2927 * @producer_head: Reference used for storing the producer head index. 2928 * 2929 * This is the client interface for getting the current indexes of the 2930 * QPair from the point of view of the caller as the consumer. 2931 */ 2932 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, 2933 u64 *consumer_tail, 2934 u64 *producer_head) 2935 { 2936 struct vmci_queue_header *produce_q_header; 2937 struct vmci_queue_header *consume_q_header; 2938 int result; 2939 2940 if (!qpair) 2941 return VMCI_ERROR_INVALID_ARGS; 2942 2943 qp_lock(qpair); 2944 result = 2945 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2946 if (result == VMCI_SUCCESS) 2947 vmci_q_header_get_pointers(consume_q_header, produce_q_header, 2948 consumer_tail, producer_head); 2949 qp_unlock(qpair); 2950 2951 if (result == VMCI_SUCCESS && 2952 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || 2953 (producer_head && *producer_head >= qpair->consume_q_size))) 2954 return VMCI_ERROR_INVALID_SIZE; 2955 2956 return result; 2957 } 2958 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); 2959 2960 /* 2961 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. 2962 * @qpair: Pointer to the queue pair struct. 2963 * 2964 * This is the client interface for getting the amount of free 2965 * space in the QPair from the point of view of the caller as 2966 * the producer which is the common case. Returns < 0 if err, else 2967 * available bytes into which data can be enqueued if > 0. 2968 */ 2969 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) 2970 { 2971 struct vmci_queue_header *produce_q_header; 2972 struct vmci_queue_header *consume_q_header; 2973 s64 result; 2974 2975 if (!qpair) 2976 return VMCI_ERROR_INVALID_ARGS; 2977 2978 qp_lock(qpair); 2979 result = 2980 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2981 if (result == VMCI_SUCCESS) 2982 result = vmci_q_header_free_space(produce_q_header, 2983 consume_q_header, 2984 qpair->produce_q_size); 2985 else 2986 result = 0; 2987 2988 qp_unlock(qpair); 2989 2990 return result; 2991 } 2992 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); 2993 2994 /* 2995 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. 2996 * @qpair: Pointer to the queue pair struct.
2997 * 2998 * This is the client interface for getting the amount of free 2999 * space in the QPair from the point of the view of the caller as 3000 * the consumer which is not the common case. Returns < 0 if err, else 3001 * available bytes into which data can be enqueued if > 0. 3002 */ 3003 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) 3004 { 3005 struct vmci_queue_header *produce_q_header; 3006 struct vmci_queue_header *consume_q_header; 3007 s64 result; 3008 3009 if (!qpair) 3010 return VMCI_ERROR_INVALID_ARGS; 3011 3012 qp_lock(qpair); 3013 result = 3014 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3015 if (result == VMCI_SUCCESS) 3016 result = vmci_q_header_free_space(consume_q_header, 3017 produce_q_header, 3018 qpair->consume_q_size); 3019 else 3020 result = 0; 3021 3022 qp_unlock(qpair); 3023 3024 return result; 3025 } 3026 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); 3027 3028 /* 3029 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from 3030 * producer queue. 3031 * @qpair: Pointer to the queue pair struct. 3032 * 3033 * This is the client interface for getting the amount of 3034 * enqueued data in the QPair from the point of the view of the 3035 * caller as the producer which is not the common case. Returns < 0 if err, 3036 * else available bytes that may be read. 3037 */ 3038 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) 3039 { 3040 struct vmci_queue_header *produce_q_header; 3041 struct vmci_queue_header *consume_q_header; 3042 s64 result; 3043 3044 if (!qpair) 3045 return VMCI_ERROR_INVALID_ARGS; 3046 3047 qp_lock(qpair); 3048 result = 3049 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3050 if (result == VMCI_SUCCESS) 3051 result = vmci_q_header_buf_ready(produce_q_header, 3052 consume_q_header, 3053 qpair->produce_q_size); 3054 else 3055 result = 0; 3056 3057 qp_unlock(qpair); 3058 3059 return result; 3060 } 3061 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); 3062 3063 /* 3064 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from 3065 * consumer queue. 3066 * @qpair: Pointer to the queue pair struct. 3067 * 3068 * This is the client interface for getting the amount of 3069 * enqueued data in the QPair from the point of the view of the 3070 * caller as the consumer which is the normal case. Returns < 0 if err, 3071 * else available bytes that may be read. 3072 */ 3073 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) 3074 { 3075 struct vmci_queue_header *produce_q_header; 3076 struct vmci_queue_header *consume_q_header; 3077 s64 result; 3078 3079 if (!qpair) 3080 return VMCI_ERROR_INVALID_ARGS; 3081 3082 qp_lock(qpair); 3083 result = 3084 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3085 if (result == VMCI_SUCCESS) 3086 result = vmci_q_header_buf_ready(consume_q_header, 3087 produce_q_header, 3088 qpair->consume_q_size); 3089 else 3090 result = 0; 3091 3092 qp_unlock(qpair); 3093 3094 return result; 3095 } 3096 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); 3097 3098 /* 3099 * vmci_qpair_enqueue() - Throw data on the queue. 3100 * @qpair: Pointer to the queue pair struct. 3101 * @buf: Pointer to buffer containing data 3102 * @buf_size: Length of buffer. 3103 * @buf_type: Buffer type (Unused). 3104 * 3105 * This is the client interface for enqueueing data into the queue. 3106 * Returns number of bytes enqueued or < 0 on error. 
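 *
 * A minimal usage sketch (illustrative only; error handling beyond the
 * return-value check is omitted):
 *
 *	char msg[] = "ping";
 *	ssize_t written;
 *
 *	written = vmci_qpair_enqueue(qpair, msg, sizeof(msg), 0);
 *	if (written < VMCI_SUCCESS)
 *		pr_warn("enqueue failed: %zd\n", written);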
3107 */ 3108 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, 3109 const void *buf, 3110 size_t buf_size, 3111 int buf_type) 3112 { 3113 ssize_t result; 3114 3115 if (!qpair || !buf) 3116 return VMCI_ERROR_INVALID_ARGS; 3117 3118 qp_lock(qpair); 3119 3120 do { 3121 result = qp_enqueue_locked(qpair->produce_q, 3122 qpair->consume_q, 3123 qpair->produce_q_size, 3124 buf, buf_size, 3125 qp_memcpy_to_queue); 3126 3127 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3128 !qp_wait_for_ready_queue(qpair)) 3129 result = VMCI_ERROR_WOULD_BLOCK; 3130 3131 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3132 3133 qp_unlock(qpair); 3134 3135 return result; 3136 } 3137 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); 3138 3139 /* 3140 * vmci_qpair_dequeue() - Get data from the queue. 3141 * @qpair: Pointer to the queue pair struct. 3142 * @buf: Pointer to buffer for the data 3143 * @buf_size: Length of buffer. 3144 * @buf_type: Buffer type (Unused). 3145 * 3146 * This is the client interface for dequeueing data from the queue. 3147 * Returns number of bytes dequeued or < 0 on error. 3148 */ 3149 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, 3150 void *buf, 3151 size_t buf_size, 3152 int buf_type) 3153 { 3154 ssize_t result; 3155 3156 if (!qpair || !buf) 3157 return VMCI_ERROR_INVALID_ARGS; 3158 3159 qp_lock(qpair); 3160 3161 do { 3162 result = qp_dequeue_locked(qpair->produce_q, 3163 qpair->consume_q, 3164 qpair->consume_q_size, 3165 buf, buf_size, 3166 qp_memcpy_from_queue, true); 3167 3168 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3169 !qp_wait_for_ready_queue(qpair)) 3170 result = VMCI_ERROR_WOULD_BLOCK; 3171 3172 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3173 3174 qp_unlock(qpair); 3175 3176 return result; 3177 } 3178 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); 3179 3180 /* 3181 * vmci_qpair_peek() - Peek at the data in the queue. 3182 * @qpair: Pointer to the queue pair struct. 3183 * @buf: Pointer to buffer for the data 3184 * @buf_size: Length of buffer. 3185 * @buf_type: Buffer type (Unused on Linux). 3186 * 3187 * This is the client interface for peeking into a queue. (I.e., 3188 * copy data from the queue without updating the head pointer.) 3189 * Returns number of bytes dequeued or < 0 on error. 3190 */ 3191 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, 3192 void *buf, 3193 size_t buf_size, 3194 int buf_type) 3195 { 3196 ssize_t result; 3197 3198 if (!qpair || !buf) 3199 return VMCI_ERROR_INVALID_ARGS; 3200 3201 qp_lock(qpair); 3202 3203 do { 3204 result = qp_dequeue_locked(qpair->produce_q, 3205 qpair->consume_q, 3206 qpair->consume_q_size, 3207 buf, buf_size, 3208 qp_memcpy_from_queue, false); 3209 3210 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3211 !qp_wait_for_ready_queue(qpair)) 3212 result = VMCI_ERROR_WOULD_BLOCK; 3213 3214 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3215 3216 qp_unlock(qpair); 3217 3218 return result; 3219 } 3220 EXPORT_SYMBOL_GPL(vmci_qpair_peek); 3221 3222 /* 3223 * vmci_qpair_enquev() - Throw data on the queue using iov. 3224 * @qpair: Pointer to the queue pair struct. 3225 * @iov: Pointer to buffer containing data 3226 * @iov_size: Length of buffer. 3227 * @buf_type: Buffer type (Unused). 3228 * 3229 * This is the client interface for enqueueing data into the queue. 3230 * This function uses IO vectors to handle the work. Returns number 3231 * of bytes enqueued or < 0 on error. 
3232 */ 3233 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 3234 void *iov, 3235 size_t iov_size, 3236 int buf_type) 3237 { 3238 ssize_t result; 3239 3240 if (!qpair || !iov) 3241 return VMCI_ERROR_INVALID_ARGS; 3242 3243 qp_lock(qpair); 3244 3245 do { 3246 result = qp_enqueue_locked(qpair->produce_q, 3247 qpair->consume_q, 3248 qpair->produce_q_size, 3249 iov, iov_size, 3250 qp_memcpy_to_queue_iov); 3251 3252 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3253 !qp_wait_for_ready_queue(qpair)) 3254 result = VMCI_ERROR_WOULD_BLOCK; 3255 3256 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3257 3258 qp_unlock(qpair); 3259 3260 return result; 3261 } 3262 EXPORT_SYMBOL_GPL(vmci_qpair_enquev); 3263 3264 /* 3265 * vmci_qpair_dequev() - Get data from the queue using iov. 3266 * @qpair: Pointer to the queue pair struct. 3267 * @iov: Pointer to buffer for the data 3268 * @iov_size: Length of buffer. 3269 * @buf_type: Buffer type (Unused). 3270 * 3271 * This is the client interface for dequeueing data from the queue. 3272 * This function uses IO vectors to handle the work. Returns number 3273 * of bytes dequeued or < 0 on error. 3274 */ 3275 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 3276 void *iov, 3277 size_t iov_size, 3278 int buf_type) 3279 { 3280 ssize_t result; 3281 3282 if (!qpair || !iov) 3283 return VMCI_ERROR_INVALID_ARGS; 3284 3285 qp_lock(qpair); 3286 3287 do { 3288 result = qp_dequeue_locked(qpair->produce_q, 3289 qpair->consume_q, 3290 qpair->consume_q_size, 3291 iov, iov_size, 3292 qp_memcpy_from_queue_iov, 3293 true); 3294 3295 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3296 !qp_wait_for_ready_queue(qpair)) 3297 result = VMCI_ERROR_WOULD_BLOCK; 3298 3299 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3300 3301 qp_unlock(qpair); 3302 3303 return result; 3304 } 3305 EXPORT_SYMBOL_GPL(vmci_qpair_dequev); 3306 3307 /* 3308 * vmci_qpair_peekv() - Peek at the data in the queue using iov. 3309 * @qpair: Pointer to the queue pair struct. 3310 * @iov: Pointer to buffer for the data 3311 * @iov_size: Length of buffer. 3312 * @buf_type: Buffer type (Unused on Linux). 3313 * 3314 * This is the client interface for peeking into a queue. (I.e., 3315 * copy data from the queue without updating the head pointer.) 3316 * This function uses IO vectors to handle the work. Returns number 3317 * of bytes peeked or < 0 on error. 3318 */ 3319 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, 3320 void *iov, 3321 size_t iov_size, 3322 int buf_type) 3323 { 3324 ssize_t result; 3325 3326 if (!qpair || !iov) 3327 return VMCI_ERROR_INVALID_ARGS; 3328 3329 qp_lock(qpair); 3330 3331 do { 3332 result = qp_dequeue_locked(qpair->produce_q, 3333 qpair->consume_q, 3334 qpair->consume_q_size, 3335 iov, iov_size, 3336 qp_memcpy_from_queue_iov, 3337 false); 3338 3339 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3340 !qp_wait_for_ready_queue(qpair)) 3341 result = VMCI_ERROR_WOULD_BLOCK; 3342 3343 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3344 3345 qp_unlock(qpair); 3346 return result; 3347 } 3348 EXPORT_SYMBOL_GPL(vmci_qpair_peekv); 3349
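/*
 * The sketch below is purely illustrative and is not compiled into the
 * driver; it shows how a host-side client might typically use the
 * exported vmci_qpair_* API. The function name, queue sizes, peer
 * context ID and error handling are assumptions of the example, not
 * requirements of the API.
 */
#if 0
static int example_qpair_roundtrip(u32 peer_cid)
{
	struct vmci_qp *qpair = NULL;
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	char out[] = "hello";
	char in[sizeof(out)];
	ssize_t n;
	int result;

	/* Allocate a 64KiB/64KiB queue pair and attach to it. */
	result = vmci_qpair_alloc(&qpair, &handle, 64 * 1024, 64 * 1024,
				  peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
	if (result < VMCI_SUCCESS)
		return result;

	/* Produce a small message; fewer bytes may be written if full. */
	n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
	if (n < VMCI_SUCCESS) {
		result = (int) n;
		goto detach;
	}

	/* Consume whatever the peer has produced for us, if anything. */
	n = vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
	if (n < VMCI_SUCCESS && n != VMCI_ERROR_QUEUEPAIR_NODATA)
		result = (int) n;

detach:
	/* Detach and free the vmci_qp structure. */
	vmci_qpair_detach(&qpair);
	return result;
}
#endif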