1 /* 2 * VMware VMCI Driver 3 * 4 * Copyright (C) 2012 VMware, Inc. All rights reserved. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the 8 * Free Software Foundation version 2 and no later version. 9 * 10 * This program is distributed in the hope that it will be useful, but 11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY 12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * for more details. 14 */ 15 16 #include <linux/vmw_vmci_defs.h> 17 #include <linux/vmw_vmci_api.h> 18 #include <linux/highmem.h> 19 #include <linux/kernel.h> 20 #include <linux/mm.h> 21 #include <linux/module.h> 22 #include <linux/mutex.h> 23 #include <linux/pagemap.h> 24 #include <linux/pci.h> 25 #include <linux/sched.h> 26 #include <linux/slab.h> 27 #include <linux/uio.h> 28 #include <linux/wait.h> 29 #include <linux/vmalloc.h> 30 #include <linux/skbuff.h> 31 32 #include "vmci_handle_array.h" 33 #include "vmci_queue_pair.h" 34 #include "vmci_datagram.h" 35 #include "vmci_resource.h" 36 #include "vmci_context.h" 37 #include "vmci_driver.h" 38 #include "vmci_event.h" 39 #include "vmci_route.h" 40 41 /* 42 * In the following, we will distinguish between two kinds of VMX processes - 43 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized 44 * VMCI page files in the VMX and support VM to VM communication and the 45 * newer ones that use the guest memory directly. We will in the following 46 * refer to the older VMX versions as old-style VMX'en, and the newer ones as 47 * new-style VMX'en. 48 * 49 * The state transition diagram is as follows (the VMCIQPB_ prefix has been 50 * removed for readability) - see below for more details on the transitions: 51 * 52 * -------------- NEW ------------- 53 * | | 54 * \_/ \_/ 55 * CREATED_NO_MEM <-----------------> CREATED_MEM 56 * | | | 57 * | o-----------------------o | 58 * | | | 59 * \_/ \_/ \_/ 60 * ATTACHED_NO_MEM <----------------> ATTACHED_MEM 61 * | | | 62 * | o----------------------o | 63 * | | | 64 * \_/ \_/ \_/ 65 * SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM 66 * | | 67 * | | 68 * -------------> gone <------------- 69 * 70 * In more detail. When a VMCI queue pair is first created, it will be in the 71 * VMCIQPB_NEW state. It will then move into one of the following states: 72 * 73 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either: 74 * 75 * - the create was performed by a host endpoint, in which case there is 76 * no backing memory yet. 77 * 78 * - the create was initiated by an old-style VMX that uses 79 * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at 80 * a later point in time. This state can be distinguished from the one 81 * above by the context ID of the creator. A host side is not allowed to 82 * attach until the page store has been set. 83 * 84 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair 85 * is created by a VMX using the queue pair device backend that 86 * sets the UVAs of the queue pair immediately and stores the 87 * information for later attachers. At this point, it is ready for 88 * the host side to attach to it. 89 * 90 * Once the queue pair is in one of the created states (with the exception of 91 * the case mentioned for older VMX'en above), it is possible to attach to the 92 * queue pair.
Again we have two new states possible: 93 * 94 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following 95 * paths: 96 * 97 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue 98 * pair, and attaches to a queue pair previously created by the host side. 99 * 100 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair 101 * already created by a guest. 102 * 103 * - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls 104 * vmci_qp_broker_set_page_store (see below). 105 * 106 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair was already in the 107 * VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style VMX will 108 * bring the queue pair into this state. Once vmci_qp_broker_set_page_store 109 * is called to register the user memory, the VMCIQPB_ATTACHED_MEM state 110 * will be entered. 111 * 112 * From the attached queue pair, the queue pair can enter the shutdown states 113 * when either side of the queue pair detaches. If the guest side detaches 114 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where 115 * the content of the queue pair will no longer be available. If the host 116 * side detaches first, the queue pair will either enter the 117 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or 118 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped 119 * (e.g., the host detaches while a guest is stunned). 120 * 121 * New-style VMX'en will also unmap guest memory, if the guest is 122 * quiesced, e.g., during a snapshot operation. In that case, the guest 123 * memory will no longer be available, and the queue pair will transition from 124 * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more, 125 * in which case the queue pair will transition from the *_NO_MEM state at that 126 * point back to the *_MEM state. Note that the *_NO_MEM state may have changed, 127 * since the peer may have either attached or detached in the meantime. The 128 * values are laid out such that ++ on a state will move from a *_NO_MEM to a 129 * *_MEM state, and vice versa. 130 */ 131 132 /* 133 * VMCIMemcpy{To,From}QueueFunc() prototypes. Functions of these 134 * types are passed around to enqueue and dequeue routines. Note that 135 * often the functions passed are simply wrappers around memcpy 136 * itself. 137 * 138 * Note: In order for the memcpy typedefs to be compatible with the VMKernel, 139 * there's an unused last parameter for the hosted side. In 140 * ESX, that parameter holds a buffer type. 141 */ 142 typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue, 143 u64 queue_offset, const void *src, 144 size_t src_offset, size_t size); 145 typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset, 146 const struct vmci_queue *queue, 147 u64 queue_offset, size_t size); 148 149 /* The Kernel specific component of the struct vmci_queue structure. */ 150 struct vmci_queue_kern_if { 151 struct mutex __mutex; /* Protects the queue. */ 152 struct mutex *mutex; /* Shared by producer and consumer queues. */ 153 size_t num_pages; /* Number of pages incl. header. */ 154 bool host; /* Host or guest? */ 155 union { 156 struct { 157 dma_addr_t *pas; 158 void **vas; 159 } g; /* Used by the guest. */ 160 struct { 161 struct page **page; 162 struct page **header_page; 163 } h; /* Used by the host. */ 164 } u; 165 }; 166 167 /* 168 * This structure is opaque to the clients.
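 *
 * Returning briefly to the state layout described at the top of this file:
 * the NO_MEM/MEM pairing is purely a property of how the VMCIQPB_* values
 * in enum qp_broker_state (defined below) are ordered. A minimal sketch of
 * hypothetical helpers relying on that ordering (illustrative only, not
 * part of the driver, and only meaningful for the CREATED/ATTACHED/SHUTDOWN
 * states):
 *
 *	static bool qp_state_has_mem(enum qp_broker_state s)
 *	{
 *		return s == VMCIQPB_CREATED_MEM ||
 *		       s == VMCIQPB_ATTACHED_MEM ||
 *		       s == VMCIQPB_SHUTDOWN_MEM;
 *	}
 *
 *	static enum qp_broker_state qp_state_set_mem(enum qp_broker_state s,
 *						     bool has_mem)
 *	{
 *		if (qp_state_has_mem(s) == has_mem)
 *			return s;
 *		return has_mem ? s + 1 : s - 1;
 *	}
 *
 * qp_state_set_mem() exploits the "++ moves a *_NO_MEM state to its *_MEM
 * counterpart" layout noted above; the driver itself encodes the same
 * property in the QPBROKERSTATE_HAS_MEM() macro below.
 *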
169 */ 170 struct vmci_qp { 171 struct vmci_handle handle; 172 struct vmci_queue *produce_q; 173 struct vmci_queue *consume_q; 174 u64 produce_q_size; 175 u64 consume_q_size; 176 u32 peer; 177 u32 flags; 178 u32 priv_flags; 179 bool guest_endpoint; 180 unsigned int blocked; 181 unsigned int generation; 182 wait_queue_head_t event; 183 }; 184 185 enum qp_broker_state { 186 VMCIQPB_NEW, 187 VMCIQPB_CREATED_NO_MEM, 188 VMCIQPB_CREATED_MEM, 189 VMCIQPB_ATTACHED_NO_MEM, 190 VMCIQPB_ATTACHED_MEM, 191 VMCIQPB_SHUTDOWN_NO_MEM, 192 VMCIQPB_SHUTDOWN_MEM, 193 VMCIQPB_GONE 194 }; 195 196 #define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \ 197 _qpb->state == VMCIQPB_ATTACHED_MEM || \ 198 _qpb->state == VMCIQPB_SHUTDOWN_MEM) 199 200 /* 201 * In the queue pair broker, we always use the guest point of view for 202 * the produce and consume queue values and references, e.g., the 203 * produce queue size stored is the guests produce queue size. The 204 * host endpoint will need to swap these around. The only exception is 205 * the local queue pairs on the host, in which case the host endpoint 206 * that creates the queue pair will have the right orientation, and 207 * the attaching host endpoint will need to swap. 208 */ 209 struct qp_entry { 210 struct list_head list_item; 211 struct vmci_handle handle; 212 u32 peer; 213 u32 flags; 214 u64 produce_size; 215 u64 consume_size; 216 u32 ref_count; 217 }; 218 219 struct qp_broker_entry { 220 struct vmci_resource resource; 221 struct qp_entry qp; 222 u32 create_id; 223 u32 attach_id; 224 enum qp_broker_state state; 225 bool require_trusted_attach; 226 bool created_by_trusted; 227 bool vmci_page_files; /* Created by VMX using VMCI page files */ 228 struct vmci_queue *produce_q; 229 struct vmci_queue *consume_q; 230 struct vmci_queue_header saved_produce_q; 231 struct vmci_queue_header saved_consume_q; 232 vmci_event_release_cb wakeup_cb; 233 void *client_data; 234 void *local_mem; /* Kernel memory for local queue pair */ 235 }; 236 237 struct qp_guest_endpoint { 238 struct vmci_resource resource; 239 struct qp_entry qp; 240 u64 num_ppns; 241 void *produce_q; 242 void *consume_q; 243 struct ppn_set ppn_set; 244 }; 245 246 struct qp_list { 247 struct list_head head; 248 struct mutex mutex; /* Protect queue list. */ 249 }; 250 251 static struct qp_list qp_broker_list = { 252 .head = LIST_HEAD_INIT(qp_broker_list.head), 253 .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex), 254 }; 255 256 static struct qp_list qp_guest_endpoints = { 257 .head = LIST_HEAD_INIT(qp_guest_endpoints.head), 258 .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex), 259 }; 260 261 #define INVALID_VMCI_GUEST_MEM_ID 0 262 #define QPE_NUM_PAGES(_QPE) ((u32) \ 263 (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ 264 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) 265 266 267 /* 268 * Frees kernel VA space for a given queue and its queue header, and 269 * frees physical data pages. 270 */ 271 static void qp_free_queue(void *q, u64 size) 272 { 273 struct vmci_queue *queue = q; 274 275 if (queue) { 276 u64 i; 277 278 /* Given size does not include header, so add in a page here. */ 279 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) { 280 dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE, 281 queue->kernel_if->u.g.vas[i], 282 queue->kernel_if->u.g.pas[i]); 283 } 284 285 vfree(queue); 286 } 287 } 288 289 /* 290 * Allocates kernel queue pages of specified size with IOMMU mappings, 291 * plus space for the queue structure/kernel interface and the queue 292 * header. 
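 *
 * As a concrete example of the page accounting used here and in the
 * QPE_NUM_PAGES() macro above (illustrative numbers, assuming a 4 KiB
 * PAGE_SIZE): a queue pair with produce_size = 3 KiB and
 * consume_size = 16 KiB needs
 *
 *	DIV_ROUND_UP(3 KiB, 4 KiB)  = 1 produce data page
 *	DIV_ROUND_UP(16 KiB, 4 KiB) = 4 consume data pages
 *	+ 1 header page per queue   = 2 header pages
 *
 * i.e. 7 pages in total, which is what QPE_NUM_PAGES() reports and what
 * the per-queue "DIV_ROUND_UP(size, PAGE_SIZE) + 1" computations below
 * allocate (2 pages for the produce queue, 5 for the consume queue).
 *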
293 */ 294 static void *qp_alloc_queue(u64 size, u32 flags) 295 { 296 u64 i; 297 struct vmci_queue *queue; 298 size_t pas_size; 299 size_t vas_size; 300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); 301 u64 num_pages; 302 303 if (size > SIZE_MAX - PAGE_SIZE) 304 return NULL; 305 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 306 if (num_pages > 307 (SIZE_MAX - queue_size) / 308 (sizeof(*queue->kernel_if->u.g.pas) + 309 sizeof(*queue->kernel_if->u.g.vas))) 310 return NULL; 311 312 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); 313 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); 314 queue_size += pas_size + vas_size; 315 316 queue = vmalloc(queue_size); 317 if (!queue) 318 return NULL; 319 320 queue->q_header = NULL; 321 queue->saved_header = NULL; 322 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); 323 queue->kernel_if->mutex = NULL; 324 queue->kernel_if->num_pages = num_pages; 325 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); 326 queue->kernel_if->u.g.vas = 327 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); 328 queue->kernel_if->host = false; 329 330 for (i = 0; i < num_pages; i++) { 331 queue->kernel_if->u.g.vas[i] = 332 dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE, 333 &queue->kernel_if->u.g.pas[i], 334 GFP_KERNEL); 335 if (!queue->kernel_if->u.g.vas[i]) { 336 /* Size excl. the header. */ 337 qp_free_queue(queue, i * PAGE_SIZE); 338 return NULL; 339 } 340 } 341 342 /* Queue header is the first page. */ 343 queue->q_header = queue->kernel_if->u.g.vas[0]; 344 345 return queue; 346 } 347 348 /* 349 * Copies from a given buffer or iovector to a VMCI Queue. Uses 350 * kmap()/kunmap() to dynamically map/unmap required portions of the queue 351 * by traversing the offset -> page translation structure for the queue. 352 * Assumes that offset + size does not wrap around in the queue. 353 */ 354 static int __qp_memcpy_to_queue(struct vmci_queue *queue, 355 u64 queue_offset, 356 const void *src, 357 size_t size, 358 bool is_iovec) 359 { 360 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 361 size_t bytes_copied = 0; 362 363 while (bytes_copied < size) { 364 const u64 page_index = 365 (queue_offset + bytes_copied) / PAGE_SIZE; 366 const size_t page_offset = 367 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 368 void *va; 369 size_t to_copy; 370 371 if (kernel_if->host) 372 va = kmap(kernel_if->u.h.page[page_index]); 373 else 374 va = kernel_if->u.g.vas[page_index + 1]; 375 /* Skip header. */ 376 377 if (size - bytes_copied > PAGE_SIZE - page_offset) 378 /* Enough payload to fill up from this page. */ 379 to_copy = PAGE_SIZE - page_offset; 380 else 381 to_copy = size - bytes_copied; 382 383 if (is_iovec) { 384 struct msghdr *msg = (struct msghdr *)src; 385 int err; 386 387 /* The iovec will track bytes_copied internally. */ 388 err = memcpy_from_msg((u8 *)va + page_offset, 389 msg, to_copy); 390 if (err != 0) { 391 if (kernel_if->host) 392 kunmap(kernel_if->u.h.page[page_index]); 393 return VMCI_ERROR_INVALID_ARGS; 394 } 395 } else { 396 memcpy((u8 *)va + page_offset, 397 (u8 *)src + bytes_copied, to_copy); 398 } 399 400 bytes_copied += to_copy; 401 if (kernel_if->host) 402 kunmap(kernel_if->u.h.page[page_index]); 403 } 404 405 return VMCI_SUCCESS; 406 } 407 408 /* 409 * Copies to a given buffer or iovector from a VMCI Queue. Uses 410 * kmap()/kunmap() to dynamically map/unmap required portions of the queue 411 * by traversing the offset -> page translation structure for the queue. 
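 *
 * For illustration (hypothetical numbers, 4 KiB PAGE_SIZE): with
 * queue_offset + bytes_copied = 5000, the loop below computes
 * page_index = 5000 / 4096 = 1 and page_offset = 5000 & 4095 = 904, so
 * copying continues 904 bytes into the second data page. On the guest
 * side the lookup is vas[page_index + 1] because vas[0] holds the queue
 * header, while on the host side page[page_index] already points past
 * the header page.
 *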
412 * Assumes that offset + size does not wrap around in the queue. 413 */ 414 static int __qp_memcpy_from_queue(void *dest, 415 const struct vmci_queue *queue, 416 u64 queue_offset, 417 size_t size, 418 bool is_iovec) 419 { 420 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; 421 size_t bytes_copied = 0; 422 423 while (bytes_copied < size) { 424 const u64 page_index = 425 (queue_offset + bytes_copied) / PAGE_SIZE; 426 const size_t page_offset = 427 (queue_offset + bytes_copied) & (PAGE_SIZE - 1); 428 void *va; 429 size_t to_copy; 430 431 if (kernel_if->host) 432 va = kmap(kernel_if->u.h.page[page_index]); 433 else 434 va = kernel_if->u.g.vas[page_index + 1]; 435 /* Skip header. */ 436 437 if (size - bytes_copied > PAGE_SIZE - page_offset) 438 /* Enough payload to fill up this page. */ 439 to_copy = PAGE_SIZE - page_offset; 440 else 441 to_copy = size - bytes_copied; 442 443 if (is_iovec) { 444 struct msghdr *msg = dest; 445 int err; 446 447 /* The iovec will track bytes_copied internally. */ 448 err = memcpy_to_msg(msg, (u8 *)va + page_offset, 449 to_copy); 450 if (err != 0) { 451 if (kernel_if->host) 452 kunmap(kernel_if->u.h.page[page_index]); 453 return VMCI_ERROR_INVALID_ARGS; 454 } 455 } else { 456 memcpy((u8 *)dest + bytes_copied, 457 (u8 *)va + page_offset, to_copy); 458 } 459 460 bytes_copied += to_copy; 461 if (kernel_if->host) 462 kunmap(kernel_if->u.h.page[page_index]); 463 } 464 465 return VMCI_SUCCESS; 466 } 467 468 /* 469 * Allocates two lists of PPNs --- one for the pages in the produce queue, 470 * and the other for the pages in the consume queue. Initializes the lists 471 * of PPNs with the page frame numbers of the KVA for the two queues (and 472 * the queue headers). 473 */ 474 static int qp_alloc_ppn_set(void *prod_q, 475 u64 num_produce_pages, 476 void *cons_q, 477 u64 num_consume_pages, struct ppn_set *ppn_set) 478 { 479 u32 *produce_ppns; 480 u32 *consume_ppns; 481 struct vmci_queue *produce_q = prod_q; 482 struct vmci_queue *consume_q = cons_q; 483 u64 i; 484 485 if (!produce_q || !num_produce_pages || !consume_q || 486 !num_consume_pages || !ppn_set) 487 return VMCI_ERROR_INVALID_ARGS; 488 489 if (ppn_set->initialized) 490 return VMCI_ERROR_ALREADY_EXISTS; 491 492 produce_ppns = 493 kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL); 494 if (!produce_ppns) 495 return VMCI_ERROR_NO_MEM; 496 497 consume_ppns = 498 kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL); 499 if (!consume_ppns) { 500 kfree(produce_ppns); 501 return VMCI_ERROR_NO_MEM; 502 } 503 504 for (i = 0; i < num_produce_pages; i++) { 505 unsigned long pfn; 506 507 produce_ppns[i] = 508 produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 509 pfn = produce_ppns[i]; 510 511 /* Fail allocation if PFN isn't supported by hypervisor. */ 512 if (sizeof(pfn) > sizeof(*produce_ppns) 513 && pfn != produce_ppns[i]) 514 goto ppn_error; 515 } 516 517 for (i = 0; i < num_consume_pages; i++) { 518 unsigned long pfn; 519 520 consume_ppns[i] = 521 consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; 522 pfn = consume_ppns[i]; 523 524 /* Fail allocation if PFN isn't supported by hypervisor.
*/ 525 if (sizeof(pfn) > sizeof(*consume_ppns) 526 && pfn != consume_ppns[i]) 527 goto ppn_error; 528 } 529 530 ppn_set->num_produce_pages = num_produce_pages; 531 ppn_set->num_consume_pages = num_consume_pages; 532 ppn_set->produce_ppns = produce_ppns; 533 ppn_set->consume_ppns = consume_ppns; 534 ppn_set->initialized = true; 535 return VMCI_SUCCESS; 536 537 ppn_error: 538 kfree(produce_ppns); 539 kfree(consume_ppns); 540 return VMCI_ERROR_INVALID_ARGS; 541 } 542 543 /* 544 * Frees the two lists of PPNs for a queue pair. 545 */ 546 static void qp_free_ppn_set(struct ppn_set *ppn_set) 547 { 548 if (ppn_set->initialized) { 549 /* Do not call these functions on NULL inputs. */ 550 kfree(ppn_set->produce_ppns); 551 kfree(ppn_set->consume_ppns); 552 } 553 memset(ppn_set, 0, sizeof(*ppn_set)); 554 } 555 556 /* 557 * Populates the list of PPNs in the hypercall structure with the PPNs 558 * of the produce queue and the consume queue. 559 */ 560 static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) 561 { 562 memcpy(call_buf, ppn_set->produce_ppns, 563 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns)); 564 memcpy(call_buf + 565 ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns), 566 ppn_set->consume_ppns, 567 ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns)); 568 569 return VMCI_SUCCESS; 570 } 571 572 static int qp_memcpy_to_queue(struct vmci_queue *queue, 573 u64 queue_offset, 574 const void *src, size_t src_offset, size_t size) 575 { 576 return __qp_memcpy_to_queue(queue, queue_offset, 577 (u8 *)src + src_offset, size, false); 578 } 579 580 static int qp_memcpy_from_queue(void *dest, 581 size_t dest_offset, 582 const struct vmci_queue *queue, 583 u64 queue_offset, size_t size) 584 { 585 return __qp_memcpy_from_queue((u8 *)dest + dest_offset, 586 queue, queue_offset, size, false); 587 } 588 589 /* 590 * Copies from a given iovec to a VMCI Queue. 591 */ 592 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, 593 u64 queue_offset, 594 const void *msg, 595 size_t src_offset, size_t size) 596 { 597 598 /* 599 * We ignore src_offset because src is really a struct iovec * and will 600 * maintain offset internally. 601 */ 602 return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true); 603 } 604 605 /* 606 * Copies to a given iovec from a VMCI Queue. 607 */ 608 static int qp_memcpy_from_queue_iov(void *dest, 609 size_t dest_offset, 610 const struct vmci_queue *queue, 611 u64 queue_offset, size_t size) 612 { 613 /* 614 * We ignore dest_offset because dest is really a struct iovec * and 615 * will maintain offset internally. 616 */ 617 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true); 618 } 619 620 /* 621 * Allocates kernel VA space of specified size plus space for the queue 622 * and kernel interface. This is different from the guest queue allocator, 623 * because we do not allocate our own queue header/data pages here but 624 * share those of the guest.
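 *
 * Both this allocator and qp_alloc_queue() above guard their size math
 * against overflow before allocating anything. A minimal sketch of that
 * check, pulled out into a hypothetical helper (illustrative only; the
 * driver performs the same checks inline):
 *
 *	static bool qp_num_pages_ok(u64 size, size_t fixed_size,
 *				    size_t per_page_cost)
 *	{
 *		u64 num_pages;
 *
 *		if (size > SIZE_MAX - PAGE_SIZE)
 *			return false;
 *		num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 *		return num_pages <= (SIZE_MAX - fixed_size) / per_page_cost;
 *	}
 *
 * where fixed_size is the struct vmci_queue plus kernel_if footprint and
 * per_page_cost is the per-page bookkeeping (a struct page pointer here,
 * a PA/VA pair in the guest allocator).
 *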
625 */ 626 static struct vmci_queue *qp_host_alloc_queue(u64 size) 627 { 628 struct vmci_queue *queue; 629 size_t queue_page_size; 630 u64 num_pages; 631 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); 632 633 if (size > SIZE_MAX - PAGE_SIZE) 634 return NULL; 635 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; 636 if (num_pages > (SIZE_MAX - queue_size) / 637 sizeof(*queue->kernel_if->u.h.page)) 638 return NULL; 639 640 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); 641 642 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); 643 if (queue) { 644 queue->q_header = NULL; 645 queue->saved_header = NULL; 646 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); 647 queue->kernel_if->host = true; 648 queue->kernel_if->mutex = NULL; 649 queue->kernel_if->num_pages = num_pages; 650 queue->kernel_if->u.h.header_page = 651 (struct page **)((u8 *)queue + queue_size); 652 queue->kernel_if->u.h.page = 653 &queue->kernel_if->u.h.header_page[1]; 654 } 655 656 return queue; 657 } 658 659 /* 660 * Frees kernel memory for a given queue (header plus translation 661 * structure). 662 */ 663 static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) 664 { 665 kfree(queue); 666 } 667 668 /* 669 * Initialize the mutex for the pair of queues. This mutex is used to 670 * protect the q_header and the buffer from changing out from under any 671 * users of either queue. Of course, it's only any good if the mutexes 672 * are actually acquired. Queue structure must lie on non-paged memory 673 * or we cannot guarantee access to the mutex. 674 */ 675 static void qp_init_queue_mutex(struct vmci_queue *produce_q, 676 struct vmci_queue *consume_q) 677 { 678 /* 679 * Only the host queue has shared state - the guest queues do not 680 * need to synchronize access using a queue mutex. 681 */ 682 683 if (produce_q->kernel_if->host) { 684 produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; 685 consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex; 686 mutex_init(produce_q->kernel_if->mutex); 687 } 688 } 689 690 /* 691 * Cleans up the mutex for the pair of queues. 692 */ 693 static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q, 694 struct vmci_queue *consume_q) 695 { 696 if (produce_q->kernel_if->host) { 697 produce_q->kernel_if->mutex = NULL; 698 consume_q->kernel_if->mutex = NULL; 699 } 700 } 701 702 /* 703 * Acquire the mutex for the queue. Note that the produce_q and 704 * the consume_q share a mutex. So, only one of the two need to 705 * be passed in to this routine. Either will work just fine. 706 */ 707 static void qp_acquire_queue_mutex(struct vmci_queue *queue) 708 { 709 if (queue->kernel_if->host) 710 mutex_lock(queue->kernel_if->mutex); 711 } 712 713 /* 714 * Release the mutex for the queue. Note that the produce_q and 715 * the consume_q share a mutex. So, only one of the two need to 716 * be passed in to this routine. Either will work just fine. 717 */ 718 static void qp_release_queue_mutex(struct vmci_queue *queue) 719 { 720 if (queue->kernel_if->host) 721 mutex_unlock(queue->kernel_if->mutex); 722 } 723 724 /* 725 * Helper function to release pages in the PageStoreAttachInfo 726 * previously obtained using get_user_pages. 
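 *
 * For reference, the pin/release lifecycle these helpers implement looks
 * roughly like this for a single page (illustrative sketch, error
 * handling trimmed):
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(uva, 1, 1, &page) != 1)
 *		return VMCI_ERROR_NO_MEM;
 *	... read or write the page via kmap()/kunmap() ...
 *	set_page_dirty(page);
 *	put_page(page);
 *
 * set_page_dirty() is only needed when the page was written, which is
 * why qp_release_pages() below takes a dirty flag.
 *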
727 */ 728 static void qp_release_pages(struct page **pages, 729 u64 num_pages, bool dirty) 730 { 731 int i; 732 733 for (i = 0; i < num_pages; i++) { 734 if (dirty) 735 set_page_dirty(pages[i]); 736 737 put_page(pages[i]); 738 pages[i] = NULL; 739 } 740 } 741 742 /* 743 * Lock the user pages referenced by the {produce,consume}Buffer 744 * struct into memory and populate the {produce,consume}Pages 745 * arrays in the attach structure with them. 746 */ 747 static int qp_host_get_user_memory(u64 produce_uva, 748 u64 consume_uva, 749 struct vmci_queue *produce_q, 750 struct vmci_queue *consume_q) 751 { 752 int retval; 753 int err = VMCI_SUCCESS; 754 755 retval = get_user_pages_fast((uintptr_t) produce_uva, 756 produce_q->kernel_if->num_pages, 1, 757 produce_q->kernel_if->u.h.header_page); 758 if (retval < produce_q->kernel_if->num_pages) { 759 pr_debug("get_user_pages_fast(produce) failed (retval=%d)", 760 retval); 761 qp_release_pages(produce_q->kernel_if->u.h.header_page, 762 retval, false); 763 err = VMCI_ERROR_NO_MEM; 764 goto out; 765 } 766 767 retval = get_user_pages_fast((uintptr_t) consume_uva, 768 consume_q->kernel_if->num_pages, 1, 769 consume_q->kernel_if->u.h.header_page); 770 if (retval < consume_q->kernel_if->num_pages) { 771 pr_debug("get_user_pages_fast(consume) failed (retval=%d)", 772 retval); 773 qp_release_pages(consume_q->kernel_if->u.h.header_page, 774 retval, false); 775 qp_release_pages(produce_q->kernel_if->u.h.header_page, 776 produce_q->kernel_if->num_pages, false); 777 err = VMCI_ERROR_NO_MEM; 778 } 779 780 out: 781 return err; 782 } 783 784 /* 785 * Registers the specification of the user pages used for backing a queue 786 * pair. Enough information to map in pages is stored in the OS specific 787 * part of the struct vmci_queue structure. 788 */ 789 static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store, 790 struct vmci_queue *produce_q, 791 struct vmci_queue *consume_q) 792 { 793 u64 produce_uva; 794 u64 consume_uva; 795 796 /* 797 * The new style and the old style mapping only differs in 798 * that we either get a single or two UVAs, so we split the 799 * single UVA range at the appropriate spot. 800 */ 801 produce_uva = page_store->pages; 802 consume_uva = page_store->pages + 803 produce_q->kernel_if->num_pages * PAGE_SIZE; 804 return qp_host_get_user_memory(produce_uva, consume_uva, produce_q, 805 consume_q); 806 } 807 808 /* 809 * Releases and removes the references to user pages stored in the attach 810 * struct. Pages are released from the page cache and may become 811 * swappable again. 812 */ 813 static void qp_host_unregister_user_memory(struct vmci_queue *produce_q, 814 struct vmci_queue *consume_q) 815 { 816 qp_release_pages(produce_q->kernel_if->u.h.header_page, 817 produce_q->kernel_if->num_pages, true); 818 memset(produce_q->kernel_if->u.h.header_page, 0, 819 sizeof(*produce_q->kernel_if->u.h.header_page) * 820 produce_q->kernel_if->num_pages); 821 qp_release_pages(consume_q->kernel_if->u.h.header_page, 822 consume_q->kernel_if->num_pages, true); 823 memset(consume_q->kernel_if->u.h.header_page, 0, 824 sizeof(*consume_q->kernel_if->u.h.header_page) * 825 consume_q->kernel_if->num_pages); 826 } 827 828 /* 829 * Once qp_host_register_user_memory has been performed on a 830 * queue, the queue pair headers can be mapped into the 831 * kernel. Once mapped, they must be unmapped with 832 * qp_host_unmap_queues prior to calling 833 * qp_host_unregister_user_memory. 834 * Pages are pinned. 
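 *
 * A hypothetical host-side caller would therefore follow this order
 * (illustrative sketch, error handling omitted):
 *
 *	qp_host_register_user_memory(page_store, produce_q, consume_q);
 *	qp_host_map_queues(produce_q, consume_q);
 *	... operate on produce_q->q_header and consume_q->q_header ...
 *	qp_host_unmap_queues(gid, produce_q, consume_q);
 *	qp_host_unregister_user_memory(produce_q, consume_q);
 *
 * The two headers are vmap'ed as a single contiguous two-page mapping,
 * which is why qp_host_unmap_queues() only vunmaps the lower of the two.
 *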
835 */ 836 static int qp_host_map_queues(struct vmci_queue *produce_q, 837 struct vmci_queue *consume_q) 838 { 839 int result; 840 841 if (!produce_q->q_header || !consume_q->q_header) { 842 struct page *headers[2]; 843 844 if (produce_q->q_header != consume_q->q_header) 845 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 846 847 if (produce_q->kernel_if->u.h.header_page == NULL || 848 *produce_q->kernel_if->u.h.header_page == NULL) 849 return VMCI_ERROR_UNAVAILABLE; 850 851 headers[0] = *produce_q->kernel_if->u.h.header_page; 852 headers[1] = *consume_q->kernel_if->u.h.header_page; 853 854 produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL); 855 if (produce_q->q_header != NULL) { 856 consume_q->q_header = 857 (struct vmci_queue_header *)((u8 *) 858 produce_q->q_header + 859 PAGE_SIZE); 860 result = VMCI_SUCCESS; 861 } else { 862 pr_warn("vmap failed\n"); 863 result = VMCI_ERROR_NO_MEM; 864 } 865 } else { 866 result = VMCI_SUCCESS; 867 } 868 869 return result; 870 } 871 872 /* 873 * Unmaps previously mapped queue pair headers from the kernel. 874 * Pages are unpinned. 875 */ 876 static int qp_host_unmap_queues(u32 gid, 877 struct vmci_queue *produce_q, 878 struct vmci_queue *consume_q) 879 { 880 if (produce_q->q_header) { 881 if (produce_q->q_header < consume_q->q_header) 882 vunmap(produce_q->q_header); 883 else 884 vunmap(consume_q->q_header); 885 886 produce_q->q_header = NULL; 887 consume_q->q_header = NULL; 888 } 889 890 return VMCI_SUCCESS; 891 } 892 893 /* 894 * Finds the entry in the list corresponding to a given handle. Assumes 895 * that the list is locked. 896 */ 897 static struct qp_entry *qp_list_find(struct qp_list *qp_list, 898 struct vmci_handle handle) 899 { 900 struct qp_entry *entry; 901 902 if (vmci_handle_is_invalid(handle)) 903 return NULL; 904 905 list_for_each_entry(entry, &qp_list->head, list_item) { 906 if (vmci_handle_is_equal(entry->handle, handle)) 907 return entry; 908 } 909 910 return NULL; 911 } 912 913 /* 914 * Finds the entry in the list corresponding to a given handle. 915 */ 916 static struct qp_guest_endpoint * 917 qp_guest_handle_to_entry(struct vmci_handle handle) 918 { 919 struct qp_guest_endpoint *entry; 920 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); 921 922 entry = qp ? container_of( 923 qp, struct qp_guest_endpoint, qp) : NULL; 924 return entry; 925 } 926 927 /* 928 * Finds the entry in the list corresponding to a given handle. 929 */ 930 static struct qp_broker_entry * 931 qp_broker_handle_to_entry(struct vmci_handle handle) 932 { 933 struct qp_broker_entry *entry; 934 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); 935 936 entry = qp ? container_of( 937 qp, struct qp_broker_entry, qp) : NULL; 938 return entry; 939 } 940 941 /* 942 * Dispatches a queue pair event message directly into the local event 943 * queue. 944 */ 945 static int qp_notify_peer_local(bool attach, struct vmci_handle handle) 946 { 947 u32 context_id = vmci_get_context_id(); 948 struct vmci_event_qp ev; 949 950 ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); 951 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 952 VMCI_CONTEXT_RESOURCE_ID); 953 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); 954 ev.msg.event_data.event = 955 attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; 956 ev.payload.peer_id = context_id; 957 ev.payload.handle = handle; 958 959 return vmci_event_dispatch(&ev.msg.hdr); 960 } 961 962 /* 963 * Allocates and initializes a qp_guest_endpoint structure. 
964 * Allocates a queue_pair rid (and handle) iff the given entry has 965 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX 966 * are reserved handles. Assumes that the QP list mutex is held 967 * by the caller. 968 */ 969 static struct qp_guest_endpoint * 970 qp_guest_endpoint_create(struct vmci_handle handle, 971 u32 peer, 972 u32 flags, 973 u64 produce_size, 974 u64 consume_size, 975 void *produce_q, 976 void *consume_q) 977 { 978 int result; 979 struct qp_guest_endpoint *entry; 980 /* One page each for the queue headers. */ 981 const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) + 982 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2; 983 984 if (vmci_handle_is_invalid(handle)) { 985 u32 context_id = vmci_get_context_id(); 986 987 handle = vmci_make_handle(context_id, VMCI_INVALID_ID); 988 } 989 990 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 991 if (entry) { 992 entry->qp.peer = peer; 993 entry->qp.flags = flags; 994 entry->qp.produce_size = produce_size; 995 entry->qp.consume_size = consume_size; 996 entry->qp.ref_count = 0; 997 entry->num_ppns = num_ppns; 998 entry->produce_q = produce_q; 999 entry->consume_q = consume_q; 1000 INIT_LIST_HEAD(&entry->qp.list_item); 1001 1002 /* Add resource obj */ 1003 result = vmci_resource_add(&entry->resource, 1004 VMCI_RESOURCE_TYPE_QPAIR_GUEST, 1005 handle); 1006 entry->qp.handle = vmci_resource_handle(&entry->resource); 1007 if ((result != VMCI_SUCCESS) || 1008 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { 1009 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", 1010 handle.context, handle.resource, result); 1011 kfree(entry); 1012 entry = NULL; 1013 } 1014 } 1015 return entry; 1016 } 1017 1018 /* 1019 * Frees a qp_guest_endpoint structure. 1020 */ 1021 static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry) 1022 { 1023 qp_free_ppn_set(&entry->ppn_set); 1024 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 1025 qp_free_queue(entry->produce_q, entry->qp.produce_size); 1026 qp_free_queue(entry->consume_q, entry->qp.consume_size); 1027 /* Unlink from resource hash table and free callback */ 1028 vmci_resource_remove(&entry->resource); 1029 1030 kfree(entry); 1031 } 1032 1033 /* 1034 * Helper to make a queue_pairAlloc hypercall when the driver is 1035 * supporting a guest device. 
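 *
 * The datagram built below consists of the fixed-size allocation message
 * followed directly by the PPN list (layout sketch; field names as used
 * in the code below):
 *
 *	struct vmci_qp_alloc_msg   hdr, handle, peer, flags,
 *	                           produce_size, consume_size, num_ppns
 *	u32 ppns[num_ppns]         produce queue PPNs first, then the
 *	                           consume queue PPNs
 *
 *	msg_size = sizeof(struct vmci_qp_alloc_msg) + num_ppns * sizeof(u32);
 *
 * qp_populate_ppn_set() above is what appends the two PPN arrays in that
 * order.
 *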
1036 */ 1037 static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) 1038 { 1039 struct vmci_qp_alloc_msg *alloc_msg; 1040 size_t msg_size; 1041 int result; 1042 1043 if (!entry || entry->num_ppns <= 2) 1044 return VMCI_ERROR_INVALID_ARGS; 1045 1046 msg_size = sizeof(*alloc_msg) + 1047 (size_t) entry->num_ppns * sizeof(u32); 1048 alloc_msg = kmalloc(msg_size, GFP_KERNEL); 1049 if (!alloc_msg) 1050 return VMCI_ERROR_NO_MEM; 1051 1052 alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1053 VMCI_QUEUEPAIR_ALLOC); 1054 alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE; 1055 alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE; 1056 alloc_msg->handle = entry->qp.handle; 1057 alloc_msg->peer = entry->qp.peer; 1058 alloc_msg->flags = entry->qp.flags; 1059 alloc_msg->produce_size = entry->qp.produce_size; 1060 alloc_msg->consume_size = entry->qp.consume_size; 1061 alloc_msg->num_ppns = entry->num_ppns; 1062 1063 result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg), 1064 &entry->ppn_set); 1065 if (result == VMCI_SUCCESS) 1066 result = vmci_send_datagram(&alloc_msg->hdr); 1067 1068 kfree(alloc_msg); 1069 1070 return result; 1071 } 1072 1073 /* 1074 * Helper to make a queue_pairDetach hypercall when the driver is 1075 * supporting a guest device. 1076 */ 1077 static int qp_detatch_hypercall(struct vmci_handle handle) 1078 { 1079 struct vmci_qp_detach_msg detach_msg; 1080 1081 detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1082 VMCI_QUEUEPAIR_DETACH); 1083 detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE; 1084 detach_msg.hdr.payload_size = sizeof(handle); 1085 detach_msg.handle = handle; 1086 1087 return vmci_send_datagram(&detach_msg.hdr); 1088 } 1089 1090 /* 1091 * Adds the given entry to the list. Assumes that the list is locked. 1092 */ 1093 static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry) 1094 { 1095 if (entry) 1096 list_add(&entry->list_item, &qp_list->head); 1097 } 1098 1099 /* 1100 * Removes the given entry from the list. Assumes that the list is locked. 1101 */ 1102 static void qp_list_remove_entry(struct qp_list *qp_list, 1103 struct qp_entry *entry) 1104 { 1105 if (entry) 1106 list_del(&entry->list_item); 1107 } 1108 1109 /* 1110 * Helper for VMCI queue_pair detach interface. Frees the physical 1111 * pages for the queue pair. 1112 */ 1113 static int qp_detatch_guest_work(struct vmci_handle handle) 1114 { 1115 int result; 1116 struct qp_guest_endpoint *entry; 1117 u32 ref_count = ~0; /* To avoid compiler warning below */ 1118 1119 mutex_lock(&qp_guest_endpoints.mutex); 1120 1121 entry = qp_guest_handle_to_entry(handle); 1122 if (!entry) { 1123 mutex_unlock(&qp_guest_endpoints.mutex); 1124 return VMCI_ERROR_NOT_FOUND; 1125 } 1126 1127 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1128 result = VMCI_SUCCESS; 1129 1130 if (entry->qp.ref_count > 1) { 1131 result = qp_notify_peer_local(false, handle); 1132 /* 1133 * We can fail to notify a local queuepair 1134 * because we can't allocate. We still want 1135 * to release the entry if that happens, so 1136 * don't bail out yet. 1137 */ 1138 } 1139 } else { 1140 result = qp_detatch_hypercall(handle); 1141 if (result < VMCI_SUCCESS) { 1142 /* 1143 * We failed to notify a non-local queuepair. 1144 * That other queuepair might still be 1145 * accessing the shared memory, so don't 1146 * release the entry yet. It will get cleaned 1147 * up by VMCIqueue_pair_Exit() if necessary 1148 * (assuming we are going away, otherwise why 1149 * did this fail?). 
1150 */ 1151 1152 mutex_unlock(&qp_guest_endpoints.mutex); 1153 return result; 1154 } 1155 } 1156 1157 /* 1158 * If we get here then we either failed to notify a local queuepair, or 1159 * we succeeded in all cases. Release the entry if required. 1160 */ 1161 1162 entry->qp.ref_count--; 1163 if (entry->qp.ref_count == 0) 1164 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); 1165 1166 /* If we didn't remove the entry, this could change once we unlock. */ 1167 if (entry) 1168 ref_count = entry->qp.ref_count; 1169 1170 mutex_unlock(&qp_guest_endpoints.mutex); 1171 1172 if (ref_count == 0) 1173 qp_guest_endpoint_destroy(entry); 1174 1175 return result; 1176 } 1177 1178 /* 1179 * This functions handles the actual allocation of a VMCI queue 1180 * pair guest endpoint. Allocates physical pages for the queue 1181 * pair. It makes OS dependent calls through generic wrappers. 1182 */ 1183 static int qp_alloc_guest_work(struct vmci_handle *handle, 1184 struct vmci_queue **produce_q, 1185 u64 produce_size, 1186 struct vmci_queue **consume_q, 1187 u64 consume_size, 1188 u32 peer, 1189 u32 flags, 1190 u32 priv_flags) 1191 { 1192 const u64 num_produce_pages = 1193 DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1; 1194 const u64 num_consume_pages = 1195 DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1; 1196 void *my_produce_q = NULL; 1197 void *my_consume_q = NULL; 1198 int result; 1199 struct qp_guest_endpoint *queue_pair_entry = NULL; 1200 1201 if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS) 1202 return VMCI_ERROR_NO_ACCESS; 1203 1204 mutex_lock(&qp_guest_endpoints.mutex); 1205 1206 queue_pair_entry = qp_guest_handle_to_entry(*handle); 1207 if (queue_pair_entry) { 1208 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1209 /* Local attach case. */ 1210 if (queue_pair_entry->qp.ref_count > 1) { 1211 pr_devel("Error attempting to attach more than once\n"); 1212 result = VMCI_ERROR_UNAVAILABLE; 1213 goto error_keep_entry; 1214 } 1215 1216 if (queue_pair_entry->qp.produce_size != consume_size || 1217 queue_pair_entry->qp.consume_size != 1218 produce_size || 1219 queue_pair_entry->qp.flags != 1220 (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) { 1221 pr_devel("Error mismatched queue pair in local attach\n"); 1222 result = VMCI_ERROR_QUEUEPAIR_MISMATCH; 1223 goto error_keep_entry; 1224 } 1225 1226 /* 1227 * Do a local attach. We swap the consume and 1228 * produce queues for the attacher and deliver 1229 * an attach event. 
1230 */ 1231 result = qp_notify_peer_local(true, *handle); 1232 if (result < VMCI_SUCCESS) 1233 goto error_keep_entry; 1234 1235 my_produce_q = queue_pair_entry->consume_q; 1236 my_consume_q = queue_pair_entry->produce_q; 1237 goto out; 1238 } 1239 1240 result = VMCI_ERROR_ALREADY_EXISTS; 1241 goto error_keep_entry; 1242 } 1243 1244 my_produce_q = qp_alloc_queue(produce_size, flags); 1245 if (!my_produce_q) { 1246 pr_warn("Error allocating pages for produce queue\n"); 1247 result = VMCI_ERROR_NO_MEM; 1248 goto error; 1249 } 1250 1251 my_consume_q = qp_alloc_queue(consume_size, flags); 1252 if (!my_consume_q) { 1253 pr_warn("Error allocating pages for consume queue\n"); 1254 result = VMCI_ERROR_NO_MEM; 1255 goto error; 1256 } 1257 1258 queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags, 1259 produce_size, consume_size, 1260 my_produce_q, my_consume_q); 1261 if (!queue_pair_entry) { 1262 pr_warn("Error allocating memory in %s\n", __func__); 1263 result = VMCI_ERROR_NO_MEM; 1264 goto error; 1265 } 1266 1267 result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q, 1268 num_consume_pages, 1269 &queue_pair_entry->ppn_set); 1270 if (result < VMCI_SUCCESS) { 1271 pr_warn("qp_alloc_ppn_set failed\n"); 1272 goto error; 1273 } 1274 1275 /* 1276 * It's only necessary to notify the host if this queue pair will be 1277 * attached to from another context. 1278 */ 1279 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { 1280 /* Local create case. */ 1281 u32 context_id = vmci_get_context_id(); 1282 1283 /* 1284 * Enforce similar checks on local queue pairs as we 1285 * do for regular ones. The handle's context must 1286 * match the creator or attacher context id (here they 1287 * are both the current context id) and the 1288 * attach-only flag cannot exist during create. We 1289 * also ensure specified peer is this context or an 1290 * invalid one. 1291 */ 1292 if (queue_pair_entry->qp.handle.context != context_id || 1293 (queue_pair_entry->qp.peer != VMCI_INVALID_ID && 1294 queue_pair_entry->qp.peer != context_id)) { 1295 result = VMCI_ERROR_NO_ACCESS; 1296 goto error; 1297 } 1298 1299 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { 1300 result = VMCI_ERROR_NOT_FOUND; 1301 goto error; 1302 } 1303 } else { 1304 result = qp_alloc_hypercall(queue_pair_entry); 1305 if (result < VMCI_SUCCESS) { 1306 pr_warn("qp_alloc_hypercall result = %d\n", result); 1307 goto error; 1308 } 1309 } 1310 1311 qp_init_queue_mutex((struct vmci_queue *)my_produce_q, 1312 (struct vmci_queue *)my_consume_q); 1313 1314 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); 1315 1316 out: 1317 queue_pair_entry->qp.ref_count++; 1318 *handle = queue_pair_entry->qp.handle; 1319 *produce_q = (struct vmci_queue *)my_produce_q; 1320 *consume_q = (struct vmci_queue *)my_consume_q; 1321 1322 /* 1323 * We should initialize the queue pair header pages on a local 1324 * queue pair create. For non-local queue pairs, the 1325 * hypervisor initializes the header pages in the create step. 1326 */ 1327 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && 1328 queue_pair_entry->qp.ref_count == 1) { 1329 vmci_q_header_init((*produce_q)->q_header, *handle); 1330 vmci_q_header_init((*consume_q)->q_header, *handle); 1331 } 1332 1333 mutex_unlock(&qp_guest_endpoints.mutex); 1334 1335 return VMCI_SUCCESS; 1336 1337 error: 1338 mutex_unlock(&qp_guest_endpoints.mutex); 1339 if (queue_pair_entry) { 1340 /* The queues will be freed inside the destroy routine. 
*/ 1341 qp_guest_endpoint_destroy(queue_pair_entry); 1342 } else { 1343 qp_free_queue(my_produce_q, produce_size); 1344 qp_free_queue(my_consume_q, consume_size); 1345 } 1346 return result; 1347 1348 error_keep_entry: 1349 /* This path should only be used when an existing entry was found. */ 1350 mutex_unlock(&qp_guest_endpoints.mutex); 1351 return result; 1352 } 1353 1354 /* 1355 * The first endpoint issuing a queue pair allocation will create the state 1356 * of the queue pair in the queue pair broker. 1357 * 1358 * If the creator is a guest, it will associate a VMX virtual address range 1359 * with the queue pair as specified by the page_store. For compatibility with 1360 * older VMX'en, which used a separate step to set the VMX virtual 1361 * address range, the virtual address range can be registered later using 1362 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be 1363 * used. 1364 * 1365 * If the creator is the host, a page_store of NULL should be used as well, 1366 * since the host is not able to supply a page store for the queue pair. 1367 * 1368 * For older VMX and host callers, the queue pair will be created in the 1369 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be 1370 * created in the VMCIQPB_CREATED_MEM state. 1371 */ 1372 static int qp_broker_create(struct vmci_handle handle, 1373 u32 peer, 1374 u32 flags, 1375 u32 priv_flags, 1376 u64 produce_size, 1377 u64 consume_size, 1378 struct vmci_qp_page_store *page_store, 1379 struct vmci_ctx *context, 1380 vmci_event_release_cb wakeup_cb, 1381 void *client_data, struct qp_broker_entry **ent) 1382 { 1383 struct qp_broker_entry *entry = NULL; 1384 const u32 context_id = vmci_ctx_get_id(context); 1385 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1386 int result; 1387 u64 guest_produce_size; 1388 u64 guest_consume_size; 1389 1390 /* Do not create if the caller asked not to. */ 1391 if (flags & VMCI_QPFLAG_ATTACH_ONLY) 1392 return VMCI_ERROR_NOT_FOUND; 1393 1394 /* 1395 * Creator's context ID should match handle's context ID or the creator 1396 * must allow the context in handle's context ID as the "peer". 1397 */ 1398 if (handle.context != context_id && handle.context != peer) 1399 return VMCI_ERROR_NO_ACCESS; 1400 1401 if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer)) 1402 return VMCI_ERROR_DST_UNREACHABLE; 1403 1404 /* 1405 * Creator's context ID for local queue pairs should match the 1406 * peer, if a peer is specified. 1407 */ 1408 if (is_local && peer != VMCI_INVALID_ID && context_id != peer) 1409 return VMCI_ERROR_NO_ACCESS; 1410 1411 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 1412 if (!entry) 1413 return VMCI_ERROR_NO_MEM; 1414 1415 if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) { 1416 /* 1417 * The queue pair broker entry stores values from the guest 1418 * point of view, so a creating host side endpoint should swap 1419 * produce and consume values -- unless it is a local queue 1420 * pair, in which case no swapping is necessary, since the local 1421 * attacher will swap queues.
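 *
 * For example (illustrative sizes): if the host creates the pair with
 * produce_size = 8 KiB and consume_size = 4 KiB, the entry stores
 * guest_produce_size = 4 KiB and guest_consume_size = 8 KiB, so a guest
 * that later attaches with a 4 KiB produce queue and an 8 KiB consume
 * queue matches the stored values directly, while a host-side attacher
 * must swap them again (see qp_broker_attach()).
 *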
1422 */ 1423 1424 guest_produce_size = consume_size; 1425 guest_consume_size = produce_size; 1426 } else { 1427 guest_produce_size = produce_size; 1428 guest_consume_size = consume_size; 1429 } 1430 1431 entry->qp.handle = handle; 1432 entry->qp.peer = peer; 1433 entry->qp.flags = flags; 1434 entry->qp.produce_size = guest_produce_size; 1435 entry->qp.consume_size = guest_consume_size; 1436 entry->qp.ref_count = 1; 1437 entry->create_id = context_id; 1438 entry->attach_id = VMCI_INVALID_ID; 1439 entry->state = VMCIQPB_NEW; 1440 entry->require_trusted_attach = 1441 !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED); 1442 entry->created_by_trusted = 1443 !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED); 1444 entry->vmci_page_files = false; 1445 entry->wakeup_cb = wakeup_cb; 1446 entry->client_data = client_data; 1447 entry->produce_q = qp_host_alloc_queue(guest_produce_size); 1448 if (entry->produce_q == NULL) { 1449 result = VMCI_ERROR_NO_MEM; 1450 goto error; 1451 } 1452 entry->consume_q = qp_host_alloc_queue(guest_consume_size); 1453 if (entry->consume_q == NULL) { 1454 result = VMCI_ERROR_NO_MEM; 1455 goto error; 1456 } 1457 1458 qp_init_queue_mutex(entry->produce_q, entry->consume_q); 1459 1460 INIT_LIST_HEAD(&entry->qp.list_item); 1461 1462 if (is_local) { 1463 u8 *tmp; 1464 1465 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), 1466 PAGE_SIZE, GFP_KERNEL); 1467 if (entry->local_mem == NULL) { 1468 result = VMCI_ERROR_NO_MEM; 1469 goto error; 1470 } 1471 entry->state = VMCIQPB_CREATED_MEM; 1472 entry->produce_q->q_header = entry->local_mem; 1473 tmp = (u8 *)entry->local_mem + PAGE_SIZE * 1474 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); 1475 entry->consume_q->q_header = (struct vmci_queue_header *)tmp; 1476 } else if (page_store) { 1477 /* 1478 * The VMX already initialized the queue pair headers, so no 1479 * need for the kernel side to do that. 1480 */ 1481 result = qp_host_register_user_memory(page_store, 1482 entry->produce_q, 1483 entry->consume_q); 1484 if (result < VMCI_SUCCESS) 1485 goto error; 1486 1487 entry->state = VMCIQPB_CREATED_MEM; 1488 } else { 1489 /* 1490 * A create without a page_store may be either a host 1491 * side create (in which case we are waiting for the 1492 * guest side to supply the memory) or an old style 1493 * queue pair create (in which case we will expect a 1494 * set page store call as the next step). 
1495 */ 1496 entry->state = VMCIQPB_CREATED_NO_MEM; 1497 } 1498 1499 qp_list_add_entry(&qp_broker_list, &entry->qp); 1500 if (ent != NULL) 1501 *ent = entry; 1502 1503 /* Add to resource obj */ 1504 result = vmci_resource_add(&entry->resource, 1505 VMCI_RESOURCE_TYPE_QPAIR_HOST, 1506 handle); 1507 if (result != VMCI_SUCCESS) { 1508 pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d", 1509 handle.context, handle.resource, result); 1510 goto error; 1511 } 1512 1513 entry->qp.handle = vmci_resource_handle(&entry->resource); 1514 if (is_local) { 1515 vmci_q_header_init(entry->produce_q->q_header, 1516 entry->qp.handle); 1517 vmci_q_header_init(entry->consume_q->q_header, 1518 entry->qp.handle); 1519 } 1520 1521 vmci_ctx_qp_create(context, entry->qp.handle); 1522 1523 return VMCI_SUCCESS; 1524 1525 error: 1526 if (entry != NULL) { 1527 qp_host_free_queue(entry->produce_q, guest_produce_size); 1528 qp_host_free_queue(entry->consume_q, guest_consume_size); 1529 kfree(entry); 1530 } 1531 1532 return result; 1533 } 1534 1535 /* 1536 * Enqueues an event datagram to notify the peer VM attached to 1537 * the given queue pair handle about attach/detach event by the 1538 * given VM. Returns Payload size of datagram enqueued on 1539 * success, error code otherwise. 1540 */ 1541 static int qp_notify_peer(bool attach, 1542 struct vmci_handle handle, 1543 u32 my_id, 1544 u32 peer_id) 1545 { 1546 int rv; 1547 struct vmci_event_qp ev; 1548 1549 if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID || 1550 peer_id == VMCI_INVALID_ID) 1551 return VMCI_ERROR_INVALID_ARGS; 1552 1553 /* 1554 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on 1555 * number of pending events from the hypervisor to a given VM 1556 * otherwise a rogue VM could do an arbitrary number of attach 1557 * and detach operations causing memory pressure in the host 1558 * kernel. 1559 */ 1560 1561 ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); 1562 ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, 1563 VMCI_CONTEXT_RESOURCE_ID); 1564 ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); 1565 ev.msg.event_data.event = attach ? 1566 VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH; 1567 ev.payload.handle = handle; 1568 ev.payload.peer_id = my_id; 1569 1570 rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, 1571 &ev.msg.hdr, false); 1572 if (rv < VMCI_SUCCESS) 1573 pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n", 1574 attach ? "ATTACH" : "DETACH", peer_id); 1575 1576 return rv; 1577 } 1578 1579 /* 1580 * The second endpoint issuing a queue pair allocation will attach to 1581 * the queue pair registered with the queue pair broker. 1582 * 1583 * If the attacher is a guest, it will associate a VMX virtual address 1584 * range with the queue pair as specified by the page_store. At this 1585 * point, the already attach host endpoint may start using the queue 1586 * pair, and an attach event is sent to it. For compatibility with 1587 * older VMX'en, that used a separate step to set the VMX virtual 1588 * address range, the virtual address range can be registered later 1589 * using vmci_qp_broker_set_page_store. In that case, a page_store of 1590 * NULL should be used, and the attach event will be generated once 1591 * the actual page store has been set. 1592 * 1593 * If the attacher is the host, a page_store of NULL should be used as 1594 * well, since the page store information is already set by the guest. 
1595 * 1596 * For new VMX and host callers, the queue pair will be moved to the 1597 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be 1598 * moved to the VMCIQPB_ATTACHED_NO_MEM state. 1599 */ 1600 static int qp_broker_attach(struct qp_broker_entry *entry, 1601 u32 peer, 1602 u32 flags, 1603 u32 priv_flags, 1604 u64 produce_size, 1605 u64 consume_size, 1606 struct vmci_qp_page_store *page_store, 1607 struct vmci_ctx *context, 1608 vmci_event_release_cb wakeup_cb, 1609 void *client_data, 1610 struct qp_broker_entry **ent) 1611 { 1612 const u32 context_id = vmci_ctx_get_id(context); 1613 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1614 int result; 1615 1616 if (entry->state != VMCIQPB_CREATED_NO_MEM && 1617 entry->state != VMCIQPB_CREATED_MEM) 1618 return VMCI_ERROR_UNAVAILABLE; 1619 1620 if (is_local) { 1621 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || 1622 context_id != entry->create_id) { 1623 return VMCI_ERROR_INVALID_ARGS; 1624 } 1625 } else if (context_id == entry->create_id || 1626 context_id == entry->attach_id) { 1627 return VMCI_ERROR_ALREADY_EXISTS; 1628 } 1629 1630 if (VMCI_CONTEXT_IS_VM(context_id) && 1631 VMCI_CONTEXT_IS_VM(entry->create_id)) 1632 return VMCI_ERROR_DST_UNREACHABLE; 1633 1634 /* 1635 * If we are attaching from a restricted context then the queuepair 1636 * must have been created by a trusted endpoint. 1637 */ 1638 if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) && 1639 !entry->created_by_trusted) 1640 return VMCI_ERROR_NO_ACCESS; 1641 1642 /* 1643 * If we are attaching to a queuepair that was created by a restricted 1644 * context then we must be trusted. 1645 */ 1646 if (entry->require_trusted_attach && 1647 (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED))) 1648 return VMCI_ERROR_NO_ACCESS; 1649 1650 /* 1651 * If the creator specifies VMCI_INVALID_ID in the "peer" field, the access 1652 * control check is not performed. 1653 */ 1654 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) 1655 return VMCI_ERROR_NO_ACCESS; 1656 1657 if (entry->create_id == VMCI_HOST_CONTEXT_ID) { 1658 /* 1659 * Do not attach if the caller doesn't support Host Queue Pairs 1660 * and a host created this queue pair. 1661 */ 1662 1663 if (!vmci_ctx_supports_host_qp(context)) 1664 return VMCI_ERROR_INVALID_RESOURCE; 1665 1666 } else if (context_id == VMCI_HOST_CONTEXT_ID) { 1667 struct vmci_ctx *create_context; 1668 bool supports_host_qp; 1669 1670 /* 1671 * Do not attach a host to a user created queue pair if that 1672 * user doesn't support host queue pair end points. 1673 */ 1674 1675 create_context = vmci_ctx_get(entry->create_id); 1676 supports_host_qp = vmci_ctx_supports_host_qp(create_context); 1677 vmci_ctx_put(create_context); 1678 1679 if (!supports_host_qp) 1680 return VMCI_ERROR_INVALID_RESOURCE; 1681 } 1682 1683 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) 1684 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1685 1686 if (context_id != VMCI_HOST_CONTEXT_ID) { 1687 /* 1688 * The queue pair broker entry stores values from the guest 1689 * point of view, so an attaching guest should match the values 1690 * stored in the entry.
1691 */ 1692 1693 if (entry->qp.produce_size != produce_size || 1694 entry->qp.consume_size != consume_size) { 1695 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1696 } 1697 } else if (entry->qp.produce_size != consume_size || 1698 entry->qp.consume_size != produce_size) { 1699 return VMCI_ERROR_QUEUEPAIR_MISMATCH; 1700 } 1701 1702 if (context_id != VMCI_HOST_CONTEXT_ID) { 1703 /* 1704 * If a guest attached to a queue pair, it will supply 1705 * the backing memory. If this is a pre NOVMVM vmx, 1706 * the backing memory will be supplied by calling 1707 * vmci_qp_broker_set_page_store() following the 1708 * return of the vmci_qp_broker_alloc() call. If it is 1709 * a vmx of version NOVMVM or later, the page store 1710 * must be supplied as part of the 1711 * vmci_qp_broker_alloc call. Under all circumstances 1712 * must the initially created queue pair not have any 1713 * memory associated with it already. 1714 */ 1715 1716 if (entry->state != VMCIQPB_CREATED_NO_MEM) 1717 return VMCI_ERROR_INVALID_ARGS; 1718 1719 if (page_store != NULL) { 1720 /* 1721 * Patch up host state to point to guest 1722 * supplied memory. The VMX already 1723 * initialized the queue pair headers, so no 1724 * need for the kernel side to do that. 1725 */ 1726 1727 result = qp_host_register_user_memory(page_store, 1728 entry->produce_q, 1729 entry->consume_q); 1730 if (result < VMCI_SUCCESS) 1731 return result; 1732 1733 entry->state = VMCIQPB_ATTACHED_MEM; 1734 } else { 1735 entry->state = VMCIQPB_ATTACHED_NO_MEM; 1736 } 1737 } else if (entry->state == VMCIQPB_CREATED_NO_MEM) { 1738 /* 1739 * The host side is attempting to attach to a queue 1740 * pair that doesn't have any memory associated with 1741 * it. This must be a pre NOVMVM vmx that hasn't set 1742 * the page store information yet, or a quiesced VM. 1743 */ 1744 1745 return VMCI_ERROR_UNAVAILABLE; 1746 } else { 1747 /* The host side has successfully attached to a queue pair. */ 1748 entry->state = VMCIQPB_ATTACHED_MEM; 1749 } 1750 1751 if (entry->state == VMCIQPB_ATTACHED_MEM) { 1752 result = 1753 qp_notify_peer(true, entry->qp.handle, context_id, 1754 entry->create_id); 1755 if (result < VMCI_SUCCESS) 1756 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 1757 entry->create_id, entry->qp.handle.context, 1758 entry->qp.handle.resource); 1759 } 1760 1761 entry->attach_id = context_id; 1762 entry->qp.ref_count++; 1763 if (wakeup_cb) { 1764 entry->wakeup_cb = wakeup_cb; 1765 entry->client_data = client_data; 1766 } 1767 1768 /* 1769 * When attaching to local queue pairs, the context already has 1770 * an entry tracking the queue pair, so don't add another one. 1771 */ 1772 if (!is_local) 1773 vmci_ctx_qp_create(context, entry->qp.handle); 1774 1775 if (ent != NULL) 1776 *ent = entry; 1777 1778 return VMCI_SUCCESS; 1779 } 1780 1781 /* 1782 * queue_pair_Alloc for use when setting up queue pair endpoints 1783 * on the host. 
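 *
 * A hypothetical host-side client would reach this path through
 * vmci_qp_alloc() below, roughly as follows (illustrative sketch;
 * peer_cid, my_wakeup_cb and my_client_data are placeholders for the
 * caller's own values, error handling omitted):
 *
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *produce_q;
 *	struct vmci_queue *consume_q;
 *	int rv;
 *
 *	rv = vmci_qp_alloc(&handle, &produce_q, 2 * PAGE_SIZE,
 *			   &consume_q, 2 * PAGE_SIZE, peer_cid,
 *			   0, VMCI_NO_PRIVILEGE_FLAGS,
 *			   false, my_wakeup_cb, my_client_data);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *
 * Passing guest_endpoint == false routes the request through
 * qp_alloc_host_work() and into this broker path.
 *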
1784 */ 1785 static int qp_broker_alloc(struct vmci_handle handle, 1786 u32 peer, 1787 u32 flags, 1788 u32 priv_flags, 1789 u64 produce_size, 1790 u64 consume_size, 1791 struct vmci_qp_page_store *page_store, 1792 struct vmci_ctx *context, 1793 vmci_event_release_cb wakeup_cb, 1794 void *client_data, 1795 struct qp_broker_entry **ent, 1796 bool *swap) 1797 { 1798 const u32 context_id = vmci_ctx_get_id(context); 1799 bool create; 1800 struct qp_broker_entry *entry = NULL; 1801 bool is_local = flags & VMCI_QPFLAG_LOCAL; 1802 int result; 1803 1804 if (vmci_handle_is_invalid(handle) || 1805 (flags & ~VMCI_QP_ALL_FLAGS) || is_local || 1806 !(produce_size || consume_size) || 1807 !context || context_id == VMCI_INVALID_ID || 1808 handle.context == VMCI_INVALID_ID) { 1809 return VMCI_ERROR_INVALID_ARGS; 1810 } 1811 1812 if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store)) 1813 return VMCI_ERROR_INVALID_ARGS; 1814 1815 /* 1816 * In the initial argument check, we ensure that non-vmkernel hosts 1817 * are not allowed to create local queue pairs. 1818 */ 1819 1820 mutex_lock(&qp_broker_list.mutex); 1821 1822 if (!is_local && vmci_ctx_qp_exists(context, handle)) { 1823 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", 1824 context_id, handle.context, handle.resource); 1825 mutex_unlock(&qp_broker_list.mutex); 1826 return VMCI_ERROR_ALREADY_EXISTS; 1827 } 1828 1829 if (handle.resource != VMCI_INVALID_ID) 1830 entry = qp_broker_handle_to_entry(handle); 1831 1832 if (!entry) { 1833 create = true; 1834 result = 1835 qp_broker_create(handle, peer, flags, priv_flags, 1836 produce_size, consume_size, page_store, 1837 context, wakeup_cb, client_data, ent); 1838 } else { 1839 create = false; 1840 result = 1841 qp_broker_attach(entry, peer, flags, priv_flags, 1842 produce_size, consume_size, page_store, 1843 context, wakeup_cb, client_data, ent); 1844 } 1845 1846 mutex_unlock(&qp_broker_list.mutex); 1847 1848 if (swap) 1849 *swap = (context_id == VMCI_HOST_CONTEXT_ID) && 1850 !(create && is_local); 1851 1852 return result; 1853 } 1854 1855 /* 1856 * This function implements the kernel API for allocating a queue 1857 * pair. 1858 */ 1859 static int qp_alloc_host_work(struct vmci_handle *handle, 1860 struct vmci_queue **produce_q, 1861 u64 produce_size, 1862 struct vmci_queue **consume_q, 1863 u64 consume_size, 1864 u32 peer, 1865 u32 flags, 1866 u32 priv_flags, 1867 vmci_event_release_cb wakeup_cb, 1868 void *client_data) 1869 { 1870 struct vmci_handle new_handle; 1871 struct vmci_ctx *context; 1872 struct qp_broker_entry *entry; 1873 int result; 1874 bool swap; 1875 1876 if (vmci_handle_is_invalid(*handle)) { 1877 new_handle = vmci_make_handle( 1878 VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID); 1879 } else 1880 new_handle = *handle; 1881 1882 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1883 entry = NULL; 1884 result = 1885 qp_broker_alloc(new_handle, peer, flags, priv_flags, 1886 produce_size, consume_size, NULL, context, 1887 wakeup_cb, client_data, &entry, &swap); 1888 if (result == VMCI_SUCCESS) { 1889 if (swap) { 1890 /* 1891 * If this is a local queue pair, the attacher 1892 * will swap around produce and consume 1893 * queues. 
1894 */ 1895 1896 *produce_q = entry->consume_q; 1897 *consume_q = entry->produce_q; 1898 } else { 1899 *produce_q = entry->produce_q; 1900 *consume_q = entry->consume_q; 1901 } 1902 1903 *handle = vmci_resource_handle(&entry->resource); 1904 } else { 1905 *handle = VMCI_INVALID_HANDLE; 1906 pr_devel("queue pair broker failed to alloc (result=%d)\n", 1907 result); 1908 } 1909 vmci_ctx_put(context); 1910 return result; 1911 } 1912 1913 /* 1914 * Allocates a VMCI queue_pair. Only checks validity of input 1915 * arguments. The real work is done in the host or guest 1916 * specific function. 1917 */ 1918 int vmci_qp_alloc(struct vmci_handle *handle, 1919 struct vmci_queue **produce_q, 1920 u64 produce_size, 1921 struct vmci_queue **consume_q, 1922 u64 consume_size, 1923 u32 peer, 1924 u32 flags, 1925 u32 priv_flags, 1926 bool guest_endpoint, 1927 vmci_event_release_cb wakeup_cb, 1928 void *client_data) 1929 { 1930 if (!handle || !produce_q || !consume_q || 1931 (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS)) 1932 return VMCI_ERROR_INVALID_ARGS; 1933 1934 if (guest_endpoint) { 1935 return qp_alloc_guest_work(handle, produce_q, 1936 produce_size, consume_q, 1937 consume_size, peer, 1938 flags, priv_flags); 1939 } else { 1940 return qp_alloc_host_work(handle, produce_q, 1941 produce_size, consume_q, 1942 consume_size, peer, flags, 1943 priv_flags, wakeup_cb, client_data); 1944 } 1945 } 1946 1947 /* 1948 * This function implements the host kernel API for detaching from 1949 * a queue pair. 1950 */ 1951 static int qp_detatch_host_work(struct vmci_handle handle) 1952 { 1953 int result; 1954 struct vmci_ctx *context; 1955 1956 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID); 1957 1958 result = vmci_qp_broker_detach(handle, context); 1959 1960 vmci_ctx_put(context); 1961 return result; 1962 } 1963 1964 /* 1965 * Detaches from a VMCI queue_pair. Only checks validity of input argument. 1966 * Real work is done in the host or guest specific function. 1967 */ 1968 static int qp_detatch(struct vmci_handle handle, bool guest_endpoint) 1969 { 1970 if (vmci_handle_is_invalid(handle)) 1971 return VMCI_ERROR_INVALID_ARGS; 1972 1973 if (guest_endpoint) 1974 return qp_detatch_guest_work(handle); 1975 else 1976 return qp_detatch_host_work(handle); 1977 } 1978 1979 /* 1980 * Returns the entry from the head of the list. Assumes that the list is 1981 * locked. 1982 */ 1983 static struct qp_entry *qp_list_get_head(struct qp_list *qp_list) 1984 { 1985 if (!list_empty(&qp_list->head)) { 1986 struct qp_entry *entry = 1987 list_first_entry(&qp_list->head, struct qp_entry, 1988 list_item); 1989 return entry; 1990 } 1991 1992 return NULL; 1993 } 1994 1995 void vmci_qp_broker_exit(void) 1996 { 1997 struct qp_entry *entry; 1998 struct qp_broker_entry *be; 1999 2000 mutex_lock(&qp_broker_list.mutex); 2001 2002 while ((entry = qp_list_get_head(&qp_broker_list))) { 2003 be = (struct qp_broker_entry *)entry; 2004 2005 qp_list_remove_entry(&qp_broker_list, entry); 2006 kfree(be); 2007 } 2008 2009 mutex_unlock(&qp_broker_list.mutex); 2010 } 2011 2012 /* 2013 * Requests that a queue pair be allocated with the VMCI queue 2014 * pair broker. Allocates a queue pair entry if one does not 2015 * exist. Attaches to one if it exists, and retrieves the page 2016 * files backing that queue_pair. Assumes that the queue pair 2017 * broker lock is held. 
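 *
 * Hypothetical host-side usage sketch (not a call site in this file;
 * "gctx" and "page_store" are assumed to have been set up from a
 * guest's queue pair allocation request):
 *
 *	result = vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
 *				      produce_size, consume_size,
 *				      &page_store, gctx);
 *	if (result < VMCI_SUCCESS)
 *		pr_devel("qp broker alloc failed (result=%d)\n", result);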
2018 */
2019 int vmci_qp_broker_alloc(struct vmci_handle handle,
2020 u32 peer,
2021 u32 flags,
2022 u32 priv_flags,
2023 u64 produce_size,
2024 u64 consume_size,
2025 struct vmci_qp_page_store *page_store,
2026 struct vmci_ctx *context)
2027 {
2028 return qp_broker_alloc(handle, peer, flags, priv_flags,
2029 produce_size, consume_size,
2030 page_store, context, NULL, NULL, NULL, NULL);
2031 }
2032
2033 /*
2034 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
2035 * step to add the UVAs of the VMX mapping of the queue pair. This function
2036 * provides backwards compatibility with such VMX'en, and takes care of
2037 * registering the page store for a queue pair previously allocated by the
2038 * VMX during create or attach. This function will move the queue pair state
2039 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
2040 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
2041 * attached state with memory, the queue pair is ready to be used by the
2042 * host peer, and an attached event will be generated.
2043 *
2044 * Assumes that the queue pair broker lock is held.
2045 *
2046 * This function is only used by the hosted platform, since there is no
2047 * issue with backwards compatibility for vmkernel.
2048 */
2049 int vmci_qp_broker_set_page_store(struct vmci_handle handle,
2050 u64 produce_uva,
2051 u64 consume_uva,
2052 struct vmci_ctx *context)
2053 {
2054 struct qp_broker_entry *entry;
2055 int result;
2056 const u32 context_id = vmci_ctx_get_id(context);
2057
2058 if (vmci_handle_is_invalid(handle) || !context ||
2059 context_id == VMCI_INVALID_ID)
2060 return VMCI_ERROR_INVALID_ARGS;
2061
2062 /*
2063 * We only support guest to host queue pairs, so the VMX must
2064 * supply UVAs for the mapped page files.
2065 */
2066
2067 if (produce_uva == 0 || consume_uva == 0)
2068 return VMCI_ERROR_INVALID_ARGS;
2069
2070 mutex_lock(&qp_broker_list.mutex);
2071
2072 if (!vmci_ctx_qp_exists(context, handle)) {
2073 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2074 context_id, handle.context, handle.resource);
2075 result = VMCI_ERROR_NOT_FOUND;
2076 goto out;
2077 }
2078
2079 entry = qp_broker_handle_to_entry(handle);
2080 if (!entry) {
2081 result = VMCI_ERROR_NOT_FOUND;
2082 goto out;
2083 }
2084
2085 /*
2086 * If I'm the owner then I can set the page store.
2087 *
2088 * Or, if a host created the queue_pair and I'm the attached peer
2089 * then I can set the page store.
2090 */ 2091 if (entry->create_id != context_id && 2092 (entry->create_id != VMCI_HOST_CONTEXT_ID || 2093 entry->attach_id != context_id)) { 2094 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER; 2095 goto out; 2096 } 2097 2098 if (entry->state != VMCIQPB_CREATED_NO_MEM && 2099 entry->state != VMCIQPB_ATTACHED_NO_MEM) { 2100 result = VMCI_ERROR_UNAVAILABLE; 2101 goto out; 2102 } 2103 2104 result = qp_host_get_user_memory(produce_uva, consume_uva, 2105 entry->produce_q, entry->consume_q); 2106 if (result < VMCI_SUCCESS) 2107 goto out; 2108 2109 result = qp_host_map_queues(entry->produce_q, entry->consume_q); 2110 if (result < VMCI_SUCCESS) { 2111 qp_host_unregister_user_memory(entry->produce_q, 2112 entry->consume_q); 2113 goto out; 2114 } 2115 2116 if (entry->state == VMCIQPB_CREATED_NO_MEM) 2117 entry->state = VMCIQPB_CREATED_MEM; 2118 else 2119 entry->state = VMCIQPB_ATTACHED_MEM; 2120 2121 entry->vmci_page_files = true; 2122 2123 if (entry->state == VMCIQPB_ATTACHED_MEM) { 2124 result = 2125 qp_notify_peer(true, handle, context_id, entry->create_id); 2126 if (result < VMCI_SUCCESS) { 2127 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", 2128 entry->create_id, entry->qp.handle.context, 2129 entry->qp.handle.resource); 2130 } 2131 } 2132 2133 result = VMCI_SUCCESS; 2134 out: 2135 mutex_unlock(&qp_broker_list.mutex); 2136 return result; 2137 } 2138 2139 /* 2140 * Resets saved queue headers for the given QP broker 2141 * entry. Should be used when guest memory becomes available 2142 * again, or the guest detaches. 2143 */ 2144 static void qp_reset_saved_headers(struct qp_broker_entry *entry) 2145 { 2146 entry->produce_q->saved_header = NULL; 2147 entry->consume_q->saved_header = NULL; 2148 } 2149 2150 /* 2151 * The main entry point for detaching from a queue pair registered with the 2152 * queue pair broker. If more than one endpoint is attached to the queue 2153 * pair, the first endpoint will mainly decrement a reference count and 2154 * generate a notification to its peer. The last endpoint will clean up 2155 * the queue pair state registered with the broker. 2156 * 2157 * When a guest endpoint detaches, it will unmap and unregister the guest 2158 * memory backing the queue pair. If the host is still attached, it will 2159 * no longer be able to access the queue pair content. 2160 * 2161 * If the queue pair is already in a state where there is no memory 2162 * registered for the queue pair (any *_NO_MEM state), it will transition to 2163 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest 2164 * endpoint is the first of two endpoints to detach. If the host endpoint is 2165 * the first out of two to detach, the queue pair will move to the 2166 * VMCIQPB_SHUTDOWN_MEM state. 
2167 */ 2168 int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context) 2169 { 2170 struct qp_broker_entry *entry; 2171 const u32 context_id = vmci_ctx_get_id(context); 2172 u32 peer_id; 2173 bool is_local = false; 2174 int result; 2175 2176 if (vmci_handle_is_invalid(handle) || !context || 2177 context_id == VMCI_INVALID_ID) { 2178 return VMCI_ERROR_INVALID_ARGS; 2179 } 2180 2181 mutex_lock(&qp_broker_list.mutex); 2182 2183 if (!vmci_ctx_qp_exists(context, handle)) { 2184 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2185 context_id, handle.context, handle.resource); 2186 result = VMCI_ERROR_NOT_FOUND; 2187 goto out; 2188 } 2189 2190 entry = qp_broker_handle_to_entry(handle); 2191 if (!entry) { 2192 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", 2193 context_id, handle.context, handle.resource); 2194 result = VMCI_ERROR_NOT_FOUND; 2195 goto out; 2196 } 2197 2198 if (context_id != entry->create_id && context_id != entry->attach_id) { 2199 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2200 goto out; 2201 } 2202 2203 if (context_id == entry->create_id) { 2204 peer_id = entry->attach_id; 2205 entry->create_id = VMCI_INVALID_ID; 2206 } else { 2207 peer_id = entry->create_id; 2208 entry->attach_id = VMCI_INVALID_ID; 2209 } 2210 entry->qp.ref_count--; 2211 2212 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2213 2214 if (context_id != VMCI_HOST_CONTEXT_ID) { 2215 bool headers_mapped; 2216 2217 /* 2218 * Pre NOVMVM vmx'en may detach from a queue pair 2219 * before setting the page store, and in that case 2220 * there is no user memory to detach from. Also, more 2221 * recent VMX'en may detach from a queue pair in the 2222 * quiesced state. 
2223 */ 2224 2225 qp_acquire_queue_mutex(entry->produce_q); 2226 headers_mapped = entry->produce_q->q_header || 2227 entry->consume_q->q_header; 2228 if (QPBROKERSTATE_HAS_MEM(entry)) { 2229 result = 2230 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID, 2231 entry->produce_q, 2232 entry->consume_q); 2233 if (result < VMCI_SUCCESS) 2234 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2235 handle.context, handle.resource, 2236 result); 2237 2238 qp_host_unregister_user_memory(entry->produce_q, 2239 entry->consume_q); 2240 2241 } 2242 2243 if (!headers_mapped) 2244 qp_reset_saved_headers(entry); 2245 2246 qp_release_queue_mutex(entry->produce_q); 2247 2248 if (!headers_mapped && entry->wakeup_cb) 2249 entry->wakeup_cb(entry->client_data); 2250 2251 } else { 2252 if (entry->wakeup_cb) { 2253 entry->wakeup_cb = NULL; 2254 entry->client_data = NULL; 2255 } 2256 } 2257 2258 if (entry->qp.ref_count == 0) { 2259 qp_list_remove_entry(&qp_broker_list, &entry->qp); 2260 2261 if (is_local) 2262 kfree(entry->local_mem); 2263 2264 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q); 2265 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); 2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); 2267 /* Unlink from resource hash table and free callback */ 2268 vmci_resource_remove(&entry->resource); 2269 2270 kfree(entry); 2271 2272 vmci_ctx_qp_destroy(context, handle); 2273 } else { 2274 qp_notify_peer(false, handle, context_id, peer_id); 2275 if (context_id == VMCI_HOST_CONTEXT_ID && 2276 QPBROKERSTATE_HAS_MEM(entry)) { 2277 entry->state = VMCIQPB_SHUTDOWN_MEM; 2278 } else { 2279 entry->state = VMCIQPB_SHUTDOWN_NO_MEM; 2280 } 2281 2282 if (!is_local) 2283 vmci_ctx_qp_destroy(context, handle); 2284 2285 } 2286 result = VMCI_SUCCESS; 2287 out: 2288 mutex_unlock(&qp_broker_list.mutex); 2289 return result; 2290 } 2291 2292 /* 2293 * Establishes the necessary mappings for a queue pair given a 2294 * reference to the queue pair guest memory. This is usually 2295 * called when a guest is unquiesced and the VMX is allowed to 2296 * map guest memory once again. 
2297 */
2298 int vmci_qp_broker_map(struct vmci_handle handle,
2299 struct vmci_ctx *context,
2300 u64 guest_mem)
2301 {
2302 struct qp_broker_entry *entry;
2303 const u32 context_id = vmci_ctx_get_id(context);
2304 bool is_local = false;
2305 int result;
2306
2307 if (vmci_handle_is_invalid(handle) || !context ||
2308 context_id == VMCI_INVALID_ID)
2309 return VMCI_ERROR_INVALID_ARGS;
2310
2311 mutex_lock(&qp_broker_list.mutex);
2312
2313 if (!vmci_ctx_qp_exists(context, handle)) {
2314 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2315 context_id, handle.context, handle.resource);
2316 result = VMCI_ERROR_NOT_FOUND;
2317 goto out;
2318 }
2319
2320 entry = qp_broker_handle_to_entry(handle);
2321 if (!entry) {
2322 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2323 context_id, handle.context, handle.resource);
2324 result = VMCI_ERROR_NOT_FOUND;
2325 goto out;
2326 }
2327
2328 if (context_id != entry->create_id && context_id != entry->attach_id) {
2329 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2330 goto out;
2331 }
2332
2333 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2334 result = VMCI_SUCCESS;
2335
2336 if (context_id != VMCI_HOST_CONTEXT_ID) {
2337 struct vmci_qp_page_store page_store;
2338
2339 page_store.pages = guest_mem;
2340 page_store.len = QPE_NUM_PAGES(entry->qp);
2341
2342 qp_acquire_queue_mutex(entry->produce_q);
2343 qp_reset_saved_headers(entry);
2344 result =
2345 qp_host_register_user_memory(&page_store,
2346 entry->produce_q,
2347 entry->consume_q);
2348 qp_release_queue_mutex(entry->produce_q);
2349 if (result == VMCI_SUCCESS) {
2350 /* Move state from *_NO_MEM to *_MEM */
2351
2352 entry->state++;
2353
2354 if (entry->wakeup_cb)
2355 entry->wakeup_cb(entry->client_data);
2356 }
2357 }
2358
2359 out:
2360 mutex_unlock(&qp_broker_list.mutex);
2361 return result;
2362 }
2363
2364 /*
2365 * Saves a snapshot of the queue headers for the given QP broker
2366 * entry. Should be used when guest memory is unmapped.
2367 * Results:
2368 * VMCI_SUCCESS on success, appropriate error code if guest memory
2369 * can't be accessed.
2370 */
2371 static int qp_save_headers(struct qp_broker_entry *entry)
2372 {
2373 int result;
2374
2375 if (entry->produce_q->saved_header != NULL &&
2376 entry->consume_q->saved_header != NULL) {
2377 /*
2378 * If the headers have already been saved, we don't need to do
2379 * it again, and we don't want to map in the headers
2380 * unnecessarily.
2381 */
2382
2383 return VMCI_SUCCESS;
2384 }
2385
2386 if (NULL == entry->produce_q->q_header ||
2387 NULL == entry->consume_q->q_header) {
2388 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2389 if (result < VMCI_SUCCESS)
2390 return result;
2391 }
2392
2393 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2394 sizeof(entry->saved_produce_q));
2395 entry->produce_q->saved_header = &entry->saved_produce_q;
2396 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2397 sizeof(entry->saved_consume_q));
2398 entry->consume_q->saved_header = &entry->saved_consume_q;
2399
2400 return VMCI_SUCCESS;
2401 }
2402
2403 /*
2404 * Removes all references to the guest memory of a given queue pair, and
2405 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2406 * called when a VM is being quiesced, where access to guest memory should
2407 * be avoided.
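 *
 * A minimal quiesce/resume sketch (assumed VMX-facing caller, not part
 * of this file); "mem_id" and "guest_mem" are placeholders for the
 * identifiers the caller already tracks:
 *
 *	vmci_qp_broker_unmap(handle, context, mem_id);    guest stunned
 *	...
 *	vmci_qp_broker_map(handle, context, guest_mem);   guest resumed
 *
 * Between the two calls the queue pair sits in a *_NO_MEM state and
 * host-side index queries fall back to the saved queue headers.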
2408 */ 2409 int vmci_qp_broker_unmap(struct vmci_handle handle, 2410 struct vmci_ctx *context, 2411 u32 gid) 2412 { 2413 struct qp_broker_entry *entry; 2414 const u32 context_id = vmci_ctx_get_id(context); 2415 bool is_local = false; 2416 int result; 2417 2418 if (vmci_handle_is_invalid(handle) || !context || 2419 context_id == VMCI_INVALID_ID) 2420 return VMCI_ERROR_INVALID_ARGS; 2421 2422 mutex_lock(&qp_broker_list.mutex); 2423 2424 if (!vmci_ctx_qp_exists(context, handle)) { 2425 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", 2426 context_id, handle.context, handle.resource); 2427 result = VMCI_ERROR_NOT_FOUND; 2428 goto out; 2429 } 2430 2431 entry = qp_broker_handle_to_entry(handle); 2432 if (!entry) { 2433 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", 2434 context_id, handle.context, handle.resource); 2435 result = VMCI_ERROR_NOT_FOUND; 2436 goto out; 2437 } 2438 2439 if (context_id != entry->create_id && context_id != entry->attach_id) { 2440 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2441 goto out; 2442 } 2443 2444 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; 2445 2446 if (context_id != VMCI_HOST_CONTEXT_ID) { 2447 qp_acquire_queue_mutex(entry->produce_q); 2448 result = qp_save_headers(entry); 2449 if (result < VMCI_SUCCESS) 2450 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", 2451 handle.context, handle.resource, result); 2452 2453 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q); 2454 2455 /* 2456 * On hosted, when we unmap queue pairs, the VMX will also 2457 * unmap the guest memory, so we invalidate the previously 2458 * registered memory. If the queue pair is mapped again at a 2459 * later point in time, we will need to reregister the user 2460 * memory with a possibly new user VA. 2461 */ 2462 qp_host_unregister_user_memory(entry->produce_q, 2463 entry->consume_q); 2464 2465 /* 2466 * Move state from *_MEM to *_NO_MEM. 2467 */ 2468 entry->state--; 2469 2470 qp_release_queue_mutex(entry->produce_q); 2471 } 2472 2473 result = VMCI_SUCCESS; 2474 2475 out: 2476 mutex_unlock(&qp_broker_list.mutex); 2477 return result; 2478 } 2479 2480 /* 2481 * Destroys all guest queue pair endpoints. If active guest queue 2482 * pairs still exist, hypercalls to attempt detach from these 2483 * queue pairs will be made. Any failure to detach is silently 2484 * ignored. 2485 */ 2486 void vmci_qp_guest_endpoints_exit(void) 2487 { 2488 struct qp_entry *entry; 2489 struct qp_guest_endpoint *ep; 2490 2491 mutex_lock(&qp_guest_endpoints.mutex); 2492 2493 while ((entry = qp_list_get_head(&qp_guest_endpoints))) { 2494 ep = (struct qp_guest_endpoint *)entry; 2495 2496 /* Don't make a hypercall for local queue_pairs. */ 2497 if (!(entry->flags & VMCI_QPFLAG_LOCAL)) 2498 qp_detatch_hypercall(entry->handle); 2499 2500 /* We cannot fail the exit, so let's reset ref_count. */ 2501 entry->ref_count = 0; 2502 qp_list_remove_entry(&qp_guest_endpoints, entry); 2503 2504 qp_guest_endpoint_destroy(ep); 2505 } 2506 2507 mutex_unlock(&qp_guest_endpoints.mutex); 2508 } 2509 2510 /* 2511 * Helper routine that will lock the queue pair before subsequent 2512 * operations. 2513 * Note: Non-blocking on the host side is currently only implemented in ESX. 2514 * Since non-blocking isn't yet implemented on the host personality we 2515 * have no reason to acquire a spin lock. So to avoid the use of an 2516 * unnecessary lock only acquire the mutex if we can block. 
2517 */ 2518 static void qp_lock(const struct vmci_qp *qpair) 2519 { 2520 qp_acquire_queue_mutex(qpair->produce_q); 2521 } 2522 2523 /* 2524 * Helper routine that unlocks the queue pair after calling 2525 * qp_lock. 2526 */ 2527 static void qp_unlock(const struct vmci_qp *qpair) 2528 { 2529 qp_release_queue_mutex(qpair->produce_q); 2530 } 2531 2532 /* 2533 * The queue headers may not be mapped at all times. If a queue is 2534 * currently not mapped, it will be attempted to do so. 2535 */ 2536 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2537 struct vmci_queue *consume_q) 2538 { 2539 int result; 2540 2541 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2542 result = qp_host_map_queues(produce_q, consume_q); 2543 if (result < VMCI_SUCCESS) 2544 return (produce_q->saved_header && 2545 consume_q->saved_header) ? 2546 VMCI_ERROR_QUEUEPAIR_NOT_READY : 2547 VMCI_ERROR_QUEUEPAIR_NOTATTACHED; 2548 } 2549 2550 return VMCI_SUCCESS; 2551 } 2552 2553 /* 2554 * Helper routine that will retrieve the produce and consume 2555 * headers of a given queue pair. If the guest memory of the 2556 * queue pair is currently not available, the saved queue headers 2557 * will be returned, if these are available. 2558 */ 2559 static int qp_get_queue_headers(const struct vmci_qp *qpair, 2560 struct vmci_queue_header **produce_q_header, 2561 struct vmci_queue_header **consume_q_header) 2562 { 2563 int result; 2564 2565 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2566 if (result == VMCI_SUCCESS) { 2567 *produce_q_header = qpair->produce_q->q_header; 2568 *consume_q_header = qpair->consume_q->q_header; 2569 } else if (qpair->produce_q->saved_header && 2570 qpair->consume_q->saved_header) { 2571 *produce_q_header = qpair->produce_q->saved_header; 2572 *consume_q_header = qpair->consume_q->saved_header; 2573 result = VMCI_SUCCESS; 2574 } 2575 2576 return result; 2577 } 2578 2579 /* 2580 * Callback from VMCI queue pair broker indicating that a queue 2581 * pair that was previously not ready, now either is ready or 2582 * gone forever. 2583 */ 2584 static int qp_wakeup_cb(void *client_data) 2585 { 2586 struct vmci_qp *qpair = (struct vmci_qp *)client_data; 2587 2588 qp_lock(qpair); 2589 while (qpair->blocked > 0) { 2590 qpair->blocked--; 2591 qpair->generation++; 2592 wake_up(&qpair->event); 2593 } 2594 qp_unlock(qpair); 2595 2596 return VMCI_SUCCESS; 2597 } 2598 2599 /* 2600 * Makes the calling thread wait for the queue pair to become 2601 * ready for host side access. Returns true when thread is 2602 * woken up after queue pair state change, false otherwise. 2603 */ 2604 static bool qp_wait_for_ready_queue(struct vmci_qp *qpair) 2605 { 2606 unsigned int generation; 2607 2608 qpair->blocked++; 2609 generation = qpair->generation; 2610 qp_unlock(qpair); 2611 wait_event(qpair->event, generation != qpair->generation); 2612 qp_lock(qpair); 2613 2614 return true; 2615 } 2616 2617 /* 2618 * Enqueues a given buffer to the produce queue using the provided 2619 * function. As many bytes as possible (space available in the queue) 2620 * are enqueued. Assumes the queue->mutex has been acquired. Returns 2621 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue 2622 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the 2623 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if 2624 * an error occured when accessing the buffer, 2625 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't 2626 * available. 
Otherwise, the number of bytes written to the queue is 2627 * returned. Updates the tail pointer of the produce queue. 2628 */ 2629 static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q, 2630 struct vmci_queue *consume_q, 2631 const u64 produce_q_size, 2632 const void *buf, 2633 size_t buf_size, 2634 vmci_memcpy_to_queue_func memcpy_to_queue) 2635 { 2636 s64 free_space; 2637 u64 tail; 2638 size_t written; 2639 ssize_t result; 2640 2641 result = qp_map_queue_headers(produce_q, consume_q); 2642 if (unlikely(result != VMCI_SUCCESS)) 2643 return result; 2644 2645 free_space = vmci_q_header_free_space(produce_q->q_header, 2646 consume_q->q_header, 2647 produce_q_size); 2648 if (free_space == 0) 2649 return VMCI_ERROR_QUEUEPAIR_NOSPACE; 2650 2651 if (free_space < VMCI_SUCCESS) 2652 return (ssize_t) free_space; 2653 2654 written = (size_t) (free_space > buf_size ? buf_size : free_space); 2655 tail = vmci_q_header_producer_tail(produce_q->q_header); 2656 if (likely(tail + written < produce_q_size)) { 2657 result = memcpy_to_queue(produce_q, tail, buf, 0, written); 2658 } else { 2659 /* Tail pointer wraps around. */ 2660 2661 const size_t tmp = (size_t) (produce_q_size - tail); 2662 2663 result = memcpy_to_queue(produce_q, tail, buf, 0, tmp); 2664 if (result >= VMCI_SUCCESS) 2665 result = memcpy_to_queue(produce_q, 0, buf, tmp, 2666 written - tmp); 2667 } 2668 2669 if (result < VMCI_SUCCESS) 2670 return result; 2671 2672 vmci_q_header_add_producer_tail(produce_q->q_header, written, 2673 produce_q_size); 2674 return written; 2675 } 2676 2677 /* 2678 * Dequeues data (if available) from the given consume queue. Writes data 2679 * to the user provided buffer using the provided function. 2680 * Assumes the queue->mutex has been acquired. 2681 * Results: 2682 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue. 2683 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue 2684 * (as defined by the queue size). 2685 * VMCI_ERROR_INVALID_ARGS, if an error occured when accessing the buffer. 2686 * Otherwise the number of bytes dequeued is returned. 2687 * Side effects: 2688 * Updates the head pointer of the consume queue. 2689 */ 2690 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q, 2691 struct vmci_queue *consume_q, 2692 const u64 consume_q_size, 2693 void *buf, 2694 size_t buf_size, 2695 vmci_memcpy_from_queue_func memcpy_from_queue, 2696 bool update_consumer) 2697 { 2698 s64 buf_ready; 2699 u64 head; 2700 size_t read; 2701 ssize_t result; 2702 2703 result = qp_map_queue_headers(produce_q, consume_q); 2704 if (unlikely(result != VMCI_SUCCESS)) 2705 return result; 2706 2707 buf_ready = vmci_q_header_buf_ready(consume_q->q_header, 2708 produce_q->q_header, 2709 consume_q_size); 2710 if (buf_ready == 0) 2711 return VMCI_ERROR_QUEUEPAIR_NODATA; 2712 2713 if (buf_ready < VMCI_SUCCESS) 2714 return (ssize_t) buf_ready; 2715 2716 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready); 2717 head = vmci_q_header_consumer_head(produce_q->q_header); 2718 if (likely(head + read < consume_q_size)) { 2719 result = memcpy_from_queue(buf, 0, consume_q, head, read); 2720 } else { 2721 /* Head pointer wraps around. 
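 *
 * Worked example (illustrative numbers): with consume_q_size = 4096,
 * head = 3584 and read = 1024, tmp is 512, so the first 512 bytes are
 * copied from offset 3584 and the remaining 512 bytes from offset 0.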
*/
2722
2723 const size_t tmp = (size_t) (consume_q_size - head);
2724
2725 result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
2726 if (result >= VMCI_SUCCESS)
2727 result = memcpy_from_queue(buf, tmp, consume_q, 0,
2728 read - tmp);
2729
2730 }
2731
2732 if (result < VMCI_SUCCESS)
2733 return result;
2734
2735 if (update_consumer)
2736 vmci_q_header_add_consumer_head(produce_q->q_header,
2737 read, consume_q_size);
2738
2739 return read;
2740 }
2741
2742 /*
2743 * vmci_qpair_alloc() - Allocates a queue pair.
2744 * @qpair: Pointer for the new vmci_qp struct.
2745 * @handle: Handle to track the resource.
2746 * @produce_qsize: Desired size of the producer queue.
2747 * @consume_qsize: Desired size of the consumer queue.
2748 * @peer: ContextID of the peer.
2749 * @flags: VMCI flags.
2750 * @priv_flags: VMCI privilege flags.
2751 *
2752 * This is the client interface for allocating the memory for a
2753 * vmci_qp structure and then attaching to the underlying
2754 * queue. If an error occurs allocating the memory for the
2755 * vmci_qp structure, no attempt is made to attach. If an
2756 * error occurs attaching, then the structure is freed.
2757 */
2758 int vmci_qpair_alloc(struct vmci_qp **qpair,
2759 struct vmci_handle *handle,
2760 u64 produce_qsize,
2761 u64 consume_qsize,
2762 u32 peer,
2763 u32 flags,
2764 u32 priv_flags)
2765 {
2766 struct vmci_qp *my_qpair;
2767 int retval;
2768 struct vmci_handle src = VMCI_INVALID_HANDLE;
2769 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2770 enum vmci_route route;
2771 vmci_event_release_cb wakeup_cb;
2772 void *client_data;
2773
2774 /*
2775 * Restrict the size of a queuepair. The device already
2776 * enforces a limit on the total amount of memory that can be
2777 * allocated to queuepairs for a guest. However, we try to
2778 * allocate this memory before we make the queuepair
2779 * allocation hypercall. On Linux, we allocate each page
2780 * separately, which means rather than fail, the guest will
2781 * thrash while it tries to allocate, and will become
2782 * increasingly unresponsive to the point where it appears to
2783 * be hung. So we place a limit on the size of an individual
2784 * queuepair here, and leave the device to enforce the
2785 * restriction on total queuepair memory. (Note that this
2786 * doesn't prevent all cases; a user with only this much
2787 * physical memory could still get into trouble.) The error
2788 * used by the device is NO_RESOURCES, so use that here too.
2789 */
2790
2791 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2792 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2793 return VMCI_ERROR_NO_RESOURCES;
2794
2795 retval = vmci_route(&src, &dst, false, &route);
2796 if (retval < VMCI_SUCCESS)
2797 route = vmci_guest_code_active() ?
2798 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2799 2800 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2801 pr_devel("NONBLOCK OR PINNED set"); 2802 return VMCI_ERROR_INVALID_ARGS; 2803 } 2804 2805 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); 2806 if (!my_qpair) 2807 return VMCI_ERROR_NO_MEM; 2808 2809 my_qpair->produce_q_size = produce_qsize; 2810 my_qpair->consume_q_size = consume_qsize; 2811 my_qpair->peer = peer; 2812 my_qpair->flags = flags; 2813 my_qpair->priv_flags = priv_flags; 2814 2815 wakeup_cb = NULL; 2816 client_data = NULL; 2817 2818 if (VMCI_ROUTE_AS_HOST == route) { 2819 my_qpair->guest_endpoint = false; 2820 if (!(flags & VMCI_QPFLAG_LOCAL)) { 2821 my_qpair->blocked = 0; 2822 my_qpair->generation = 0; 2823 init_waitqueue_head(&my_qpair->event); 2824 wakeup_cb = qp_wakeup_cb; 2825 client_data = (void *)my_qpair; 2826 } 2827 } else { 2828 my_qpair->guest_endpoint = true; 2829 } 2830 2831 retval = vmci_qp_alloc(handle, 2832 &my_qpair->produce_q, 2833 my_qpair->produce_q_size, 2834 &my_qpair->consume_q, 2835 my_qpair->consume_q_size, 2836 my_qpair->peer, 2837 my_qpair->flags, 2838 my_qpair->priv_flags, 2839 my_qpair->guest_endpoint, 2840 wakeup_cb, client_data); 2841 2842 if (retval < VMCI_SUCCESS) { 2843 kfree(my_qpair); 2844 return retval; 2845 } 2846 2847 *qpair = my_qpair; 2848 my_qpair->handle = *handle; 2849 2850 return retval; 2851 } 2852 EXPORT_SYMBOL_GPL(vmci_qpair_alloc); 2853 2854 /* 2855 * vmci_qpair_detach() - Detatches the client from a queue pair. 2856 * @qpair: Reference of a pointer to the qpair struct. 2857 * 2858 * This is the client interface for detaching from a VMCIQPair. 2859 * Note that this routine will free the memory allocated for the 2860 * vmci_qp structure too. 2861 */ 2862 int vmci_qpair_detach(struct vmci_qp **qpair) 2863 { 2864 int result; 2865 struct vmci_qp *old_qpair; 2866 2867 if (!qpair || !(*qpair)) 2868 return VMCI_ERROR_INVALID_ARGS; 2869 2870 old_qpair = *qpair; 2871 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint); 2872 2873 /* 2874 * The guest can fail to detach for a number of reasons, and 2875 * if it does so, it will cleanup the entry (if there is one). 2876 * The host can fail too, but it won't cleanup the entry 2877 * immediately, it will do that later when the context is 2878 * freed. Either way, we need to release the qpair struct 2879 * here; there isn't much the caller can do, and we don't want 2880 * to leak. 2881 */ 2882 2883 memset(old_qpair, 0, sizeof(*old_qpair)); 2884 old_qpair->handle = VMCI_INVALID_HANDLE; 2885 old_qpair->peer = VMCI_INVALID_ID; 2886 kfree(old_qpair); 2887 *qpair = NULL; 2888 2889 return result; 2890 } 2891 EXPORT_SYMBOL_GPL(vmci_qpair_detach); 2892 2893 /* 2894 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer. 2895 * @qpair: Pointer to the queue pair struct. 2896 * @producer_tail: Reference used for storing producer tail index. 2897 * @consumer_head: Reference used for storing the consumer head index. 2898 * 2899 * This is the client interface for getting the current indexes of the 2900 * QPair from the point of the view of the caller as the producer. 
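 *
 * Hedged usage sketch (caller code assumed, not taken from this file):
 *
 *	u64 tail, head;
 *	int err;
 *
 *	err = vmci_qpair_get_produce_indexes(qpair, &tail, &head);
 *	if (err == VMCI_SUCCESS)
 *		pr_devel("producer tail=%llu, consumer head=%llu\n",
 *			 tail, head);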
2901 */ 2902 int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair, 2903 u64 *producer_tail, 2904 u64 *consumer_head) 2905 { 2906 struct vmci_queue_header *produce_q_header; 2907 struct vmci_queue_header *consume_q_header; 2908 int result; 2909 2910 if (!qpair) 2911 return VMCI_ERROR_INVALID_ARGS; 2912 2913 qp_lock(qpair); 2914 result = 2915 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2916 if (result == VMCI_SUCCESS) 2917 vmci_q_header_get_pointers(produce_q_header, consume_q_header, 2918 producer_tail, consumer_head); 2919 qp_unlock(qpair); 2920 2921 if (result == VMCI_SUCCESS && 2922 ((producer_tail && *producer_tail >= qpair->produce_q_size) || 2923 (consumer_head && *consumer_head >= qpair->produce_q_size))) 2924 return VMCI_ERROR_INVALID_SIZE; 2925 2926 return result; 2927 } 2928 EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes); 2929 2930 /* 2931 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer. 2932 * @qpair: Pointer to the queue pair struct. 2933 * @consumer_tail: Reference used for storing consumer tail index. 2934 * @producer_head: Reference used for storing the producer head index. 2935 * 2936 * This is the client interface for getting the current indexes of the 2937 * QPair from the point of the view of the caller as the consumer. 2938 */ 2939 int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair, 2940 u64 *consumer_tail, 2941 u64 *producer_head) 2942 { 2943 struct vmci_queue_header *produce_q_header; 2944 struct vmci_queue_header *consume_q_header; 2945 int result; 2946 2947 if (!qpair) 2948 return VMCI_ERROR_INVALID_ARGS; 2949 2950 qp_lock(qpair); 2951 result = 2952 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2953 if (result == VMCI_SUCCESS) 2954 vmci_q_header_get_pointers(consume_q_header, produce_q_header, 2955 consumer_tail, producer_head); 2956 qp_unlock(qpair); 2957 2958 if (result == VMCI_SUCCESS && 2959 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) || 2960 (producer_head && *producer_head >= qpair->consume_q_size))) 2961 return VMCI_ERROR_INVALID_SIZE; 2962 2963 return result; 2964 } 2965 EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes); 2966 2967 /* 2968 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue. 2969 * @qpair: Pointer to the queue pair struct. 2970 * 2971 * This is the client interface for getting the amount of free 2972 * space in the QPair from the point of the view of the caller as 2973 * the producer which is the common case. Returns < 0 if err, else 2974 * available bytes into which data can be enqueued if > 0. 2975 */ 2976 s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair) 2977 { 2978 struct vmci_queue_header *produce_q_header; 2979 struct vmci_queue_header *consume_q_header; 2980 s64 result; 2981 2982 if (!qpair) 2983 return VMCI_ERROR_INVALID_ARGS; 2984 2985 qp_lock(qpair); 2986 result = 2987 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 2988 if (result == VMCI_SUCCESS) 2989 result = vmci_q_header_free_space(produce_q_header, 2990 consume_q_header, 2991 qpair->produce_q_size); 2992 else 2993 result = 0; 2994 2995 qp_unlock(qpair); 2996 2997 return result; 2998 } 2999 EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space); 3000 3001 /* 3002 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue. 3003 * @qpair: Pointer to the queue pair struct. 
3004 * 3005 * This is the client interface for getting the amount of free 3006 * space in the QPair from the point of the view of the caller as 3007 * the consumer which is not the common case. Returns < 0 if err, else 3008 * available bytes into which data can be enqueued if > 0. 3009 */ 3010 s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair) 3011 { 3012 struct vmci_queue_header *produce_q_header; 3013 struct vmci_queue_header *consume_q_header; 3014 s64 result; 3015 3016 if (!qpair) 3017 return VMCI_ERROR_INVALID_ARGS; 3018 3019 qp_lock(qpair); 3020 result = 3021 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3022 if (result == VMCI_SUCCESS) 3023 result = vmci_q_header_free_space(consume_q_header, 3024 produce_q_header, 3025 qpair->consume_q_size); 3026 else 3027 result = 0; 3028 3029 qp_unlock(qpair); 3030 3031 return result; 3032 } 3033 EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space); 3034 3035 /* 3036 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from 3037 * producer queue. 3038 * @qpair: Pointer to the queue pair struct. 3039 * 3040 * This is the client interface for getting the amount of 3041 * enqueued data in the QPair from the point of the view of the 3042 * caller as the producer which is not the common case. Returns < 0 if err, 3043 * else available bytes that may be read. 3044 */ 3045 s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair) 3046 { 3047 struct vmci_queue_header *produce_q_header; 3048 struct vmci_queue_header *consume_q_header; 3049 s64 result; 3050 3051 if (!qpair) 3052 return VMCI_ERROR_INVALID_ARGS; 3053 3054 qp_lock(qpair); 3055 result = 3056 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3057 if (result == VMCI_SUCCESS) 3058 result = vmci_q_header_buf_ready(produce_q_header, 3059 consume_q_header, 3060 qpair->produce_q_size); 3061 else 3062 result = 0; 3063 3064 qp_unlock(qpair); 3065 3066 return result; 3067 } 3068 EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready); 3069 3070 /* 3071 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from 3072 * consumer queue. 3073 * @qpair: Pointer to the queue pair struct. 3074 * 3075 * This is the client interface for getting the amount of 3076 * enqueued data in the QPair from the point of the view of the 3077 * caller as the consumer which is the normal case. Returns < 0 if err, 3078 * else available bytes that may be read. 3079 */ 3080 s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair) 3081 { 3082 struct vmci_queue_header *produce_q_header; 3083 struct vmci_queue_header *consume_q_header; 3084 s64 result; 3085 3086 if (!qpair) 3087 return VMCI_ERROR_INVALID_ARGS; 3088 3089 qp_lock(qpair); 3090 result = 3091 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header); 3092 if (result == VMCI_SUCCESS) 3093 result = vmci_q_header_buf_ready(consume_q_header, 3094 produce_q_header, 3095 qpair->consume_q_size); 3096 else 3097 result = 0; 3098 3099 qp_unlock(qpair); 3100 3101 return result; 3102 } 3103 EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready); 3104 3105 /* 3106 * vmci_qpair_enqueue() - Throw data on the queue. 3107 * @qpair: Pointer to the queue pair struct. 3108 * @buf: Pointer to buffer containing data 3109 * @buf_size: Length of buffer. 3110 * @buf_type: Buffer type (Unused). 3111 * 3112 * This is the client interface for enqueueing data into the queue. 3113 * Returns number of bytes enqueued or < 0 on error. 
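 *
 * Minimal usage sketch (assumed caller; the "ping" payload is arbitrary):
 *
 *	char out[] = "ping";
 *	ssize_t written;
 *
 *	written = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
 *	if (written < VMCI_SUCCESS)
 *		pr_devel("enqueue failed (result=%zd)\n", written);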
3114 */ 3115 ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair, 3116 const void *buf, 3117 size_t buf_size, 3118 int buf_type) 3119 { 3120 ssize_t result; 3121 3122 if (!qpair || !buf) 3123 return VMCI_ERROR_INVALID_ARGS; 3124 3125 qp_lock(qpair); 3126 3127 do { 3128 result = qp_enqueue_locked(qpair->produce_q, 3129 qpair->consume_q, 3130 qpair->produce_q_size, 3131 buf, buf_size, 3132 qp_memcpy_to_queue); 3133 3134 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3135 !qp_wait_for_ready_queue(qpair)) 3136 result = VMCI_ERROR_WOULD_BLOCK; 3137 3138 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3139 3140 qp_unlock(qpair); 3141 3142 return result; 3143 } 3144 EXPORT_SYMBOL_GPL(vmci_qpair_enqueue); 3145 3146 /* 3147 * vmci_qpair_dequeue() - Get data from the queue. 3148 * @qpair: Pointer to the queue pair struct. 3149 * @buf: Pointer to buffer for the data 3150 * @buf_size: Length of buffer. 3151 * @buf_type: Buffer type (Unused). 3152 * 3153 * This is the client interface for dequeueing data from the queue. 3154 * Returns number of bytes dequeued or < 0 on error. 3155 */ 3156 ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, 3157 void *buf, 3158 size_t buf_size, 3159 int buf_type) 3160 { 3161 ssize_t result; 3162 3163 if (!qpair || !buf) 3164 return VMCI_ERROR_INVALID_ARGS; 3165 3166 qp_lock(qpair); 3167 3168 do { 3169 result = qp_dequeue_locked(qpair->produce_q, 3170 qpair->consume_q, 3171 qpair->consume_q_size, 3172 buf, buf_size, 3173 qp_memcpy_from_queue, true); 3174 3175 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3176 !qp_wait_for_ready_queue(qpair)) 3177 result = VMCI_ERROR_WOULD_BLOCK; 3178 3179 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3180 3181 qp_unlock(qpair); 3182 3183 return result; 3184 } 3185 EXPORT_SYMBOL_GPL(vmci_qpair_dequeue); 3186 3187 /* 3188 * vmci_qpair_peek() - Peek at the data in the queue. 3189 * @qpair: Pointer to the queue pair struct. 3190 * @buf: Pointer to buffer for the data 3191 * @buf_size: Length of buffer. 3192 * @buf_type: Buffer type (Unused on Linux). 3193 * 3194 * This is the client interface for peeking into a queue. (I.e., 3195 * copy data from the queue without updating the head pointer.) 3196 * Returns number of bytes dequeued or < 0 on error. 3197 */ 3198 ssize_t vmci_qpair_peek(struct vmci_qp *qpair, 3199 void *buf, 3200 size_t buf_size, 3201 int buf_type) 3202 { 3203 ssize_t result; 3204 3205 if (!qpair || !buf) 3206 return VMCI_ERROR_INVALID_ARGS; 3207 3208 qp_lock(qpair); 3209 3210 do { 3211 result = qp_dequeue_locked(qpair->produce_q, 3212 qpair->consume_q, 3213 qpair->consume_q_size, 3214 buf, buf_size, 3215 qp_memcpy_from_queue, false); 3216 3217 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3218 !qp_wait_for_ready_queue(qpair)) 3219 result = VMCI_ERROR_WOULD_BLOCK; 3220 3221 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3222 3223 qp_unlock(qpair); 3224 3225 return result; 3226 } 3227 EXPORT_SYMBOL_GPL(vmci_qpair_peek); 3228 3229 /* 3230 * vmci_qpair_enquev() - Throw data on the queue using iov. 3231 * @qpair: Pointer to the queue pair struct. 3232 * @iov: Pointer to buffer containing data 3233 * @iov_size: Length of buffer. 3234 * @buf_type: Buffer type (Unused). 3235 * 3236 * This is the client interface for enqueueing data into the queue. 3237 * This function uses IO vectors to handle the work. Returns number 3238 * of bytes enqueued or < 0 on error. 
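 *
 * The natural caller is a sendmsg()-style path that already holds a
 * struct msghdr describing the payload; a hedged sketch (assuming
 * "msg" carries "len" bytes in msg->msg_iter):
 *
 *	ssize_t written = vmci_qpair_enquev(qpair, msg, len, 0);
 *	if (written < VMCI_SUCCESS)
 *		pr_devel("enquev failed (result=%zd)\n", written);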
3239 */ 3240 ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 3241 struct msghdr *msg, 3242 size_t iov_size, 3243 int buf_type) 3244 { 3245 ssize_t result; 3246 3247 if (!qpair) 3248 return VMCI_ERROR_INVALID_ARGS; 3249 3250 qp_lock(qpair); 3251 3252 do { 3253 result = qp_enqueue_locked(qpair->produce_q, 3254 qpair->consume_q, 3255 qpair->produce_q_size, 3256 msg, iov_size, 3257 qp_memcpy_to_queue_iov); 3258 3259 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3260 !qp_wait_for_ready_queue(qpair)) 3261 result = VMCI_ERROR_WOULD_BLOCK; 3262 3263 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3264 3265 qp_unlock(qpair); 3266 3267 return result; 3268 } 3269 EXPORT_SYMBOL_GPL(vmci_qpair_enquev); 3270 3271 /* 3272 * vmci_qpair_dequev() - Get data from the queue using iov. 3273 * @qpair: Pointer to the queue pair struct. 3274 * @iov: Pointer to buffer for the data 3275 * @iov_size: Length of buffer. 3276 * @buf_type: Buffer type (Unused). 3277 * 3278 * This is the client interface for dequeueing data from the queue. 3279 * This function uses IO vectors to handle the work. Returns number 3280 * of bytes dequeued or < 0 on error. 3281 */ 3282 ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 3283 struct msghdr *msg, 3284 size_t iov_size, 3285 int buf_type) 3286 { 3287 ssize_t result; 3288 3289 if (!qpair) 3290 return VMCI_ERROR_INVALID_ARGS; 3291 3292 qp_lock(qpair); 3293 3294 do { 3295 result = qp_dequeue_locked(qpair->produce_q, 3296 qpair->consume_q, 3297 qpair->consume_q_size, 3298 msg, iov_size, 3299 qp_memcpy_from_queue_iov, 3300 true); 3301 3302 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3303 !qp_wait_for_ready_queue(qpair)) 3304 result = VMCI_ERROR_WOULD_BLOCK; 3305 3306 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3307 3308 qp_unlock(qpair); 3309 3310 return result; 3311 } 3312 EXPORT_SYMBOL_GPL(vmci_qpair_dequev); 3313 3314 /* 3315 * vmci_qpair_peekv() - Peek at the data in the queue using iov. 3316 * @qpair: Pointer to the queue pair struct. 3317 * @iov: Pointer to buffer for the data 3318 * @iov_size: Length of buffer. 3319 * @buf_type: Buffer type (Unused on Linux). 3320 * 3321 * This is the client interface for peeking into a queue. (I.e., 3322 * copy data from the queue without updating the head pointer.) 3323 * This function uses IO vectors to handle the work. Returns number 3324 * of bytes peeked or < 0 on error. 3325 */ 3326 ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, 3327 struct msghdr *msg, 3328 size_t iov_size, 3329 int buf_type) 3330 { 3331 ssize_t result; 3332 3333 if (!qpair) 3334 return VMCI_ERROR_INVALID_ARGS; 3335 3336 qp_lock(qpair); 3337 3338 do { 3339 result = qp_dequeue_locked(qpair->produce_q, 3340 qpair->consume_q, 3341 qpair->consume_q_size, 3342 msg, iov_size, 3343 qp_memcpy_from_queue_iov, 3344 false); 3345 3346 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3347 !qp_wait_for_ready_queue(qpair)) 3348 result = VMCI_ERROR_WOULD_BLOCK; 3349 3350 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY); 3351 3352 qp_unlock(qpair); 3353 return result; 3354 } 3355 EXPORT_SYMBOL_GPL(vmci_qpair_peekv); 3356
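
/*
 * End-to-end usage sketch of the exported qpair client API. This is
 * illustrative only and not part of the driver; "peer_cid" and the
 * 4096-byte queue sizes are assumptions, not values mandated here.
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	char out[] = "ping", in[8];
 *	int err;
 *
 *	err = vmci_qpair_alloc(&qpair, &handle, 4096, 4096, peer_cid,
 *			       0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (err < VMCI_SUCCESS)
 *		return err;
 *
 *	if (vmci_qpair_enqueue(qpair, out, sizeof(out), 0) < VMCI_SUCCESS)
 *		pr_devel("peer not ready or queue full\n");
 *
 *	if (vmci_qpair_consume_buf_ready(qpair) > 0)
 *		vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
 *
 *	vmci_qpair_detach(&qpair);
 */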