/*
 * Vhost User library
 *
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Victor Kaplansky <victork@redhat.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifndef LIBVHOST_USER_H
#define LIBVHOST_USER_H

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <poll.h>
#include <linux/vhost.h>
#include <pthread.h>
#include "standard-headers/linux/virtio_ring.h"

/* Based on qemu/hw/virtio/vhost-user.c */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_LOG_PAGE 4096

#define VIRTQUEUE_MAX_SIZE 1024

#define VHOST_MEMORY_BASELINE_NREGIONS 8

/*
 * Set a reasonable maximum number of ram slots, which will be supported by
 * any architecture.
 */
#define VHOST_USER_MAX_RAM_SLOTS 32

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

typedef enum VhostSetConfigType {
    VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
    VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,

    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
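
/*
 * Illustrative sketch (not part of the API): a device that wants to expose
 * some of the optional protocol features above can implement the
 * get_protocol_features callback of VuDevIface (declared further down) and
 * return a bitmask built from these constants. The callback name is
 * hypothetical, and how the result is combined with the library's own
 * supported set is handled inside libvhost-user.c:
 *
 *     static uint64_t my_get_protocol_features(VuDev *dev)
 *     {
 *         return (1ull << VHOST_USER_PROTOCOL_F_MQ) |
 *                (1ull << VHOST_USER_PROTOCOL_F_CONFIG);
 *     }
 */
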
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_BACKEND_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_VRING_KICK = 35,
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserBackendRequest {
    VHOST_USER_BACKEND_NONE = 0,
    VHOST_USER_BACKEND_IOTLB_MSG = 1,
    VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_BACKEND_VRING_CALL = 4,
    VHOST_USER_BACKEND_VRING_ERR = 5,
    VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

#define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion))

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                    + sizeof(c.size) \
                                    + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
# define VU_PACKED __attribute__((packed))
#endif

typedef struct VhostUserMsg {
    int request;

#define VHOST_USER_VERSION_MASK (0x3)
#define VHOST_USER_REPLY_MASK (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */

    union {
#define VHOST_USER_VRING_IDX_MASK (0xff)
#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg memreg;
        VhostUserLog log;
        VhostUserConfig config;
        VhostUserVringArea area;
        VhostUserInflight inflight;
    } payload;

    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    int fd_num;
    uint8_t *data;
} VU_PACKED VhostUserMsg;

typedef struct VuDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VuDevRegion;
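
/*
 * Illustrative sketch: once a region has been mmap'ed, a guest physical
 * address that falls inside it maps to a local pointer with the arithmetic
 * below. This is essentially what vu_gpa_to_va() (declared further down)
 * does while searching all regions; the helper name is hypothetical:
 *
 *     static void *region_gpa_to_va(const VuDevRegion *r, uint64_t gpa)
 *     {
 *         if (gpa < r->gpa || gpa - r->gpa >= r->size) {
 *             return NULL;
 *         }
 *         return (void *)(uintptr_t)(r->mmap_addr + r->mmap_offset +
 *                                    (gpa - r->gpa));
 *     }
 */
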
typedef struct VuDev VuDev;

typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
                                  int *do_reply);
typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags);

typedef struct VuDevIface {
    /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
    vu_get_features_cb get_features;
    /* enable vhost implementation features */
    vu_set_features_cb set_features;
    /*
     * get the protocol feature bitmask from the underlying vhost
     * implementation
     */
    vu_get_features_cb get_protocol_features;
    /* enable protocol features in the underlying vhost implementation. */
    vu_set_features_cb set_protocol_features;
    /*
     * process_msg is called for each vhost-user message received;
     * if it returns a non-zero value, libvhost-user skips its own
     * processing of that message.
     */
    vu_process_msg_cb process_msg;
    /*
     * called when a queue is started or stopped, so the device knows
     * when its queues can be processed
     */
    vu_queue_set_started_cb queue_set_started;
    /*
     * Return true if the queue is processed in order; in that case it
     * will be resumed to vring.used->idx. This can help to support
     * resuming on unmanaged exit/crash.
     */
    vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
    /* get the config space of the device */
    vu_get_config_cb get_config;
    /* set the config space of the device */
    vu_set_config_cb set_config;
} VuDevIface;
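
/*
 * Illustrative sketch: a minimal VuDevIface. All names prefixed with "my_"
 * are hypothetical; my_queue_handler is a vu_queue_handler_cb installed via
 * vu_set_queue_handler() (see the vu_queue_pop() example further down).
 * Which callbacks a device really needs depends on the device type; the
 * feature bits returned here are just placeholders:
 *
 *     static uint64_t my_get_features(VuDev *dev)
 *     {
 *         return 0;    // device feature bits, e.g. VIRTIO_F_VERSION_1
 *     }
 *
 *     static void my_set_features(VuDev *dev, uint64_t features)
 *     {
 *         // remember the negotiated features if the device needs them
 *     }
 *
 *     static void my_queue_set_started(VuDev *dev, int qidx, bool started)
 *     {
 *         vu_set_queue_handler(dev, vu_get_queue(dev, qidx),
 *                              started ? my_queue_handler : NULL);
 *     }
 *
 *     static const VuDevIface my_iface = {
 *         .get_features = my_get_features,
 *         .set_features = my_set_features,
 *         .queue_set_started = my_queue_set_started,
 *     };
 */
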
typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);

typedef struct VuRing {
    unsigned int num;
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
    uint64_t log_guest_addr;
    uint32_t flags;
} VuRing;

typedef struct VuDescStateSplit {
    /* Indicates whether this descriptor is inflight or not.
     * Only available for the head descriptor. */
    uint8_t inflight;

    /* Padding */
    uint8_t padding[5];

    /* Maintains a list for the last batch of used descriptors.
     * Only available when batching is used for submitting. */
    uint16_t next;

    /* Used to preserve the order of fetching available descriptors.
     * Only available for the head descriptor. */
    uint64_t counter;
} VuDescStateSplit;

typedef struct VuVirtqInflight {
    /* The feature flags of this region. Currently initialized to 0. */
    uint64_t features;

    /* The version of this region. It's 1 currently.
     * A zero value indicates that a VM reset happened. */
    uint16_t version;

    /*
     * The size of the VuDescStateSplit array. It's equal to the virtqueue
     * size. The backend can get it from the queue_size field of
     * VhostUserInflight.
     */
    uint16_t desc_num;

    /* The head of the list that tracks the last batch of used descriptors. */
    uint16_t last_batch_head;

    /* Stores the idx value of the used ring */
    uint16_t used_idx;

    /* Used to track the state of each descriptor in the descriptor table */
    VuDescStateSplit desc[];
} VuVirtqInflight;

typedef struct VuVirtqInflightDesc {
    uint16_t index;
    uint64_t counter;
} VuVirtqInflightDesc;

typedef struct VuVirtq {
    VuRing vring;

    VuVirtqInflight *inflight;

    VuVirtqInflightDesc *resubmit_list;

    uint16_t resubmit_num;

    uint64_t counter;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    unsigned int inuse;

    vu_queue_handler_cb handler;

    int call_fd;
    int kick_fd;
    int err_fd;
    unsigned int enable;
    bool started;

    /* Guest addresses of our ring */
    struct vhost_vring_addr vra;
} VuVirtq;

enum VuWatchCondition {
    VU_WATCH_IN = POLLIN,
    VU_WATCH_OUT = POLLOUT,
    VU_WATCH_PRI = POLLPRI,
    VU_WATCH_ERR = POLLERR,
    VU_WATCH_HUP = POLLHUP,
};

typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
                                 vu_watch_cb cb, void *data);
typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);

typedef struct VuDevInflightInfo {
    int fd;
    void *addr;
    uint64_t size;
} VuDevInflightInfo;

struct VuDev {
    int sock;
    uint32_t nregions;
    VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
    VuVirtq *vq;
    VuDevInflightInfo inflight_info;
    int log_call_fd;
    /* Must be held while using backend_fd */
    pthread_mutex_t backend_mutex;
    int backend_fd;
    uint64_t log_size;
    uint8_t *log_table;
    uint64_t features;
    uint64_t protocol_features;
    bool broken;
    uint16_t max_queues;

    /*
     * @read_msg: custom method to read a vhost-user message.
     *
     * Read data from the vhost-user socket fd and fill in the passed
     * VhostUserMsg *vmsg struct.
     *
     * If reading fails, it should close any file descriptors received
     * as the socket message's ancillary data.
     *
     * For the details, please refer to vu_message_read() in
     * libvhost-user.c, which is used by default if no custom method is
     * provided when calling vu_init().
     *
     * Returns: true if a vhost-user message was successfully received,
     * false otherwise.
     */
    vu_read_msg_cb read_msg;

    /*
     * @set_watch: add or update the given fd to the watch set,
     * call cb when condition is met.
     */
    vu_set_watch_cb set_watch;

    /* @remove_watch: remove the given fd from the watch set */
    vu_remove_watch_cb remove_watch;

    /*
     * @panic: encountered an unrecoverable error, you may try to
     * re-initialize
     */
    vu_panic_cb panic;
    const VuDevIface *iface;

    /* Postcopy data */
    int postcopy_ufd;
    bool postcopy_listening;
};
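
/*
 * Illustrative sketch: the panic/set_watch/remove_watch callbacks are how
 * libvhost-user plugs into the application's event loop. The condition
 * values passed to set_watch are the VU_WATCH_* flags above, which are plain
 * poll(2) events. Everything below (the fixed-size table, the "my_" names)
 * is a hypothetical, minimal example; a real application would normally
 * register the fd with its own event loop (GLib, epoll, ...) instead:
 *
 *     static struct my_watch {
 *         int fd;
 *         int condition;
 *         vu_watch_cb cb;
 *         void *data;
 *     } my_watches[8];
 *
 *     static void my_remove_watch(VuDev *dev, int fd)
 *     {
 *         for (int i = 0; i < 8; i++) {
 *             if (my_watches[i].cb && my_watches[i].fd == fd) {
 *                 my_watches[i].cb = NULL;
 *             }
 *         }
 *     }
 *
 *     static void my_set_watch(VuDev *dev, int fd, int condition,
 *                              vu_watch_cb cb, void *data)
 *     {
 *         my_remove_watch(dev, fd);    // drop any previous watch on fd
 *         for (int i = 0; i < 8; i++) {
 *             if (!my_watches[i].cb) {
 *                 my_watches[i] =
 *                     (struct my_watch){ fd, condition, cb, data };
 *                 return;
 *             }
 *         }
 *     }
 *
 *     static void my_panic(VuDev *dev, const char *err)
 *     {
 *         fprintf(stderr, "vhost-user panic: %s\n", err);    // <stdio.h>
 *         abort();                                           // <stdlib.h>
 *     }
 */
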
typedef struct VuVirtqElement {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    struct iovec *in_sg;
    struct iovec *out_sg;
} VuVirtqElement;

/**
 * vu_init:
 * @dev: a VuDev context
 * @max_queues: maximum number of virtqueues
 * @socket: the socket connected to the vhost-user frontend
 * @panic: a panic callback
 * @read_msg: a read_msg callback, or NULL to use the default reader
 * @set_watch: a set_watch callback
 * @remove_watch: a remove_watch callback
 * @iface: a VuDevIface structure with vhost-user device callbacks
 *
 * Initializes a VuDev vhost-user context.
 *
 * Returns: true on success, false on failure.
 **/
bool vu_init(VuDev *dev,
             uint16_t max_queues,
             int socket,
             vu_panic_cb panic,
             vu_read_msg_cb read_msg,
             vu_set_watch_cb set_watch,
             vu_remove_watch_cb remove_watch,
             const VuDevIface *iface);


/**
 * vu_deinit:
 * @dev: a VuDev context
 *
 * Cleans up the VuDev context
 */
void vu_deinit(VuDev *dev);


/**
 * vu_request_to_string: return string for vhost message request
 * @req: VhostUserMsg request
 *
 * Returns a const string, do not free.
 */
const char *vu_request_to_string(unsigned int req);

/**
 * vu_dispatch:
 * @dev: a VuDev context
 *
 * Process one vhost-user message.
 *
 * Returns: true on success, false on failure.
 */
bool vu_dispatch(VuDev *dev);
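
/*
 * Illustrative sketch: a typical setup sequence, reusing the hypothetical
 * my_* callbacks from the sketches above. conn_fd is assumed to be an
 * already-connected AF_UNIX socket to the vhost-user frontend. In this
 * sketch the application watches conn_fd itself and calls vu_dispatch()
 * when it becomes readable, while the fds registered through set_watch are
 * polled with their VU_WATCH_* conditions (which map directly to poll(2)
 * events):
 *
 *     VuDev dev;
 *
 *     if (!vu_init(&dev, 1, conn_fd, my_panic,
 *                  NULL,    // NULL read_msg selects the built-in reader
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         return 1;
 *     }
 *
 *     for (;;) {
 *         struct pollfd pfds[9];
 *         struct my_watch *w[9];
 *         int n = 1;
 *
 *         pfds[0].fd = conn_fd;
 *         pfds[0].events = POLLIN;
 *         for (int i = 0; i < 8; i++) {
 *             if (my_watches[i].cb) {
 *                 w[n] = &my_watches[i];
 *                 pfds[n].fd = my_watches[i].fd;
 *                 pfds[n].events = my_watches[i].condition;
 *                 n++;
 *             }
 *         }
 *         if (poll(pfds, n, -1) < 0) {
 *             break;
 *         }
 *         if ((pfds[0].revents & POLLIN) && !vu_dispatch(&dev)) {
 *             break;    // frontend disconnected or a message failed
 *         }
 *         for (int i = 1; i < n; i++) {
 *             if (pfds[i].revents && w[i]->cb) {
 *                 w[i]->cb(&dev, pfds[i].revents, w[i]->data);
 *             }
 *         }
 *     }
 *
 *     vu_deinit(&dev);
 */
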
/**
 * vu_gpa_to_va:
 * @dev: a VuDev context
 * @plen: in/out: the requested length in bytes; on return, limited to the
 *        contiguously mapped length
 * @guest_addr: guest address
 *
 * Translate a guest address to a pointer. Returns NULL on failure.
 */
void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);

/**
 * vu_get_queue:
 * @dev: a VuDev context
 * @qidx: queue index
 *
 * Returns: the queue at index @qidx.
 */
VuVirtq *vu_get_queue(VuDev *dev, int qidx);

/**
 * vu_set_queue_handler:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @handler: the queue handler callback
 *
 * Set the queue handler. This function may be called several times
 * for the same queue. If called with NULL @handler, the handler is
 * removed.
 */
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler);

/**
 * vu_set_queue_host_notifier:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @fd: a file descriptor
 * @size: host page size
 * @offset: notifier offset in @fd file
 *
 * Set the queue's host notifier. This function may be called several
 * times for the same queue. If called with -1 @fd, the notifier
 * is removed.
 *
 * Returns: true on success, false on failure.
 */
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset);

/**
 * vu_queue_set_notification:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @enable: whether to enable notifications for this queue
 *
 * Set whether the queue notifies (via event index or interrupt).
 */
void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);

/**
 * vu_queue_enabled:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is enabled.
 */
bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_started:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is started.
 */
bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);

/**
 * vu_queue_empty:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: true if the queue is empty or not ready.
 */
bool vu_queue_empty(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 */
void vu_queue_notify(VuDev *dev, VuVirtq *vq);

/**
 * vu_config_change_msg:
 * @dev: a VuDev context
 *
 * Notify the frontend that the device configuration has changed
 * (VHOST_USER_BACKEND_CONFIG_CHANGE_MSG).
 */
void vu_config_change_msg(VuDev *dev);

/**
 * vu_queue_notify_sync:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 * or sync message if possible.
 */
void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_pop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @sz: the size of struct to return (must be >= VuVirtqElement)
 *
 * Returns: a VuVirtqElement filled from the queue or NULL. The
 * returned element must be free()-d by the caller.
 */
void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);


/**
 * vu_queue_unpop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: The #VuVirtqElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to vu_queue_pop() will refetch the element.
 */
void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                    size_t len);

/**
 * vu_queue_rewind:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * vu_queue_pop() will refetch the oldest element.
 *
 * Returns: true on success, false if @num is greater than the number of
 * in-use elements.
 */
bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);

/**
 * vu_queue_fill:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 * @idx: optional offset for the used ring index (0 in general)
 *
 * Fill the used ring with @elem element.
 */
void vu_queue_fill(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem,
                   unsigned int len, unsigned int idx);

/**
 * vu_queue_push:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 *
 * Helper that combines vu_queue_fill() with a vu_queue_flush().
 */
void vu_queue_push(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem, unsigned int len);
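
/*
 * Illustrative sketch: a queue handler (the my_queue_handler referenced in
 * the VuDevIface sketch above) that drains the virtqueue with
 * vu_queue_pop(), completes each element with vu_queue_push() and finally
 * notifies the frontend. Per the vu_queue_pop() documentation, the returned
 * element must be free()-d (<stdlib.h>); what "handle the request" means is
 * device-specific:
 *
 *     static void my_queue_handler(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem))) != NULL) {
 *             unsigned int written = 0;
 *
 *             // A real device parses the request in elem->out_sg[0..out_num)
 *             // and writes its response into elem->in_sg[0..in_num),
 *             // accumulating the number of bytes produced in 'written'.
 *
 *             vu_queue_push(dev, vq, elem, written);
 *             free(elem);
 *         }
 *
 *         vu_queue_notify(dev, vq);
 *     }
 */
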
/**
 * vu_queue_flush:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to flush
 *
 * Mark the last @num elements as done (used.idx is advanced by
 * @num elements).
 */
void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);

/**
 * vu_queue_get_avail_bytes:
 * @vdev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: returned number of bytes available in device-writable (in)
 *            buffers
 * @out_bytes: returned number of bytes available in device-readable (out)
 *             buffers
 * @max_in_bytes: stop counting after max_in_bytes
 * @max_out_bytes: stop counting after max_out_bytes
 *
 * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
 */
void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
                              unsigned int *out_bytes,
                              unsigned max_in_bytes, unsigned max_out_bytes);

/**
 * vu_queue_avail_bytes:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: expected in bytes
 * @out_bytes: expected out bytes
 *
 * Returns: true if in_bytes <= in_total && out_bytes <= out_total
 */
bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                          unsigned int out_bytes);

#endif /* LIBVHOST_USER_H */