xref: /openbmc/qemu/hw/virtio/vhost-user.c (revision 4e6b1384)
1 /*
2  * vhost-user
3  *
4  * Copyright (c) 2013 Virtual Open Systems Sarl.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10 
11 #include "qemu/osdep.h"
12 #include "qapi/error.h"
13 #include "hw/virtio/vhost.h"
14 #include "hw/virtio/vhost-user.h"
15 #include "hw/virtio/vhost-backend.h"
16 #include "hw/virtio/virtio.h"
17 #include "hw/virtio/virtio-net.h"
18 #include "chardev/char-fe.h"
19 #include "sysemu/kvm.h"
20 #include "qemu/error-report.h"
21 #include "qemu/main-loop.h"
22 #include "qemu/sockets.h"
23 #include "sysemu/cryptodev.h"
24 #include "migration/migration.h"
25 #include "migration/postcopy-ram.h"
26 #include "trace.h"
27 
28 #include <sys/ioctl.h>
29 #include <sys/socket.h>
30 #include <sys/un.h>
31 
32 #include "standard-headers/linux/vhost_types.h"
33 
34 #ifdef CONFIG_LINUX
35 #include <linux/userfaultfd.h>
36 #endif
37 
/* Hard limit on the number of memory regions sent in one SET_MEM_TABLE */
#define VHOST_MEMORY_MAX_NREGIONS    8
/* vhost feature bit: the backend supports protocol feature negotiation */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* Maximum number of fds attached to a single slave-channel message */
#define VHOST_USER_SLAVE_MAX_FDS     8

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256
46 
/*
 * Optional protocol features, negotiated with the slave via
 * VHOST_USER_GET/SET_PROTOCOL_FEATURES.  Each enumerator is a bit
 * position in the 64-bit protocol feature mask.
 */
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    VHOST_USER_PROTOCOL_F_MAX
};

/* Mask covering every protocol feature bit this QEMU knows about */
#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
66 
/*
 * Message types sent by the master (QEMU) to the slave.  The numeric
 * values are part of the vhost-user wire protocol and must not change.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE  = 28,
    VHOST_USER_POSTCOPY_LISTEN  = 29,
    VHOST_USER_POSTCOPY_END     = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    VHOST_USER_MAX
} VhostUserRequest;
105 
/*
 * Message types sent by the slave to the master over the separate
 * slave channel (see VHOST_USER_PROTOCOL_F_SLAVE_REQ).
 */
typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
}  VhostUserSlaveRequest;
113 
/* One guest memory region as described on the wire (SET_MEM_TABLE) */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* guest physical address of the region */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* QEMU virtual address backing the region */
    uint64_t mmap_offset;       /* offset of the region within its mmap fd */
} VhostUserMemoryRegion;
120 
/* Payload of VHOST_USER_SET_MEM_TABLE: table of fd-backed regions */
typedef struct VhostUserMemory {
    uint32_t nregions;  /* number of valid entries in regions[] */
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
126 
/* Payload of VHOST_USER_SET_LOG_BASE: dirty-log shared memory layout */
typedef struct VhostUserLog {
    uint64_t mmap_size;    /* size of the log mapping in bytes */
    uint64_t mmap_offset;  /* offset of the log within the passed fd */
} VhostUserLog;
131 
/* Payload of VHOST_USER_GET/SET_CONFIG: virtio config space access */
typedef struct VhostUserConfig {
    uint32_t offset;  /* offset within the device config space */
    uint32_t size;    /* number of bytes used in region[] */
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;
138 
/* Key-length limits for VHOST_USER_CREATE_CRYPTO_SESSION payloads */
#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64

/* Payload of VHOST_USER_CREATE_CRYPTO_SESSION */
typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;

/* Dummy instance: exists only so sizeof() its fields can be taken below */
static VhostUserConfig c __attribute__ ((unused));
/* Size of the fixed VhostUserConfig header (everything before region[]) */
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))
154 
/* Payload of VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG */
typedef struct VhostUserVringArea {
    uint64_t u64;     /* queue index plus VHOST_USER_VRING_* flag bits */
    uint64_t size;    /* size of the area to map */
    uint64_t offset;  /* offset of the area within the passed fd */
} VhostUserVringArea;
160 
/* Payload of VHOST_USER_GET/SET_INFLIGHT_FD */
typedef struct VhostUserInflight {
    uint64_t mmap_size;    /* size of the inflight-tracking mapping */
    uint64_t mmap_offset;  /* offset of it within the shared fd */
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;
167 
/* Fixed header that starts every vhost-user message on the wire */
typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;
177 
/*
 * Union of all possible message payloads; hdr.size says how much of
 * it is actually transferred for a given request.
 */
typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
        struct vhost_iotlb_msg iotlb;
        VhostUserConfig config;
        VhostUserCryptoSession session;
        VhostUserVringArea area;
        VhostUserInflight inflight;
} VhostUserPayload;
192 
/* A complete vhost-user message: fixed header followed by payload */
typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

/* Dummy instance, kept only so sizeof() can be applied in macros */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
205 
/* Per-vhost_dev private state of the vhost-user backend */
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    /* fd of the slave request channel (used by slave_read) */
    int slave_fd;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD  postcopy_fd;
    /* Addresses at which the slave mapped each region (postcopy reply) */
    uint64_t           postcopy_client_bases[VHOST_MEMORY_MAX_NREGIONS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t             region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock         **region_rb;
    /* The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t        *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool               postcopy_listen;
};
226 
227 static bool ioeventfd_enabled(void)
228 {
229     return !kvm_enabled() || kvm_eventfds_enabled();
230 }
231 
232 static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
233 {
234     struct vhost_user *u = dev->opaque;
235     CharBackend *chr = u->user->chr;
236     uint8_t *p = (uint8_t *) msg;
237     int r, size = VHOST_USER_HDR_SIZE;
238 
239     r = qemu_chr_fe_read_all(chr, p, size);
240     if (r != size) {
241         error_report("Failed to read msg header. Read %d instead of %d."
242                      " Original request %d.", r, size, msg->hdr.request);
243         return -1;
244     }
245 
246     /* validate received flags */
247     if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
248         error_report("Failed to read msg header."
249                 " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
250                 VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
251         return -1;
252     }
253 
254     return 0;
255 }
256 
257 static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
258 {
259     struct vhost_user *u = dev->opaque;
260     CharBackend *chr = u->user->chr;
261     uint8_t *p = (uint8_t *) msg;
262     int r, size;
263 
264     if (vhost_user_read_header(dev, msg) < 0) {
265         return -1;
266     }
267 
268     /* validate message size is sane */
269     if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
270         error_report("Failed to read msg header."
271                 " Size %d exceeds the maximum %zu.", msg->hdr.size,
272                 VHOST_USER_PAYLOAD_SIZE);
273         return -1;
274     }
275 
276     if (msg->hdr.size) {
277         p += VHOST_USER_HDR_SIZE;
278         size = msg->hdr.size;
279         r = qemu_chr_fe_read_all(chr, p, size);
280         if (r != size) {
281             error_report("Failed to read msg payload."
282                          " Read %d instead of %d.", r, msg->hdr.size);
283             return -1;
284         }
285     }
286 
287     return 0;
288 }
289 
290 static int process_message_reply(struct vhost_dev *dev,
291                                  const VhostUserMsg *msg)
292 {
293     VhostUserMsg msg_reply;
294 
295     if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
296         return 0;
297     }
298 
299     if (vhost_user_read(dev, &msg_reply) < 0) {
300         return -1;
301     }
302 
303     if (msg_reply.hdr.request != msg->hdr.request) {
304         error_report("Received unexpected msg type."
305                      "Expected %d received %d",
306                      msg->hdr.request, msg_reply.hdr.request);
307         return -1;
308     }
309 
310     return msg_reply.payload.u64 ? -1 : 0;
311 }
312 
313 static bool vhost_user_one_time_request(VhostUserRequest request)
314 {
315     switch (request) {
316     case VHOST_USER_SET_OWNER:
317     case VHOST_USER_RESET_OWNER:
318     case VHOST_USER_SET_MEM_TABLE:
319     case VHOST_USER_GET_QUEUE_NUM:
320     case VHOST_USER_NET_SET_MTU:
321         return true;
322     default:
323         return false;
324     }
325 }
326 
327 /* most non-init callers ignore the error */
328 static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
329                             int *fds, int fd_num)
330 {
331     struct vhost_user *u = dev->opaque;
332     CharBackend *chr = u->user->chr;
333     int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
334 
335     /*
336      * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
337      * we just need send it once in the first time. For later such
338      * request, we just ignore it.
339      */
340     if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
341         msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
342         return 0;
343     }
344 
345     if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
346         error_report("Failed to set msg fds.");
347         return -1;
348     }
349 
350     ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
351     if (ret != size) {
352         error_report("Failed to write msg."
353                      " Wrote %d instead of %d.", ret, size);
354         return -1;
355     }
356 
357     return 0;
358 }
359 
360 int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
361 {
362     VhostUserMsg msg = {
363         .hdr.request = VHOST_USER_GPU_SET_SOCKET,
364         .hdr.flags = VHOST_USER_VERSION,
365     };
366 
367     return vhost_user_write(dev, &msg, &fd, 1);
368 }
369 
370 static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
371                                    struct vhost_log *log)
372 {
373     int fds[VHOST_MEMORY_MAX_NREGIONS];
374     size_t fd_num = 0;
375     bool shmfd = virtio_has_feature(dev->protocol_features,
376                                     VHOST_USER_PROTOCOL_F_LOG_SHMFD);
377     VhostUserMsg msg = {
378         .hdr.request = VHOST_USER_SET_LOG_BASE,
379         .hdr.flags = VHOST_USER_VERSION,
380         .payload.log.mmap_size = log->size * sizeof(*(log->log)),
381         .payload.log.mmap_offset = 0,
382         .hdr.size = sizeof(msg.payload.log),
383     };
384 
385     if (shmfd && log->fd != -1) {
386         fds[fd_num++] = log->fd;
387     }
388 
389     if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
390         return -1;
391     }
392 
393     if (shmfd) {
394         msg.hdr.size = 0;
395         if (vhost_user_read(dev, &msg) < 0) {
396             return -1;
397         }
398 
399         if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
400             error_report("Received unexpected msg type. "
401                          "Expected %d received %d",
402                          VHOST_USER_SET_LOG_BASE, msg.hdr.request);
403             return -1;
404         }
405     }
406 
407     return 0;
408 }
409 
410 static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
411                                              struct vhost_memory *mem)
412 {
413     struct vhost_user *u = dev->opaque;
414     int fds[VHOST_MEMORY_MAX_NREGIONS];
415     int i, fd;
416     size_t fd_num = 0;
417     VhostUserMsg msg_reply;
418     int region_i, msg_i;
419 
420     VhostUserMsg msg = {
421         .hdr.request = VHOST_USER_SET_MEM_TABLE,
422         .hdr.flags = VHOST_USER_VERSION,
423     };
424 
425     if (u->region_rb_len < dev->mem->nregions) {
426         u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
427         u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
428                                       dev->mem->nregions);
429         memset(&(u->region_rb[u->region_rb_len]), '\0',
430                sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
431         memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
432                sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
433         u->region_rb_len = dev->mem->nregions;
434     }
435 
436     for (i = 0; i < dev->mem->nregions; ++i) {
437         struct vhost_memory_region *reg = dev->mem->regions + i;
438         ram_addr_t offset;
439         MemoryRegion *mr;
440 
441         assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
442         mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
443                                      &offset);
444         fd = memory_region_get_fd(mr);
445         if (fd > 0) {
446             trace_vhost_user_set_mem_table_withfd(fd_num, mr->name,
447                                                   reg->memory_size,
448                                                   reg->guest_phys_addr,
449                                                   reg->userspace_addr, offset);
450             u->region_rb_offset[i] = offset;
451             u->region_rb[i] = mr->ram_block;
452             msg.payload.memory.regions[fd_num].userspace_addr =
453                 reg->userspace_addr;
454             msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
455             msg.payload.memory.regions[fd_num].guest_phys_addr =
456                 reg->guest_phys_addr;
457             msg.payload.memory.regions[fd_num].mmap_offset = offset;
458             assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
459             fds[fd_num++] = fd;
460         } else {
461             u->region_rb_offset[i] = 0;
462             u->region_rb[i] = NULL;
463         }
464     }
465 
466     msg.payload.memory.nregions = fd_num;
467 
468     if (!fd_num) {
469         error_report("Failed initializing vhost-user memory map, "
470                      "consider using -object memory-backend-file share=on");
471         return -1;
472     }
473 
474     msg.hdr.size = sizeof(msg.payload.memory.nregions);
475     msg.hdr.size += sizeof(msg.payload.memory.padding);
476     msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
477 
478     if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
479         return -1;
480     }
481 
482     if (vhost_user_read(dev, &msg_reply) < 0) {
483         return -1;
484     }
485 
486     if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
487         error_report("%s: Received unexpected msg type."
488                      "Expected %d received %d", __func__,
489                      VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
490         return -1;
491     }
492     /* We're using the same structure, just reusing one of the
493      * fields, so it should be the same size.
494      */
495     if (msg_reply.hdr.size != msg.hdr.size) {
496         error_report("%s: Unexpected size for postcopy reply "
497                      "%d vs %d", __func__, msg_reply.hdr.size, msg.hdr.size);
498         return -1;
499     }
500 
501     memset(u->postcopy_client_bases, 0,
502            sizeof(uint64_t) * VHOST_MEMORY_MAX_NREGIONS);
503 
504     /* They're in the same order as the regions that were sent
505      * but some of the regions were skipped (above) if they
506      * didn't have fd's
507     */
508     for (msg_i = 0, region_i = 0;
509          region_i < dev->mem->nregions;
510         region_i++) {
511         if (msg_i < fd_num &&
512             msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
513             dev->mem->regions[region_i].guest_phys_addr) {
514             u->postcopy_client_bases[region_i] =
515                 msg_reply.payload.memory.regions[msg_i].userspace_addr;
516             trace_vhost_user_set_mem_table_postcopy(
517                 msg_reply.payload.memory.regions[msg_i].userspace_addr,
518                 msg.payload.memory.regions[msg_i].userspace_addr,
519                 msg_i, region_i);
520             msg_i++;
521         }
522     }
523     if (msg_i != fd_num) {
524         error_report("%s: postcopy reply not fully consumed "
525                      "%d vs %zd",
526                      __func__, msg_i, fd_num);
527         return -1;
528     }
529     /* Now we've registered this with the postcopy code, we ack to the client,
530      * because now we're in the position to be able to deal with any faults
531      * it generates.
532      */
533     /* TODO: Use this for failure cases as well with a bad value */
534     msg.hdr.size = sizeof(msg.payload.u64);
535     msg.payload.u64 = 0; /* OK */
536     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
537         return -1;
538     }
539 
540     return 0;
541 }
542 
/*
 * Send the guest memory layout (VHOST_USER_SET_MEM_TABLE) to the slave.
 *
 * Only regions backed by an fd (i.e. shareable memory) are sent; each
 * region's fd travels as ancillary data alongside the message.  When
 * postcopy listen is active the postcopy-specific variant is used
 * instead.  Returns 0 on success, -1 on failure.
 */
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    int i, fd;
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (do_postcopy) {
        /* Postcopy has enough differences that it's best done in it's own
         * version
         */
        return vhost_user_set_mem_table_postcopy(dev, mem);
    }

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_MEM_TABLE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        ram_addr_t offset;
        MemoryRegion *mr;

        /* The region's HVA must be representable as a pointer */
        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
                                     &offset);
        fd = memory_region_get_fd(mr);
        /* Regions without an fd cannot be shared and are skipped */
        if (fd > 0) {
            if (fd_num == VHOST_MEMORY_MAX_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -1;
            }
            msg.payload.memory.regions[fd_num].userspace_addr =
                reg->userspace_addr;
            msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
            msg.payload.memory.regions[fd_num].guest_phys_addr =
                reg->guest_phys_addr;
            msg.payload.memory.regions[fd_num].mmap_offset = offset;
            fds[fd_num++] = fd;
        }
    }

    msg.payload.memory.nregions = fd_num;

    if (!fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -1;
    }

    /* Only the used part of the region table is transmitted */
    msg.hdr.size = sizeof(msg.payload.memory.nregions);
    msg.hdr.size += sizeof(msg.payload.memory.padding);
    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);

    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
        return -1;
    }

    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}
616 
617 static int vhost_user_set_vring_addr(struct vhost_dev *dev,
618                                      struct vhost_vring_addr *addr)
619 {
620     VhostUserMsg msg = {
621         .hdr.request = VHOST_USER_SET_VRING_ADDR,
622         .hdr.flags = VHOST_USER_VERSION,
623         .payload.addr = *addr,
624         .hdr.size = sizeof(msg.payload.addr),
625     };
626 
627     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
628         return -1;
629     }
630 
631     return 0;
632 }
633 
634 static int vhost_user_set_vring_endian(struct vhost_dev *dev,
635                                        struct vhost_vring_state *ring)
636 {
637     bool cross_endian = virtio_has_feature(dev->protocol_features,
638                                            VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
639     VhostUserMsg msg = {
640         .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
641         .hdr.flags = VHOST_USER_VERSION,
642         .payload.state = *ring,
643         .hdr.size = sizeof(msg.payload.state),
644     };
645 
646     if (!cross_endian) {
647         error_report("vhost-user trying to send unhandled ioctl");
648         return -1;
649     }
650 
651     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
652         return -1;
653     }
654 
655     return 0;
656 }
657 
658 static int vhost_set_vring(struct vhost_dev *dev,
659                            unsigned long int request,
660                            struct vhost_vring_state *ring)
661 {
662     VhostUserMsg msg = {
663         .hdr.request = request,
664         .hdr.flags = VHOST_USER_VERSION,
665         .payload.state = *ring,
666         .hdr.size = sizeof(msg.payload.state),
667     };
668 
669     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
670         return -1;
671     }
672 
673     return 0;
674 }
675 
/* VHOST_USER_SET_VRING_NUM: tell the slave the size of a virtqueue. */
static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
681 
682 static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
683                                              int queue_idx)
684 {
685     struct vhost_user *u = dev->opaque;
686     VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
687     VirtIODevice *vdev = dev->vdev;
688 
689     if (n->addr && !n->set) {
690         virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
691         n->set = true;
692     }
693 }
694 
695 static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
696                                             int queue_idx)
697 {
698     struct vhost_user *u = dev->opaque;
699     VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
700     VirtIODevice *vdev = dev->vdev;
701 
702     if (n->addr && n->set) {
703         virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
704         n->set = false;
705     }
706 }
707 
/*
 * VHOST_USER_SET_VRING_BASE: set the ring's starting index.
 * Re-arms any previously removed host notifier for this queue first.
 */
static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
715 
716 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
717 {
718     int i;
719 
720     if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
721         return -1;
722     }
723 
724     for (i = 0; i < dev->nvqs; ++i) {
725         struct vhost_vring_state state = {
726             .index = dev->vq_index + i,
727             .num   = enable,
728         };
729 
730         vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
731     }
732 
733     return 0;
734 }
735 
736 static int vhost_user_get_vring_base(struct vhost_dev *dev,
737                                      struct vhost_vring_state *ring)
738 {
739     VhostUserMsg msg = {
740         .hdr.request = VHOST_USER_GET_VRING_BASE,
741         .hdr.flags = VHOST_USER_VERSION,
742         .payload.state = *ring,
743         .hdr.size = sizeof(msg.payload.state),
744     };
745 
746     vhost_user_host_notifier_remove(dev, ring->index);
747 
748     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
749         return -1;
750     }
751 
752     if (vhost_user_read(dev, &msg) < 0) {
753         return -1;
754     }
755 
756     if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
757         error_report("Received unexpected msg type. Expected %d received %d",
758                      VHOST_USER_GET_VRING_BASE, msg.hdr.request);
759         return -1;
760     }
761 
762     if (msg.hdr.size != sizeof(msg.payload.state)) {
763         error_report("Received bad msg size.");
764         return -1;
765     }
766 
767     *ring = msg.payload.state;
768 
769     return 0;
770 }
771 
772 static int vhost_set_vring_file(struct vhost_dev *dev,
773                                 VhostUserRequest request,
774                                 struct vhost_vring_file *file)
775 {
776     int fds[VHOST_MEMORY_MAX_NREGIONS];
777     size_t fd_num = 0;
778     VhostUserMsg msg = {
779         .hdr.request = request,
780         .hdr.flags = VHOST_USER_VERSION,
781         .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
782         .hdr.size = sizeof(msg.payload.u64),
783     };
784 
785     if (ioeventfd_enabled() && file->fd > 0) {
786         fds[fd_num++] = file->fd;
787     } else {
788         msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
789     }
790 
791     if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
792         return -1;
793     }
794 
795     return 0;
796 }
797 
/* VHOST_USER_SET_VRING_KICK: pass the guest-notification eventfd. */
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}
803 
/* VHOST_USER_SET_VRING_CALL: pass the interrupt (call) eventfd. */
static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}
809 
810 static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
811 {
812     VhostUserMsg msg = {
813         .hdr.request = request,
814         .hdr.flags = VHOST_USER_VERSION,
815         .payload.u64 = u64,
816         .hdr.size = sizeof(msg.payload.u64),
817     };
818 
819     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
820         return -1;
821     }
822 
823     return 0;
824 }
825 
/* VHOST_USER_SET_FEATURES: commit the negotiated virtio feature bits. */
static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}
831 
/* VHOST_USER_SET_PROTOCOL_FEATURES: commit the protocol feature bits. */
static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}
837 
838 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
839 {
840     VhostUserMsg msg = {
841         .hdr.request = request,
842         .hdr.flags = VHOST_USER_VERSION,
843     };
844 
845     if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
846         return 0;
847     }
848 
849     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
850         return -1;
851     }
852 
853     if (vhost_user_read(dev, &msg) < 0) {
854         return -1;
855     }
856 
857     if (msg.hdr.request != request) {
858         error_report("Received unexpected msg type. Expected %d received %d",
859                      request, msg.hdr.request);
860         return -1;
861     }
862 
863     if (msg.hdr.size != sizeof(msg.payload.u64)) {
864         error_report("Received bad msg size.");
865         return -1;
866     }
867 
868     *u64 = msg.payload.u64;
869 
870     return 0;
871 }
872 
/* VHOST_USER_GET_FEATURES: query the slave's virtio feature bits. */
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
}
877 
878 static int vhost_user_set_owner(struct vhost_dev *dev)
879 {
880     VhostUserMsg msg = {
881         .hdr.request = VHOST_USER_SET_OWNER,
882         .hdr.flags = VHOST_USER_VERSION,
883     };
884 
885     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
886         return -1;
887     }
888 
889     return 0;
890 }
891 
892 static int vhost_user_reset_device(struct vhost_dev *dev)
893 {
894     VhostUserMsg msg = {
895         .hdr.flags = VHOST_USER_VERSION,
896     };
897 
898     msg.hdr.request = virtio_has_feature(dev->protocol_features,
899                                          VHOST_USER_PROTOCOL_F_RESET_DEVICE)
900         ? VHOST_USER_RESET_DEVICE
901         : VHOST_USER_RESET_OWNER;
902 
903     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
904         return -1;
905     }
906 
907     return 0;
908 }
909 
910 static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
911 {
912     int ret = -1;
913 
914     if (!dev->config_ops) {
915         return -1;
916     }
917 
918     if (dev->config_ops->vhost_dev_config_notifier) {
919         ret = dev->config_ops->vhost_dev_config_notifier(dev);
920     }
921 
922     return ret;
923 }
924 
/*
 * Handle VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: map one page supplied
 * by the slave (@fd at area->offset) and register it with virtio as the
 * queue's host notifier memory region.
 *
 * area->u64 carries the queue index in its low bits; when
 * VHOST_USER_VRING_NOFD_MASK is set, only the existing mapping is torn
 * down.  Returns 0 on success, -1 on failure.
 */
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    /* Reject the request unless negotiated and the queue index is valid */
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    /* Tear down any previous mapping for this queue first */
    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    /* NOFD means "remove the notifier" - nothing more to do */
    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        /* Registration failed: drop the mapping we just created */
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}
984 
985 static void slave_read(void *opaque)
986 {
987     struct vhost_dev *dev = opaque;
988     struct vhost_user *u = dev->opaque;
989     VhostUserHeader hdr = { 0, };
990     VhostUserPayload payload = { 0, };
991     int size, ret = 0;
992     struct iovec iov;
993     struct msghdr msgh;
994     int fd[VHOST_USER_SLAVE_MAX_FDS];
995     char control[CMSG_SPACE(sizeof(fd))];
996     struct cmsghdr *cmsg;
997     int i, fdsize = 0;
998 
999     memset(&msgh, 0, sizeof(msgh));
1000     msgh.msg_iov = &iov;
1001     msgh.msg_iovlen = 1;
1002     msgh.msg_control = control;
1003     msgh.msg_controllen = sizeof(control);
1004 
1005     memset(fd, -1, sizeof(fd));
1006 
1007     /* Read header */
1008     iov.iov_base = &hdr;
1009     iov.iov_len = VHOST_USER_HDR_SIZE;
1010 
1011     do {
1012         size = recvmsg(u->slave_fd, &msgh, 0);
1013     } while (size < 0 && (errno == EINTR || errno == EAGAIN));
1014 
1015     if (size != VHOST_USER_HDR_SIZE) {
1016         error_report("Failed to read from slave.");
1017         goto err;
1018     }
1019 
1020     if (msgh.msg_flags & MSG_CTRUNC) {
1021         error_report("Truncated message.");
1022         goto err;
1023     }
1024 
1025     for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
1026          cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
1027             if (cmsg->cmsg_level == SOL_SOCKET &&
1028                 cmsg->cmsg_type == SCM_RIGHTS) {
1029                     fdsize = cmsg->cmsg_len - CMSG_LEN(0);
1030                     memcpy(fd, CMSG_DATA(cmsg), fdsize);
1031                     break;
1032             }
1033     }
1034 
1035     if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
1036         error_report("Failed to read msg header."
1037                 " Size %d exceeds the maximum %zu.", hdr.size,
1038                 VHOST_USER_PAYLOAD_SIZE);
1039         goto err;
1040     }
1041 
1042     /* Read payload */
1043     do {
1044         size = read(u->slave_fd, &payload, hdr.size);
1045     } while (size < 0 && (errno == EINTR || errno == EAGAIN));
1046 
1047     if (size != hdr.size) {
1048         error_report("Failed to read payload from slave.");
1049         goto err;
1050     }
1051 
1052     switch (hdr.request) {
1053     case VHOST_USER_SLAVE_IOTLB_MSG:
1054         ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
1055         break;
1056     case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
1057         ret = vhost_user_slave_handle_config_change(dev);
1058         break;
1059     case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
1060         ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
1061                                                           fd[0]);
1062         break;
1063     default:
1064         error_report("Received unexpected msg type.");
1065         ret = -EINVAL;
1066     }
1067 
1068     /* Close the remaining file descriptors. */
1069     for (i = 0; i < fdsize; i++) {
1070         if (fd[i] != -1) {
1071             close(fd[i]);
1072         }
1073     }
1074 
1075     /*
1076      * REPLY_ACK feature handling. Other reply types has to be managed
1077      * directly in their request handlers.
1078      */
1079     if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
1080         struct iovec iovec[2];
1081 
1082 
1083         hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
1084         hdr.flags |= VHOST_USER_REPLY_MASK;
1085 
1086         payload.u64 = !!ret;
1087         hdr.size = sizeof(payload.u64);
1088 
1089         iovec[0].iov_base = &hdr;
1090         iovec[0].iov_len = VHOST_USER_HDR_SIZE;
1091         iovec[1].iov_base = &payload;
1092         iovec[1].iov_len = hdr.size;
1093 
1094         do {
1095             size = writev(u->slave_fd, iovec, ARRAY_SIZE(iovec));
1096         } while (size < 0 && (errno == EINTR || errno == EAGAIN));
1097 
1098         if (size != VHOST_USER_HDR_SIZE + hdr.size) {
1099             error_report("Failed to send msg reply to slave.");
1100             goto err;
1101         }
1102     }
1103 
1104     return;
1105 
1106 err:
1107     qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
1108     close(u->slave_fd);
1109     u->slave_fd = -1;
1110     for (i = 0; i < fdsize; i++) {
1111         if (fd[i] != -1) {
1112             close(fd[i]);
1113         }
1114     }
1115     return;
1116 }
1117 
/*
 * Establish the slave (backend-to-QEMU) request channel: create a
 * socketpair, keep one end locally (u->slave_fd, serviced by
 * slave_read) and hand the other end to the backend with
 * VHOST_USER_SET_SLAVE_REQ_FD.  A no-op when the SLAVE_REQ protocol
 * feature was not negotiated.  Returns 0 on success, negative on error.
 */
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    /* Install the read handler before telling the backend about the fd. */
    u->slave_fd = sv[0];
    qemu_set_fd_handler(u->slave_fd, slave_read, NULL, dev);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    /* Pass the backend's end of the socketpair as an ancillary fd. */
    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    /* Our copy of the backend's end is no longer needed in any case. */
    close(sv[1]);
    if (ret) {
        /* Setup failed: dismantle the local end as well. */
        qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
        close(u->slave_fd);
        u->slave_fd = -1;
    }

    return ret;
}
1165 
1166 #ifdef CONFIG_LINUX
1167 /*
1168  * Called back from the postcopy fault thread when a fault is received on our
1169  * ufd.
1170  * TODO: This is Linux specific
1171  */
/*
 * Resolve a userfault reported on the vhost-user ufd: find which vhost
 * memory region contains the faulting client address, translate it to a
 * RAMBlock offset and request that shared page from the migration source.
 * Returns the result of postcopy_request_shared_page(), or -1 when the
 * address falls in no known region.
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    /* Only regions with recorded client bases can be translated. */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}
1205 
/*
 * Wake any client thread blocked on a page that has now arrived: map the
 * (RAMBlock, offset) pair back to the client's address space and issue a
 * wake on the shared ufd.  Returns 0 when there is nothing to do (no
 * vhost_user state, or the page lies in no tracked region).
 */
static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }
    /* Translate the offset into an address in the clients address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    /* Not one of ours: some other PostCopyFD may cover it. */
    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
1234 #endif
1235 
1236 /*
1237  * Called at the start of an inbound postcopy on reception of the
1238  * 'advise' command.
1239  */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return -1;
    }

    /* The reply must echo the request type with an empty payload. */
    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                     VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -1;
    }
    /* The backend passes its userfaultfd back as an ancillary fd. */
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -1;
    }
    qemu_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -1;
#endif
}
1291 
1292 /*
1293  * Called at the switch to postcopy on reception of the 'listen' command.
1294  */
1295 static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
1296 {
1297     struct vhost_user *u = dev->opaque;
1298     int ret;
1299     VhostUserMsg msg = {
1300         .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
1301         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1302     };
1303     u->postcopy_listen = true;
1304     trace_vhost_user_postcopy_listen();
1305     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1306         error_setg(errp, "Failed to send postcopy_listen to vhost");
1307         return -1;
1308     }
1309 
1310     ret = process_message_reply(dev, &msg);
1311     if (ret) {
1312         error_setg(errp, "Failed to receive reply to postcopy_listen");
1313         return ret;
1314     }
1315 
1316     return 0;
1317 }
1318 
1319 /*
1320  * Called at the end of postcopy
1321  */
1322 static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
1323 {
1324     VhostUserMsg msg = {
1325         .hdr.request = VHOST_USER_POSTCOPY_END,
1326         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1327     };
1328     int ret;
1329     struct vhost_user *u = dev->opaque;
1330 
1331     trace_vhost_user_postcopy_end_entry();
1332     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1333         error_setg(errp, "Failed to send postcopy_end to vhost");
1334         return -1;
1335     }
1336 
1337     ret = process_message_reply(dev, &msg);
1338     if (ret) {
1339         error_setg(errp, "Failed to receive reply to postcopy_end");
1340         return ret;
1341     }
1342     postcopy_unregister_shared_ufd(&u->postcopy_fd);
1343     close(u->postcopy_fd.fd);
1344     u->postcopy_fd.handler = NULL;
1345 
1346     trace_vhost_user_postcopy_end_exit();
1347 
1348     return 0;
1349 }
1350 
1351 static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
1352                                         void *opaque)
1353 {
1354     struct PostcopyNotifyData *pnd = opaque;
1355     struct vhost_user *u = container_of(notifier, struct vhost_user,
1356                                          postcopy_notifier);
1357     struct vhost_dev *dev = u->dev;
1358 
1359     switch (pnd->reason) {
1360     case POSTCOPY_NOTIFY_PROBE:
1361         if (!virtio_has_feature(dev->protocol_features,
1362                                 VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
1363             /* TODO: Get the device name into this error somehow */
1364             error_setg(pnd->errp,
1365                        "vhost-user backend not capable of postcopy");
1366             return -ENOENT;
1367         }
1368         break;
1369 
1370     case POSTCOPY_NOTIFY_INBOUND_ADVISE:
1371         return vhost_user_postcopy_advise(dev, pnd->errp);
1372 
1373     case POSTCOPY_NOTIFY_INBOUND_LISTEN:
1374         return vhost_user_postcopy_listen(dev, pnd->errp);
1375 
1376     case POSTCOPY_NOTIFY_INBOUND_END:
1377         return vhost_user_postcopy_end(dev, pnd->errp);
1378 
1379     default:
1380         /* We ignore notifications we don't know */
1381         break;
1382     }
1383 
1384     return 0;
1385 }
1386 
/*
 * Backend init hook: allocate per-device vhost_user state, negotiate
 * base and protocol features with the backend, validate feature
 * combinations, set up the slave request channel and register the
 * postcopy notifier.  @opaque is the VhostUserState shared by all
 * devices on this chardev.  Returns 0 on success, negative on error;
 * on error the caller invokes vhost_user_backend_cleanup() to free u.
 */
static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque)
{
    uint64_t features, protocol_features;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = opaque;
    u->slave_fd = -1;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return err;
    }

    /* Protocol features only exist when the backend advertises them. */
    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return err;
        }

        /* Only keep the features this QEMU knows about. */
        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
            /* Don't acknowledge CONFIG feature if device doesn't support it */
            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
        } else if (!(protocol_features &
                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
            error_report("Device expects VHOST_USER_PROTOCOL_F_CONFIG "
                    "but backend does not support it.");
            return -1;
        }

        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return err;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return err;
            }
        }

        /* IOTLB updates need the slave channel plus acknowledged replies. */
        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                 virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_report("IOMMU support requires reply-ack and "
                         "slave-req protocol features.");
            return -1;
        }
    }

    /* Dirty logging via shared memory is required for live migration. */
    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    err = vhost_setup_slave_channel(dev);
    if (err < 0) {
        return err;
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}
1471 
1472 static int vhost_user_backend_cleanup(struct vhost_dev *dev)
1473 {
1474     struct vhost_user *u;
1475 
1476     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1477 
1478     u = dev->opaque;
1479     if (u->postcopy_notifier.notify) {
1480         postcopy_remove_notifier(&u->postcopy_notifier);
1481         u->postcopy_notifier.notify = NULL;
1482     }
1483     u->postcopy_listen = false;
1484     if (u->postcopy_fd.handler) {
1485         postcopy_unregister_shared_ufd(&u->postcopy_fd);
1486         close(u->postcopy_fd.fd);
1487         u->postcopy_fd.handler = NULL;
1488     }
1489     if (u->slave_fd >= 0) {
1490         qemu_set_fd_handler(u->slave_fd, NULL, NULL, NULL);
1491         close(u->slave_fd);
1492         u->slave_fd = -1;
1493     }
1494     g_free(u->region_rb);
1495     u->region_rb = NULL;
1496     g_free(u->region_rb_offset);
1497     u->region_rb_offset = NULL;
1498     u->region_rb_len = 0;
1499     g_free(u);
1500     dev->opaque = 0;
1501 
1502     return 0;
1503 }
1504 
1505 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
1506 {
1507     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
1508 
1509     return idx;
1510 }
1511 
/* Maximum number of memory regions a vhost-user backend can accept. */
static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    return VHOST_MEMORY_MAX_NREGIONS;
}
1516 
1517 static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
1518 {
1519     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1520 
1521     return virtio_has_feature(dev->protocol_features,
1522                               VHOST_USER_PROTOCOL_F_LOG_SHMFD);
1523 }
1524 
1525 static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
1526 {
1527     VhostUserMsg msg = { };
1528 
1529     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1530 
1531     /* If guest supports GUEST_ANNOUNCE do nothing */
1532     if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
1533         return 0;
1534     }
1535 
1536     /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
1537     if (virtio_has_feature(dev->protocol_features,
1538                            VHOST_USER_PROTOCOL_F_RARP)) {
1539         msg.hdr.request = VHOST_USER_SEND_RARP;
1540         msg.hdr.flags = VHOST_USER_VERSION;
1541         memcpy((char *)&msg.payload.u64, mac_addr, 6);
1542         msg.hdr.size = sizeof(msg.payload.u64);
1543 
1544         return vhost_user_write(dev, &msg, NULL, 0);
1545     }
1546     return -1;
1547 }
1548 
1549 static bool vhost_user_can_merge(struct vhost_dev *dev,
1550                                  uint64_t start1, uint64_t size1,
1551                                  uint64_t start2, uint64_t size2)
1552 {
1553     ram_addr_t offset;
1554     int mfd, rfd;
1555     MemoryRegion *mr;
1556 
1557     mr = memory_region_from_host((void *)(uintptr_t)start1, &offset);
1558     mfd = memory_region_get_fd(mr);
1559 
1560     mr = memory_region_from_host((void *)(uintptr_t)start2, &offset);
1561     rfd = memory_region_get_fd(mr);
1562 
1563     return mfd == rfd;
1564 }
1565 
1566 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
1567 {
1568     VhostUserMsg msg;
1569     bool reply_supported = virtio_has_feature(dev->protocol_features,
1570                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
1571 
1572     if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
1573         return 0;
1574     }
1575 
1576     msg.hdr.request = VHOST_USER_NET_SET_MTU;
1577     msg.payload.u64 = mtu;
1578     msg.hdr.size = sizeof(msg.payload.u64);
1579     msg.hdr.flags = VHOST_USER_VERSION;
1580     if (reply_supported) {
1581         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1582     }
1583 
1584     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1585         return -1;
1586     }
1587 
1588     /* If reply_ack supported, slave has to ack specified MTU is valid */
1589     if (reply_supported) {
1590         return process_message_reply(dev, &msg);
1591     }
1592 
1593     return 0;
1594 }
1595 
1596 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
1597                                             struct vhost_iotlb_msg *imsg)
1598 {
1599     VhostUserMsg msg = {
1600         .hdr.request = VHOST_USER_IOTLB_MSG,
1601         .hdr.size = sizeof(msg.payload.iotlb),
1602         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1603         .payload.iotlb = *imsg,
1604     };
1605 
1606     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1607         return -EFAULT;
1608     }
1609 
1610     return process_message_reply(dev, &msg);
1611 }
1612 
1613 
/*
 * Intentionally empty: IOTLB messages arrive on the shared vhost-user
 * socket, so there is no separate channel to enable or disable here.
 */
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}
1618 
/*
 * Fetch @config_len bytes of the device's virtio config space from the
 * backend via VHOST_USER_GET_CONFIG.  Requires the CONFIG protocol
 * feature and config_len <= VHOST_USER_MAX_CONFIG_SIZE.  Returns 0 on
 * success with @config filled in, -1 on any protocol error.
 */
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    /* Request the whole region starting at offset 0. */
    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* Validate that the reply matches what was asked for. */
    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_report("Received bad msg size.");
        return -1;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}
1662 
1663 static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
1664                                  uint32_t offset, uint32_t size, uint32_t flags)
1665 {
1666     uint8_t *p;
1667     bool reply_supported = virtio_has_feature(dev->protocol_features,
1668                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
1669 
1670     VhostUserMsg msg = {
1671         .hdr.request = VHOST_USER_SET_CONFIG,
1672         .hdr.flags = VHOST_USER_VERSION,
1673         .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
1674     };
1675 
1676     if (!virtio_has_feature(dev->protocol_features,
1677                 VHOST_USER_PROTOCOL_F_CONFIG)) {
1678         return -1;
1679     }
1680 
1681     if (reply_supported) {
1682         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1683     }
1684 
1685     if (size > VHOST_USER_MAX_CONFIG_SIZE) {
1686         return -1;
1687     }
1688 
1689     msg.payload.config.offset = offset,
1690     msg.payload.config.size = size,
1691     msg.payload.config.flags = flags,
1692     p = msg.payload.config.region;
1693     memcpy(p, data, size);
1694 
1695     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1696         return -1;
1697     }
1698 
1699     if (reply_supported) {
1700         return process_message_reply(dev, &msg);
1701     }
1702 
1703     return 0;
1704 }
1705 
/*
 * Create a crypto session on the backend: send the symmetric-session
 * parameters (plus cipher/auth keys) and read back the backend-assigned
 * session id into *session_id.  Requires the CRYPTO_SESSION protocol
 * feature.  Returns 0 on success, -1 on any failure.
 */
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    /* Session parameters first, then the raw key material. */
    memcpy(&msg.payload.session.session_setup_data, sess_info,
              sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() return -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() return -1, create session failed");
        return -1;
    }

    /* Validate the reply before trusting the session id in it. */
    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    /* The backend signals failure with a negative session id. */
    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                              msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}
1766 
1767 static int
1768 vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
1769 {
1770     bool crypto_session = virtio_has_feature(dev->protocol_features,
1771                                        VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
1772     VhostUserMsg msg = {
1773         .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
1774         .hdr.flags = VHOST_USER_VERSION,
1775         .hdr.size = sizeof(msg.payload.u64),
1776     };
1777     msg.payload.u64 = session_id;
1778 
1779     if (!crypto_session) {
1780         error_report("vhost-user trying to send unhandled ioctl");
1781         return -1;
1782     }
1783 
1784     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1785         error_report("vhost_user_write() return -1, close session failed");
1786         return -1;
1787     }
1788 
1789     return 0;
1790 }
1791 
1792 static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
1793                                           MemoryRegionSection *section)
1794 {
1795     bool result;
1796 
1797     result = memory_region_get_fd(section->mr) >= 0;
1798 
1799     return result;
1800 }
1801 
/*
 * Obtain the inflight-descriptor tracking area from the backend:
 * send VHOST_USER_GET_INFLIGHT_FD, receive back a shmem fd plus its
 * size/offset, mmap it and record everything in @inflight.  A no-op
 * when the INFLIGHT_SHMFD protocol feature was not negotiated or the
 * backend reports a zero-sized area.  Returns 0 on success, -1 on
 * failure.  On success @inflight owns the fd and mapping.
 */
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* Validate the reply shape before touching its contents. */
    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    /* Backend may legitimately report no inflight area. */
    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    /* The shmem fd travels as ancillary data on the chardev. */
    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}
1870 
1871 static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
1872                                       struct vhost_inflight *inflight)
1873 {
1874     VhostUserMsg msg = {
1875         .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
1876         .hdr.flags = VHOST_USER_VERSION,
1877         .payload.inflight.mmap_size = inflight->size,
1878         .payload.inflight.mmap_offset = inflight->offset,
1879         .payload.inflight.num_queues = dev->nvqs,
1880         .payload.inflight.queue_size = inflight->queue_size,
1881         .hdr.size = sizeof(msg.payload.inflight),
1882     };
1883 
1884     if (!virtio_has_feature(dev->protocol_features,
1885                             VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
1886         return 0;
1887     }
1888 
1889     if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
1890         return -1;
1891     }
1892 
1893     return 0;
1894 }
1895 
1896 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
1897 {
1898     if (user->chr) {
1899         error_setg(errp, "Cannot initialize vhost-user state");
1900         return false;
1901     }
1902     user->chr = chr;
1903     return true;
1904 }
1905 
1906 void vhost_user_cleanup(VhostUserState *user)
1907 {
1908     int i;
1909 
1910     if (!user->chr) {
1911         return;
1912     }
1913 
1914     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
1915         if (user->notifier[i].addr) {
1916             object_unparent(OBJECT(&user->notifier[i].mr));
1917             munmap(user->notifier[i].addr, qemu_real_host_page_size);
1918             user->notifier[i].addr = NULL;
1919         }
1920     }
1921     user->chr = NULL;
1922 }
1923 
/*
 * Dispatch table registering this file's implementations with the
 * generic vhost layer for VHOST_BACKEND_TYPE_USER backends.  Each
 * callback maps a vhost operation onto the corresponding vhost-user
 * handler defined earlier in this file.
 */
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};
1958