xref: /openbmc/qemu/hw/virtio/vhost-user.c (revision 4e245a9e)
1 /*
2  * vhost-user
3  *
4  * Copyright (c) 2013 Virtual Open Systems Sarl.
5  *
6  * This work is licensed under the terms of the GNU GPL, version 2 or later.
7  * See the COPYING file in the top-level directory.
8  *
9  */
10 
11 #include "qemu/osdep.h"
12 #include "qapi/error.h"
13 #include "hw/virtio/vhost.h"
14 #include "hw/virtio/vhost-user.h"
15 #include "hw/virtio/vhost-backend.h"
16 #include "hw/virtio/virtio.h"
17 #include "hw/virtio/virtio-net.h"
18 #include "chardev/char-fe.h"
19 #include "io/channel-socket.h"
20 #include "sysemu/kvm.h"
21 #include "qemu/error-report.h"
22 #include "qemu/main-loop.h"
23 #include "qemu/sockets.h"
24 #include "sysemu/cryptodev.h"
25 #include "migration/migration.h"
26 #include "migration/postcopy-ram.h"
27 #include "trace.h"
28 #include "exec/ramblock.h"
29 
30 #include <sys/ioctl.h>
31 #include <sys/socket.h>
32 #include <sys/un.h>
33 
34 #include "standard-headers/linux/vhost_types.h"
35 
36 #ifdef CONFIG_LINUX
37 #include <linux/userfaultfd.h>
38 #endif
39 
40 #define VHOST_MEMORY_BASELINE_NREGIONS    8
41 #define VHOST_USER_F_PROTOCOL_FEATURES 30
42 #define VHOST_USER_SLAVE_MAX_FDS     8
43 
44 /*
45  * Set the maximum number of RAM slots supported to
46  * the maximum number supported by the target
47  * hardware platform.
48  */
49 #if defined(TARGET_X86) || defined(TARGET_X86_64) || \
50     defined(TARGET_ARM) || defined(TARGET_ARM_64)
51 #include "hw/acpi/acpi.h"
52 #define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS
53 
54 #elif defined(TARGET_PPC) || defined(TARGET_PPC64)
55 #include "hw/ppc/spapr.h"
56 #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
57 
58 #else
59 #define VHOST_USER_MAX_RAM_SLOTS 512
60 #endif
61 
62 /*
63  * Maximum size of virtio device config space
64  */
65 #define VHOST_USER_MAX_CONFIG_SIZE 256
66 
67 enum VhostUserProtocolFeature {
68     VHOST_USER_PROTOCOL_F_MQ = 0,
69     VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
70     VHOST_USER_PROTOCOL_F_RARP = 2,
71     VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
72     VHOST_USER_PROTOCOL_F_NET_MTU = 4,
73     VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
74     VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
75     VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
76     VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
77     VHOST_USER_PROTOCOL_F_CONFIG = 9,
78     VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
79     VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
80     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
81     VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
82     /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
83     VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
84     VHOST_USER_PROTOCOL_F_MAX
85 };
86 
87 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
88 
89 typedef enum VhostUserRequest {
90     VHOST_USER_NONE = 0,
91     VHOST_USER_GET_FEATURES = 1,
92     VHOST_USER_SET_FEATURES = 2,
93     VHOST_USER_SET_OWNER = 3,
94     VHOST_USER_RESET_OWNER = 4,
95     VHOST_USER_SET_MEM_TABLE = 5,
96     VHOST_USER_SET_LOG_BASE = 6,
97     VHOST_USER_SET_LOG_FD = 7,
98     VHOST_USER_SET_VRING_NUM = 8,
99     VHOST_USER_SET_VRING_ADDR = 9,
100     VHOST_USER_SET_VRING_BASE = 10,
101     VHOST_USER_GET_VRING_BASE = 11,
102     VHOST_USER_SET_VRING_KICK = 12,
103     VHOST_USER_SET_VRING_CALL = 13,
104     VHOST_USER_SET_VRING_ERR = 14,
105     VHOST_USER_GET_PROTOCOL_FEATURES = 15,
106     VHOST_USER_SET_PROTOCOL_FEATURES = 16,
107     VHOST_USER_GET_QUEUE_NUM = 17,
108     VHOST_USER_SET_VRING_ENABLE = 18,
109     VHOST_USER_SEND_RARP = 19,
110     VHOST_USER_NET_SET_MTU = 20,
111     VHOST_USER_SET_SLAVE_REQ_FD = 21,
112     VHOST_USER_IOTLB_MSG = 22,
113     VHOST_USER_SET_VRING_ENDIAN = 23,
114     VHOST_USER_GET_CONFIG = 24,
115     VHOST_USER_SET_CONFIG = 25,
116     VHOST_USER_CREATE_CRYPTO_SESSION = 26,
117     VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
118     VHOST_USER_POSTCOPY_ADVISE  = 28,
119     VHOST_USER_POSTCOPY_LISTEN  = 29,
120     VHOST_USER_POSTCOPY_END     = 30,
121     VHOST_USER_GET_INFLIGHT_FD = 31,
122     VHOST_USER_SET_INFLIGHT_FD = 32,
123     VHOST_USER_GPU_SET_SOCKET = 33,
124     VHOST_USER_RESET_DEVICE = 34,
125     /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
126     VHOST_USER_GET_MAX_MEM_SLOTS = 36,
127     VHOST_USER_ADD_MEM_REG = 37,
128     VHOST_USER_REM_MEM_REG = 38,
129     VHOST_USER_MAX
130 } VhostUserRequest;
131 
132 typedef enum VhostUserSlaveRequest {
133     VHOST_USER_SLAVE_NONE = 0,
134     VHOST_USER_SLAVE_IOTLB_MSG = 1,
135     VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
136     VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
137     VHOST_USER_SLAVE_MAX
138 }  VhostUserSlaveRequest;
139 
140 typedef struct VhostUserMemoryRegion {
141     uint64_t guest_phys_addr;
142     uint64_t memory_size;
143     uint64_t userspace_addr;
144     uint64_t mmap_offset;
145 } VhostUserMemoryRegion;
146 
147 typedef struct VhostUserMemory {
148     uint32_t nregions;
149     uint32_t padding;
150     VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
151 } VhostUserMemory;
152 
153 typedef struct VhostUserMemRegMsg {
154     uint64_t padding;
155     VhostUserMemoryRegion region;
156 } VhostUserMemRegMsg;
157 
158 typedef struct VhostUserLog {
159     uint64_t mmap_size;
160     uint64_t mmap_offset;
161 } VhostUserLog;
162 
163 typedef struct VhostUserConfig {
164     uint32_t offset;
165     uint32_t size;
166     uint32_t flags;
167     uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
168 } VhostUserConfig;
169 
170 #define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
171 #define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
172 
173 typedef struct VhostUserCryptoSession {
174     /* session id for success, -1 on errors */
175     int64_t session_id;
176     CryptoDevBackendSymSessionInfo session_setup_data;
177     uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
178     uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
179 } VhostUserCryptoSession;
180 
181 static VhostUserConfig c __attribute__ ((unused));
182 #define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
183                                    + sizeof(c.size) \
184                                    + sizeof(c.flags))
185 
186 typedef struct VhostUserVringArea {
187     uint64_t u64;
188     uint64_t size;
189     uint64_t offset;
190 } VhostUserVringArea;
191 
192 typedef struct VhostUserInflight {
193     uint64_t mmap_size;
194     uint64_t mmap_offset;
195     uint16_t num_queues;
196     uint16_t queue_size;
197 } VhostUserInflight;
198 
199 typedef struct {
200     VhostUserRequest request;
201 
202 #define VHOST_USER_VERSION_MASK     (0x3)
203 #define VHOST_USER_REPLY_MASK       (0x1<<2)
204 #define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
205     uint32_t flags;
206     uint32_t size; /* the following payload size */
207 } QEMU_PACKED VhostUserHeader;
208 
209 typedef union {
210 #define VHOST_USER_VRING_IDX_MASK   (0xff)
211 #define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
212         uint64_t u64;
213         struct vhost_vring_state state;
214         struct vhost_vring_addr addr;
215         VhostUserMemory memory;
216         VhostUserMemRegMsg mem_reg;
217         VhostUserLog log;
218         struct vhost_iotlb_msg iotlb;
219         VhostUserConfig config;
220         VhostUserCryptoSession session;
221         VhostUserVringArea area;
222         VhostUserInflight inflight;
223 } VhostUserPayload;
224 
225 typedef struct VhostUserMsg {
226     VhostUserHeader hdr;
227     VhostUserPayload payload;
228 } QEMU_PACKED VhostUserMsg;
229 
230 static VhostUserMsg m __attribute__ ((unused));
231 #define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))
232 
233 #define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))
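
/*
 * Framing note: on the wire each message is a VhostUserHeader (request,
 * flags, payload size) immediately followed by hdr.size bytes of payload,
 * which is how vhost_user_read_header() and vhost_user_read_cb() below
 * reassemble it.
 */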
234 
235 /* The version of the protocol we support */
236 #define VHOST_USER_VERSION    (0x1)
237 
238 struct vhost_user {
239     struct vhost_dev *dev;
240     /* Shared between vhost devs of the same virtio device */
241     VhostUserState *user;
242     QIOChannel *slave_ioc;
243     GSource *slave_src;
244     NotifierWithReturn postcopy_notifier;
245     struct PostCopyFD  postcopy_fd;
246     uint64_t           postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
247     /* Length of the region_rb and region_rb_offset arrays */
248     size_t             region_rb_len;
249     /* RAMBlock associated with a given region */
250     RAMBlock         **region_rb;
251     /* The offset from the start of the RAMBlock to the start of the
252      * vhost region.
253      */
254     ram_addr_t        *region_rb_offset;
255 
256     /* True once we've entered postcopy_listen */
257     bool               postcopy_listen;
258 
259     /* Our current regions */
260     int num_shadow_regions;
261     struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
262 };
263 
264 struct scrub_regions {
265     struct vhost_memory_region *region;
266     int reg_idx;
267     int fd_idx;
268 };
269 
270 static bool ioeventfd_enabled(void)
271 {
272     return !kvm_enabled() || kvm_eventfds_enabled();
273 }
274 
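/*
 * Read and validate a message header from the backend channel. Returns 0
 * on success, or a negative errno on a short read or unexpected flags.
 */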
275 static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
276 {
277     struct vhost_user *u = dev->opaque;
278     CharBackend *chr = u->user->chr;
279     uint8_t *p = (uint8_t *) msg;
280     int r, size = VHOST_USER_HDR_SIZE;
281 
282     r = qemu_chr_fe_read_all(chr, p, size);
283     if (r != size) {
284         int saved_errno = errno;
285         error_report("Failed to read msg header. Read %d instead of %d."
286                      " Original request %d.", r, size, msg->hdr.request);
287         return r < 0 ? -saved_errno : -EIO;
288     }
289 
290     /* validate received flags */
291     if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
292         error_report("Failed to read msg header."
293                 " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
294                 VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
295         return -EPROTO;
296     }
297 
298     return 0;
299 }
300 
301 struct vhost_user_read_cb_data {
302     struct vhost_dev *dev;
303     VhostUserMsg *msg;
304     GMainLoop *loop;
305     int ret;
306 };
307 
308 static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
309                                    gpointer opaque)
310 {
311     struct vhost_user_read_cb_data *data = opaque;
312     struct vhost_dev *dev = data->dev;
313     VhostUserMsg *msg = data->msg;
314     struct vhost_user *u = dev->opaque;
315     CharBackend *chr = u->user->chr;
316     uint8_t *p = (uint8_t *) msg;
317     int r, size;
318 
319     r = vhost_user_read_header(dev, msg);
320     if (r < 0) {
321         data->ret = r;
322         goto end;
323     }
324 
325     /* validate message size is sane */
326     if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
327         error_report("Failed to read msg header."
328                 " Size %d exceeds the maximum %zu.", msg->hdr.size,
329                 VHOST_USER_PAYLOAD_SIZE);
330         data->ret = -EPROTO;
331         goto end;
332     }
333 
334     if (msg->hdr.size) {
335         p += VHOST_USER_HDR_SIZE;
336         size = msg->hdr.size;
337         r = qemu_chr_fe_read_all(chr, p, size);
338         if (r != size) {
339             int saved_errno = errno;
340             error_report("Failed to read msg payload."
341                          " Read %d instead of %d.", r, msg->hdr.size);
342             data->ret = r < 0 ? -saved_errno : -EIO;
343             goto end;
344         }
345     }
346 
347 end:
348     g_main_loop_quit(data->loop);
349     return G_SOURCE_REMOVE;
350 }
351 
352 static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
353                            gpointer opaque);
354 
355 /*
356  * This updates the read handler to use a new event loop context.
357  * Event sources are removed from the previous context: this ensures
358  * that events detected in the previous context are purged. They will
359  * be re-detected and processed in the new context.
360  */
361 static void slave_update_read_handler(struct vhost_dev *dev,
362                                       GMainContext *ctxt)
363 {
364     struct vhost_user *u = dev->opaque;
365 
366     if (!u->slave_ioc) {
367         return;
368     }
369 
370     if (u->slave_src) {
371         g_source_destroy(u->slave_src);
372         g_source_unref(u->slave_src);
373     }
374 
375     u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
376                                                 G_IO_IN | G_IO_HUP,
377                                                 slave_read, dev, NULL,
378                                                 ctxt);
379 }
380 
381 static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
382 {
383     struct vhost_user *u = dev->opaque;
384     CharBackend *chr = u->user->chr;
385     GMainContext *prev_ctxt = chr->chr->gcontext;
386     GMainContext *ctxt = g_main_context_new();
387     GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
388     struct vhost_user_read_cb_data data = {
389         .dev = dev,
390         .loop = loop,
391         .msg = msg,
392         .ret = 0
393     };
394 
395     /*
396      * We want to be able to monitor the slave channel fd while waiting
397      * for chr I/O. This requires an event loop, but we can't nest the
398  * one to which chr is currently attached: its fd handlers might not
399      * be prepared for re-entrancy. So we create a new one and switch chr
400      * to use it.
401      */
402     slave_update_read_handler(dev, ctxt);
403     qemu_chr_be_update_read_handlers(chr->chr, ctxt);
404     qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
405 
406     g_main_loop_run(loop);
407 
408     /*
409      * Restore the previous event loop context. This also destroys/recreates
410  * event sources: this guarantees that all pending events in the original
411      * context that have been processed by the nested loop are purged.
412      */
413     qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
414     slave_update_read_handler(dev, NULL);
415 
416     g_main_loop_unref(loop);
417     g_main_context_unref(ctxt);
418 
419     return data.ret;
420 }
421 
422 static int process_message_reply(struct vhost_dev *dev,
423                                  const VhostUserMsg *msg)
424 {
425     int ret;
426     VhostUserMsg msg_reply;
427 
428     if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
429         return 0;
430     }
431 
432     ret = vhost_user_read(dev, &msg_reply);
433     if (ret < 0) {
434         return ret;
435     }
436 
437     if (msg_reply.hdr.request != msg->hdr.request) {
438         error_report("Received unexpected msg type. "
439                      "Expected %d received %d",
440                      msg->hdr.request, msg_reply.hdr.request);
441         return -EPROTO;
442     }
443 
444     return msg_reply.payload.u64 ? -EIO : 0;
445 }
446 
447 static bool vhost_user_one_time_request(VhostUserRequest request)
448 {
449     switch (request) {
450     case VHOST_USER_SET_OWNER:
451     case VHOST_USER_RESET_OWNER:
452     case VHOST_USER_SET_MEM_TABLE:
453     case VHOST_USER_GET_QUEUE_NUM:
454     case VHOST_USER_NET_SET_MTU:
455         return true;
456     default:
457         return false;
458     }
459 }
460 
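/*
 * Send a request, with any attached file descriptors, to the backend over
 * the vhost-user channel. One-time requests (see
 * vhost_user_one_time_request()) are only sent for the first queue
 * (dev->vq_index == 0) and silently skipped otherwise.
 */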
461 /* most non-init callers ignore the error */
462 static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
463                             int *fds, int fd_num)
464 {
465     struct vhost_user *u = dev->opaque;
466     CharBackend *chr = u->user->chr;
467     int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
468 
469     /*
470      * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
471      * we only need to send it once, the first time. Any later such
472      * request is simply ignored.
473      */
474     if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
475         msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
476         return 0;
477     }
478 
479     if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
480         error_report("Failed to set msg fds.");
481         return -EINVAL;
482     }
483 
484     ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
485     if (ret != size) {
486         int saved_errno = errno;
487         error_report("Failed to write msg."
488                      " Wrote %d instead of %d.", ret, size);
489         return ret < 0 ? -saved_errno : -EIO;
490     }
491 
492     trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);
493 
494     return 0;
495 }
496 
497 int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
498 {
499     VhostUserMsg msg = {
500         .hdr.request = VHOST_USER_GPU_SET_SOCKET,
501         .hdr.flags = VHOST_USER_VERSION,
502     };
503 
504     return vhost_user_write(dev, &msg, &fd, 1);
505 }
506 
507 static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
508                                    struct vhost_log *log)
509 {
510     int fds[VHOST_USER_MAX_RAM_SLOTS];
511     size_t fd_num = 0;
512     bool shmfd = virtio_has_feature(dev->protocol_features,
513                                     VHOST_USER_PROTOCOL_F_LOG_SHMFD);
514     int ret;
515     VhostUserMsg msg = {
516         .hdr.request = VHOST_USER_SET_LOG_BASE,
517         .hdr.flags = VHOST_USER_VERSION,
518         .payload.log.mmap_size = log->size * sizeof(*(log->log)),
519         .payload.log.mmap_offset = 0,
520         .hdr.size = sizeof(msg.payload.log),
521     };
522 
523     if (shmfd && log->fd != -1) {
524         fds[fd_num++] = log->fd;
525     }
526 
527     ret = vhost_user_write(dev, &msg, fds, fd_num);
528     if (ret < 0) {
529         return ret;
530     }
531 
532     if (shmfd) {
533         msg.hdr.size = 0;
534         ret = vhost_user_read(dev, &msg);
535         if (ret < 0) {
536             return ret;
537         }
538 
539         if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
540             error_report("Received unexpected msg type. "
541                          "Expected %d received %d",
542                          VHOST_USER_SET_LOG_BASE, msg.hdr.request);
543             return -EPROTO;
544         }
545     }
546 
547     trace_vhost_user_read(msg.hdr.request, msg.hdr.flags);
548 
549     return 0;
550 }
551 
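/*
 * Translate a vhost userspace address back to its MemoryRegion, returning
 * the offset into that region's RAM block and, via *fd, the backing file
 * descriptor (if any).
 */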
552 static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
553                                             int *fd)
554 {
555     MemoryRegion *mr;
556 
557     assert((uintptr_t)addr == addr);
558     mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
559     *fd = memory_region_get_fd(mr);
560 
561     return mr;
562 }
563 
564 static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
565                                        struct vhost_memory_region *src,
566                                        uint64_t mmap_offset)
567 {
568     assert(src != NULL && dst != NULL);
569     dst->userspace_addr = src->userspace_addr;
570     dst->memory_size = src->memory_size;
571     dst->guest_phys_addr = src->guest_phys_addr;
572     dst->mmap_offset = mmap_offset;
573 }
574 
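/*
 * Build a VHOST_USER_SET_MEM_TABLE message from the device's memory map.
 * Only fd-backed regions are included, with their fds collected into the
 * fds array. When track_ramblocks is true, the RAMBlock and offset of each
 * region are also recorded for postcopy use.
 */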
575 static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
576                                              struct vhost_dev *dev,
577                                              VhostUserMsg *msg,
578                                              int *fds, size_t *fd_num,
579                                              bool track_ramblocks)
580 {
581     int i, fd;
582     ram_addr_t offset;
583     MemoryRegion *mr;
584     struct vhost_memory_region *reg;
585     VhostUserMemoryRegion region_buffer;
586 
587     msg->hdr.request = VHOST_USER_SET_MEM_TABLE;
588 
589     for (i = 0; i < dev->mem->nregions; ++i) {
590         reg = dev->mem->regions + i;
591 
592         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
593         if (fd > 0) {
594             if (track_ramblocks) {
595                 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
596                 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
597                                                       reg->memory_size,
598                                                       reg->guest_phys_addr,
599                                                       reg->userspace_addr,
600                                                       offset);
601                 u->region_rb_offset[i] = offset;
602                 u->region_rb[i] = mr->ram_block;
603             } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
604                 error_report("Failed preparing vhost-user memory table msg");
605                 return -ENOBUFS;
606             }
607             vhost_user_fill_msg_region(&region_buffer, reg, offset);
608             msg->payload.memory.regions[*fd_num] = region_buffer;
609             fds[(*fd_num)++] = fd;
610         } else if (track_ramblocks) {
611             u->region_rb_offset[i] = 0;
612             u->region_rb[i] = NULL;
613         }
614     }
615 
616     msg->payload.memory.nregions = *fd_num;
617 
618     if (!*fd_num) {
619         error_report("Failed initializing vhost-user memory map, "
620                      "consider using -object memory-backend-file share=on");
621         return -EINVAL;
622     }
623 
624     msg->hdr.size = sizeof(msg->payload.memory.nregions);
625     msg->hdr.size += sizeof(msg->payload.memory.padding);
626     msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);
627 
628     return 0;
629 }
630 
631 static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
632                              struct vhost_memory_region *vdev_reg)
633 {
634     return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
635         shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
636         shadow_reg->memory_size == vdev_reg->memory_size;
637 }
638 
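/*
 * Compare the shadow region table against the device's current memory
 * state: regions only present in the shadow table are collected in
 * rem_reg, regions only present in the device state are collected in
 * add_reg.
 */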
639 static void scrub_shadow_regions(struct vhost_dev *dev,
640                                  struct scrub_regions *add_reg,
641                                  int *nr_add_reg,
642                                  struct scrub_regions *rem_reg,
643                                  int *nr_rem_reg, uint64_t *shadow_pcb,
644                                  bool track_ramblocks)
645 {
646     struct vhost_user *u = dev->opaque;
647     bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
648     struct vhost_memory_region *reg, *shadow_reg;
649     int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
650     ram_addr_t offset;
651     MemoryRegion *mr;
652     bool matching;
653 
654     /*
655      * Find memory regions present in our shadow state which are not in
656      * the device's current memory state.
657      *
658      * Mark regions in both the shadow and device state as "found".
659      */
660     for (i = 0; i < u->num_shadow_regions; i++) {
661         shadow_reg = &u->shadow_regions[i];
662         matching = false;
663 
664         for (j = 0; j < dev->mem->nregions; j++) {
665             reg = &dev->mem->regions[j];
666 
667             mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
668 
669             if (reg_equal(shadow_reg, reg)) {
670                 matching = true;
671                 found[j] = true;
672                 if (track_ramblocks) {
673                     /*
674                      * Reset postcopy client bases, region_rb, and
675                      * region_rb_offset in case regions are removed.
676                      */
677                     if (fd > 0) {
678                         u->region_rb_offset[j] = offset;
679                         u->region_rb[j] = mr->ram_block;
680                         shadow_pcb[j] = u->postcopy_client_bases[i];
681                     } else {
682                         u->region_rb_offset[j] = 0;
683                         u->region_rb[j] = NULL;
684                     }
685                 }
686                 break;
687             }
688         }
689 
690         /*
691          * If the region was not found in the current device memory state
692          * create an entry for it in the removed list.
693          */
694         if (!matching) {
695             rem_reg[rm_idx].region = shadow_reg;
696             rem_reg[rm_idx++].reg_idx = i;
697         }
698     }
699 
700     /*
701      * For regions not marked "found", create entries in the added list.
702      *
703      * Note their indexes in the device memory state and the indexes of their
704      * file descriptors.
705      */
706     for (i = 0; i < dev->mem->nregions; i++) {
707         reg = &dev->mem->regions[i];
708         vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
709         if (fd > 0) {
710             ++fd_num;
711         }
712 
713         /*
714          * If the region was in both the shadow and device state we don't
715          * need to send a VHOST_USER_ADD_MEM_REG message for it.
716          */
717         if (found[i]) {
718             continue;
719         }
720 
721         add_reg[add_idx].region = reg;
722         add_reg[add_idx].reg_idx = i;
723         add_reg[add_idx++].fd_idx = fd_num;
724     }
725     *nr_rem_reg = rm_idx;
726     *nr_add_reg = add_idx;
727 
728     return;
729 }
730 
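/*
 * Send a VHOST_USER_REM_MEM_REG message for each fd-backed region listed
 * in remove_reg and drop the removed regions from the shadow table.
 */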
731 static int send_remove_regions(struct vhost_dev *dev,
732                                struct scrub_regions *remove_reg,
733                                int nr_rem_reg, VhostUserMsg *msg,
734                                bool reply_supported)
735 {
736     struct vhost_user *u = dev->opaque;
737     struct vhost_memory_region *shadow_reg;
738     int i, fd, shadow_reg_idx, ret;
739     ram_addr_t offset;
740     VhostUserMemoryRegion region_buffer;
741 
742     /*
743      * The regions in remove_reg appear in the same order they do in the
744      * shadow table. Therefore we can minimize memory copies by iterating
745      * through remove_reg backwards.
746      */
747     for (i = nr_rem_reg - 1; i >= 0; i--) {
748         shadow_reg = remove_reg[i].region;
749         shadow_reg_idx = remove_reg[i].reg_idx;
750 
751         vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);
752 
753         if (fd > 0) {
754             msg->hdr.request = VHOST_USER_REM_MEM_REG;
755             vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
756             msg->payload.mem_reg.region = region_buffer;
757 
758             ret = vhost_user_write(dev, msg, NULL, 0);
759             if (ret < 0) {
760                 return ret;
761             }
762 
763             if (reply_supported) {
764                 ret = process_message_reply(dev, msg);
765                 if (ret) {
766                     return ret;
767                 }
768             }
769         }
770 
771         /*
772          * At this point we know the backend has unmapped the region. It is now
773          * safe to remove it from the shadow table.
774          */
775         memmove(&u->shadow_regions[shadow_reg_idx],
776                 &u->shadow_regions[shadow_reg_idx + 1],
777                 sizeof(struct vhost_memory_region) *
778                 (u->num_shadow_regions - shadow_reg_idx - 1));
779         u->num_shadow_regions--;
780     }
781 
782     return 0;
783 }
784 
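/*
 * Send a VHOST_USER_ADD_MEM_REG message for each fd-backed region listed
 * in add_reg and append the new regions to the shadow table. During
 * postcopy (track_ramblocks) the backend's reply supplies the postcopy
 * client base address for each region.
 */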
785 static int send_add_regions(struct vhost_dev *dev,
786                             struct scrub_regions *add_reg, int nr_add_reg,
787                             VhostUserMsg *msg, uint64_t *shadow_pcb,
788                             bool reply_supported, bool track_ramblocks)
789 {
790     struct vhost_user *u = dev->opaque;
791     int i, fd, ret, reg_idx, reg_fd_idx;
792     struct vhost_memory_region *reg;
793     MemoryRegion *mr;
794     ram_addr_t offset;
795     VhostUserMsg msg_reply;
796     VhostUserMemoryRegion region_buffer;
797 
798     for (i = 0; i < nr_add_reg; i++) {
799         reg = add_reg[i].region;
800         reg_idx = add_reg[i].reg_idx;
801         reg_fd_idx = add_reg[i].fd_idx;
802 
803         mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
804 
805         if (fd > 0) {
806             if (track_ramblocks) {
807                 trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
808                                                       reg->memory_size,
809                                                       reg->guest_phys_addr,
810                                                       reg->userspace_addr,
811                                                       offset);
812                 u->region_rb_offset[reg_idx] = offset;
813                 u->region_rb[reg_idx] = mr->ram_block;
814             }
815             msg->hdr.request = VHOST_USER_ADD_MEM_REG;
816             vhost_user_fill_msg_region(&region_buffer, reg, offset);
817             msg->payload.mem_reg.region = region_buffer;
818 
819             ret = vhost_user_write(dev, msg, &fd, 1);
820             if (ret < 0) {
821                 return ret;
822             }
823 
824             if (track_ramblocks) {
825                 uint64_t reply_gpa;
826 
827                 ret = vhost_user_read(dev, &msg_reply);
828                 if (ret < 0) {
829                     return ret;
830                 }
831 
832                 reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;
833 
834                 if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
835                     error_report("%s: Received unexpected msg type."
836                                  "Expected %d received %d", __func__,
837                                  VHOST_USER_ADD_MEM_REG,
838                                  msg_reply.hdr.request);
839                     return -EPROTO;
840                 }
841 
842                 /*
843                  * We're using the same structure, just reusing one of the
844                  * fields, so it should be the same size.
845                  */
846                 if (msg_reply.hdr.size != msg->hdr.size) {
847                     error_report("%s: Unexpected size for postcopy reply "
848                                  "%d vs %d", __func__, msg_reply.hdr.size,
849                                  msg->hdr.size);
850                     return -EPROTO;
851                 }
852 
853                 /* Get the postcopy client base from the backend's reply. */
854                 if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
855                     shadow_pcb[reg_idx] =
856                         msg_reply.payload.mem_reg.region.userspace_addr;
857                     trace_vhost_user_set_mem_table_postcopy(
858                         msg_reply.payload.mem_reg.region.userspace_addr,
859                         msg->payload.mem_reg.region.userspace_addr,
860                         reg_fd_idx, reg_idx);
861                 } else {
862                     error_report("%s: invalid postcopy reply for region. "
863                                  "Got guest physical address %" PRIX64 ", expected "
864                                  "%" PRIX64, __func__, reply_gpa,
865                                  dev->mem->regions[reg_idx].guest_phys_addr);
866                     return -EPROTO;
867                 }
868             } else if (reply_supported) {
869                 ret = process_message_reply(dev, msg);
870                 if (ret) {
871                     return ret;
872                 }
873             }
874         } else if (track_ramblocks) {
875             u->region_rb_offset[reg_idx] = 0;
876             u->region_rb[reg_idx] = NULL;
877         }
878 
879         /*
880          * At this point, we know the backend has mapped in the new
881          * region, if the region has a valid file descriptor.
882          *
883          * The region should now be added to the shadow table.
884          */
885         u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
886             reg->guest_phys_addr;
887         u->shadow_regions[u->num_shadow_regions].userspace_addr =
888             reg->userspace_addr;
889         u->shadow_regions[u->num_shadow_regions].memory_size =
890             reg->memory_size;
891         u->num_shadow_regions++;
892     }
893 
894     return 0;
895 }
896 
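/*
 * Update the backend's memory map incrementally with
 * VHOST_USER_ADD_MEM_REG / VHOST_USER_REM_MEM_REG messages
 * (VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS) instead of resending the
 * whole memory table.
 */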
897 static int vhost_user_add_remove_regions(struct vhost_dev *dev,
898                                          VhostUserMsg *msg,
899                                          bool reply_supported,
900                                          bool track_ramblocks)
901 {
902     struct vhost_user *u = dev->opaque;
903     struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
904     struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
905     uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
906     int nr_add_reg, nr_rem_reg;
907     int ret;
908 
909     msg->hdr.size = sizeof(msg->payload.mem_reg);
910 
911     /* Find the regions which need to be removed or added. */
912     scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
913                          shadow_pcb, track_ramblocks);
914 
915     if (nr_rem_reg) {
916         ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
917                                   reply_supported);
918         if (ret < 0) {
919             goto err;
920         }
921     }
922 
923     if (nr_add_reg) {
924         ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
925                                reply_supported, track_ramblocks);
926         if (ret < 0) {
927             goto err;
928         }
929     }
930 
931     if (track_ramblocks) {
932         memcpy(u->postcopy_client_bases, shadow_pcb,
933                sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
934         /*
935          * Now that we've registered this with the postcopy code, we ack to
936          * the client, because we are now in a position to deal with any
937          * faults it generates.
938          */
939         /* TODO: Use this for failure cases as well with a bad value. */
940         msg->hdr.size = sizeof(msg->payload.u64);
941         msg->payload.u64 = 0; /* OK */
942 
943         ret = vhost_user_write(dev, msg, NULL, 0);
944         if (ret < 0) {
945             return ret;
946         }
947     }
948 
949     return 0;
950 
951 err:
952     if (track_ramblocks) {
953         memcpy(u->postcopy_client_bases, shadow_pcb,
954                sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
955     }
956 
957     return ret;
958 }
959 
960 static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
961                                              struct vhost_memory *mem,
962                                              bool reply_supported,
963                                              bool config_mem_slots)
964 {
965     struct vhost_user *u = dev->opaque;
966     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
967     size_t fd_num = 0;
968     VhostUserMsg msg_reply;
969     int region_i, msg_i;
970     int ret;
971 
972     VhostUserMsg msg = {
973         .hdr.flags = VHOST_USER_VERSION,
974     };
975 
976     if (u->region_rb_len < dev->mem->nregions) {
977         u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
978         u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
979                                       dev->mem->nregions);
980         memset(&(u->region_rb[u->region_rb_len]), '\0',
981                sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
982         memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
983                sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
984         u->region_rb_len = dev->mem->nregions;
985     }
986 
987     if (config_mem_slots) {
988         ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
989         if (ret < 0) {
990             return ret;
991         }
992     } else {
993         ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
994                                                 true);
995         if (ret < 0) {
996             return ret;
997         }
998 
999         ret = vhost_user_write(dev, &msg, fds, fd_num);
1000         if (ret < 0) {
1001             return ret;
1002         }
1003 
1004         ret = vhost_user_read(dev, &msg_reply);
1005         if (ret < 0) {
1006             return ret;
1007         }
1008 
1009         if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
1010             error_report("%s: Received unexpected msg type."
1011                          "Expected %d received %d", __func__,
1012                          VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
1013             return -EPROTO;
1014         }
1015 
1016         /*
1017          * We're using the same structure, just reusing one of the
1018          * fields, so it should be the same size.
1019          */
1020         if (msg_reply.hdr.size != msg.hdr.size) {
1021             error_report("%s: Unexpected size for postcopy reply "
1022                          "%d vs %d", __func__, msg_reply.hdr.size,
1023                          msg.hdr.size);
1024             return -EPROTO;
1025         }
1026 
1027         memset(u->postcopy_client_bases, 0,
1028                sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
1029 
1030         /*
1031          * They're in the same order as the regions that were sent
1032          * but some of the regions were skipped (above) if they
1033          * didn't have fds.
1034          */
1035         for (msg_i = 0, region_i = 0;
1036              region_i < dev->mem->nregions;
1037              region_i++) {
1038             if (msg_i < fd_num &&
1039                 msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
1040                 dev->mem->regions[region_i].guest_phys_addr) {
1041                 u->postcopy_client_bases[region_i] =
1042                     msg_reply.payload.memory.regions[msg_i].userspace_addr;
1043                 trace_vhost_user_set_mem_table_postcopy(
1044                     msg_reply.payload.memory.regions[msg_i].userspace_addr,
1045                     msg.payload.memory.regions[msg_i].userspace_addr,
1046                     msg_i, region_i);
1047                 msg_i++;
1048             }
1049         }
1050         if (msg_i != fd_num) {
1051             error_report("%s: postcopy reply not fully consumed "
1052                          "%d vs %zd",
1053                          __func__, msg_i, fd_num);
1054             return -EIO;
1055         }
1056 
1057         /*
1058          * Now that we've registered this with the postcopy code, we ack to
1059          * the client, because we are now in a position to deal with any
1060          * faults it generates.
1061          */
1062         /* TODO: Use this for failure cases as well with a bad value. */
1063         msg.hdr.size = sizeof(msg.payload.u64);
1064         msg.payload.u64 = 0; /* OK */
1065         ret = vhost_user_write(dev, &msg, NULL, 0);
1066         if (ret < 0) {
1067             return ret;
1068         }
1069     }
1070 
1071     return 0;
1072 }
1073 
1074 static int vhost_user_set_mem_table(struct vhost_dev *dev,
1075                                     struct vhost_memory *mem)
1076 {
1077     struct vhost_user *u = dev->opaque;
1078     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
1079     size_t fd_num = 0;
1080     bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
1081     bool reply_supported = virtio_has_feature(dev->protocol_features,
1082                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
1083     bool config_mem_slots =
1084         virtio_has_feature(dev->protocol_features,
1085                            VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
1086     int ret;
1087 
1088     if (do_postcopy) {
1089         /*
1090          * Postcopy has enough differences that it's best done in its own
1091          * version.
1092          */
1093         return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
1094                                                  config_mem_slots);
1095     }
1096 
1097     VhostUserMsg msg = {
1098         .hdr.flags = VHOST_USER_VERSION,
1099     };
1100 
1101     if (reply_supported) {
1102         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1103     }
1104 
1105     if (config_mem_slots) {
1106         ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
1107         if (ret < 0) {
1108             return ret;
1109         }
1110     } else {
1111         ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
1112                                                 false);
1113         if (ret < 0) {
1114             return ret;
1115         }
1116 
1117         ret = vhost_user_write(dev, &msg, fds, fd_num);
1118         if (ret < 0) {
1119             return ret;
1120         }
1121 
1122         if (reply_supported) {
1123             return process_message_reply(dev, &msg);
1124         }
1125     }
1126 
1127     return 0;
1128 }
1129 
1130 static int vhost_user_set_vring_endian(struct vhost_dev *dev,
1131                                        struct vhost_vring_state *ring)
1132 {
1133     bool cross_endian = virtio_has_feature(dev->protocol_features,
1134                                            VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
1135     VhostUserMsg msg = {
1136         .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
1137         .hdr.flags = VHOST_USER_VERSION,
1138         .payload.state = *ring,
1139         .hdr.size = sizeof(msg.payload.state),
1140     };
1141 
1142     if (!cross_endian) {
1143         error_report("vhost-user trying to send unhandled ioctl");
1144         return -ENOTSUP;
1145     }
1146 
1147     return vhost_user_write(dev, &msg, NULL, 0);
1148 }
1149 
1150 static int vhost_set_vring(struct vhost_dev *dev,
1151                            unsigned long int request,
1152                            struct vhost_vring_state *ring)
1153 {
1154     VhostUserMsg msg = {
1155         .hdr.request = request,
1156         .hdr.flags = VHOST_USER_VERSION,
1157         .payload.state = *ring,
1158         .hdr.size = sizeof(msg.payload.state),
1159     };
1160 
1161     return vhost_user_write(dev, &msg, NULL, 0);
1162 }
1163 
1164 static int vhost_user_set_vring_num(struct vhost_dev *dev,
1165                                     struct vhost_vring_state *ring)
1166 {
1167     return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
1168 }
1169 
1170 static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
1171 {
1172     assert(n && n->unmap_addr);
1173     munmap(n->unmap_addr, qemu_real_host_page_size());
1174     n->unmap_addr = NULL;
1175 }
1176 
1177 /*
1178  * Clean-up function for the notifier; it will finally free the structure
1179  * under RCU.
1180  */
1181 static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
1182                                             VirtIODevice *vdev)
1183 {
1184     if (n->addr) {
1185         if (vdev) {
1186             virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
1187         }
1188         assert(!n->unmap_addr);
1189         n->unmap_addr = n->addr;
1190         n->addr = NULL;
1191         call_rcu(n, vhost_user_host_notifier_free, rcu);
1192     }
1193 }
1194 
1195 static int vhost_user_set_vring_base(struct vhost_dev *dev,
1196                                      struct vhost_vring_state *ring)
1197 {
1198     return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
1199 }
1200 
1201 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
1202 {
1203     int i;
1204 
1205     if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
1206         return -EINVAL;
1207     }
1208 
1209     for (i = 0; i < dev->nvqs; ++i) {
1210         int ret;
1211         struct vhost_vring_state state = {
1212             .index = dev->vq_index + i,
1213             .num   = enable,
1214         };
1215 
1216         ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
1217         if (ret < 0) {
1218             /*
1219              * Restoring the previous state is likely infeasible, as is
1220              * proceeding regardless of the error, so just bail out and hope
1221              * for device-level recovery.
1222              */
1223             return ret;
1224         }
1225     }
1226 
1227     return 0;
1228 }
1229 
1230 static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
1231                                              int idx)
1232 {
1233     if (idx >= u->notifiers->len) {
1234         return NULL;
1235     }
1236     return g_ptr_array_index(u->notifiers, idx);
1237 }
1238 
1239 static int vhost_user_get_vring_base(struct vhost_dev *dev,
1240                                      struct vhost_vring_state *ring)
1241 {
1242     int ret;
1243     VhostUserMsg msg = {
1244         .hdr.request = VHOST_USER_GET_VRING_BASE,
1245         .hdr.flags = VHOST_USER_VERSION,
1246         .payload.state = *ring,
1247         .hdr.size = sizeof(msg.payload.state),
1248     };
1249     struct vhost_user *u = dev->opaque;
1250 
1251     VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
1252     if (n) {
1253         vhost_user_host_notifier_remove(n, dev->vdev);
1254     }
1255 
1256     ret = vhost_user_write(dev, &msg, NULL, 0);
1257     if (ret < 0) {
1258         return ret;
1259     }
1260 
1261     ret = vhost_user_read(dev, &msg);
1262     if (ret < 0) {
1263         return ret;
1264     }
1265 
1266     if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
1267         error_report("Received unexpected msg type. Expected %d received %d",
1268                      VHOST_USER_GET_VRING_BASE, msg.hdr.request);
1269         return -EPROTO;
1270     }
1271 
1272     if (msg.hdr.size != sizeof(msg.payload.state)) {
1273         error_report("Received bad msg size.");
1274         return -EPROTO;
1275     }
1276 
1277     *ring = msg.payload.state;
1278 
1279     return 0;
1280 }
1281 
1282 static int vhost_set_vring_file(struct vhost_dev *dev,
1283                                 VhostUserRequest request,
1284                                 struct vhost_vring_file *file)
1285 {
1286     int fds[VHOST_USER_MAX_RAM_SLOTS];
1287     size_t fd_num = 0;
1288     VhostUserMsg msg = {
1289         .hdr.request = request,
1290         .hdr.flags = VHOST_USER_VERSION,
1291         .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
1292         .hdr.size = sizeof(msg.payload.u64),
1293     };
1294 
1295     if (ioeventfd_enabled() && file->fd > 0) {
1296         fds[fd_num++] = file->fd;
1297     } else {
1298         msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
1299     }
1300 
1301     return vhost_user_write(dev, &msg, fds, fd_num);
1302 }
1303 
1304 static int vhost_user_set_vring_kick(struct vhost_dev *dev,
1305                                      struct vhost_vring_file *file)
1306 {
1307     return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
1308 }
1309 
1310 static int vhost_user_set_vring_call(struct vhost_dev *dev,
1311                                      struct vhost_vring_file *file)
1312 {
1313     return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
1314 }
1315 
1316 static int vhost_user_set_vring_err(struct vhost_dev *dev,
1317                                     struct vhost_vring_file *file)
1318 {
1319     return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
1320 }
1321 
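/* Send a request and read back the backend's 64-bit reply payload. */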
1322 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
1323 {
1324     int ret;
1325     VhostUserMsg msg = {
1326         .hdr.request = request,
1327         .hdr.flags = VHOST_USER_VERSION,
1328     };
1329 
1330     if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
1331         return 0;
1332     }
1333 
1334     ret = vhost_user_write(dev, &msg, NULL, 0);
1335     if (ret < 0) {
1336         return ret;
1337     }
1338 
1339     ret = vhost_user_read(dev, &msg);
1340     if (ret < 0) {
1341         return ret;
1342     }
1343 
1344     if (msg.hdr.request != request) {
1345         error_report("Received unexpected msg type. Expected %d received %d",
1346                      request, msg.hdr.request);
1347         return -EPROTO;
1348     }
1349 
1350     if (msg.hdr.size != sizeof(msg.payload.u64)) {
1351         error_report("Received bad msg size.");
1352         return -EPROTO;
1353     }
1354 
1355     *u64 = msg.payload.u64;
1356 
1357     return 0;
1358 }
1359 
1360 static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
1361 {
1362     if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
1363         return -EPROTO;
1364     }
1365 
1366     return 0;
1367 }
1368 
1369 static int enforce_reply(struct vhost_dev *dev,
1370                          const VhostUserMsg *msg)
1371 {
1372     uint64_t dummy;
1373 
1374     if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
1375         return process_message_reply(dev, msg);
1376     }
1377 
1378    /*
1379     * We need to wait for a reply but the backend does not
1380     * support replies for the command we just sent.
1381     * Send VHOST_USER_GET_FEATURES which makes all backends
1382     * send a reply.
1383     */
1384     return vhost_user_get_features(dev, &dummy);
1385 }
1386 
1387 static int vhost_user_set_vring_addr(struct vhost_dev *dev,
1388                                      struct vhost_vring_addr *addr)
1389 {
1390     int ret;
1391     VhostUserMsg msg = {
1392         .hdr.request = VHOST_USER_SET_VRING_ADDR,
1393         .hdr.flags = VHOST_USER_VERSION,
1394         .payload.addr = *addr,
1395         .hdr.size = sizeof(msg.payload.addr),
1396     };
1397 
1398     bool reply_supported = virtio_has_feature(dev->protocol_features,
1399                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
1400 
1401     /*
1402      * wait for a reply if logging is enabled to make sure
1403      * backend is actually logging changes
1404      */
1405     bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);
1406 
1407     if (reply_supported && wait_for_reply) {
1408         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1409     }
1410 
1411     ret = vhost_user_write(dev, &msg, NULL, 0);
1412     if (ret < 0) {
1413         return ret;
1414     }
1415 
1416     if (wait_for_reply) {
1417         return enforce_reply(dev, &msg);
1418     }
1419 
1420     return 0;
1421 }
1422 
1423 static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
1424                               bool wait_for_reply)
1425 {
1426     VhostUserMsg msg = {
1427         .hdr.request = request,
1428         .hdr.flags = VHOST_USER_VERSION,
1429         .payload.u64 = u64,
1430         .hdr.size = sizeof(msg.payload.u64),
1431     };
1432     int ret;
1433 
1434     if (wait_for_reply) {
1435         bool reply_supported = virtio_has_feature(dev->protocol_features,
1436                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);
1437         if (reply_supported) {
1438             msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1439         }
1440     }
1441 
1442     ret = vhost_user_write(dev, &msg, NULL, 0);
1443     if (ret < 0) {
1444         return ret;
1445     }
1446 
1447     if (wait_for_reply) {
1448         return enforce_reply(dev, &msg);
1449     }
1450 
1451     return 0;
1452 }
1453 
1454 static int vhost_user_set_features(struct vhost_dev *dev,
1455                                    uint64_t features)
1456 {
1457     /*
1458      * wait for a reply if logging is enabled to make sure
1459      * backend is actually logging changes
1460      */
1461     bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
1462 
1463     return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
1464                               log_enabled);
1465 }
1466 
1467 static int vhost_user_set_protocol_features(struct vhost_dev *dev,
1468                                             uint64_t features)
1469 {
1470     return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
1471                               false);
1472 }
1473 
1474 static int vhost_user_set_owner(struct vhost_dev *dev)
1475 {
1476     VhostUserMsg msg = {
1477         .hdr.request = VHOST_USER_SET_OWNER,
1478         .hdr.flags = VHOST_USER_VERSION,
1479     };
1480 
1481     return vhost_user_write(dev, &msg, NULL, 0);
1482 }
1483 
1484 static int vhost_user_get_max_memslots(struct vhost_dev *dev,
1485                                        uint64_t *max_memslots)
1486 {
1487     uint64_t backend_max_memslots;
1488     int err;
1489 
1490     err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
1491                              &backend_max_memslots);
1492     if (err < 0) {
1493         return err;
1494     }
1495 
1496     *max_memslots = backend_max_memslots;
1497 
1498     return 0;
1499 }
1500 
1501 static int vhost_user_reset_device(struct vhost_dev *dev)
1502 {
1503     VhostUserMsg msg = {
1504         .hdr.flags = VHOST_USER_VERSION,
1505     };
1506 
1507     msg.hdr.request = virtio_has_feature(dev->protocol_features,
1508                                          VHOST_USER_PROTOCOL_F_RESET_DEVICE)
1509         ? VHOST_USER_RESET_DEVICE
1510         : VHOST_USER_RESET_OWNER;
1511 
1512     return vhost_user_write(dev, &msg, NULL, 0);
1513 }
1514 
1515 static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
1516 {
1517     if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
1518         return -ENOSYS;
1519     }
1520 
1521     return dev->config_ops->vhost_dev_config_notifier(dev);
1522 }
1523 
1524 /*
1525  * Fetch or create the notifier for a given idx. Newly created
1526  * notifiers are added to the pointer array that tracks them.
1527  */
1528 static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
1529                                                        int idx)
1530 {
1531     VhostUserHostNotifier *n = NULL;
1532     if (idx >= u->notifiers->len) {
1533         g_ptr_array_set_size(u->notifiers, idx + 1);
1534     }
1535 
1536     n = g_ptr_array_index(u->notifiers, idx);
1537     if (!n) {
1538         n = g_new0(VhostUserHostNotifier, 1);
1539         n->idx = idx;
1540         g_ptr_array_insert(u->notifiers, idx, n);
1541         trace_vhost_user_create_notifier(idx, n);
1542     }
1543 
1544     return n;
1545 }
1546 
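/*
 * Handle VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: mmap the page provided
 * by the backend and install it as the host notifier memory region for the
 * given queue, or just tear down the old mapping when NOFD is set.
 */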
1547 static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
1548                                                        VhostUserVringArea *area,
1549                                                        int fd)
1550 {
1551     int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
1552     size_t page_size = qemu_real_host_page_size();
1553     struct vhost_user *u = dev->opaque;
1554     VhostUserState *user = u->user;
1555     VirtIODevice *vdev = dev->vdev;
1556     VhostUserHostNotifier *n;
1557     void *addr;
1558     char *name;
1559 
1560     if (!virtio_has_feature(dev->protocol_features,
1561                             VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
1562         vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
1563         return -EINVAL;
1564     }
1565 
1566     /*
1567      * Fetch the notifier and invalidate any old data before setting up
1568      * the new mapped address.
1569      */
1570     n = fetch_or_create_notifier(user, queue_idx);
1571     vhost_user_host_notifier_remove(n, vdev);
1572 
1573     if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
1574         return 0;
1575     }
1576 
1577     /* Sanity check. */
1578     if (area->size != page_size) {
1579         return -EINVAL;
1580     }
1581 
1582     addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1583                 fd, area->offset);
1584     if (addr == MAP_FAILED) {
1585         return -EFAULT;
1586     }
1587 
1588     name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
1589                            user, queue_idx);
1590     if (!n->mr.ram) { /* Don't init again after suspend. */
1591         memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
1592                                           page_size, addr);
1593     } else {
1594         n->mr.ram_block->host = addr;
1595     }
1596     g_free(name);
1597 
1598     if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
1599         object_unparent(OBJECT(&n->mr));
1600         munmap(addr, page_size);
1601         return -ENXIO;
1602     }
1603 
1604     n->addr = addr;
1605 
1606     return 0;
1607 }
1608 
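/* Tear down the slave channel: drop its GSource and release the IO channel. */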
1609 static void close_slave_channel(struct vhost_user *u)
1610 {
1611     g_source_destroy(u->slave_src);
1612     g_source_unref(u->slave_src);
1613     u->slave_src = NULL;
1614     object_unref(OBJECT(u->slave_ioc));
1615     u->slave_ioc = NULL;
1616 }
1617 
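/*
 * GSource callback for the slave (backend-to-QEMU) channel. Reads one
 * message (header, optional fds, payload), dispatches it to the matching
 * handler and, if the sender requested it, writes back a REPLY_ACK
 * carrying the handler's status. On error the channel is closed and the
 * source removed.
 */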
1618 static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
1619                            gpointer opaque)
1620 {
1621     struct vhost_dev *dev = opaque;
1622     struct vhost_user *u = dev->opaque;
1623     VhostUserHeader hdr = { 0, };
1624     VhostUserPayload payload = { 0, };
1625     Error *local_err = NULL;
1626     gboolean rc = G_SOURCE_CONTINUE;
1627     int ret = 0;
1628     struct iovec iov;
1629     g_autofree int *fd = NULL;
1630     size_t fdsize = 0;
1631     int i;
1632 
1633     /* Read header */
1634     iov.iov_base = &hdr;
1635     iov.iov_len = VHOST_USER_HDR_SIZE;
1636 
1637     if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
1638         error_report_err(local_err);
1639         goto err;
1640     }
1641 
1642     if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
1643         error_report("Failed to read msg header."
1644                 " Size %d exceeds the maximum %zu.", hdr.size,
1645                 VHOST_USER_PAYLOAD_SIZE);
1646         goto err;
1647     }
1648 
1649     /* Read payload */
1650     if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
1651         error_report_err(local_err);
1652         goto err;
1653     }
1654 
1655     switch (hdr.request) {
1656     case VHOST_USER_SLAVE_IOTLB_MSG:
1657         ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
1658         break;
1659     case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
1660         ret = vhost_user_slave_handle_config_change(dev);
1661         break;
1662     case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
1663         ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
1664                                                           fd ? fd[0] : -1);
1665         break;
1666     default:
1667         error_report("Received unexpected msg type: %d.", hdr.request);
1668         ret = -EINVAL;
1669     }
1670 
1671     /*
1672      * REPLY_ACK feature handling. Other reply types have to be managed
1673      * directly in their request handlers.
1674      */
1675     if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
1676         struct iovec iovec[2];
1677 
1678 
1679         hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
1680         hdr.flags |= VHOST_USER_REPLY_MASK;
1681 
1682         payload.u64 = !!ret;
1683         hdr.size = sizeof(payload.u64);
1684 
1685         iovec[0].iov_base = &hdr;
1686         iovec[0].iov_len = VHOST_USER_HDR_SIZE;
1687         iovec[1].iov_base = &payload;
1688         iovec[1].iov_len = hdr.size;
1689 
1690         if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
1691             error_report_err(local_err);
1692             goto err;
1693         }
1694     }
1695 
1696     goto fdcleanup;
1697 
1698 err:
1699     close_slave_channel(u);
1700     rc = G_SOURCE_REMOVE;
1701 
1702 fdcleanup:
1703     if (fd) {
1704         for (i = 0; i < fdsize; i++) {
1705             close(fd[i]);
1706         }
1707     }
1708     return rc;
1709 }
1710 
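/*
 * Create the slave channel, if the backend supports it: open a
 * socketpair, keep one end locally (watched by slave_read) and hand the
 * other end to the backend with VHOST_USER_SET_SLAVE_REQ_FD.
 */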
1711 static int vhost_setup_slave_channel(struct vhost_dev *dev)
1712 {
1713     VhostUserMsg msg = {
1714         .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
1715         .hdr.flags = VHOST_USER_VERSION,
1716     };
1717     struct vhost_user *u = dev->opaque;
1718     int sv[2], ret = 0;
1719     bool reply_supported = virtio_has_feature(dev->protocol_features,
1720                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
1721     Error *local_err = NULL;
1722     QIOChannel *ioc;
1723 
1724     if (!virtio_has_feature(dev->protocol_features,
1725                             VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
1726         return 0;
1727     }
1728 
1729     if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
1730         int saved_errno = errno;
1731         error_report("socketpair() failed");
1732         return -saved_errno;
1733     }
1734 
1735     ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
1736     if (!ioc) {
1737         error_report_err(local_err);
1738         return -ECONNREFUSED;
1739     }
1740     u->slave_ioc = ioc;
1741     slave_update_read_handler(dev, NULL);
1742 
1743     if (reply_supported) {
1744         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1745     }
1746 
1747     ret = vhost_user_write(dev, &msg, &sv[1], 1);
1748     if (ret) {
1749         goto out;
1750     }
1751 
1752     if (reply_supported) {
1753         ret = process_message_reply(dev, &msg);
1754     }
1755 
1756 out:
1757     close(sv[1]);
1758     if (ret) {
1759         close_slave_channel(u);
1760     }
1761 
1762     return ret;
1763 }
1764 
1765 #ifdef CONFIG_LINUX
1766 /*
1767  * Called back from the postcopy fault thread when a fault is received on our
1768  * ufd.
1769  * TODO: This is Linux specific
1770  */
1771 static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
1772                                              void *ufd)
1773 {
1774     struct vhost_dev *dev = pcfd->data;
1775     struct vhost_user *u = dev->opaque;
1776     struct uffd_msg *msg = ufd;
1777     uint64_t faultaddr = msg->arg.pagefault.address;
1778     RAMBlock *rb = NULL;
1779     uint64_t rb_offset;
1780     int i;
1781 
1782     trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
1783                                             dev->mem->nregions);
1784     for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1785         trace_vhost_user_postcopy_fault_handler_loop(i,
1786                 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
1787         if (faultaddr >= u->postcopy_client_bases[i]) {
1788             /* Offset of the fault address in the vhost region */
1789             uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1790             if (region_offset < dev->mem->regions[i].memory_size) {
1791                 rb_offset = region_offset + u->region_rb_offset[i];
1792                 trace_vhost_user_postcopy_fault_handler_found(i,
1793                         region_offset, rb_offset);
1794                 rb = u->region_rb[i];
1795                 return postcopy_request_shared_page(pcfd, rb, faultaddr,
1796                                                     rb_offset);
1797             }
1798         }
1799     }
1800     error_report("%s: Failed to find region for fault %" PRIx64,
1801                  __func__, faultaddr);
1802     return -1;
1803 }
1804 
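/*
 * Called when a page covered by one of our regions has arrived during
 * postcopy: translate the RAMBlock offset back into the client's
 * address space and wake anything waiting on that page.
 */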
1805 static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1806                                      uint64_t offset)
1807 {
1808     struct vhost_dev *dev = pcfd->data;
1809     struct vhost_user *u = dev->opaque;
1810     int i;
1811 
1812     trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1813 
1814     if (!u) {
1815         return 0;
1816     }
1817     /* Translate the offset into an address in the client's address space */
1818     for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1819         if (u->region_rb[i] == rb &&
1820             offset >= u->region_rb_offset[i] &&
1821             offset < (u->region_rb_offset[i] +
1822                       dev->mem->regions[i].memory_size)) {
1823             uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1824                                    u->postcopy_client_bases[i];
1825             trace_vhost_user_postcopy_waker_found(client_addr);
1826             return postcopy_wake_shared(pcfd, client_addr, rb);
1827         }
1828     }
1829 
1830     trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1831     return 0;
1832 }
1833 #endif
1834 
1835 /*
1836  * Called at the start of an inbound postcopy on reception of the
1837  * 'advise' command.
1838  */
1839 static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
1840 {
1841 #ifdef CONFIG_LINUX
1842     struct vhost_user *u = dev->opaque;
1843     CharBackend *chr = u->user->chr;
1844     int ufd;
1845     int ret;
1846     VhostUserMsg msg = {
1847         .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
1848         .hdr.flags = VHOST_USER_VERSION,
1849     };
1850 
1851     ret = vhost_user_write(dev, &msg, NULL, 0);
1852     if (ret < 0) {
1853         error_setg(errp, "Failed to send postcopy_advise to vhost");
1854         return ret;
1855     }
1856 
1857     ret = vhost_user_read(dev, &msg);
1858     if (ret < 0) {
1859         error_setg(errp, "Failed to get postcopy_advise reply from vhost");
1860         return ret;
1861     }
1862 
1863     if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
1864         error_setg(errp, "Unexpected msg type. Expected %d received %d",
1865                      VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
1866         return -EPROTO;
1867     }
1868 
1869     if (msg.hdr.size) {
1870         error_setg(errp, "Received bad msg size.");
1871         return -EPROTO;
1872     }
1873     ufd = qemu_chr_fe_get_msgfd(chr);
1874     if (ufd < 0) {
1875         error_setg(errp, "%s: Failed to get ufd", __func__);
1876         return -EIO;
1877     }
1878     qemu_socket_set_nonblock(ufd);
1879 
1880     /* register ufd with userfault thread */
1881     u->postcopy_fd.fd = ufd;
1882     u->postcopy_fd.data = dev;
1883     u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
1884     u->postcopy_fd.waker = vhost_user_postcopy_waker;
1885     u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
1886     postcopy_register_shared_ufd(&u->postcopy_fd);
1887     return 0;
1888 #else
1889     error_setg(errp, "Postcopy not supported on non-Linux systems");
1890     return -ENOSYS;
1891 #endif
1892 }
1893 
1894 /*
1895  * Called at the switch to postcopy on reception of the 'listen' command.
1896  */
1897 static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
1898 {
1899     struct vhost_user *u = dev->opaque;
1900     int ret;
1901     VhostUserMsg msg = {
1902         .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
1903         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1904     };
1905     u->postcopy_listen = true;
1906 
1907     trace_vhost_user_postcopy_listen();
1908 
1909     ret = vhost_user_write(dev, &msg, NULL, 0);
1910     if (ret < 0) {
1911         error_setg(errp, "Failed to send postcopy_listen to vhost");
1912         return ret;
1913     }
1914 
1915     ret = process_message_reply(dev, &msg);
1916     if (ret) {
1917         error_setg(errp, "Failed to receive reply to postcopy_listen");
1918         return ret;
1919     }
1920 
1921     return 0;
1922 }
1923 
1924 /*
1925  * Called at the end of postcopy
1926  */
1927 static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
1928 {
1929     VhostUserMsg msg = {
1930         .hdr.request = VHOST_USER_POSTCOPY_END,
1931         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1932     };
1933     int ret;
1934     struct vhost_user *u = dev->opaque;
1935 
1936     trace_vhost_user_postcopy_end_entry();
1937 
1938     ret = vhost_user_write(dev, &msg, NULL, 0);
1939     if (ret < 0) {
1940         error_setg(errp, "Failed to send postcopy_end to vhost");
1941         return ret;
1942     }
1943 
1944     ret = process_message_reply(dev, &msg);
1945     if (ret) {
1946         error_setg(errp, "Failed to receive reply to postcopy_end");
1947         return ret;
1948     }
1949     postcopy_unregister_shared_ufd(&u->postcopy_fd);
1950     close(u->postcopy_fd.fd);
1951     u->postcopy_fd.handler = NULL;
1952 
1953     trace_vhost_user_postcopy_end_exit();
1954 
1955     return 0;
1956 }
1957 
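/*
 * Postcopy notifier: check at probe time that the backend advertises
 * VHOST_USER_PROTOCOL_F_PAGEFAULT, and forward the advise/listen/end
 * transitions to the backend.
 */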
1958 static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
1959                                         void *opaque)
1960 {
1961     struct PostcopyNotifyData *pnd = opaque;
1962     struct vhost_user *u = container_of(notifier, struct vhost_user,
1963                                          postcopy_notifier);
1964     struct vhost_dev *dev = u->dev;
1965 
1966     switch (pnd->reason) {
1967     case POSTCOPY_NOTIFY_PROBE:
1968         if (!virtio_has_feature(dev->protocol_features,
1969                                 VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
1970             /* TODO: Get the device name into this error somehow */
1971             error_setg(pnd->errp,
1972                        "vhost-user backend not capable of postcopy");
1973             return -ENOENT;
1974         }
1975         break;
1976 
1977     case POSTCOPY_NOTIFY_INBOUND_ADVISE:
1978         return vhost_user_postcopy_advise(dev, pnd->errp);
1979 
1980     case POSTCOPY_NOTIFY_INBOUND_LISTEN:
1981         return vhost_user_postcopy_listen(dev, pnd->errp);
1982 
1983     case POSTCOPY_NOTIFY_INBOUND_END:
1984         return vhost_user_postcopy_end(dev, pnd->errp);
1985 
1986     default:
1987         /* We ignore notifications we don't know */
1988         break;
1989     }
1990 
1991     return 0;
1992 }
1993 
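/*
 * Backend init: negotiate features and protocol features with the
 * backend, validate queue and memory-slot limits, install a migration
 * blocker when shared-memory logging is unavailable, set up the slave
 * channel (only for vq_index 0) and register the postcopy notifier.
 */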
1994 static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
1995                                    Error **errp)
1996 {
1997     uint64_t features, ram_slots;
1998     struct vhost_user *u;
1999     VhostUserState *vus = (VhostUserState *) opaque;
2000     int err;
2001 
2002     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2003 
2004     u = g_new0(struct vhost_user, 1);
2005     u->user = vus;
2006     u->dev = dev;
2007     dev->opaque = u;
2008 
2009     err = vhost_user_get_features(dev, &features);
2010     if (err < 0) {
2011         error_setg_errno(errp, -err, "vhost_backend_init failed");
2012         return err;
2013     }
2014 
2015     if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
2016         bool supports_f_config = vus->supports_config ||
2017             (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
2018         uint64_t protocol_features;
2019 
2020         dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
2021 
2022         err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
2023                                  &protocol_features);
2024         if (err < 0) {
2025             error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2026             return -EPROTO;
2027         }
2028 
2029         /*
2030          * We will use all the protocol features we support - although
2031          * we suppress F_CONFIG if we know QEMU's internal code cannot
2032          * support it.
2033          */
2034         protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;
2035 
2036         if (supports_f_config) {
2037             if (!virtio_has_feature(protocol_features,
2038                                     VHOST_USER_PROTOCOL_F_CONFIG)) {
2039                 error_setg(errp, "vhost-user device expecting "
2040                            "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
2041                            "not support it.");
2042                 return -EPROTO;
2043             }
2044         } else {
2045             if (virtio_has_feature(protocol_features,
2046                                    VHOST_USER_PROTOCOL_F_CONFIG)) {
2047                 warn_report("vhost-user backend supports "
2048                             "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
2049                 protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
2050             }
2051         }
2052 
2053         /* final set of protocol features */
2054         dev->protocol_features = protocol_features;
2055         err = vhost_user_set_protocol_features(dev, dev->protocol_features);
2056         if (err < 0) {
2057             error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2058             return -EPROTO;
2059         }
2060 
2061         /* Query the maximum number of queues if the backend supports multiqueue */
2062         if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
2063             err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
2064                                      &dev->max_queues);
2065             if (err < 0) {
2066                 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2067                 return -EPROTO;
2068             }
2069         } else {
2070             dev->max_queues = 1;
2071         }
2072 
2073         if (dev->num_queues && dev->max_queues < dev->num_queues) {
2074             error_setg(errp, "The maximum number of queues supported by the "
2075                        "backend is %" PRIu64, dev->max_queues);
2076             return -EINVAL;
2077         }
2078 
2079         if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
2080                 !(virtio_has_feature(dev->protocol_features,
2081                     VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
2082                  virtio_has_feature(dev->protocol_features,
2083                     VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
2084             error_setg(errp, "IOMMU support requires reply-ack and "
2085                        "slave-req protocol features.");
2086             return -EINVAL;
2087         }
2088 
2089         /* get max memory regions if backend supports configurable RAM slots */
2090         if (!virtio_has_feature(dev->protocol_features,
2091                                 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
2092             u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
2093         } else {
2094             err = vhost_user_get_max_memslots(dev, &ram_slots);
2095             if (err < 0) {
2096                 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2097                 return -EPROTO;
2098             }
2099 
2100             if (ram_slots < u->user->memory_slots) {
2101                 error_setg(errp, "The backend specified a max ram slots limit "
2102                            "of %" PRIu64", when the prior validated limit was "
2103                            "%d. This limit should never decrease.", ram_slots,
2104                            u->user->memory_slots);
2105                 return -EINVAL;
2106             }
2107 
2108             u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
2109         }
2110     }
2111 
2112     if (dev->migration_blocker == NULL &&
2113         !virtio_has_feature(dev->protocol_features,
2114                             VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
2115         error_setg(&dev->migration_blocker,
2116                    "Migration disabled: vhost-user backend lacks "
2117                    "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
2118     }
2119 
2120     if (dev->vq_index == 0) {
2121         err = vhost_setup_slave_channel(dev);
2122         if (err < 0) {
2123             error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2124             return -EPROTO;
2125         }
2126     }
2127 
2128     u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
2129     postcopy_add_notifier(&u->postcopy_notifier);
2130 
2131     return 0;
2132 }
2133 
2134 static int vhost_user_backend_cleanup(struct vhost_dev *dev)
2135 {
2136     struct vhost_user *u;
2137 
2138     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2139 
2140     u = dev->opaque;
2141     if (u->postcopy_notifier.notify) {
2142         postcopy_remove_notifier(&u->postcopy_notifier);
2143         u->postcopy_notifier.notify = NULL;
2144     }
2145     u->postcopy_listen = false;
2146     if (u->postcopy_fd.handler) {
2147         postcopy_unregister_shared_ufd(&u->postcopy_fd);
2148         close(u->postcopy_fd.fd);
2149         u->postcopy_fd.handler = NULL;
2150     }
2151     if (u->slave_ioc) {
2152         close_slave_channel(u);
2153     }
2154     g_free(u->region_rb);
2155     u->region_rb = NULL;
2156     g_free(u->region_rb_offset);
2157     u->region_rb_offset = NULL;
2158     u->region_rb_len = 0;
2159     g_free(u);
2160     dev->opaque = NULL;
2161 
2162     return 0;
2163 }
2164 
2165 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
2166 {
2167     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
2168 
2169     return idx;
2170 }
2171 
2172 static int vhost_user_memslots_limit(struct vhost_dev *dev)
2173 {
2174     struct vhost_user *u = dev->opaque;
2175 
2176     return u->user->memory_slots;
2177 }
2178 
2179 static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
2180 {
2181     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2182 
2183     return virtio_has_feature(dev->protocol_features,
2184                               VHOST_USER_PROTOCOL_F_LOG_SHMFD);
2185 }
2186 
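/*
 * After migration, ask the backend to announce the guest's new location
 * by sending a RARP (VHOST_USER_SEND_RARP), unless the guest can
 * announce itself via VIRTIO_NET_F_GUEST_ANNOUNCE.
 */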
2187 static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
2188 {
2189     VhostUserMsg msg = { };
2190 
2191     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2192 
2193     /* If the guest supports GUEST_ANNOUNCE, do nothing */
2194     if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
2195         return 0;
2196     }
2197 
2198     /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
2199     /* If the backend supports VHOST_USER_PROTOCOL_F_RARP, ask it to send the RARP */
2200                            VHOST_USER_PROTOCOL_F_RARP)) {
2201         msg.hdr.request = VHOST_USER_SEND_RARP;
2202         msg.hdr.flags = VHOST_USER_VERSION;
2203         memcpy((char *)&msg.payload.u64, mac_addr, 6);
2204         msg.hdr.size = sizeof(msg.payload.u64);
2205 
2206         return vhost_user_write(dev, &msg, NULL, 0);
2207     }
2208     return -ENOTSUP;
2209 }
2210 
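/*
 * Two adjacent memory regions can only be merged into one vhost-user
 * region if they are backed by the same file descriptor.
 */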
2211 static bool vhost_user_can_merge(struct vhost_dev *dev,
2212                                  uint64_t start1, uint64_t size1,
2213                                  uint64_t start2, uint64_t size2)
2214 {
2215     ram_addr_t offset;
2216     int mfd, rfd;
2217 
2218     (void)vhost_user_get_mr_data(start1, &offset, &mfd);
2219     (void)vhost_user_get_mr_data(start2, &offset, &rfd);
2220 
2221     return mfd == rfd;
2222 }
2223 
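/*
 * Advertise the configured MTU to the backend with
 * VHOST_USER_NET_SET_MTU, waiting for a REPLY_ACK if the backend
 * supports it.
 */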
2224 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2225 {
2226     VhostUserMsg msg;
2227     bool reply_supported = virtio_has_feature(dev->protocol_features,
2228                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
2229     int ret;
2230 
2231     if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2232         return 0;
2233     }
2234 
2235     msg.hdr.request = VHOST_USER_NET_SET_MTU;
2236     msg.payload.u64 = mtu;
2237     msg.hdr.size = sizeof(msg.payload.u64);
2238     msg.hdr.flags = VHOST_USER_VERSION;
2239     if (reply_supported) {
2240         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2241     }
2242 
2243     ret = vhost_user_write(dev, &msg, NULL, 0);
2244     if (ret < 0) {
2245         return ret;
2246     }
2247 
2248     /* If reply_ack is supported, the slave must ack that the specified MTU is valid */
2249     if (reply_supported) {
2250         return process_message_reply(dev, &msg);
2251     }
2252 
2253     return 0;
2254 }
2255 
2256 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2257                                             struct vhost_iotlb_msg *imsg)
2258 {
2259     int ret;
2260     VhostUserMsg msg = {
2261         .hdr.request = VHOST_USER_IOTLB_MSG,
2262         .hdr.size = sizeof(msg.payload.iotlb),
2263         .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2264         .payload.iotlb = *imsg,
2265     };
2266 
2267     ret = vhost_user_write(dev, &msg, NULL, 0);
2268     if (ret < 0) {
2269         return ret;
2270     }
2271 
2272     return process_message_reply(dev, &msg);
2273 }
2274 
2275 
2276 static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
2277 {
2278     /* No-op as the receive channel is not dedicated to IOTLB messages. */
2279 }
2280 
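/*
 * Read the device config space from the backend with
 * VHOST_USER_GET_CONFIG and copy it into the supplied buffer.
 */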
2281 static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
2282                                  uint32_t config_len, Error **errp)
2283 {
2284     int ret;
2285     VhostUserMsg msg = {
2286         .hdr.request = VHOST_USER_GET_CONFIG,
2287         .hdr.flags = VHOST_USER_VERSION,
2288         .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
2289     };
2290 
2291     if (!virtio_has_feature(dev->protocol_features,
2292                 VHOST_USER_PROTOCOL_F_CONFIG)) {
2293         error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
2294         return -EINVAL;
2295     }
2296 
2297     assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
2298 
2299     msg.payload.config.offset = 0;
2300     msg.payload.config.size = config_len;
2301     ret = vhost_user_write(dev, &msg, NULL, 0);
2302     if (ret < 0) {
2303         error_setg_errno(errp, -ret, "vhost_get_config failed");
2304         return ret;
2305     }
2306 
2307     ret = vhost_user_read(dev, &msg);
2308     if (ret < 0) {
2309         error_setg_errno(errp, -ret, "vhost_get_config failed");
2310         return ret;
2311     }
2312 
2313     if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
2314         error_setg(errp,
2315                    "Received unexpected msg type. Expected %d received %d",
2316                    VHOST_USER_GET_CONFIG, msg.hdr.request);
2317         return -EPROTO;
2318     }
2319 
2320     if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
2321         error_setg(errp, "Received bad msg size.");
2322         return -EPROTO;
2323     }
2324 
2325     memcpy(config, msg.payload.config.region, config_len);
2326 
2327     return 0;
2328 }
2329 
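/*
 * Write a slice of the device config space to the backend with
 * VHOST_USER_SET_CONFIG.
 */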
2330 static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2331                                  uint32_t offset, uint32_t size, uint32_t flags)
2332 {
2333     int ret;
2334     uint8_t *p;
2335     bool reply_supported = virtio_has_feature(dev->protocol_features,
2336                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
2337 
2338     VhostUserMsg msg = {
2339         .hdr.request = VHOST_USER_SET_CONFIG,
2340         .hdr.flags = VHOST_USER_VERSION,
2341         .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
2342     };
2343 
2344     if (!virtio_has_feature(dev->protocol_features,
2345                 VHOST_USER_PROTOCOL_F_CONFIG)) {
2346         return -ENOTSUP;
2347     }
2348 
2349     if (reply_supported) {
2350         msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2351     }
2352 
2353     if (size > VHOST_USER_MAX_CONFIG_SIZE) {
2354         return -EINVAL;
2355     }
2356 
2357     msg.payload.config.offset = offset;
2358     msg.payload.config.size = size;
2359     msg.payload.config.flags = flags;
2360     p = msg.payload.config.region;
2361     memcpy(p, data, size);
2362 
2363     ret = vhost_user_write(dev, &msg, NULL, 0);
2364     if (ret < 0) {
2365         return ret;
2366     }
2367 
2368     if (reply_supported) {
2369         return process_message_reply(dev, &msg);
2370     }
2371 
2372     return 0;
2373 }
2374 
2375 static int vhost_user_crypto_create_session(struct vhost_dev *dev,
2376                                             void *session_info,
2377                                             uint64_t *session_id)
2378 {
2379     int ret;
2380     bool crypto_session = virtio_has_feature(dev->protocol_features,
2381                                        VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2382     CryptoDevBackendSymSessionInfo *sess_info = session_info;
2383     VhostUserMsg msg = {
2384         .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
2385         .hdr.flags = VHOST_USER_VERSION,
2386         .hdr.size = sizeof(msg.payload.session),
2387     };
2388 
2389     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2390 
2391     if (!crypto_session) {
2392         error_report("vhost-user trying to send unhandled ioctl");
2393         return -ENOTSUP;
2394     }
2395 
2396     memcpy(&msg.payload.session.session_setup_data, sess_info,
2397               sizeof(CryptoDevBackendSymSessionInfo));
2398     if (sess_info->key_len) {
2399         memcpy(&msg.payload.session.key, sess_info->cipher_key,
2400                sess_info->key_len);
2401     }
2402     if (sess_info->auth_key_len > 0) {
2403         memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
2404                sess_info->auth_key_len);
2405     }
2406     ret = vhost_user_write(dev, &msg, NULL, 0);
2407     if (ret < 0) {
2408         error_report("vhost_user_write() return %d, create session failed",
2409                      ret);
2410         return ret;
2411     }
2412 
2413     ret = vhost_user_read(dev, &msg);
2414     if (ret < 0) {
2415         error_report("vhost_user_read() return %d, create session failed",
2416                      ret);
2417         return ret;
2418     }
2419 
2420     if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
2421         error_report("Received unexpected msg type. Expected %d received %d",
2422                      VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
2423         return -EPROTO;
2424     }
2425 
2426     if (msg.hdr.size != sizeof(msg.payload.session)) {
2427         error_report("Received bad msg size.");
2428         return -EPROTO;
2429     }
2430 
2431     if (msg.payload.session.session_id < 0) {
2432         error_report("Bad session id: %" PRId64 "",
2433                               msg.payload.session.session_id);
2434         return -EINVAL;
2435     }
2436     *session_id = msg.payload.session.session_id;
2437 
2438     return 0;
2439 }
2440 
2441 static int
2442 vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2443 {
2444     int ret;
2445     bool crypto_session = virtio_has_feature(dev->protocol_features,
2446                                        VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2447     VhostUserMsg msg = {
2448         .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2449         .hdr.flags = VHOST_USER_VERSION,
2450         .hdr.size = sizeof(msg.payload.u64),
2451     };
2452     msg.payload.u64 = session_id;
2453 
2454     if (!crypto_session) {
2455         error_report("vhost-user trying to send unhandled ioctl");
2456         return -ENOTSUP;
2457     }
2458 
2459     ret = vhost_user_write(dev, &msg, NULL, 0);
2460     if (ret < 0) {
2461         error_report("vhost_user_write() return %d, close session failed",
2462                      ret);
2463         return ret;
2464     }
2465 
2466     return 0;
2467 }
2468 
2469 static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
2470                                           MemoryRegionSection *section)
2471 {
2472     bool result;
2473 
2474     result = memory_region_get_fd(section->mr) >= 0;
2475 
2476     return result;
2477 }
2478 
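/*
 * Ask the backend for its inflight I/O tracking area
 * (VHOST_USER_GET_INFLIGHT_FD) and mmap the returned fd so in-flight
 * requests can be tracked across a backend restart.
 */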
2479 static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
2480                                       uint16_t queue_size,
2481                                       struct vhost_inflight *inflight)
2482 {
2483     void *addr;
2484     int fd;
2485     int ret;
2486     struct vhost_user *u = dev->opaque;
2487     CharBackend *chr = u->user->chr;
2488     VhostUserMsg msg = {
2489         .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
2490         .hdr.flags = VHOST_USER_VERSION,
2491         .payload.inflight.num_queues = dev->nvqs,
2492         .payload.inflight.queue_size = queue_size,
2493         .hdr.size = sizeof(msg.payload.inflight),
2494     };
2495 
2496     if (!virtio_has_feature(dev->protocol_features,
2497                             VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2498         return 0;
2499     }
2500 
2501     ret = vhost_user_write(dev, &msg, NULL, 0);
2502     if (ret < 0) {
2503         return ret;
2504     }
2505 
2506     ret = vhost_user_read(dev, &msg);
2507     if (ret < 0) {
2508         return ret;
2509     }
2510 
2511     if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
2512         error_report("Received unexpected msg type. "
2513                      "Expected %d received %d",
2514                      VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
2515         return -EPROTO;
2516     }
2517 
2518     if (msg.hdr.size != sizeof(msg.payload.inflight)) {
2519         error_report("Received bad msg size.");
2520         return -EPROTO;
2521     }
2522 
2523     if (!msg.payload.inflight.mmap_size) {
2524         return 0;
2525     }
2526 
2527     fd = qemu_chr_fe_get_msgfd(chr);
2528     if (fd < 0) {
2529         error_report("Failed to get mem fd");
2530         return -EIO;
2531     }
2532 
2533     addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
2534                 MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
2535 
2536     if (addr == MAP_FAILED) {
2537         error_report("Failed to mmap mem fd");
2538         close(fd);
2539         return -EFAULT;
2540     }
2541 
2542     inflight->addr = addr;
2543     inflight->fd = fd;
2544     inflight->size = msg.payload.inflight.mmap_size;
2545     inflight->offset = msg.payload.inflight.mmap_offset;
2546     inflight->queue_size = queue_size;
2547 
2548     return 0;
2549 }
2550 
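/*
 * Hand a previously obtained inflight area back to the backend with
 * VHOST_USER_SET_INFLIGHT_FD (e.g. after it has reconnected).
 */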
2551 static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2552                                       struct vhost_inflight *inflight)
2553 {
2554     VhostUserMsg msg = {
2555         .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2556         .hdr.flags = VHOST_USER_VERSION,
2557         .payload.inflight.mmap_size = inflight->size,
2558         .payload.inflight.mmap_offset = inflight->offset,
2559         .payload.inflight.num_queues = dev->nvqs,
2560         .payload.inflight.queue_size = inflight->queue_size,
2561         .hdr.size = sizeof(msg.payload.inflight),
2562     };
2563 
2564     if (!virtio_has_feature(dev->protocol_features,
2565                             VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2566         return 0;
2567     }
2568 
2569     return vhost_user_write(dev, &msg, &inflight->fd, 1);
2570 }
2571 
2572 static void vhost_user_state_destroy(gpointer data)
2573 {
2574     VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
2575     if (n) {
2576         vhost_user_host_notifier_remove(n, NULL);
2577         object_unparent(OBJECT(&n->mr));
2578         /*
2579          * We can't free until vhost_user_host_notifier_remove has
2580          * done its thing, so schedule the free with RCU.
2581          */
2582         g_free_rcu(n, rcu);
2583     }
2584 }
2585 
2586 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
2587 {
2588     if (user->chr) {
2589         error_setg(errp, "Cannot initialize vhost-user state");
2590         return false;
2591     }
2592     user->chr = chr;
2593     user->memory_slots = 0;
2594     user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
2595                                            &vhost_user_state_destroy);
2596     return true;
2597 }
2598 
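/*
 * Counterpart of vhost_user_init(): free the host notifier array
 * (destroying each notifier) and detach from the chardev.
 */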
2599 void vhost_user_cleanup(VhostUserState *user)
2600 {
2601     if (!user->chr) {
2602         return;
2603     }
2604     memory_region_transaction_begin();
2605     user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
2606     memory_region_transaction_commit();
2607     user->chr = NULL;
2608 }
2609 
2610 const VhostOps user_ops = {
2611         .backend_type = VHOST_BACKEND_TYPE_USER,
2612         .vhost_backend_init = vhost_user_backend_init,
2613         .vhost_backend_cleanup = vhost_user_backend_cleanup,
2614         .vhost_backend_memslots_limit = vhost_user_memslots_limit,
2615         .vhost_set_log_base = vhost_user_set_log_base,
2616         .vhost_set_mem_table = vhost_user_set_mem_table,
2617         .vhost_set_vring_addr = vhost_user_set_vring_addr,
2618         .vhost_set_vring_endian = vhost_user_set_vring_endian,
2619         .vhost_set_vring_num = vhost_user_set_vring_num,
2620         .vhost_set_vring_base = vhost_user_set_vring_base,
2621         .vhost_get_vring_base = vhost_user_get_vring_base,
2622         .vhost_set_vring_kick = vhost_user_set_vring_kick,
2623         .vhost_set_vring_call = vhost_user_set_vring_call,
2624         .vhost_set_vring_err = vhost_user_set_vring_err,
2625         .vhost_set_features = vhost_user_set_features,
2626         .vhost_get_features = vhost_user_get_features,
2627         .vhost_set_owner = vhost_user_set_owner,
2628         .vhost_reset_device = vhost_user_reset_device,
2629         .vhost_get_vq_index = vhost_user_get_vq_index,
2630         .vhost_set_vring_enable = vhost_user_set_vring_enable,
2631         .vhost_requires_shm_log = vhost_user_requires_shm_log,
2632         .vhost_migration_done = vhost_user_migration_done,
2633         .vhost_backend_can_merge = vhost_user_can_merge,
2634         .vhost_net_set_mtu = vhost_user_net_set_mtu,
2635         .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
2636         .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
2637         .vhost_get_config = vhost_user_get_config,
2638         .vhost_set_config = vhost_user_set_config,
2639         .vhost_crypto_create_session = vhost_user_crypto_create_session,
2640         .vhost_crypto_close_session = vhost_user_crypto_close_session,
2641         .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
2642         .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
2643         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
2644 };
2645