15f6f6664SNikolay Nikolaev /*
25f6f6664SNikolay Nikolaev * vhost-user
35f6f6664SNikolay Nikolaev *
45f6f6664SNikolay Nikolaev * Copyright (c) 2013 Virtual Open Systems Sarl.
55f6f6664SNikolay Nikolaev *
65f6f6664SNikolay Nikolaev * This work is licensed under the terms of the GNU GPL, version 2 or later.
75f6f6664SNikolay Nikolaev * See the COPYING file in the top-level directory.
85f6f6664SNikolay Nikolaev *
95f6f6664SNikolay Nikolaev */
105f6f6664SNikolay Nikolaev
119b8bfe21SPeter Maydell #include "qemu/osdep.h"
12da34e65cSMarkus Armbruster #include "qapi/error.h"
1316094766SAlbert Esteve #include "hw/virtio/virtio-dmabuf.h"
145f6f6664SNikolay Nikolaev #include "hw/virtio/vhost.h"
155c33f978SGowrishankar Muthukrishnan #include "hw/virtio/virtio-crypto.h"
164d0cf552STiwei Bie #include "hw/virtio/vhost-user.h"
175f6f6664SNikolay Nikolaev #include "hw/virtio/vhost-backend.h"
1844866521STiwei Bie #include "hw/virtio/virtio.h"
193e866365SThibaut Collet #include "hw/virtio/virtio-net.h"
204d43a603SMarc-André Lureau #include "chardev/char-fe.h"
2157dc0217SGreg Kurz #include "io/channel-socket.h"
225f6f6664SNikolay Nikolaev #include "sysemu/kvm.h"
235f6f6664SNikolay Nikolaev #include "qemu/error-report.h"
24db725815SMarkus Armbruster #include "qemu/main-loop.h"
2516094766SAlbert Esteve #include "qemu/uuid.h"
265f6f6664SNikolay Nikolaev #include "qemu/sockets.h"
2771e076a0SAlex Bennée #include "sysemu/runstate.h"
28efbfeb81SGonglei #include "sysemu/cryptodev.h"
299ccbfe14SDr. David Alan Gilbert #include "migration/postcopy-ram.h"
306864a7b5SDr. David Alan Gilbert #include "trace.h"
310b0af4d6SXueming Li #include "exec/ramblock.h"
325f6f6664SNikolay Nikolaev
335f6f6664SNikolay Nikolaev #include <sys/ioctl.h>
345f6f6664SNikolay Nikolaev #include <sys/socket.h>
355f6f6664SNikolay Nikolaev #include <sys/un.h>
3618658a3cSPaolo Bonzini
3718658a3cSPaolo Bonzini #include "standard-headers/linux/vhost_types.h"
3818658a3cSPaolo Bonzini
3918658a3cSPaolo Bonzini #ifdef CONFIG_LINUX
40375318d0SDr. David Alan Gilbert #include <linux/userfaultfd.h>
4118658a3cSPaolo Bonzini #endif
425f6f6664SNikolay Nikolaev
4327598393SRaphael Norwitz #define VHOST_MEMORY_BASELINE_NREGIONS 8
44dcb10c00SMichael S. Tsirkin #define VHOST_USER_F_PROTOCOL_FEATURES 30
45a84ec993SMaxime Coquelin #define VHOST_USER_BACKEND_MAX_FDS 8
46e2051e9eSYuanhan Liu
47bab10530SDavid Hildenbrand #if defined(TARGET_PPC) || defined(TARGET_PPC64)
4827598393SRaphael Norwitz #include "hw/ppc/spapr.h"
4927598393SRaphael Norwitz #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
5027598393SRaphael Norwitz
5127598393SRaphael Norwitz #else
5227598393SRaphael Norwitz #define VHOST_USER_MAX_RAM_SLOTS 512
5327598393SRaphael Norwitz #endif
5427598393SRaphael Norwitz
5527598393SRaphael Norwitz /*
564c3e257bSChangpeng Liu * Maximum size of virtio device config space
574c3e257bSChangpeng Liu */
584c3e257bSChangpeng Liu #define VHOST_USER_MAX_CONFIG_SIZE 256
594c3e257bSChangpeng Liu
60de1372d4SThibaut Collet #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
615f6f6664SNikolay Nikolaev
/*
 * Control-message request codes sent over the vhost-user socket.
 * The numeric values are part of the vhost-user wire protocol (ABI)
 * and must never be renumbered or reused.
 */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_BACKEND_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_SET_STATUS = 39,
    VHOST_USER_GET_STATUS = 40,
    VHOST_USER_GET_SHARED_OBJECT = 41,
    VHOST_USER_SET_DEVICE_STATE_FD = 42,
    VHOST_USER_CHECK_DEVICE_STATE = 43,
    VHOST_USER_MAX
} VhostUserRequest;
1095f6f6664SNikolay Nikolaev
/*
 * Request codes for messages initiated by the back-end on the
 * backend (slave) channel.  Values are wire-protocol ABI; the gap at
 * 4-5 is intentional and must be preserved.
 */
typedef enum VhostUserBackendRequest {
    VHOST_USER_BACKEND_NONE = 0,
    VHOST_USER_BACKEND_IOTLB_MSG = 1,
    VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
    VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
    VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
    VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;
1204bbeeba0SMarc-André Lureau
/* One guest memory region as described on the wire. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* GPA of the region start */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* QEMU virtual address of the region */
    uint64_t mmap_offset;       /* offset into the mmap'able fd sent with it */
} VhostUserMemoryRegion;
1275f6f6664SNikolay Nikolaev
/* Payload of VHOST_USER_SET_MEM_TABLE: the full region table. */
typedef struct VhostUserMemory {
    uint32_t nregions;          /* number of valid entries in regions[] */
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;
1335f6f6664SNikolay Nikolaev
/*
 * Payload carrying a single memory region (VHOST_USER_ADD_MEM_REG /
 * VHOST_USER_REM_MEM_REG).  The leading padding is part of the wire
 * format — presumably for 64-bit alignment; see the protocol spec.
 */
typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;
138f1aeb14bSRaphael Norwitz
/* Payload of VHOST_USER_SET_LOG_BASE: dirty-log shared-memory layout. */
typedef struct VhostUserLog {
    uint64_t mmap_size;         /* size of the log mapping in bytes */
    uint64_t mmap_offset;       /* offset of the log within the fd */
} VhostUserLog;
1432b8819c6SVictor Kaplansky
/* Payload of VHOST_USER_GET_CONFIG / VHOST_USER_SET_CONFIG. */
typedef struct VhostUserConfig {
    uint32_t offset;            /* offset into the device config space */
    uint32_t size;              /* number of bytes in region[] */
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;
1504c3e257bSChangpeng Liu
/* Key-length limits for vhost-user-crypto session setup. */
#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
#define VHOST_CRYPTO_ASYM_MAX_KEY_LEN        1024

/*
 * Payload of VHOST_USER_CREATE_CRYPTO_SESSION.  op_code selects which
 * arm of the union is valid; the back-end fills in session_id on reply.
 */
typedef struct VhostUserCryptoSession {
    uint64_t op_code;
    union {
        struct {
            CryptoDevBackendSymSessionInfo session_setup_data;
            uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
            uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
        } sym;
        struct {
            CryptoDevBackendAsymSessionInfo session_setup_data;
            uint8_t key[VHOST_CRYPTO_ASYM_MAX_KEY_LEN];
        } asym;
    } u;

    /* session id for success, -1 on errors */
    int64_t session_id;
} VhostUserCryptoSession;
172efbfeb81SGonglei
/*
 * Size of the fixed VhostUserConfig header (offset + size + flags),
 * i.e. the config payload without the variable-length region[] data.
 * The dummy variable only exists so sizeof() can be taken per-field.
 */
static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))
1774c3e257bSChangpeng Liu
/* Payload describing a mmap'able vring area (host-notifier messages). */
typedef struct VhostUserVringArea {
    uint64_t u64;               /* vring index plus flag bits */
    uint64_t size;              /* size of the area to map */
    uint64_t offset;            /* offset of the area within the fd */
} VhostUserVringArea;
18344866521STiwei Bie
/* Payload of VHOST_USER_GET/SET_INFLIGHT_FD: inflight-tracking shmem. */
typedef struct VhostUserInflight {
    uint64_t mmap_size;         /* size of the inflight mapping */
    uint64_t mmap_offset;       /* offset of the mapping within the fd */
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;
1905ad204bfSXie Yongji
/* UUID naming a shared object (dmabuf) in the shared-object messages. */
typedef struct VhostUserShared {
    unsigned char uuid[16];
} VhostUserShared;
19416094766SAlbert Esteve
/*
 * Fixed header preceding every vhost-user message.  The low two bits
 * of flags carry the protocol version; REPLY marks a response and
 * NEED_REPLY asks the peer to ack with a u64 status.
 */
typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;
20424e34754SMichael S. Tsirkin
/* Request payload of VHOST_USER_SET_DEVICE_STATE_FD */
typedef struct VhostUserTransferDeviceState {
    uint32_t direction;         /* save or load, per the migration spec */
    uint32_t phase;             /* migration phase of the transfer */
} VhostUserTransferDeviceState;
210cda83adcSHanna Czenczek
/*
 * Per-request payload; the active member is implied by the request
 * code in the header.  u64 doubles as the reply status word.
 */
typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg mem_reg;
        VhostUserLog log;
        struct vhost_iotlb_msg iotlb;
        VhostUserConfig config;
        VhostUserCryptoSession session;
        VhostUserVringArea area;
        VhostUserInflight inflight;
        VhostUserShared object;
        VhostUserTransferDeviceState transfer_state;
} VhostUserPayload;
22824e34754SMichael S. Tsirkin
/* A complete on-the-wire message: fixed header plus request payload. */
typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
2415f6f6664SNikolay Nikolaev
/* Per-vhost_dev vhost-user state (dev->opaque for this backend). */
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    /* Channel for requests initiated by the back-end, if negotiated */
    QIOChannel *backend_ioc;
    GSource *backend_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    /* Client-side mapping bases reported during postcopy, per region */
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

/* Scratch entry used when diffing shadow regions against dev->mem. */
struct scrub_regions {
    struct vhost_memory_region *region; /* the region being added/removed */
    int reg_idx;                        /* index in the region table */
    int fd_idx;                         /* index in the fd array sent */
};
2742152f3feSMarc-André Lureau
/*
 * Read and validate the fixed message header from the back-end.
 * Returns 0 on success or a negative errno value on failure.
 */
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    const int hdr_len = VHOST_USER_HDR_SIZE;
    int nread;

    nread = qemu_chr_fe_read_all(chr, (uint8_t *)msg, hdr_len);
    if (nread != hdr_len) {
        /* Snapshot errno before error_report() can clobber it. */
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", nread, hdr_len, msg->hdr.request);
        return nread < 0 ? -saved_errno : -EIO;
    }

    /* A response must carry exactly REPLY + our protocol version. */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    trace_vhost_user_read(msg->hdr.request, msg->hdr.flags);

    return 0;
}
3029af84c02SMarc-André Lureau
/*
 * Read a full reply (header plus payload) from the back-end into *msg.
 * Returns 0 on success or a negative errno value on failure.
 */
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret;

    ret = vhost_user_read_header(dev, msg);
    if (ret < 0) {
        return ret;
    }

    /* The advertised payload must fit in our buffer. */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        return -EPROTO;
    }

    if (msg->hdr.size) {
        uint8_t *payload = (uint8_t *)msg + VHOST_USER_HDR_SIZE;
        int payload_len = msg->hdr.size;
        int nread = qemu_chr_fe_read_all(chr, payload, payload_len);

        if (nread != payload_len) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", nread, msg->hdr.size);
            return nread < 0 ? -saved_errno : -EIO;
        }
    }

    return 0;
}
3375f6f6664SNikolay Nikolaev
process_message_reply(struct vhost_dev * dev,const VhostUserMsg * msg)338ca525ce5SPrerna Saxena static int process_message_reply(struct vhost_dev *dev,
3393cf7daf8SMaxime Coquelin const VhostUserMsg *msg)
340ca525ce5SPrerna Saxena {
341025faa87SRoman Kagan int ret;
34260cd1102SZhiyong Yang VhostUserMsg msg_reply;
343ca525ce5SPrerna Saxena
34424e34754SMichael S. Tsirkin if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
34560cd1102SZhiyong Yang return 0;
34660cd1102SZhiyong Yang }
34760cd1102SZhiyong Yang
348025faa87SRoman Kagan ret = vhost_user_read(dev, &msg_reply);
349025faa87SRoman Kagan if (ret < 0) {
350025faa87SRoman Kagan return ret;
351ca525ce5SPrerna Saxena }
352ca525ce5SPrerna Saxena
35324e34754SMichael S. Tsirkin if (msg_reply.hdr.request != msg->hdr.request) {
354ca525ce5SPrerna Saxena error_report("Received unexpected msg type. "
355ca525ce5SPrerna Saxena "Expected %d received %d",
35624e34754SMichael S. Tsirkin msg->hdr.request, msg_reply.hdr.request);
357025faa87SRoman Kagan return -EPROTO;
358ca525ce5SPrerna Saxena }
359ca525ce5SPrerna Saxena
360025faa87SRoman Kagan return msg_reply.payload.u64 ? -EIO : 0;
361ca525ce5SPrerna Saxena }
362ca525ce5SPrerna Saxena
/*
 * Tell whether a request affects the device as a whole (and therefore
 * must be sent only once, by the vhost_dev with vq_index 0) rather
 * than an individual virtqueue.
 */
static bool vhost_user_per_device_request(VhostUserRequest request)
{
    bool per_device;

    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
    case VHOST_USER_RESET_DEVICE:
    case VHOST_USER_ADD_MEM_REG:
    case VHOST_USER_REM_MEM_REG:
    case VHOST_USER_SET_LOG_BASE:
        per_device = true;
        break;
    default:
        per_device = false;
        break;
    }

    return per_device;
}
380b931bfbfSChangchun Ouyang
38121e70425SMarc-André Lureau /* most non-init callers ignore the error */
vhost_user_write(struct vhost_dev * dev,VhostUserMsg * msg,int * fds,int fd_num)38221e70425SMarc-André Lureau static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
38321e70425SMarc-André Lureau int *fds, int fd_num)
3845f6f6664SNikolay Nikolaev {
3852152f3feSMarc-André Lureau struct vhost_user *u = dev->opaque;
3864d0cf552STiwei Bie CharBackend *chr = u->user->chr;
38724e34754SMichael S. Tsirkin int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
3887305483aSYuanhan Liu
389b931bfbfSChangchun Ouyang /*
3900dcb4172STom Lonergan * Some devices, like virtio-scsi, are implemented as a single vhost_dev,
3910dcb4172STom Lonergan * while others, like virtio-net, contain multiple vhost_devs. For
3920dcb4172STom Lonergan * operations such as configuring device memory mappings or issuing device
3930dcb4172STom Lonergan * resets, which affect the whole device instead of individual VQs,
3940dcb4172STom Lonergan * vhost-user messages should only be sent once.
3950dcb4172STom Lonergan *
3960dcb4172STom Lonergan * Devices with multiple vhost_devs are given an associated dev->vq_index
3970dcb4172STom Lonergan * so per_device requests are only sent if vq_index is 0.
398b931bfbfSChangchun Ouyang */
3990dcb4172STom Lonergan if (vhost_user_per_device_request(msg->hdr.request)
4000dcb4172STom Lonergan && dev->vq_index != 0) {
40124e34754SMichael S. Tsirkin msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
402b931bfbfSChangchun Ouyang return 0;
403b931bfbfSChangchun Ouyang }
404b931bfbfSChangchun Ouyang
4056fab2f3fSMarc-André Lureau if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
406f6b85710SMarc-André Lureau error_report("Failed to set msg fds.");
407025faa87SRoman Kagan return -EINVAL;
4086fab2f3fSMarc-André Lureau }
4095f6f6664SNikolay Nikolaev
410f6b85710SMarc-André Lureau ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
411f6b85710SMarc-André Lureau if (ret != size) {
412025faa87SRoman Kagan int saved_errno = errno;
413f6b85710SMarc-André Lureau error_report("Failed to write msg."
414f6b85710SMarc-André Lureau " Wrote %d instead of %d.", ret, size);
415025faa87SRoman Kagan return ret < 0 ? -saved_errno : -EIO;
416f6b85710SMarc-André Lureau }
417f6b85710SMarc-André Lureau
4186ca6d8eeSAlex Bennée trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);
4196ca6d8eeSAlex Bennée
420f6b85710SMarc-André Lureau return 0;
4215f6f6664SNikolay Nikolaev }
4225f6f6664SNikolay Nikolaev
/*
 * Pass a GPU channel socket to the back-end via
 * VHOST_USER_GPU_SET_SOCKET.  Returns vhost_user_write()'s result
 * (0 or a negative errno value).
 */
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}
432bd2e44feSMarc-André Lureau
vhost_user_set_log_base(struct vhost_dev * dev,uint64_t base,struct vhost_log * log)43321e70425SMarc-André Lureau static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
4349a78a5ddSMarc-André Lureau struct vhost_log *log)
435c2bea314SMarc-André Lureau {
43627598393SRaphael Norwitz int fds[VHOST_USER_MAX_RAM_SLOTS];
4379a78a5ddSMarc-André Lureau size_t fd_num = 0;
4389a78a5ddSMarc-André Lureau bool shmfd = virtio_has_feature(dev->protocol_features,
4399a78a5ddSMarc-André Lureau VHOST_USER_PROTOCOL_F_LOG_SHMFD);
440025faa87SRoman Kagan int ret;
441c2bea314SMarc-André Lureau VhostUserMsg msg = {
44224e34754SMichael S. Tsirkin .hdr.request = VHOST_USER_SET_LOG_BASE,
44324e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION,
44448854f57SMichael S. Tsirkin .payload.log.mmap_size = log->size * sizeof(*(log->log)),
4452b8819c6SVictor Kaplansky .payload.log.mmap_offset = 0,
44624e34754SMichael S. Tsirkin .hdr.size = sizeof(msg.payload.log),
447c2bea314SMarc-André Lureau };
448c2bea314SMarc-André Lureau
449c98ac64cSYajun Wu /* Send only once with first queue pair */
450c98ac64cSYajun Wu if (dev->vq_index != 0) {
451c98ac64cSYajun Wu return 0;
452c98ac64cSYajun Wu }
453c98ac64cSYajun Wu
4549a78a5ddSMarc-André Lureau if (shmfd && log->fd != -1) {
4559a78a5ddSMarc-André Lureau fds[fd_num++] = log->fd;
4569a78a5ddSMarc-André Lureau }
4579a78a5ddSMarc-André Lureau
458025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, fds, fd_num);
459025faa87SRoman Kagan if (ret < 0) {
460025faa87SRoman Kagan return ret;
461c4843a45SMarc-André Lureau }
4629a78a5ddSMarc-André Lureau
4639a78a5ddSMarc-André Lureau if (shmfd) {
46424e34754SMichael S. Tsirkin msg.hdr.size = 0;
465025faa87SRoman Kagan ret = vhost_user_read(dev, &msg);
466025faa87SRoman Kagan if (ret < 0) {
467025faa87SRoman Kagan return ret;
4689a78a5ddSMarc-André Lureau }
4699a78a5ddSMarc-André Lureau
47024e34754SMichael S. Tsirkin if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
4719a78a5ddSMarc-André Lureau error_report("Received unexpected msg type. "
4729a78a5ddSMarc-André Lureau "Expected %d received %d",
47324e34754SMichael S. Tsirkin VHOST_USER_SET_LOG_BASE, msg.hdr.request);
474025faa87SRoman Kagan return -EPROTO;
4759a78a5ddSMarc-André Lureau }
4769a78a5ddSMarc-André Lureau }
477c2bea314SMarc-André Lureau
478c2bea314SMarc-André Lureau return 0;
479c2bea314SMarc-André Lureau }
480c2bea314SMarc-André Lureau
/*
 * Resolve a QEMU userspace address to its backing MemoryRegion.
 * On return, *offset is the offset within the region's mmap'able file
 * (region offset plus the RAMBlock's fd_offset) and *fd is the
 * region's file descriptor (or -1 if it has none).
 */
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    /* addr must round-trip through a host pointer without truncation. */
    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);
    *offset += mr->ram_block->fd_offset;

    return mr;
}
49323374a84SRaphael Norwitz
/*
 * Populate a wire-format region descriptor from an internal
 * vhost_memory_region, using the caller-supplied mmap offset.
 */
static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);

    *dst = (VhostUserMemoryRegion) {
        .guest_phys_addr = src->guest_phys_addr,
        .memory_size = src->memory_size,
        .userspace_addr = src->userspace_addr,
        .mmap_offset = mmap_offset,
    };
}
504ece99091SRaphael Norwitz
/*
 * Build a VHOST_USER_SET_MEM_TABLE message from dev->mem, collecting
 * the fd of every fd-backed region into fds[] (count in *fd_num).
 * When track_ramblocks is set, also record each region's RAMBlock and
 * in-block offset in u->region_rb[] / u->region_rb_offset[] for
 * postcopy use.  Returns 0 on success or a negative errno value.
 */
static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        /* Only fd-backed regions can be shared with the back-end. */
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                /* Message's fixed-size region array is full. */
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            /* Clear stale postcopy bookkeeping for non-fd regions. */
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        /* vhost-user requires shareable (fd-backed) guest memory. */
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    /* Payload size covers only the populated region entries. */
    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}
5602d9da9dfSRaphael Norwitz
reg_equal(struct vhost_memory_region * shadow_reg,struct vhost_memory_region * vdev_reg)561f1aeb14bSRaphael Norwitz static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
562f1aeb14bSRaphael Norwitz struct vhost_memory_region *vdev_reg)
563f1aeb14bSRaphael Norwitz {
564f1aeb14bSRaphael Norwitz return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
565f1aeb14bSRaphael Norwitz shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
566f1aeb14bSRaphael Norwitz shadow_reg->memory_size == vdev_reg->memory_size;
567f1aeb14bSRaphael Norwitz }
568f1aeb14bSRaphael Norwitz
/*
 * Diff the device's current memory map (dev->mem) against the shadow table
 * mirroring what the backend was last told about.
 *
 * On return:
 *   add_reg / *nr_add_reg - regions present in dev->mem but not in the
 *                           shadow table (candidates for ADD_MEM_REG);
 *                           fd_idx records the running count of fd-backed
 *                           regions seen so far, used only for tracing.
 *   rem_reg / *nr_rem_reg - shadow entries no longer present in dev->mem
 *                           (candidates for REM_MEM_REG), in shadow-table
 *                           order.
 *   shadow_pcb            - when track_ramblocks, postcopy client bases
 *                           carried over for regions that survive; entries
 *                           for removed regions stay zero.
 *
 * Fix over previous version: dropped the redundant bare "return;" at the
 * end of this void function.
 */
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;
}
660f1aeb14bSRaphael Norwitz
/*
 * Tell the backend to unmap every region listed in remove_reg and drop
 * those entries from the shadow table.
 *
 * Returns 0 on success, a negative errno-style value on failure.  On
 * failure the shadow table has already been compacted for the regions
 * processed so far.
 */
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        /*
         * Regions without an fd were never sent to the backend (see the
         * fd > 0 gate in send_add_regions), so only fd-backed ones need a
         * REM_MEM_REG message.  The shadow entry is removed either way.
         */
        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, NULL, 0);
            if (ret < 0) {
                return ret;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}
714f1aeb14bSRaphael Norwitz
/*
 * Send a VHOST_USER_ADD_MEM_REG message (with the region's fd) for every
 * region in add_reg, then append each region to the shadow table.
 *
 * When track_ramblocks is set (postcopy), each ADD_MEM_REG is answered by
 * the backend with the region echoed back, its userspace_addr field reused
 * to carry the backend's mapping address; that value is stored into
 * shadow_pcb so postcopy can route faults.  Otherwise, a REPLY_ACK is
 * awaited only if reply_supported.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        /* Only fd-backed regions are communicated to the backend. */
        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, &fd, 1);
            if (ret < 0) {
                return ret;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                ret = vhost_user_read(dev, &msg_reply);
                if (ret < 0) {
                    return ret;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type."
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -EPROTO;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -EPROTO;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -EPROTO;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}
826f1aeb14bSRaphael Norwitz
/*
 * Incremental memory-table update used when the backend supports
 * VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: compute the delta between the
 * shadow table and dev->mem, then send individual REM_MEM_REG/ADD_MEM_REG
 * messages instead of one monolithic SET_MEM_TABLE.
 *
 * msg is a caller-provided template whose hdr.flags are already set; its
 * request/size/payload are (re)written here.  When track_ramblocks is set
 * (postcopy), the collected client bases are committed to
 * u->postcopy_client_bases on both success and failure paths, and an
 * explicit u64 ack is sent once the regions are registered.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;
    int ret;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg) {
        ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                  reply_supported);
        if (ret < 0) {
            goto err;
        }
    }

    if (nr_add_reg) {
        ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
                               reply_supported, track_ramblocks);
        if (ret < 0) {
            goto err;
        }
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;

err:
    /*
     * Even on failure, publish whatever client bases were gathered so far so
     * the postcopy state matches what the backend has actually mapped.
     */
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return ret;
}
889f1aeb14bSRaphael Norwitz
/*
 * Postcopy variant of vhost_user_set_mem_table.
 *
 * Differences from the normal path: the per-region RAMBlock bookkeeping
 * arrays (region_rb / region_rb_offset) are grown to cover all regions, and
 * the backend's SET_MEM_TABLE reply — which echoes the table with
 * userspace_addr reused to carry the backend's mapping address — is parsed
 * into u->postcopy_client_bases, after which an explicit u64 ack is sent.
 * With config_mem_slots the incremental add/remove machinery handles all of
 * this instead.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    int ret;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    /* Grow the RAMBlock tracking arrays; new tail entries are zeroed. */
    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                true);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_read(dev, &msg_reply);
        if (ret < 0) {
            return ret;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type."
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -EPROTO;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -EPROTO;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fd's
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        /* Every fd-backed region we sent must appear in the reply. */
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -EIO;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        ret = vhost_user_write(dev, &msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
100355d754b3SDr. David Alan Gilbert
vhost_user_set_mem_table(struct vhost_dev * dev,struct vhost_memory * mem)100455d754b3SDr. David Alan Gilbert static int vhost_user_set_mem_table(struct vhost_dev *dev,
100555d754b3SDr. David Alan Gilbert struct vhost_memory *mem)
100655d754b3SDr. David Alan Gilbert {
100755d754b3SDr. David Alan Gilbert struct vhost_user *u = dev->opaque;
100827598393SRaphael Norwitz int fds[VHOST_MEMORY_BASELINE_NREGIONS];
100955d754b3SDr. David Alan Gilbert size_t fd_num = 0;
101055d754b3SDr. David Alan Gilbert bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
101155d754b3SDr. David Alan Gilbert bool reply_supported = virtio_has_feature(dev->protocol_features,
10125ce43896SIlya Maximets VHOST_USER_PROTOCOL_F_REPLY_ACK);
1013f1aeb14bSRaphael Norwitz bool config_mem_slots =
1014f1aeb14bSRaphael Norwitz virtio_has_feature(dev->protocol_features,
1015f1aeb14bSRaphael Norwitz VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
1016025faa87SRoman Kagan int ret;
101755d754b3SDr. David Alan Gilbert
101855d754b3SDr. David Alan Gilbert if (do_postcopy) {
1019f1aeb14bSRaphael Norwitz /*
1020f1aeb14bSRaphael Norwitz * Postcopy has enough differences that it's best done in it's own
102155d754b3SDr. David Alan Gilbert * version
102255d754b3SDr. David Alan Gilbert */
1023f1aeb14bSRaphael Norwitz return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
1024f1aeb14bSRaphael Norwitz config_mem_slots);
102555d754b3SDr. David Alan Gilbert }
102694c9cb31SMichael S. Tsirkin
102794c9cb31SMichael S. Tsirkin VhostUserMsg msg = {
102824e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION,
102994c9cb31SMichael S. Tsirkin };
103094c9cb31SMichael S. Tsirkin
103194c9cb31SMichael S. Tsirkin if (reply_supported) {
103224e34754SMichael S. Tsirkin msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
103394c9cb31SMichael S. Tsirkin }
103494c9cb31SMichael S. Tsirkin
1035f1aeb14bSRaphael Norwitz if (config_mem_slots) {
1036025faa87SRoman Kagan ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
1037025faa87SRoman Kagan if (ret < 0) {
1038025faa87SRoman Kagan return ret;
1039f1aeb14bSRaphael Norwitz }
1040f1aeb14bSRaphael Norwitz } else {
1041025faa87SRoman Kagan ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
1042025faa87SRoman Kagan false);
1043025faa87SRoman Kagan if (ret < 0) {
1044025faa87SRoman Kagan return ret;
1045f4bf56fbSJay Zhou }
1046025faa87SRoman Kagan
1047025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, fds, fd_num);
1048025faa87SRoman Kagan if (ret < 0) {
1049025faa87SRoman Kagan return ret;
105094c9cb31SMichael S. Tsirkin }
105194c9cb31SMichael S. Tsirkin
105294c9cb31SMichael S. Tsirkin if (reply_supported) {
10533cf7daf8SMaxime Coquelin return process_message_reply(dev, &msg);
105494c9cb31SMichael S. Tsirkin }
1055f1aeb14bSRaphael Norwitz }
105694c9cb31SMichael S. Tsirkin
105794c9cb31SMichael S. Tsirkin return 0;
105894c9cb31SMichael S. Tsirkin }
105994c9cb31SMichael S. Tsirkin
vhost_user_set_vring_endian(struct vhost_dev * dev,struct vhost_vring_state * ring)106021e70425SMarc-André Lureau static int vhost_user_set_vring_endian(struct vhost_dev *dev,
106121e70425SMarc-André Lureau struct vhost_vring_state *ring)
106221e70425SMarc-André Lureau {
10635df04f17SFelipe Franciosi bool cross_endian = virtio_has_feature(dev->protocol_features,
10645df04f17SFelipe Franciosi VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
10655df04f17SFelipe Franciosi VhostUserMsg msg = {
106624e34754SMichael S. Tsirkin .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
106724e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION,
10685df04f17SFelipe Franciosi .payload.state = *ring,
106924e34754SMichael S. Tsirkin .hdr.size = sizeof(msg.payload.state),
10705df04f17SFelipe Franciosi };
10715df04f17SFelipe Franciosi
10725df04f17SFelipe Franciosi if (!cross_endian) {
107321e70425SMarc-André Lureau error_report("vhost-user trying to send unhandled ioctl");
1074025faa87SRoman Kagan return -ENOTSUP;
107521e70425SMarc-André Lureau }
107621e70425SMarc-André Lureau
1077025faa87SRoman Kagan return vhost_user_write(dev, &msg, NULL, 0);
10785df04f17SFelipe Franciosi }
10795df04f17SFelipe Franciosi
/*
 * Issue a request that returns a u64 payload and store the result in *u64.
 *
 * Per-device requests are only sent once, on the device owning queue index
 * 0; for other queue pairs this is a silent no-op returning 0.  Validates
 * that the reply echoes the request type and carries a u64-sized payload.
 *
 * Returns 0 on success, a negative errno-style value on failure.
 */
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };
    int r;

    if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
        return 0;
    }

    r = vhost_user_write(dev, &msg, NULL, 0);
    if (r < 0) {
        return r;
    }

    r = vhost_user_read(dev, &msg);
    if (r < 0) {
        return r;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *u64 = msg.payload.u64;
    return 0;
}
1117df3b2abcSLaszlo Ersek
/*
 * Fetch the backend's feature bits via VHOST_USER_GET_FEATURES.
 * Any failure from the generic u64 getter is collapsed into -EPROTO.
 */
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0
           ? -EPROTO : 0;
}
1126df3b2abcSLaszlo Ersek
1127df3b2abcSLaszlo Ersek /* Note: "msg->hdr.flags" may be modified. */
vhost_user_write_sync(struct vhost_dev * dev,VhostUserMsg * msg,bool wait_for_reply)1128df3b2abcSLaszlo Ersek static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg,
1129df3b2abcSLaszlo Ersek bool wait_for_reply)
1130df3b2abcSLaszlo Ersek {
1131df3b2abcSLaszlo Ersek int ret;
1132df3b2abcSLaszlo Ersek
1133df3b2abcSLaszlo Ersek if (wait_for_reply) {
1134df3b2abcSLaszlo Ersek bool reply_supported = virtio_has_feature(dev->protocol_features,
1135df3b2abcSLaszlo Ersek VHOST_USER_PROTOCOL_F_REPLY_ACK);
1136df3b2abcSLaszlo Ersek if (reply_supported) {
1137df3b2abcSLaszlo Ersek msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
1138df3b2abcSLaszlo Ersek }
1139df3b2abcSLaszlo Ersek }
1140df3b2abcSLaszlo Ersek
1141df3b2abcSLaszlo Ersek ret = vhost_user_write(dev, msg, NULL, 0);
1142df3b2abcSLaszlo Ersek if (ret < 0) {
1143df3b2abcSLaszlo Ersek return ret;
1144df3b2abcSLaszlo Ersek }
1145df3b2abcSLaszlo Ersek
1146df3b2abcSLaszlo Ersek if (wait_for_reply) {
1147df3b2abcSLaszlo Ersek uint64_t dummy;
1148df3b2abcSLaszlo Ersek
1149df3b2abcSLaszlo Ersek if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
1150df3b2abcSLaszlo Ersek return process_message_reply(dev, msg);
1151df3b2abcSLaszlo Ersek }
1152df3b2abcSLaszlo Ersek
1153df3b2abcSLaszlo Ersek /*
1154df3b2abcSLaszlo Ersek * We need to wait for a reply but the backend does not
1155df3b2abcSLaszlo Ersek * support replies for the command we just sent.
1156df3b2abcSLaszlo Ersek * Send VHOST_USER_GET_FEATURES which makes all backends
1157df3b2abcSLaszlo Ersek * send a reply.
1158df3b2abcSLaszlo Ersek */
1159df3b2abcSLaszlo Ersek return vhost_user_get_features(dev, &dummy);
1160df3b2abcSLaszlo Ersek }
1161df3b2abcSLaszlo Ersek
1162df3b2abcSLaszlo Ersek return 0;
1163df3b2abcSLaszlo Ersek }
1164df3b2abcSLaszlo Ersek
/*
 * Build and send a vring-state message (request is one of the
 * VHOST_USER_SET_VRING_* commands carrying a vhost_vring_state payload),
 * optionally synchronizing with the backend via vhost_user_write_sync.
 */
static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring,
                           bool wait_for_reply)
{
    VhostUserMsg msg = { 0 };

    msg.hdr.request = request;
    msg.hdr.flags = VHOST_USER_VERSION;
    msg.hdr.size = sizeof(msg.payload.state);
    msg.payload.state = *ring;

    return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
117921e70425SMarc-André Lureau
vhost_user_set_vring_num(struct vhost_dev * dev,struct vhost_vring_state * ring)118021e70425SMarc-André Lureau static int vhost_user_set_vring_num(struct vhost_dev *dev,
118121e70425SMarc-André Lureau struct vhost_vring_state *ring)
118221e70425SMarc-André Lureau {
118375b6b6daSLaszlo Ersek return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring, false);
118421e70425SMarc-André Lureau }
118521e70425SMarc-André Lureau
/*
 * RCU reclaim callback for a host notifier: release the mapped page that
 * was queued for unmapping and, when the notifier is being torn down for
 * good (n->destroy set), unparent its memory region and free the struct.
 */
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    void *page = n->unmap_addr;

    if (page) {
        munmap(page, qemu_real_host_page_size());
        n->unmap_addr = NULL;
    }

    if (!n->destroy) {
        return;
    }

    memory_region_transaction_begin();
    object_unparent(OBJECT(&n->mr));
    memory_region_transaction_commit();
    g_free(n);
}
11990b0af4d6SXueming Li
1200503e3554SAlex Bennée /*
1201503e3554SAlex Bennée * clean-up function for notifier, will finally free the structure
1202503e3554SAlex Bennée * under rcu.
1203503e3554SAlex Bennée */
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
                                            VirtIODevice *vdev, bool destroy)
{
    /*
     * Nothing to do when there is no notifier, or when no page is mapped
     * and the caller does not want the structure destroyed.
     */
    if (!n) {
        return;
    }
    if (!n->addr && !destroy) {
        return;
    }

    if (n->addr) {
        /* Detach the region from the queue before queueing the unmap. */
        if (vdev) {
            memory_region_transaction_begin();
            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
            memory_region_transaction_commit();
        }
        assert(!n->unmap_addr);
        n->unmap_addr = n->addr;
        n->addr = NULL;
    }

    n->destroy = destroy;
    /* The munmap (and possible free) happens after an RCU grace period. */
    call_rcu(n, vhost_user_host_notifier_free, rcu);
}
122844866521STiwei Bie
vhost_user_set_vring_base(struct vhost_dev * dev,struct vhost_vring_state * ring)122921e70425SMarc-André Lureau static int vhost_user_set_vring_base(struct vhost_dev *dev,
123021e70425SMarc-André Lureau struct vhost_vring_state *ring)
123121e70425SMarc-André Lureau {
123275b6b6daSLaszlo Ersek return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring, false);
123321e70425SMarc-André Lureau }
123421e70425SMarc-André Lureau
vhost_user_set_vring_enable(struct vhost_dev * dev,int enable)123521e70425SMarc-André Lureau static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
123621e70425SMarc-André Lureau {
1237dc3db6adSMichael S. Tsirkin int i;
123821e70425SMarc-André Lureau
1239923e2d98SYuanhan Liu if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
1240025faa87SRoman Kagan return -EINVAL;
124121e70425SMarc-André Lureau }
124221e70425SMarc-André Lureau
1243dc3db6adSMichael S. Tsirkin for (i = 0; i < dev->nvqs; ++i) {
1244025faa87SRoman Kagan int ret;
1245dc3db6adSMichael S. Tsirkin struct vhost_vring_state state = {
1246dc3db6adSMichael S. Tsirkin .index = dev->vq_index + i,
1247dc3db6adSMichael S. Tsirkin .num = enable,
1248dc3db6adSMichael S. Tsirkin };
1249dc3db6adSMichael S. Tsirkin
1250d7dc0682SLaszlo Ersek /*
1251d7dc0682SLaszlo Ersek * SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend /
1252d7dc0682SLaszlo Ersek * control plane thread via unix domain socket. Virtio requests travel
1253d7dc0682SLaszlo Ersek * from guest to vhost-user backend / data plane thread via eventfd.
1254d7dc0682SLaszlo Ersek * Even if the guest enables the ring first, and pushes its first virtio
1255d7dc0682SLaszlo Ersek * request second (conforming to the virtio spec), the data plane thread
1256d7dc0682SLaszlo Ersek * in the backend may see the virtio request before the control plane
1257d7dc0682SLaszlo Ersek * thread sees the queue enablement. This causes (in fact, requires) the
1258d7dc0682SLaszlo Ersek * data plane thread to discard the virtio request (it arrived on a
1259d7dc0682SLaszlo Ersek * seemingly disabled queue). To prevent this out-of-order delivery,
1260d7dc0682SLaszlo Ersek * don't let the guest proceed to pushing the virtio request until the
1261d7dc0682SLaszlo Ersek * backend control plane acknowledges enabling the queue -- IOW, pass
1262d7dc0682SLaszlo Ersek * wait_for_reply=true below.
1263d7dc0682SLaszlo Ersek */
1264d7dc0682SLaszlo Ersek ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state, true);
1265025faa87SRoman Kagan if (ret < 0) {
1266025faa87SRoman Kagan /*
1267025faa87SRoman Kagan * Restoring the previous state is likely infeasible, as well as
1268025faa87SRoman Kagan * proceeding regardless the error, so just bail out and hope for
1269025faa87SRoman Kagan * the device-level recovery.
1270025faa87SRoman Kagan */
1271025faa87SRoman Kagan return ret;
1272025faa87SRoman Kagan }
127321e70425SMarc-André Lureau }
127421e70425SMarc-André Lureau
1275dc3db6adSMichael S. Tsirkin return 0;
1276dc3db6adSMichael S. Tsirkin }
127721e70425SMarc-André Lureau
fetch_notifier(VhostUserState * u,int idx)1278503e3554SAlex Bennée static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
1279503e3554SAlex Bennée int idx)
1280503e3554SAlex Bennée {
1281503e3554SAlex Bennée if (idx >= u->notifiers->len) {
1282503e3554SAlex Bennée return NULL;
1283503e3554SAlex Bennée }
1284503e3554SAlex Bennée return g_ptr_array_index(u->notifiers, idx);
1285503e3554SAlex Bennée }
1286503e3554SAlex Bennée
/*
 * Fetch the backend's current base (used) index for a vring with
 * VHOST_USER_GET_VRING_BASE, validating the reply before copying the
 * returned state into *ring.  Returns 0 on success, negative
 * errno-style value on error.
 */
static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };
    struct vhost_user *u = dev->opaque;

    /*
     * Invalidate any mapped host-notifier page for this queue first;
     * destroy == false keeps the notifier structure around for re-use.
     */
    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
    vhost_user_host_notifier_remove(n, dev->vdev, false);

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    /* This request always elicits a reply; read it synchronously. */
    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    /* The reply must echo the request type... */
    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -EPROTO;
    }

    /* ...and carry exactly one vhost_vring_state payload. */
    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *ring = msg.payload.state;

    return 0;
}
132721e70425SMarc-André Lureau
/*
 * Send one of the vring file-descriptor requests (kick/call/err).  The
 * u64 payload carries the ring index; if no usable fd is supplied, the
 * VHOST_USER_VRING_NOFD_MASK bit is set instead of passing a descriptor.
 */
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    /*
     * NOTE(review): fd 0 is a valid descriptor but is treated as "no fd"
     * here -- presumably callers never hand in fd 0; confirm.
     */
    if (file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    return vhost_user_write(dev, &msg, fds, fd_num);
}
134921e70425SMarc-André Lureau
vhost_user_set_vring_kick(struct vhost_dev * dev,struct vhost_vring_file * file)135021e70425SMarc-André Lureau static int vhost_user_set_vring_kick(struct vhost_dev *dev,
135121e70425SMarc-André Lureau struct vhost_vring_file *file)
135221e70425SMarc-André Lureau {
135321e70425SMarc-André Lureau return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
135421e70425SMarc-André Lureau }
135521e70425SMarc-André Lureau
vhost_user_set_vring_call(struct vhost_dev * dev,struct vhost_vring_file * file)135621e70425SMarc-André Lureau static int vhost_user_set_vring_call(struct vhost_dev *dev,
135721e70425SMarc-André Lureau struct vhost_vring_file *file)
135821e70425SMarc-André Lureau {
135921e70425SMarc-André Lureau return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
136021e70425SMarc-André Lureau }
136121e70425SMarc-André Lureau
vhost_user_set_vring_err(struct vhost_dev * dev,struct vhost_vring_file * file)136260dc3c5bSKonstantin Khlebnikov static int vhost_user_set_vring_err(struct vhost_dev *dev,
136360dc3c5bSKonstantin Khlebnikov struct vhost_vring_file *file)
136460dc3c5bSKonstantin Khlebnikov {
136560dc3c5bSKonstantin Khlebnikov return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
136660dc3c5bSKonstantin Khlebnikov }
136721e70425SMarc-André Lureau
vhost_user_set_vring_addr(struct vhost_dev * dev,struct vhost_vring_addr * addr)1368699f2e53SDenis Plotnikov static int vhost_user_set_vring_addr(struct vhost_dev *dev,
1369699f2e53SDenis Plotnikov struct vhost_vring_addr *addr)
1370699f2e53SDenis Plotnikov {
1371699f2e53SDenis Plotnikov VhostUserMsg msg = {
1372699f2e53SDenis Plotnikov .hdr.request = VHOST_USER_SET_VRING_ADDR,
1373699f2e53SDenis Plotnikov .hdr.flags = VHOST_USER_VERSION,
1374699f2e53SDenis Plotnikov .payload.addr = *addr,
1375699f2e53SDenis Plotnikov .hdr.size = sizeof(msg.payload.addr),
1376699f2e53SDenis Plotnikov };
1377699f2e53SDenis Plotnikov
1378699f2e53SDenis Plotnikov /*
1379699f2e53SDenis Plotnikov * wait for a reply if logging is enabled to make sure
1380699f2e53SDenis Plotnikov * backend is actually logging changes
1381699f2e53SDenis Plotnikov */
1382699f2e53SDenis Plotnikov bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);
1383699f2e53SDenis Plotnikov
138454ae3682SLaszlo Ersek return vhost_user_write_sync(dev, &msg, wait_for_reply);
1385699f2e53SDenis Plotnikov }
1386699f2e53SDenis Plotnikov
vhost_user_set_u64(struct vhost_dev * dev,int request,uint64_t u64,bool wait_for_reply)1387699f2e53SDenis Plotnikov static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
1388699f2e53SDenis Plotnikov bool wait_for_reply)
1389699f2e53SDenis Plotnikov {
1390699f2e53SDenis Plotnikov VhostUserMsg msg = {
1391699f2e53SDenis Plotnikov .hdr.request = request,
1392699f2e53SDenis Plotnikov .hdr.flags = VHOST_USER_VERSION,
1393699f2e53SDenis Plotnikov .payload.u64 = u64,
1394699f2e53SDenis Plotnikov .hdr.size = sizeof(msg.payload.u64),
1395699f2e53SDenis Plotnikov };
1396699f2e53SDenis Plotnikov
139754ae3682SLaszlo Ersek return vhost_user_write_sync(dev, &msg, wait_for_reply);
1398699f2e53SDenis Plotnikov }
1399699f2e53SDenis Plotnikov
vhost_user_set_status(struct vhost_dev * dev,uint8_t status)1400923b8921SYajun Wu static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
1401923b8921SYajun Wu {
1402923b8921SYajun Wu return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
1403923b8921SYajun Wu }
1404923b8921SYajun Wu
vhost_user_get_status(struct vhost_dev * dev,uint8_t * status)1405923b8921SYajun Wu static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
1406923b8921SYajun Wu {
1407923b8921SYajun Wu uint64_t value;
1408923b8921SYajun Wu int ret;
1409923b8921SYajun Wu
1410923b8921SYajun Wu ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
1411923b8921SYajun Wu if (ret < 0) {
1412923b8921SYajun Wu return ret;
1413923b8921SYajun Wu }
1414923b8921SYajun Wu *status = value;
1415923b8921SYajun Wu
1416923b8921SYajun Wu return 0;
1417923b8921SYajun Wu }
1418923b8921SYajun Wu
/*
 * OR the given bits into the backend's device status, skipping the
 * round-trip when they are already set.
 */
static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t current;
    int ret = vhost_user_get_status(dev, &current);

    if (ret < 0) {
        return ret;
    }

    if ((current & status) == status) {
        /* All requested bits already present: nothing to do. */
        return 0;
    }

    return vhost_user_set_status(dev, current | status);
}
1436923b8921SYajun Wu
vhost_user_set_features(struct vhost_dev * dev,uint64_t features)1437699f2e53SDenis Plotnikov static int vhost_user_set_features(struct vhost_dev *dev,
1438699f2e53SDenis Plotnikov uint64_t features)
1439699f2e53SDenis Plotnikov {
1440699f2e53SDenis Plotnikov /*
1441699f2e53SDenis Plotnikov * wait for a reply if logging is enabled to make sure
1442699f2e53SDenis Plotnikov * backend is actually logging changes
1443699f2e53SDenis Plotnikov */
1444699f2e53SDenis Plotnikov bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
1445923b8921SYajun Wu int ret;
1446699f2e53SDenis Plotnikov
144702b61f38SAlex Bennée /*
144802b61f38SAlex Bennée * We need to include any extra backend only feature bits that
144902b61f38SAlex Bennée * might be needed by our device. Currently this includes the
145002b61f38SAlex Bennée * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
145102b61f38SAlex Bennée * features.
145202b61f38SAlex Bennée */
1453923b8921SYajun Wu ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
145402b61f38SAlex Bennée features | dev->backend_features,
1455699f2e53SDenis Plotnikov log_enabled);
1456923b8921SYajun Wu
1457923b8921SYajun Wu if (virtio_has_feature(dev->protocol_features,
1458923b8921SYajun Wu VHOST_USER_PROTOCOL_F_STATUS)) {
1459923b8921SYajun Wu if (!ret) {
1460923b8921SYajun Wu return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
1461923b8921SYajun Wu }
1462923b8921SYajun Wu }
1463923b8921SYajun Wu
1464923b8921SYajun Wu return ret;
1465699f2e53SDenis Plotnikov }
1466699f2e53SDenis Plotnikov
vhost_user_set_protocol_features(struct vhost_dev * dev,uint64_t features)1467699f2e53SDenis Plotnikov static int vhost_user_set_protocol_features(struct vhost_dev *dev,
1468699f2e53SDenis Plotnikov uint64_t features)
1469699f2e53SDenis Plotnikov {
1470699f2e53SDenis Plotnikov return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
1471699f2e53SDenis Plotnikov false);
1472699f2e53SDenis Plotnikov }
1473699f2e53SDenis Plotnikov
vhost_user_set_owner(struct vhost_dev * dev)147421e70425SMarc-André Lureau static int vhost_user_set_owner(struct vhost_dev *dev)
147521e70425SMarc-André Lureau {
147621e70425SMarc-André Lureau VhostUserMsg msg = {
147724e34754SMichael S. Tsirkin .hdr.request = VHOST_USER_SET_OWNER,
147824e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION,
147921e70425SMarc-André Lureau };
148021e70425SMarc-André Lureau
1481025faa87SRoman Kagan return vhost_user_write(dev, &msg, NULL, 0);
148221e70425SMarc-André Lureau }
148321e70425SMarc-André Lureau
vhost_user_get_max_memslots(struct vhost_dev * dev,uint64_t * max_memslots)14846b0eff1aSRaphael Norwitz static int vhost_user_get_max_memslots(struct vhost_dev *dev,
14856b0eff1aSRaphael Norwitz uint64_t *max_memslots)
14866b0eff1aSRaphael Norwitz {
14876b0eff1aSRaphael Norwitz uint64_t backend_max_memslots;
14886b0eff1aSRaphael Norwitz int err;
14896b0eff1aSRaphael Norwitz
14906b0eff1aSRaphael Norwitz err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
14916b0eff1aSRaphael Norwitz &backend_max_memslots);
14926b0eff1aSRaphael Norwitz if (err < 0) {
14936b0eff1aSRaphael Norwitz return err;
14946b0eff1aSRaphael Norwitz }
14956b0eff1aSRaphael Norwitz
14966b0eff1aSRaphael Norwitz *max_memslots = backend_max_memslots;
14976b0eff1aSRaphael Norwitz
14986b0eff1aSRaphael Norwitz return 0;
14996b0eff1aSRaphael Norwitz }
15006b0eff1aSRaphael Norwitz
vhost_user_reset_device(struct vhost_dev * dev)150121e70425SMarc-André Lureau static int vhost_user_reset_device(struct vhost_dev *dev)
150221e70425SMarc-André Lureau {
150321e70425SMarc-André Lureau VhostUserMsg msg = {
150424e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION,
150522d2464fSStefan Hajnoczi .hdr.request = VHOST_USER_RESET_DEVICE,
150621e70425SMarc-André Lureau };
150721e70425SMarc-André Lureau
150822d2464fSStefan Hajnoczi /*
150922d2464fSStefan Hajnoczi * Historically, reset was not implemented so only reset devices
151022d2464fSStefan Hajnoczi * that are expecting it.
151122d2464fSStefan Hajnoczi */
151222d2464fSStefan Hajnoczi if (!virtio_has_feature(dev->protocol_features,
151322d2464fSStefan Hajnoczi VHOST_USER_PROTOCOL_F_RESET_DEVICE)) {
151422d2464fSStefan Hajnoczi return -ENOSYS;
151522d2464fSStefan Hajnoczi }
1516d91d57e6SRaphael Norwitz
1517025faa87SRoman Kagan return vhost_user_write(dev, &msg, NULL, 0);
151821e70425SMarc-André Lureau }
151921e70425SMarc-André Lureau
vhost_user_backend_handle_config_change(struct vhost_dev * dev)1520f8ed3648SManos Pitsidianakis static int vhost_user_backend_handle_config_change(struct vhost_dev *dev)
15214c3e257bSChangpeng Liu {
1522025faa87SRoman Kagan if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
1523025faa87SRoman Kagan return -ENOSYS;
15244c3e257bSChangpeng Liu }
15254c3e257bSChangpeng Liu
1526025faa87SRoman Kagan return dev->config_ops->vhost_dev_config_notifier(dev);
15274c3e257bSChangpeng Liu }
15284c3e257bSChangpeng Liu
1529503e3554SAlex Bennée /*
1530503e3554SAlex Bennée * Fetch or create the notifier for a given idx. Newly created
1531503e3554SAlex Bennée * notifiers are added to the pointer array that tracks them.
1532503e3554SAlex Bennée */
static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
                                                       int idx)
{
    VhostUserHostNotifier *n = NULL;
    /* Grow the array (NULL-filled) so @idx is always a valid slot. */
    if (idx >= u->notifiers->len) {
        g_ptr_array_set_size(u->notifiers, idx + 1);
    }

    n = g_ptr_array_index(u->notifiers, idx);
    if (!n) {
        /*
         * In case notifications arrive out-of-order, drop the NULL
         * placeholder at @idx first so the insert below lands at the
         * right position instead of shifting later entries.
         */
        g_ptr_array_remove_index(u->notifiers, idx);
        n = g_new0(VhostUserHostNotifier, 1);
        n->idx = idx;
        g_ptr_array_insert(u->notifiers, idx, n);
        trace_vhost_user_create_notifier(idx, n);
    }

    return n;
}
1556503e3554SAlex Bennée
/*
 * Handle a VHOST_USER_BACKEND_VRING_HOST_NOTIFIER request: map the page
 * the backend supplied via @fd and expose it to the guest as the host
 * notifier memory region for the given queue.  Returns 0 on success,
 * negative errno-style value on error.
 */
static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev,
                                                         VhostUserVringArea *area,
                                                         int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size();
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    /* Feature must be negotiated and the queue index must be valid. */
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -EINVAL;
    }

    /*
     * Fetch notifier and invalidate any old data before setting up
     * new mapped address.
     */
    n = fetch_or_create_notifier(user, queue_idx);
    vhost_user_host_notifier_remove(n, vdev, false);

    /* NOFD means "tear down only"; the removal above already did that. */
    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check: the shared area must be exactly one host page. */
    if (area->size != page_size) {
        return -EINVAL;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -EFAULT;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    if (!n->mr.ram) { /* Don't init again after suspend. */
        memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                          page_size, addr);
    } else {
        /* Region already exists: just repoint it at the new mapping. */
        n->mr.ram_block->host = addr;
    }
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        /* Attaching failed: undo the region and the mapping. */
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -ENXIO;
    }

    n->addr = addr;

    return 0;
}
161844866521STiwei Bie
161916094766SAlbert Esteve static int
vhost_user_backend_handle_shared_object_add(struct vhost_dev * dev,VhostUserShared * object)162016094766SAlbert Esteve vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev,
162116094766SAlbert Esteve VhostUserShared *object)
162216094766SAlbert Esteve {
162316094766SAlbert Esteve QemuUUID uuid;
162416094766SAlbert Esteve
162516094766SAlbert Esteve memcpy(uuid.data, object->uuid, sizeof(object->uuid));
1626*eea5aeefSAlbert Esteve return !virtio_add_vhost_device(&uuid, dev);
162716094766SAlbert Esteve }
162816094766SAlbert Esteve
1629*eea5aeefSAlbert Esteve /*
1630*eea5aeefSAlbert Esteve * Handle VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE backend requests.
1631*eea5aeefSAlbert Esteve *
1632*eea5aeefSAlbert Esteve * Return: 0 on success, 1 on error.
1633*eea5aeefSAlbert Esteve */
163416094766SAlbert Esteve static int
vhost_user_backend_handle_shared_object_remove(struct vhost_dev * dev,VhostUserShared * object)1635043e127aSAlbert Esteve vhost_user_backend_handle_shared_object_remove(struct vhost_dev *dev,
1636043e127aSAlbert Esteve VhostUserShared *object)
163716094766SAlbert Esteve {
163816094766SAlbert Esteve QemuUUID uuid;
163916094766SAlbert Esteve
164016094766SAlbert Esteve memcpy(uuid.data, object->uuid, sizeof(object->uuid));
1641043e127aSAlbert Esteve switch (virtio_object_type(&uuid)) {
1642043e127aSAlbert Esteve case TYPE_VHOST_DEV:
1643043e127aSAlbert Esteve {
1644043e127aSAlbert Esteve struct vhost_dev *owner = virtio_lookup_vhost_device(&uuid);
1645043e127aSAlbert Esteve if (dev != owner) {
1646043e127aSAlbert Esteve /* Not allowed to remove non-owned entries */
1647*eea5aeefSAlbert Esteve return 1;
1648043e127aSAlbert Esteve }
1649043e127aSAlbert Esteve break;
1650043e127aSAlbert Esteve }
1651043e127aSAlbert Esteve default:
1652043e127aSAlbert Esteve /* Not allowed to remove non-owned entries */
1653*eea5aeefSAlbert Esteve return 1;
1654043e127aSAlbert Esteve }
1655043e127aSAlbert Esteve
1656*eea5aeefSAlbert Esteve return !virtio_remove_resource(&uuid);
165716094766SAlbert Esteve }
165816094766SAlbert Esteve
vhost_user_send_resp(QIOChannel * ioc,VhostUserHeader * hdr,VhostUserPayload * payload,Error ** errp)165916094766SAlbert Esteve static bool vhost_user_send_resp(QIOChannel *ioc, VhostUserHeader *hdr,
166016094766SAlbert Esteve VhostUserPayload *payload, Error **errp)
166116094766SAlbert Esteve {
166216094766SAlbert Esteve struct iovec iov[] = {
166316094766SAlbert Esteve { .iov_base = hdr, .iov_len = VHOST_USER_HDR_SIZE },
166416094766SAlbert Esteve { .iov_base = payload, .iov_len = hdr->size },
166516094766SAlbert Esteve };
166616094766SAlbert Esteve
166716094766SAlbert Esteve hdr->flags &= ~VHOST_USER_NEED_REPLY_MASK;
166816094766SAlbert Esteve hdr->flags |= VHOST_USER_REPLY_MASK;
166916094766SAlbert Esteve
167016094766SAlbert Esteve return !qio_channel_writev_all(ioc, iov, ARRAY_SIZE(iov), errp);
167116094766SAlbert Esteve }
167216094766SAlbert Esteve
167316094766SAlbert Esteve static bool
vhost_user_backend_send_dmabuf_fd(QIOChannel * ioc,VhostUserHeader * hdr,VhostUserPayload * payload,Error ** errp)167416094766SAlbert Esteve vhost_user_backend_send_dmabuf_fd(QIOChannel *ioc, VhostUserHeader *hdr,
167516094766SAlbert Esteve VhostUserPayload *payload, Error **errp)
167616094766SAlbert Esteve {
167716094766SAlbert Esteve hdr->size = sizeof(payload->u64);
167816094766SAlbert Esteve return vhost_user_send_resp(ioc, hdr, payload, errp);
167916094766SAlbert Esteve }
168016094766SAlbert Esteve
/*
 * Ask the backend for the dmabuf fd associated with a shared-object
 * UUID (VHOST_USER_GET_SHARED_OBJECT).  The fd travels out-of-band as
 * SCM_RIGHTS ancillary data on the chardev.  Returns 0 on success with
 * *dmabuf_fd set, negative errno-style value on error.
 */
int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
                                 int *dmabuf_fd)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_SHARED_OBJECT,
        .hdr.flags = VHOST_USER_VERSION,
    };
    memcpy(msg.payload.object.uuid, uuid, sizeof(msg.payload.object.uuid));

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    /* The reply must echo the request type. */
    if (msg.hdr.request != VHOST_USER_GET_SHARED_OBJECT) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_SHARED_OBJECT, msg.hdr.request);
        return -EPROTO;
    }

    /* The fd itself arrives as ancillary data, not in the payload. */
    *dmabuf_fd = qemu_chr_fe_get_msgfd(chr);
    if (*dmabuf_fd < 0) {
        error_report("Failed to get dmabuf fd");
        return -EIO;
    }

    return 0;
}
171816094766SAlbert Esteve
/*
 * Handle a backend's shared-object lookup request: resolve the UUID to a
 * dmabuf fd (directly, or via the owning vhost device) and send it back
 * as ancillary data together with a u64 status payload (0 on success,
 * negative error code otherwise).  Returns 0 if a reply was sent,
 * -EINVAL if sending the reply failed.
 */
static int
vhost_user_backend_handle_shared_object_lookup(struct vhost_user *u,
                                               QIOChannel *ioc,
                                               VhostUserHeader *hdr,
                                               VhostUserPayload *payload)
{
    QemuUUID uuid;
    CharBackend *chr = u->user->chr;
    Error *local_err = NULL;
    int dmabuf_fd = -1;
    int fd_num = 0;

    memcpy(uuid.data, payload->object.uuid, sizeof(payload->object.uuid));

    /* payload->u64 doubles as the status code returned to the backend. */
    payload->u64 = 0;
    switch (virtio_object_type(&uuid)) {
    case TYPE_DMABUF:
        /* UUID maps directly to a locally registered dmabuf. */
        dmabuf_fd = virtio_lookup_dmabuf(&uuid);
        break;
    case TYPE_VHOST_DEV:
    {
        /* UUID belongs to another vhost device; ask it for the fd. */
        struct vhost_dev *dev = virtio_lookup_vhost_device(&uuid);
        if (dev == NULL) {
            payload->u64 = -EINVAL;
            break;
        }
        int ret = vhost_user_get_shared_object(dev, uuid.data, &dmabuf_fd);
        if (ret < 0) {
            payload->u64 = ret;
        }
        break;
    }
    case TYPE_INVALID:
        payload->u64 = -EINVAL;
        break;
    }

    if (dmabuf_fd != -1) {
        fd_num++;
    }

    /* Attach the fd (if any) to the reply as SCM_RIGHTS ancillary data. */
    if (qemu_chr_fe_set_msgfds(chr, &dmabuf_fd, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        payload->u64 = -EINVAL;
    }

    if (!vhost_user_backend_send_dmabuf_fd(ioc, hdr, payload, &local_err)) {
        error_report_err(local_err);
        return -EINVAL;
    }

    return 0;
}
177216094766SAlbert Esteve
close_backend_channel(struct vhost_user * u)1773f8ed3648SManos Pitsidianakis static void close_backend_channel(struct vhost_user *u)
1774de62e494SGreg Kurz {
1775f8ed3648SManos Pitsidianakis g_source_destroy(u->backend_src);
1776f8ed3648SManos Pitsidianakis g_source_unref(u->backend_src);
1777f8ed3648SManos Pitsidianakis u->backend_src = NULL;
1778f8ed3648SManos Pitsidianakis object_unref(OBJECT(u->backend_ioc));
1779f8ed3648SManos Pitsidianakis u->backend_ioc = NULL;
1780de62e494SGreg Kurz }
1781de62e494SGreg Kurz
backend_read(QIOChannel * ioc,GIOCondition condition,gpointer opaque)1782f8ed3648SManos Pitsidianakis static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
178357dc0217SGreg Kurz gpointer opaque)
17844bbeeba0SMarc-André Lureau {
17854bbeeba0SMarc-André Lureau struct vhost_dev *dev = opaque;
17864bbeeba0SMarc-André Lureau struct vhost_user *u = dev->opaque;
178769aff030SMichael S. Tsirkin VhostUserHeader hdr = { 0, };
178869aff030SMichael S. Tsirkin VhostUserPayload payload = { 0, };
178957dc0217SGreg Kurz Error *local_err = NULL;
179057dc0217SGreg Kurz gboolean rc = G_SOURCE_CONTINUE;
179157dc0217SGreg Kurz int ret = 0;
17921f3a4519STiwei Bie struct iovec iov;
179357dc0217SGreg Kurz g_autofree int *fd = NULL;
179457dc0217SGreg Kurz size_t fdsize = 0;
179557dc0217SGreg Kurz int i;
17965f57fbeaSTiwei Bie
17974bbeeba0SMarc-André Lureau /* Read header */
17981f3a4519STiwei Bie iov.iov_base = &hdr;
17991f3a4519STiwei Bie iov.iov_len = VHOST_USER_HDR_SIZE;
18001f3a4519STiwei Bie
180157dc0217SGreg Kurz if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
180257dc0217SGreg Kurz error_report_err(local_err);
18034bbeeba0SMarc-André Lureau goto err;
18044bbeeba0SMarc-André Lureau }
18054bbeeba0SMarc-André Lureau
180669aff030SMichael S. Tsirkin if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
18074bbeeba0SMarc-André Lureau error_report("Failed to read msg header."
180869aff030SMichael S. Tsirkin " Size %d exceeds the maximum %zu.", hdr.size,
18094bbeeba0SMarc-André Lureau VHOST_USER_PAYLOAD_SIZE);
18104bbeeba0SMarc-André Lureau goto err;
18114bbeeba0SMarc-André Lureau }
18124bbeeba0SMarc-André Lureau
18134bbeeba0SMarc-André Lureau /* Read payload */
181457dc0217SGreg Kurz if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
181557dc0217SGreg Kurz error_report_err(local_err);
18164bbeeba0SMarc-André Lureau goto err;
18174bbeeba0SMarc-André Lureau }
18184bbeeba0SMarc-André Lureau
181969aff030SMichael S. Tsirkin switch (hdr.request) {
1820a84ec993SMaxime Coquelin case VHOST_USER_BACKEND_IOTLB_MSG:
182169aff030SMichael S. Tsirkin ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
18226dcdd06eSMaxime Coquelin break;
1823a84ec993SMaxime Coquelin case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG:
1824f8ed3648SManos Pitsidianakis ret = vhost_user_backend_handle_config_change(dev);
18254c3e257bSChangpeng Liu break;
1826a84ec993SMaxime Coquelin case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG:
1827f8ed3648SManos Pitsidianakis ret = vhost_user_backend_handle_vring_host_notifier(dev, &payload.area,
182857dc0217SGreg Kurz fd ? fd[0] : -1);
182944866521STiwei Bie break;
183016094766SAlbert Esteve case VHOST_USER_BACKEND_SHARED_OBJECT_ADD:
183116094766SAlbert Esteve ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object);
183216094766SAlbert Esteve break;
183316094766SAlbert Esteve case VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE:
1834043e127aSAlbert Esteve ret = vhost_user_backend_handle_shared_object_remove(dev,
1835043e127aSAlbert Esteve &payload.object);
183616094766SAlbert Esteve break;
183716094766SAlbert Esteve case VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP:
183816094766SAlbert Esteve ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc,
183916094766SAlbert Esteve &hdr, &payload);
184016094766SAlbert Esteve break;
18414bbeeba0SMarc-André Lureau default:
18420fdc465dSDr. David Alan Gilbert error_report("Received unexpected msg type: %d.", hdr.request);
18434bbeeba0SMarc-André Lureau ret = -EINVAL;
18444bbeeba0SMarc-André Lureau }
18454bbeeba0SMarc-André Lureau
18464bbeeba0SMarc-André Lureau /*
18474bbeeba0SMarc-André Lureau * REPLY_ACK feature handling. Other reply types has to be managed
18484bbeeba0SMarc-André Lureau * directly in their request handlers.
18494bbeeba0SMarc-André Lureau */
185069aff030SMichael S. Tsirkin if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
185169aff030SMichael S. Tsirkin payload.u64 = !!ret;
185269aff030SMichael S. Tsirkin hdr.size = sizeof(payload.u64);
185369aff030SMichael S. Tsirkin
185416094766SAlbert Esteve if (!vhost_user_send_resp(ioc, &hdr, &payload, &local_err)) {
185557dc0217SGreg Kurz error_report_err(local_err);
18564bbeeba0SMarc-André Lureau goto err;
18574bbeeba0SMarc-André Lureau }
18584bbeeba0SMarc-André Lureau }
18594bbeeba0SMarc-André Lureau
18609e06080bSGreg Kurz goto fdcleanup;
18614bbeeba0SMarc-André Lureau
18624bbeeba0SMarc-André Lureau err:
1863f8ed3648SManos Pitsidianakis close_backend_channel(u);
186457dc0217SGreg Kurz rc = G_SOURCE_REMOVE;
18659e06080bSGreg Kurz
18669e06080bSGreg Kurz fdcleanup:
186757dc0217SGreg Kurz if (fd) {
18685f57fbeaSTiwei Bie for (i = 0; i < fdsize; i++) {
18695f57fbeaSTiwei Bie close(fd[i]);
18705f57fbeaSTiwei Bie }
18711f3a4519STiwei Bie }
187257dc0217SGreg Kurz return rc;
18734bbeeba0SMarc-André Lureau }
18744bbeeba0SMarc-André Lureau
vhost_setup_backend_channel(struct vhost_dev * dev)1875f8ed3648SManos Pitsidianakis static int vhost_setup_backend_channel(struct vhost_dev *dev)
18764bbeeba0SMarc-André Lureau {
18774bbeeba0SMarc-André Lureau VhostUserMsg msg = {
1878a84ec993SMaxime Coquelin .hdr.request = VHOST_USER_SET_BACKEND_REQ_FD,
187924e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION,
18804bbeeba0SMarc-André Lureau };
18814bbeeba0SMarc-André Lureau struct vhost_user *u = dev->opaque;
18824bbeeba0SMarc-André Lureau int sv[2], ret = 0;
18834bbeeba0SMarc-André Lureau bool reply_supported = virtio_has_feature(dev->protocol_features,
18844bbeeba0SMarc-André Lureau VHOST_USER_PROTOCOL_F_REPLY_ACK);
188557dc0217SGreg Kurz Error *local_err = NULL;
188657dc0217SGreg Kurz QIOChannel *ioc;
18874bbeeba0SMarc-André Lureau
18884bbeeba0SMarc-André Lureau if (!virtio_has_feature(dev->protocol_features,
1889a84ec993SMaxime Coquelin VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
18904bbeeba0SMarc-André Lureau return 0;
18914bbeeba0SMarc-André Lureau }
18924bbeeba0SMarc-André Lureau
18939cbda7b3SGuoyi Tu if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
1894025faa87SRoman Kagan int saved_errno = errno;
18954bbeeba0SMarc-André Lureau error_report("socketpair() failed");
1896025faa87SRoman Kagan return -saved_errno;
18974bbeeba0SMarc-André Lureau }
18984bbeeba0SMarc-André Lureau
189957dc0217SGreg Kurz ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
190057dc0217SGreg Kurz if (!ioc) {
190157dc0217SGreg Kurz error_report_err(local_err);
1902025faa87SRoman Kagan return -ECONNREFUSED;
190357dc0217SGreg Kurz }
1904f8ed3648SManos Pitsidianakis u->backend_ioc = ioc;
1905f8ed3648SManos Pitsidianakis u->backend_src = qio_channel_add_watch_source(u->backend_ioc,
1906f340a59dSGreg Kurz G_IO_IN | G_IO_HUP,
1907f8ed3648SManos Pitsidianakis backend_read, dev, NULL, NULL);
19084bbeeba0SMarc-André Lureau
19094bbeeba0SMarc-André Lureau if (reply_supported) {
191024e34754SMichael S. Tsirkin msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
19114bbeeba0SMarc-André Lureau }
19124bbeeba0SMarc-André Lureau
19134bbeeba0SMarc-André Lureau ret = vhost_user_write(dev, &msg, &sv[1], 1);
19144bbeeba0SMarc-André Lureau if (ret) {
19154bbeeba0SMarc-André Lureau goto out;
19164bbeeba0SMarc-André Lureau }
19174bbeeba0SMarc-André Lureau
19184bbeeba0SMarc-André Lureau if (reply_supported) {
19194bbeeba0SMarc-André Lureau ret = process_message_reply(dev, &msg);
19204bbeeba0SMarc-André Lureau }
19214bbeeba0SMarc-André Lureau
19224bbeeba0SMarc-André Lureau out:
19234bbeeba0SMarc-André Lureau close(sv[1]);
19244bbeeba0SMarc-André Lureau if (ret) {
1925f8ed3648SManos Pitsidianakis close_backend_channel(u);
19264bbeeba0SMarc-André Lureau }
19274bbeeba0SMarc-André Lureau
19284bbeeba0SMarc-André Lureau return ret;
19294bbeeba0SMarc-André Lureau }
19304bbeeba0SMarc-André Lureau
193118658a3cSPaolo Bonzini #ifdef CONFIG_LINUX
1932d3dff7a5SDr. David Alan Gilbert /*
1933f82c1116SDr. David Alan Gilbert * Called back from the postcopy fault thread when a fault is received on our
1934f82c1116SDr. David Alan Gilbert * ufd.
1935f82c1116SDr. David Alan Gilbert * TODO: This is Linux specific
1936f82c1116SDr. David Alan Gilbert */
vhost_user_postcopy_fault_handler(struct PostCopyFD * pcfd,void * ufd)1937f82c1116SDr. David Alan Gilbert static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
1938f82c1116SDr. David Alan Gilbert void *ufd)
1939f82c1116SDr. David Alan Gilbert {
1940375318d0SDr. David Alan Gilbert struct vhost_dev *dev = pcfd->data;
1941375318d0SDr. David Alan Gilbert struct vhost_user *u = dev->opaque;
1942375318d0SDr. David Alan Gilbert struct uffd_msg *msg = ufd;
1943375318d0SDr. David Alan Gilbert uint64_t faultaddr = msg->arg.pagefault.address;
1944375318d0SDr. David Alan Gilbert RAMBlock *rb = NULL;
1945375318d0SDr. David Alan Gilbert uint64_t rb_offset;
1946375318d0SDr. David Alan Gilbert int i;
1947375318d0SDr. David Alan Gilbert
1948375318d0SDr. David Alan Gilbert trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
1949375318d0SDr. David Alan Gilbert dev->mem->nregions);
1950375318d0SDr. David Alan Gilbert for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1951375318d0SDr. David Alan Gilbert trace_vhost_user_postcopy_fault_handler_loop(i,
1952375318d0SDr. David Alan Gilbert u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
1953375318d0SDr. David Alan Gilbert if (faultaddr >= u->postcopy_client_bases[i]) {
1954375318d0SDr. David Alan Gilbert /* Ofset of the fault address in the vhost region */
1955375318d0SDr. David Alan Gilbert uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1956375318d0SDr. David Alan Gilbert if (region_offset < dev->mem->regions[i].memory_size) {
1957375318d0SDr. David Alan Gilbert rb_offset = region_offset + u->region_rb_offset[i];
1958375318d0SDr. David Alan Gilbert trace_vhost_user_postcopy_fault_handler_found(i,
1959375318d0SDr. David Alan Gilbert region_offset, rb_offset);
1960375318d0SDr. David Alan Gilbert rb = u->region_rb[i];
1961375318d0SDr. David Alan Gilbert return postcopy_request_shared_page(pcfd, rb, faultaddr,
1962375318d0SDr. David Alan Gilbert rb_offset);
1963375318d0SDr. David Alan Gilbert }
1964375318d0SDr. David Alan Gilbert }
1965375318d0SDr. David Alan Gilbert }
1966375318d0SDr. David Alan Gilbert error_report("%s: Failed to find region for fault %" PRIx64,
1967375318d0SDr. David Alan Gilbert __func__, faultaddr);
1968375318d0SDr. David Alan Gilbert return -1;
1969f82c1116SDr. David Alan Gilbert }
1970f82c1116SDr. David Alan Gilbert
vhost_user_postcopy_waker(struct PostCopyFD * pcfd,RAMBlock * rb,uint64_t offset)1971c07e3615SDr. David Alan Gilbert static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1972c07e3615SDr. David Alan Gilbert uint64_t offset)
1973c07e3615SDr. David Alan Gilbert {
1974c07e3615SDr. David Alan Gilbert struct vhost_dev *dev = pcfd->data;
1975c07e3615SDr. David Alan Gilbert struct vhost_user *u = dev->opaque;
1976c07e3615SDr. David Alan Gilbert int i;
1977c07e3615SDr. David Alan Gilbert
1978c07e3615SDr. David Alan Gilbert trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1979c07e3615SDr. David Alan Gilbert
1980c07e3615SDr. David Alan Gilbert if (!u) {
1981c07e3615SDr. David Alan Gilbert return 0;
1982c07e3615SDr. David Alan Gilbert }
1983c07e3615SDr. David Alan Gilbert /* Translate the offset into an address in the clients address space */
1984c07e3615SDr. David Alan Gilbert for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1985c07e3615SDr. David Alan Gilbert if (u->region_rb[i] == rb &&
1986c07e3615SDr. David Alan Gilbert offset >= u->region_rb_offset[i] &&
1987c07e3615SDr. David Alan Gilbert offset < (u->region_rb_offset[i] +
1988c07e3615SDr. David Alan Gilbert dev->mem->regions[i].memory_size)) {
1989c07e3615SDr. David Alan Gilbert uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1990c07e3615SDr. David Alan Gilbert u->postcopy_client_bases[i];
1991c07e3615SDr. David Alan Gilbert trace_vhost_user_postcopy_waker_found(client_addr);
1992c07e3615SDr. David Alan Gilbert return postcopy_wake_shared(pcfd, client_addr, rb);
1993c07e3615SDr. David Alan Gilbert }
1994c07e3615SDr. David Alan Gilbert }
1995c07e3615SDr. David Alan Gilbert
1996c07e3615SDr. David Alan Gilbert trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1997c07e3615SDr. David Alan Gilbert return 0;
1998c07e3615SDr. David Alan Gilbert }
199918658a3cSPaolo Bonzini #endif
2000c07e3615SDr. David Alan Gilbert
2001f82c1116SDr. David Alan Gilbert /*
2002d3dff7a5SDr. David Alan Gilbert * Called at the start of an inbound postcopy on reception of the
2003d3dff7a5SDr. David Alan Gilbert * 'advise' command.
2004d3dff7a5SDr. David Alan Gilbert */
vhost_user_postcopy_advise(struct vhost_dev * dev,Error ** errp)2005d3dff7a5SDr. David Alan Gilbert static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
2006d3dff7a5SDr. David Alan Gilbert {
200718658a3cSPaolo Bonzini #ifdef CONFIG_LINUX
2008d3dff7a5SDr. David Alan Gilbert struct vhost_user *u = dev->opaque;
20094d0cf552STiwei Bie CharBackend *chr = u->user->chr;
2010d3dff7a5SDr. David Alan Gilbert int ufd;
2011025faa87SRoman Kagan int ret;
2012d3dff7a5SDr. David Alan Gilbert VhostUserMsg msg = {
2013d3dff7a5SDr. David Alan Gilbert .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
2014d3dff7a5SDr. David Alan Gilbert .hdr.flags = VHOST_USER_VERSION,
2015d3dff7a5SDr. David Alan Gilbert };
2016d3dff7a5SDr. David Alan Gilbert
2017025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, NULL, 0);
2018025faa87SRoman Kagan if (ret < 0) {
2019d3dff7a5SDr. David Alan Gilbert error_setg(errp, "Failed to send postcopy_advise to vhost");
2020025faa87SRoman Kagan return ret;
2021d3dff7a5SDr. David Alan Gilbert }
2022d3dff7a5SDr. David Alan Gilbert
2023025faa87SRoman Kagan ret = vhost_user_read(dev, &msg);
2024025faa87SRoman Kagan if (ret < 0) {
2025d3dff7a5SDr. David Alan Gilbert error_setg(errp, "Failed to get postcopy_advise reply from vhost");
2026025faa87SRoman Kagan return ret;
2027d3dff7a5SDr. David Alan Gilbert }
2028d3dff7a5SDr. David Alan Gilbert
2029d3dff7a5SDr. David Alan Gilbert if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
2030d3dff7a5SDr. David Alan Gilbert error_setg(errp, "Unexpected msg type. Expected %d received %d",
2031d3dff7a5SDr. David Alan Gilbert VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
2032025faa87SRoman Kagan return -EPROTO;
2033d3dff7a5SDr. David Alan Gilbert }
2034d3dff7a5SDr. David Alan Gilbert
2035d3dff7a5SDr. David Alan Gilbert if (msg.hdr.size) {
2036d3dff7a5SDr. David Alan Gilbert error_setg(errp, "Received bad msg size.");
2037025faa87SRoman Kagan return -EPROTO;
2038d3dff7a5SDr. David Alan Gilbert }
2039d3dff7a5SDr. David Alan Gilbert ufd = qemu_chr_fe_get_msgfd(chr);
2040d3dff7a5SDr. David Alan Gilbert if (ufd < 0) {
2041d3dff7a5SDr. David Alan Gilbert error_setg(errp, "%s: Failed to get ufd", __func__);
2042025faa87SRoman Kagan return -EIO;
2043d3dff7a5SDr. David Alan Gilbert }
2044ff5927baSMarc-André Lureau qemu_socket_set_nonblock(ufd);
2045d3dff7a5SDr. David Alan Gilbert
2046f82c1116SDr. David Alan Gilbert /* register ufd with userfault thread */
2047f82c1116SDr. David Alan Gilbert u->postcopy_fd.fd = ufd;
2048f82c1116SDr. David Alan Gilbert u->postcopy_fd.data = dev;
2049f82c1116SDr. David Alan Gilbert u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
2050c07e3615SDr. David Alan Gilbert u->postcopy_fd.waker = vhost_user_postcopy_waker;
2051f82c1116SDr. David Alan Gilbert u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
2052f82c1116SDr. David Alan Gilbert postcopy_register_shared_ufd(&u->postcopy_fd);
2053d3dff7a5SDr. David Alan Gilbert return 0;
205418658a3cSPaolo Bonzini #else
205518658a3cSPaolo Bonzini error_setg(errp, "Postcopy not supported on non-Linux systems");
2056025faa87SRoman Kagan return -ENOSYS;
205718658a3cSPaolo Bonzini #endif
2058d3dff7a5SDr. David Alan Gilbert }
2059d3dff7a5SDr. David Alan Gilbert
20606864a7b5SDr. David Alan Gilbert /*
20616864a7b5SDr. David Alan Gilbert * Called at the switch to postcopy on reception of the 'listen' command.
20626864a7b5SDr. David Alan Gilbert */
vhost_user_postcopy_listen(struct vhost_dev * dev,Error ** errp)20636864a7b5SDr. David Alan Gilbert static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
20646864a7b5SDr. David Alan Gilbert {
20656864a7b5SDr. David Alan Gilbert struct vhost_user *u = dev->opaque;
20666864a7b5SDr. David Alan Gilbert int ret;
20676864a7b5SDr. David Alan Gilbert VhostUserMsg msg = {
20686864a7b5SDr. David Alan Gilbert .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
20696864a7b5SDr. David Alan Gilbert .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
20706864a7b5SDr. David Alan Gilbert };
20716864a7b5SDr. David Alan Gilbert u->postcopy_listen = true;
2072025faa87SRoman Kagan
20736864a7b5SDr. David Alan Gilbert trace_vhost_user_postcopy_listen();
2074025faa87SRoman Kagan
2075025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, NULL, 0);
2076025faa87SRoman Kagan if (ret < 0) {
20776864a7b5SDr. David Alan Gilbert error_setg(errp, "Failed to send postcopy_listen to vhost");
2078025faa87SRoman Kagan return ret;
20796864a7b5SDr. David Alan Gilbert }
20806864a7b5SDr. David Alan Gilbert
20816864a7b5SDr. David Alan Gilbert ret = process_message_reply(dev, &msg);
20826864a7b5SDr. David Alan Gilbert if (ret) {
20836864a7b5SDr. David Alan Gilbert error_setg(errp, "Failed to receive reply to postcopy_listen");
20846864a7b5SDr. David Alan Gilbert return ret;
20856864a7b5SDr. David Alan Gilbert }
20866864a7b5SDr. David Alan Gilbert
20876864a7b5SDr. David Alan Gilbert return 0;
20886864a7b5SDr. David Alan Gilbert }
20896864a7b5SDr. David Alan Gilbert
209046343570SDr. David Alan Gilbert /*
209146343570SDr. David Alan Gilbert * Called at the end of postcopy
209246343570SDr. David Alan Gilbert */
vhost_user_postcopy_end(struct vhost_dev * dev,Error ** errp)209346343570SDr. David Alan Gilbert static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
209446343570SDr. David Alan Gilbert {
209546343570SDr. David Alan Gilbert VhostUserMsg msg = {
209646343570SDr. David Alan Gilbert .hdr.request = VHOST_USER_POSTCOPY_END,
209746343570SDr. David Alan Gilbert .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
209846343570SDr. David Alan Gilbert };
209946343570SDr. David Alan Gilbert int ret;
210046343570SDr. David Alan Gilbert struct vhost_user *u = dev->opaque;
210146343570SDr. David Alan Gilbert
210246343570SDr. David Alan Gilbert trace_vhost_user_postcopy_end_entry();
2103025faa87SRoman Kagan
2104025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, NULL, 0);
2105025faa87SRoman Kagan if (ret < 0) {
210646343570SDr. David Alan Gilbert error_setg(errp, "Failed to send postcopy_end to vhost");
2107025faa87SRoman Kagan return ret;
210846343570SDr. David Alan Gilbert }
210946343570SDr. David Alan Gilbert
211046343570SDr. David Alan Gilbert ret = process_message_reply(dev, &msg);
211146343570SDr. David Alan Gilbert if (ret) {
211246343570SDr. David Alan Gilbert error_setg(errp, "Failed to receive reply to postcopy_end");
211346343570SDr. David Alan Gilbert return ret;
211446343570SDr. David Alan Gilbert }
211546343570SDr. David Alan Gilbert postcopy_unregister_shared_ufd(&u->postcopy_fd);
2116c4f75385SIlya Maximets close(u->postcopy_fd.fd);
211746343570SDr. David Alan Gilbert u->postcopy_fd.handler = NULL;
211846343570SDr. David Alan Gilbert
211946343570SDr. David Alan Gilbert trace_vhost_user_postcopy_end_exit();
212046343570SDr. David Alan Gilbert
212146343570SDr. David Alan Gilbert return 0;
212246343570SDr. David Alan Gilbert }
212346343570SDr. David Alan Gilbert
vhost_user_postcopy_notifier(NotifierWithReturn * notifier,void * opaque,Error ** errp)21249ccbfe14SDr. David Alan Gilbert static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
2125be19d836SSteve Sistare void *opaque, Error **errp)
21269ccbfe14SDr. David Alan Gilbert {
21279ccbfe14SDr. David Alan Gilbert struct PostcopyNotifyData *pnd = opaque;
21289ccbfe14SDr. David Alan Gilbert struct vhost_user *u = container_of(notifier, struct vhost_user,
21299ccbfe14SDr. David Alan Gilbert postcopy_notifier);
21309ccbfe14SDr. David Alan Gilbert struct vhost_dev *dev = u->dev;
21319ccbfe14SDr. David Alan Gilbert
21329ccbfe14SDr. David Alan Gilbert switch (pnd->reason) {
21339ccbfe14SDr. David Alan Gilbert case POSTCOPY_NOTIFY_PROBE:
21349ccbfe14SDr. David Alan Gilbert if (!virtio_has_feature(dev->protocol_features,
21359ccbfe14SDr. David Alan Gilbert VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
21369ccbfe14SDr. David Alan Gilbert /* TODO: Get the device name into this error somehow */
2137d91f33c7SSteve Sistare error_setg(errp,
21389ccbfe14SDr. David Alan Gilbert "vhost-user backend not capable of postcopy");
21399ccbfe14SDr. David Alan Gilbert return -ENOENT;
21409ccbfe14SDr. David Alan Gilbert }
21419ccbfe14SDr. David Alan Gilbert break;
21429ccbfe14SDr. David Alan Gilbert
2143d3dff7a5SDr. David Alan Gilbert case POSTCOPY_NOTIFY_INBOUND_ADVISE:
2144d91f33c7SSteve Sistare return vhost_user_postcopy_advise(dev, errp);
2145d3dff7a5SDr. David Alan Gilbert
21466864a7b5SDr. David Alan Gilbert case POSTCOPY_NOTIFY_INBOUND_LISTEN:
2147d91f33c7SSteve Sistare return vhost_user_postcopy_listen(dev, errp);
21486864a7b5SDr. David Alan Gilbert
214946343570SDr. David Alan Gilbert case POSTCOPY_NOTIFY_INBOUND_END:
2150d91f33c7SSteve Sistare return vhost_user_postcopy_end(dev, errp);
215146343570SDr. David Alan Gilbert
21529ccbfe14SDr. David Alan Gilbert default:
21539ccbfe14SDr. David Alan Gilbert /* We ignore notifications we don't know */
21549ccbfe14SDr. David Alan Gilbert break;
21559ccbfe14SDr. David Alan Gilbert }
21569ccbfe14SDr. David Alan Gilbert
21579ccbfe14SDr. David Alan Gilbert return 0;
21589ccbfe14SDr. David Alan Gilbert }
21599ccbfe14SDr. David Alan Gilbert
vhost_user_backend_init(struct vhost_dev * dev,void * opaque,Error ** errp)216028770ff9SKevin Wolf static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
216128770ff9SKevin Wolf Error **errp)
21625f6f6664SNikolay Nikolaev {
216356534930SAlex Bennée uint64_t features, ram_slots;
21642152f3feSMarc-André Lureau struct vhost_user *u;
216556534930SAlex Bennée VhostUserState *vus = (VhostUserState *) opaque;
2166dcb10c00SMichael S. Tsirkin int err;
2167dcb10c00SMichael S. Tsirkin
21685f6f6664SNikolay Nikolaev assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
21695f6f6664SNikolay Nikolaev
21702152f3feSMarc-André Lureau u = g_new0(struct vhost_user, 1);
217156534930SAlex Bennée u->user = vus;
21729ccbfe14SDr. David Alan Gilbert u->dev = dev;
21732152f3feSMarc-André Lureau dev->opaque = u;
21745f6f6664SNikolay Nikolaev
217521e70425SMarc-André Lureau err = vhost_user_get_features(dev, &features);
2176dcb10c00SMichael S. Tsirkin if (err < 0) {
2177998647dcSMarkus Armbruster error_setg_errno(errp, -err, "vhost_backend_init failed");
2178f2a6e6c4SKevin Wolf return err;
2179dcb10c00SMichael S. Tsirkin }
2180dcb10c00SMichael S. Tsirkin
2181dcb10c00SMichael S. Tsirkin if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
218256534930SAlex Bennée bool supports_f_config = vus->supports_config ||
218356534930SAlex Bennée (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
218456534930SAlex Bennée uint64_t protocol_features;
218556534930SAlex Bennée
2186dcb10c00SMichael S. Tsirkin dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
2187dcb10c00SMichael S. Tsirkin
218821e70425SMarc-André Lureau err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
21896dcdd06eSMaxime Coquelin &protocol_features);
2190dcb10c00SMichael S. Tsirkin if (err < 0) {
2191998647dcSMarkus Armbruster error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
219228770ff9SKevin Wolf return -EPROTO;
2193dcb10c00SMichael S. Tsirkin }
2194dcb10c00SMichael S. Tsirkin
219556534930SAlex Bennée /*
219656534930SAlex Bennée * We will use all the protocol features we support - although
219756534930SAlex Bennée * we suppress F_CONFIG if we know QEMUs internal code can not support
219856534930SAlex Bennée * it.
219956534930SAlex Bennée */
220056534930SAlex Bennée protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;
22011c3e5a26SMaxime Coquelin
220256534930SAlex Bennée if (supports_f_config) {
220356534930SAlex Bennée if (!virtio_has_feature(protocol_features,
220456534930SAlex Bennée VHOST_USER_PROTOCOL_F_CONFIG)) {
2205fb38d0c9SChangpeng Liu error_setg(errp, "vhost-user device expecting "
220656534930SAlex Bennée "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
2207fb38d0c9SChangpeng Liu "not support it.");
220856534930SAlex Bennée return -EPROTO;
220956534930SAlex Bennée }
221056534930SAlex Bennée } else {
221156534930SAlex Bennée if (virtio_has_feature(protocol_features,
221256534930SAlex Bennée VHOST_USER_PROTOCOL_F_CONFIG)) {
221390e31232SAlbert Esteve warn_report("vhost-user backend supports "
2214fb38d0c9SChangpeng Liu "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
221556534930SAlex Bennée protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
221656534930SAlex Bennée }
22171c3e5a26SMaxime Coquelin }
22181c3e5a26SMaxime Coquelin
221956534930SAlex Bennée /* final set of protocol features */
222056534930SAlex Bennée dev->protocol_features = protocol_features;
222121e70425SMarc-André Lureau err = vhost_user_set_protocol_features(dev, dev->protocol_features);
2222dcb10c00SMichael S. Tsirkin if (err < 0) {
2223998647dcSMarkus Armbruster error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
222428770ff9SKevin Wolf return -EPROTO;
2225dcb10c00SMichael S. Tsirkin }
2226e2051e9eSYuanhan Liu
2227e2051e9eSYuanhan Liu /* query the max queues we support if backend supports Multiple Queue */
2228e2051e9eSYuanhan Liu if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
222921e70425SMarc-André Lureau err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
223021e70425SMarc-André Lureau &dev->max_queues);
2231e2051e9eSYuanhan Liu if (err < 0) {
2232998647dcSMarkus Armbruster error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
223328770ff9SKevin Wolf return -EPROTO;
2234e2051e9eSYuanhan Liu }
223584affad1SKevin Wolf } else {
223684affad1SKevin Wolf dev->max_queues = 1;
2237e2051e9eSYuanhan Liu }
223884affad1SKevin Wolf
2239c90bd505SKevin Wolf if (dev->num_queues && dev->max_queues < dev->num_queues) {
224028770ff9SKevin Wolf error_setg(errp, "The maximum number of queues supported by the "
2241c90bd505SKevin Wolf "backend is %" PRIu64, dev->max_queues);
2242c90bd505SKevin Wolf return -EINVAL;
2243c90bd505SKevin Wolf }
22446dcdd06eSMaxime Coquelin
22456dcdd06eSMaxime Coquelin if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
22466dcdd06eSMaxime Coquelin !(virtio_has_feature(dev->protocol_features,
2247a84ec993SMaxime Coquelin VHOST_USER_PROTOCOL_F_BACKEND_REQ) &&
22486dcdd06eSMaxime Coquelin virtio_has_feature(dev->protocol_features,
22496dcdd06eSMaxime Coquelin VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
225028770ff9SKevin Wolf error_setg(errp, "IOMMU support requires reply-ack and "
2251f8ed3648SManos Pitsidianakis "backend-req protocol features.");
225228770ff9SKevin Wolf return -EINVAL;
22536dcdd06eSMaxime Coquelin }
22546b0eff1aSRaphael Norwitz
22556b0eff1aSRaphael Norwitz /* get max memory regions if backend supports configurable RAM slots */
22566b0eff1aSRaphael Norwitz if (!virtio_has_feature(dev->protocol_features,
22576b0eff1aSRaphael Norwitz VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
225827598393SRaphael Norwitz u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
22596b0eff1aSRaphael Norwitz } else {
22606b0eff1aSRaphael Norwitz err = vhost_user_get_max_memslots(dev, &ram_slots);
22616b0eff1aSRaphael Norwitz if (err < 0) {
2262998647dcSMarkus Armbruster error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
226328770ff9SKevin Wolf return -EPROTO;
22646b0eff1aSRaphael Norwitz }
22656b0eff1aSRaphael Norwitz
22666b0eff1aSRaphael Norwitz if (ram_slots < u->user->memory_slots) {
226728770ff9SKevin Wolf error_setg(errp, "The backend specified a max ram slots limit "
226828770ff9SKevin Wolf "of %" PRIu64", when the prior validated limit was "
226928770ff9SKevin Wolf "%d. This limit should never decrease.", ram_slots,
22706b0eff1aSRaphael Norwitz u->user->memory_slots);
227128770ff9SKevin Wolf return -EINVAL;
22726b0eff1aSRaphael Norwitz }
22736b0eff1aSRaphael Norwitz
227427598393SRaphael Norwitz u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
22756b0eff1aSRaphael Norwitz }
2276dcb10c00SMichael S. Tsirkin }
2277dcb10c00SMichael S. Tsirkin
2278d2fc4402SMarc-André Lureau if (dev->migration_blocker == NULL &&
2279d2fc4402SMarc-André Lureau !virtio_has_feature(dev->protocol_features,
2280d2fc4402SMarc-André Lureau VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
2281d2fc4402SMarc-André Lureau error_setg(&dev->migration_blocker,
2282d2fc4402SMarc-André Lureau "Migration disabled: vhost-user backend lacks "
2283d2fc4402SMarc-André Lureau "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
2284d2fc4402SMarc-André Lureau }
2285d2fc4402SMarc-André Lureau
228667b3965eSAdrian Moreno if (dev->vq_index == 0) {
2287f8ed3648SManos Pitsidianakis err = vhost_setup_backend_channel(dev);
22884bbeeba0SMarc-André Lureau if (err < 0) {
2289998647dcSMarkus Armbruster error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
229028770ff9SKevin Wolf return -EPROTO;
22914bbeeba0SMarc-André Lureau }
229267b3965eSAdrian Moreno }
22934bbeeba0SMarc-André Lureau
22949ccbfe14SDr. David Alan Gilbert u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
22959ccbfe14SDr. David Alan Gilbert postcopy_add_notifier(&u->postcopy_notifier);
22969ccbfe14SDr. David Alan Gilbert
22975f6f6664SNikolay Nikolaev return 0;
22985f6f6664SNikolay Nikolaev }
22995f6f6664SNikolay Nikolaev
vhost_user_backend_cleanup(struct vhost_dev * dev)23004d0cf552STiwei Bie static int vhost_user_backend_cleanup(struct vhost_dev *dev)
23015f6f6664SNikolay Nikolaev {
23022152f3feSMarc-André Lureau struct vhost_user *u;
23032152f3feSMarc-André Lureau
23045f6f6664SNikolay Nikolaev assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
23055f6f6664SNikolay Nikolaev
23062152f3feSMarc-André Lureau u = dev->opaque;
23079ccbfe14SDr. David Alan Gilbert if (u->postcopy_notifier.notify) {
23089ccbfe14SDr. David Alan Gilbert postcopy_remove_notifier(&u->postcopy_notifier);
23099ccbfe14SDr. David Alan Gilbert u->postcopy_notifier.notify = NULL;
23109ccbfe14SDr. David Alan Gilbert }
2311c4f75385SIlya Maximets u->postcopy_listen = false;
2312c4f75385SIlya Maximets if (u->postcopy_fd.handler) {
2313c4f75385SIlya Maximets postcopy_unregister_shared_ufd(&u->postcopy_fd);
2314c4f75385SIlya Maximets close(u->postcopy_fd.fd);
2315c4f75385SIlya Maximets u->postcopy_fd.handler = NULL;
2316c4f75385SIlya Maximets }
2317f8ed3648SManos Pitsidianakis if (u->backend_ioc) {
2318f8ed3648SManos Pitsidianakis close_backend_channel(u);
23194bbeeba0SMarc-André Lureau }
2320905125d0SDr. David Alan Gilbert g_free(u->region_rb);
2321905125d0SDr. David Alan Gilbert u->region_rb = NULL;
2322905125d0SDr. David Alan Gilbert g_free(u->region_rb_offset);
2323905125d0SDr. David Alan Gilbert u->region_rb_offset = NULL;
2324905125d0SDr. David Alan Gilbert u->region_rb_len = 0;
23252152f3feSMarc-André Lureau g_free(u);
23265f6f6664SNikolay Nikolaev dev->opaque = 0;
23275f6f6664SNikolay Nikolaev
23285f6f6664SNikolay Nikolaev return 0;
23295f6f6664SNikolay Nikolaev }
23305f6f6664SNikolay Nikolaev
vhost_user_get_vq_index(struct vhost_dev * dev,int idx)2331fc57fd99SYuanhan Liu static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
2332fc57fd99SYuanhan Liu {
2333fc57fd99SYuanhan Liu assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
2334fc57fd99SYuanhan Liu
2335fc57fd99SYuanhan Liu return idx;
2336fc57fd99SYuanhan Liu }
2337fc57fd99SYuanhan Liu
vhost_user_memslots_limit(struct vhost_dev * dev)23382ce68e4cSIgor Mammedov static int vhost_user_memslots_limit(struct vhost_dev *dev)
23392ce68e4cSIgor Mammedov {
23406b0eff1aSRaphael Norwitz struct vhost_user *u = dev->opaque;
23416b0eff1aSRaphael Norwitz
23426b0eff1aSRaphael Norwitz return u->user->memory_slots;
23432ce68e4cSIgor Mammedov }
23442ce68e4cSIgor Mammedov
vhost_user_requires_shm_log(struct vhost_dev * dev)23451be0ac21SMarc-André Lureau static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
23461be0ac21SMarc-André Lureau {
23471be0ac21SMarc-André Lureau assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
23481be0ac21SMarc-André Lureau
23491be0ac21SMarc-André Lureau return virtio_has_feature(dev->protocol_features,
23501be0ac21SMarc-André Lureau VHOST_USER_PROTOCOL_F_LOG_SHMFD);
23511be0ac21SMarc-André Lureau }
23521be0ac21SMarc-André Lureau
/*
 * After migration, make the guest's MAC visible on the new host's
 * network.  If the guest announces itself (GUEST_ANNOUNCE) nothing is
 * needed; otherwise ask the backend to emit a RARP if it supports it.
 * Returns 0 on success/no-op, negative errno otherwise.
 */
static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        /* 6 = length of an Ethernet MAC address, packed into the u64 payload */
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    /* Neither announcement mechanism is available. */
    return -ENOTSUP;
}
23763e866365SThibaut Collet
/*
 * Propagate the configured MTU to the backend (VHOST_USER_NET_SET_MTU).
 * No-op when the backend did not negotiate
 * VHOST_USER_PROTOCOL_F_NET_MTU.  Returns 0 on success, negative errno
 * on failure.
 */
static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
{
    VhostUserMsg msg;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    int ret;

    if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
        return 0;
    }

    msg.hdr.request = VHOST_USER_NET_SET_MTU;
    msg.payload.u64 = mtu;
    msg.hdr.size = sizeof(msg.payload.u64);
    msg.hdr.flags = VHOST_USER_VERSION;
    /* Request an explicit ack only when the backend can provide one. */
    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    /* If reply_ack supported, backend has to ack specified MTU is valid */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}
2408c5f048d8SMaxime Coquelin
vhost_user_send_device_iotlb_msg(struct vhost_dev * dev,struct vhost_iotlb_msg * imsg)24096dcdd06eSMaxime Coquelin static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
24106dcdd06eSMaxime Coquelin struct vhost_iotlb_msg *imsg)
24116dcdd06eSMaxime Coquelin {
2412025faa87SRoman Kagan int ret;
24136dcdd06eSMaxime Coquelin VhostUserMsg msg = {
241424e34754SMichael S. Tsirkin .hdr.request = VHOST_USER_IOTLB_MSG,
241524e34754SMichael S. Tsirkin .hdr.size = sizeof(msg.payload.iotlb),
241624e34754SMichael S. Tsirkin .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
24176dcdd06eSMaxime Coquelin .payload.iotlb = *imsg,
24186dcdd06eSMaxime Coquelin };
24196dcdd06eSMaxime Coquelin
2420025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, NULL, 0);
2421025faa87SRoman Kagan if (ret < 0) {
2422025faa87SRoman Kagan return ret;
24236dcdd06eSMaxime Coquelin }
24246dcdd06eSMaxime Coquelin
24256dcdd06eSMaxime Coquelin return process_message_reply(dev, &msg);
24266dcdd06eSMaxime Coquelin }
24276dcdd06eSMaxime Coquelin
24286dcdd06eSMaxime Coquelin
/*
 * Enable/disable IOTLB message handling.  Intentionally empty for
 * vhost-user: IOTLB replies arrive on the shared receive channel, so
 * there is no dedicated callback to (un)register.
 */
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}
24336dcdd06eSMaxime Coquelin
/*
 * Read @config_len bytes of the device config space from the backend
 * via VHOST_USER_GET_CONFIG.  Requires VHOST_USER_PROTOCOL_F_CONFIG.
 * On success fills @config and returns 0; on failure returns a
 * negative errno and sets @errp.
 */
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
        return -EINVAL;
    }

    assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_get_config failed");
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost_get_config failed");
        return ret;
    }

    /* The backend must echo the request type back in its reply. */
    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_setg(errp,
                   "Received unexpected msg type. Expected %d received %d",
                   VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -EPROTO;
    }

    /* Reply payload must be exactly header + the requested region. */
    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_setg(errp, "Received bad msg size.");
        return -EPROTO;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}
24824c3e257bSChangpeng Liu
/*
 * Write a window of the device config space to the backend via
 * VHOST_USER_SET_CONFIG.  @offset/@size select the window, @flags are
 * passed through to the backend.  Requires
 * VHOST_USER_PROTOCOL_F_CONFIG.  Returns 0 on success, negative errno
 * on failure.
 */
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size, uint32_t flags)
{
    int ret;
    uint8_t *p;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -ENOTSUP;
    }

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (size > VHOST_USER_MAX_CONFIG_SIZE) {
        return -EINVAL;
    }

    /*
     * Plain statements; these were previously chained with comma
     * operators, which is accident-prone.
     */
    msg.payload.config.offset = offset;
    msg.payload.config.size = size;
    msg.payload.config.flags = flags;
    p = msg.payload.config.region;
    memcpy(p, data, size);

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    /* When reply_ack is negotiated, wait for the backend to confirm. */
    if (reply_supported) {
        return process_message_reply(dev, &msg);
    }

    return 0;
}
25274c3e257bSChangpeng Liu
/*
 * Create a crypto session on the backend
 * (VHOST_USER_CREATE_CRYPTO_SESSION).  @session_info points to a
 * CryptoDevBackendSessionInfo describing either a symmetric or an
 * asymmetric session; on success the backend-assigned id is stored in
 * @session_id.  Returns 0 on success, negative errno on failure.
 */
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    int ret;
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSessionInfo *backend_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) {
        /* Asymmetric session: copy setup data plus an optional key. */
        CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info;
        size_t keylen;

        memcpy(&msg.payload.session.u.asym.session_setup_data, sess,
               sizeof(CryptoDevBackendAsymSessionInfo));
        if (sess->keylen) {
            /* Reject keys that do not fit the fixed-size message field. */
            keylen = sizeof(msg.payload.session.u.asym.key);
            if (sess->keylen > keylen) {
                error_report("Unsupported asymmetric key size");
                return -ENOTSUP;
            }

            memcpy(&msg.payload.session.u.asym.key, sess->key,
                   sess->keylen);
        }
    } else {
        /* Symmetric session: cipher key plus an optional auth key. */
        CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info;
        size_t keylen;

        memcpy(&msg.payload.session.u.sym.session_setup_data, sess,
               sizeof(CryptoDevBackendSymSessionInfo));
        if (sess->key_len) {
            keylen = sizeof(msg.payload.session.u.sym.key);
            if (sess->key_len > keylen) {
                error_report("Unsupported cipher key size");
                return -ENOTSUP;
            }

            memcpy(&msg.payload.session.u.sym.key, sess->cipher_key,
                   sess->key_len);
        }

        if (sess->auth_key_len > 0) {
            keylen = sizeof(msg.payload.session.u.sym.auth_key);
            if (sess->auth_key_len > keylen) {
                error_report("Unsupported auth key size");
                return -ENOTSUP;
            }

            memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key,
                   sess->auth_key_len);
        }
    }

    msg.payload.session.op_code = backend_info->op_code;
    msg.payload.session.session_id = backend_info->session_id;
    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        error_report("vhost_user_write() return %d, create session failed",
                     ret);
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_report("vhost_user_read() return %d, create session failed",
                     ret);
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    /*
     * NOTE(review): this check is only effective if session_id is a
     * signed type in the payload struct - verify against the
     * VhostUserCryptoSession definition.
     */
    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -EINVAL;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}
2630efbfeb81SGonglei
2631efbfeb81SGonglei static int
vhost_user_crypto_close_session(struct vhost_dev * dev,uint64_t session_id)2632efbfeb81SGonglei vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2633efbfeb81SGonglei {
2634025faa87SRoman Kagan int ret;
2635efbfeb81SGonglei bool crypto_session = virtio_has_feature(dev->protocol_features,
2636efbfeb81SGonglei VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2637efbfeb81SGonglei VhostUserMsg msg = {
2638efbfeb81SGonglei .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2639efbfeb81SGonglei .hdr.flags = VHOST_USER_VERSION,
2640efbfeb81SGonglei .hdr.size = sizeof(msg.payload.u64),
2641efbfeb81SGonglei };
2642efbfeb81SGonglei msg.payload.u64 = session_id;
2643efbfeb81SGonglei
2644efbfeb81SGonglei if (!crypto_session) {
2645efbfeb81SGonglei error_report("vhost-user trying to send unhandled ioctl");
2646025faa87SRoman Kagan return -ENOTSUP;
2647efbfeb81SGonglei }
2648efbfeb81SGonglei
2649025faa87SRoman Kagan ret = vhost_user_write(dev, &msg, NULL, 0);
2650025faa87SRoman Kagan if (ret < 0) {
2651025faa87SRoman Kagan error_report("vhost_user_write() return %d, close session failed",
2652025faa87SRoman Kagan ret);
2653025faa87SRoman Kagan return ret;
2654efbfeb81SGonglei }
2655efbfeb81SGonglei
2656efbfeb81SGonglei return 0;
2657efbfeb81SGonglei }
2658efbfeb81SGonglei
/*
 * vhost-user has no concept of memslots private to a single device:
 * every memory region is shared with the backend.
 */
static bool vhost_user_no_private_memslots(struct vhost_dev *dev)
{
    return true;
}
2663988a2775STiwei Bie
/*
 * Fetch the inflight-I/O tracking region from the backend
 * (VHOST_USER_GET_INFLIGHT_FD) and mmap it into @inflight.  No-op when
 * VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD was not negotiated or the
 * backend reports an empty region.  On success @inflight owns the
 * mapping and the fd.  Returns 0 on success, negative errno on failure.
 */
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    int ret;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    /* Validate the reply before touching any ancillary fd. */
    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    /* Backend has no inflight state to share. */
    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    /* The memory fd travels as ancillary data on the chardev socket. */
    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -EIO;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -EFAULT;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}
27355ad204bfSXie Yongji
vhost_user_set_inflight_fd(struct vhost_dev * dev,struct vhost_inflight * inflight)27365ad204bfSXie Yongji static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
27375ad204bfSXie Yongji struct vhost_inflight *inflight)
27385ad204bfSXie Yongji {
27395ad204bfSXie Yongji VhostUserMsg msg = {
27405ad204bfSXie Yongji .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
27415ad204bfSXie Yongji .hdr.flags = VHOST_USER_VERSION,
27425ad204bfSXie Yongji .payload.inflight.mmap_size = inflight->size,
27435ad204bfSXie Yongji .payload.inflight.mmap_offset = inflight->offset,
27445ad204bfSXie Yongji .payload.inflight.num_queues = dev->nvqs,
27455ad204bfSXie Yongji .payload.inflight.queue_size = inflight->queue_size,
27465ad204bfSXie Yongji .hdr.size = sizeof(msg.payload.inflight),
27475ad204bfSXie Yongji };
27485ad204bfSXie Yongji
27495ad204bfSXie Yongji if (!virtio_has_feature(dev->protocol_features,
27505ad204bfSXie Yongji VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
27515ad204bfSXie Yongji return 0;
27525ad204bfSXie Yongji }
27535ad204bfSXie Yongji
2754025faa87SRoman Kagan return vhost_user_write(dev, &msg, &inflight->fd, 1);
27555ad204bfSXie Yongji }
27565ad204bfSXie Yongji
/* GPtrArray destroy-notify: tear down one VhostUserHostNotifier. */
static void vhost_user_state_destroy(gpointer data)
{
    VhostUserHostNotifier *notifier = data;

    vhost_user_host_notifier_remove(notifier, NULL, true);
}
2762503e3554SAlex Bennée
vhost_user_init(VhostUserState * user,CharBackend * chr,Error ** errp)27630b99f224SMarc-André Lureau bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
27644d0cf552STiwei Bie {
27650b99f224SMarc-André Lureau if (user->chr) {
27660b99f224SMarc-André Lureau error_setg(errp, "Cannot initialize vhost-user state");
27670b99f224SMarc-André Lureau return false;
27680b99f224SMarc-André Lureau }
27690b99f224SMarc-André Lureau user->chr = chr;
27706b0eff1aSRaphael Norwitz user->memory_slots = 0;
2771503e3554SAlex Bennée user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
2772503e3554SAlex Bennée &vhost_user_state_destroy);
27730b99f224SMarc-André Lureau return true;
27744d0cf552STiwei Bie }
27754d0cf552STiwei Bie
/* Release the state set up by vhost_user_init(); safe on uninitialized state. */
void vhost_user_cleanup(VhostUserState *user)
{
    if (user->chr) {
        /* Freeing the array runs vhost_user_state_destroy on each entry. */
        user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
        user->chr = NULL;
    }
}
27844d0cf552STiwei Bie
278571e076a0SAlex Bennée
/* Work item carried from vhost_user_async_close() to its bottom half. */
typedef struct {
    vu_async_close_fn cb;    /* callback invoked in the bottom half */
    DeviceState *dev;        /* device passed to cb */
    CharBackend *cd;         /* chardev whose handlers were disabled */
    struct vhost_dev *vhost; /* associated vhost device */
} VhostAsyncCallback;
279271e076a0SAlex Bennée
vhost_user_async_close_bh(void * opaque)279371e076a0SAlex Bennée static void vhost_user_async_close_bh(void *opaque)
279471e076a0SAlex Bennée {
279571e076a0SAlex Bennée VhostAsyncCallback *data = opaque;
279671e076a0SAlex Bennée
279771e076a0SAlex Bennée data->cb(data->dev);
279871e076a0SAlex Bennée
279971e076a0SAlex Bennée g_free(data);
280071e076a0SAlex Bennée }
280171e076a0SAlex Bennée
/*
 * We only schedule the work if the machine is running. If suspended
 * we want to keep all the in-flight data as is for migration
 * purposes.
 */
/*
 * Defer the chardev-close handling of @d's vhost-user connection to a
 * bottom half: disable further chardev notifications, mark the vhost
 * device stopped, and schedule @cb to run from the current AioContext.
 */
void vhost_user_async_close(DeviceState *d,
                            CharBackend *chardev, struct vhost_dev *vhost,
                            vu_async_close_fn cb)
{
    if (!runstate_check(RUN_STATE_SHUTDOWN)) {
        /*
         * A close event may happen during a read/write, but vhost
         * code assumes the vhost_dev remains setup, so delay the
         * stop & clear.
         */
        AioContext *ctx = qemu_get_current_aio_context();
        VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);

        /* Save data for the callback */
        data->cb = cb;
        data->dev = d;
        data->cd = chardev;
        data->vhost = vhost;

        /* Disable any further notifications on the chardev */
        qemu_chr_fe_set_handlers(chardev,
                                 NULL, NULL, NULL, NULL, NULL, NULL,
                                 false);

        aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);

        /*
         * Move vhost device to the stopped state. The vhost-user device
         * will be clean up and disconnected in BH. This can be useful in
         * the vhost migration code. If disconnect was caught there is an
         * option for the general vhost code to get the dev state without
         * knowing its type (in this case vhost-user).
         *
         * Note if the vhost device is fully cleared by the time we
         * execute the bottom half we won't continue with the cleanup.
         */
        vhost->started = false;
    }
}
284671e076a0SAlex Bennée
/*
 * Update the backend's device status when the device starts.  Only the
 * device covering the last queue pair sends the status, and only when
 * VHOST_USER_PROTOCOL_F_STATUS was negotiated.
 */
static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
{
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_STATUS)) {
        return 0;
    }

    /* Set device status only for last queue pair */
    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (!started) {
        return 0;
    }

    return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                      VIRTIO_CONFIG_S_DRIVER |
                                      VIRTIO_CONFIG_S_DRIVER_OK);
}
28676f8be29eSStefan Hajnoczi
vhost_user_reset_status(struct vhost_dev * dev)28686f8be29eSStefan Hajnoczi static void vhost_user_reset_status(struct vhost_dev *dev)
28696f8be29eSStefan Hajnoczi {
28706f8be29eSStefan Hajnoczi /* Set device status only for last queue pair */
28716f8be29eSStefan Hajnoczi if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
28726f8be29eSStefan Hajnoczi return;
28736f8be29eSStefan Hajnoczi }
28746f8be29eSStefan Hajnoczi
28756f8be29eSStefan Hajnoczi if (virtio_has_feature(dev->protocol_features,
28766f8be29eSStefan Hajnoczi VHOST_USER_PROTOCOL_F_STATUS)) {
28776f8be29eSStefan Hajnoczi vhost_user_set_status(dev, 0);
2878923b8921SYajun Wu }
2879923b8921SYajun Wu }
2880923b8921SYajun Wu
/*
 * True when the backend negotiated
 * VHOST_USER_PROTOCOL_F_DEVICE_STATE, i.e. it supports migration state
 * transfer via SET_DEVICE_STATE_FD / CHECK_DEVICE_STATE.
 */
static bool vhost_user_supports_device_state(struct vhost_dev *dev)
{
    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_DEVICE_STATE);
}
2886cda83adcSHanna Czenczek
/*
 * Initiate migration state transfer with the backend
 * (VHOST_USER_SET_DEVICE_STATE_FD).  Ownership of @fd passes to this
 * function (it is closed in every path after being sent).  On success
 * *@reply_fd is either a backend-provided replacement pipe fd or -1 if
 * the backend will use @fd directly.  Returns 0 on success, negative
 * errno on failure with @errp set.
 */
static int vhost_user_set_device_state_fd(struct vhost_dev *dev,
                                          VhostDeviceStateDirection direction,
                                          VhostDeviceStatePhase phase,
                                          int fd,
                                          int *reply_fd,
                                          Error **errp)
{
    int ret;
    struct vhost_user *vu = dev->opaque;
    VhostUserMsg msg = {
        .hdr = {
            .request = VHOST_USER_SET_DEVICE_STATE_FD,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(msg.payload.transfer_state),
        },
        .payload.transfer_state = {
            .direction = direction,
            .phase = phase,
        },
    };

    *reply_fd = -1;

    if (!vhost_user_supports_device_state(dev)) {
        close(fd);
        error_setg(errp, "Back-end does not support migration state transfer");
        return -ENOTSUP;
    }

    /* fd is sent as ancillary data and no longer needed on our side. */
    ret = vhost_user_write(dev, &msg, &fd, 1);
    close(fd);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to send SET_DEVICE_STATE_FD message");
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Failed to receive SET_DEVICE_STATE_FD reply");
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_SET_DEVICE_STATE_FD) {
        error_setg(errp,
                   "Received unexpected message type, expected %d, received %d",
                   VHOST_USER_SET_DEVICE_STATE_FD, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_setg(errp,
                   "Received bad message size, expected %zu, received %" PRIu32,
                   sizeof(msg.payload.u64), msg.hdr.size);
        return -EPROTO;
    }

    /* Low byte of the u64 reply is the backend's error indication. */
    if ((msg.payload.u64 & 0xff) != 0) {
        error_setg(errp, "Back-end did not accept migration state transfer");
        return -EIO;
    }

    /*
     * Unless NOFD is set, the backend returned its own transfer pipe as
     * ancillary data on the chardev socket.
     */
    if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
        *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr);
        if (*reply_fd < 0) {
            error_setg(errp,
                       "Failed to get back-end-provided transfer pipe FD");
            *reply_fd = -1;
            return -EIO;
        }
    }

    return 0;
}
2962cda83adcSHanna Czenczek
vhost_user_check_device_state(struct vhost_dev * dev,Error ** errp)2963cda83adcSHanna Czenczek static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp)
2964cda83adcSHanna Czenczek {
2965cda83adcSHanna Czenczek int ret;
2966cda83adcSHanna Czenczek VhostUserMsg msg = {
2967cda83adcSHanna Czenczek .hdr = {
2968cda83adcSHanna Czenczek .request = VHOST_USER_CHECK_DEVICE_STATE,
2969cda83adcSHanna Czenczek .flags = VHOST_USER_VERSION,
2970cda83adcSHanna Czenczek .size = 0,
2971cda83adcSHanna Czenczek },
2972cda83adcSHanna Czenczek };
2973cda83adcSHanna Czenczek
2974cda83adcSHanna Czenczek if (!vhost_user_supports_device_state(dev)) {
2975cda83adcSHanna Czenczek error_setg(errp, "Back-end does not support migration state transfer");
2976cda83adcSHanna Czenczek return -ENOTSUP;
2977cda83adcSHanna Czenczek }
2978cda83adcSHanna Czenczek
2979cda83adcSHanna Czenczek ret = vhost_user_write(dev, &msg, NULL, 0);
2980cda83adcSHanna Czenczek if (ret < 0) {
2981cda83adcSHanna Czenczek error_setg_errno(errp, -ret,
2982cda83adcSHanna Czenczek "Failed to send CHECK_DEVICE_STATE message");
2983cda83adcSHanna Czenczek return ret;
2984cda83adcSHanna Czenczek }
2985cda83adcSHanna Czenczek
2986cda83adcSHanna Czenczek ret = vhost_user_read(dev, &msg);
2987cda83adcSHanna Czenczek if (ret < 0) {
2988cda83adcSHanna Czenczek error_setg_errno(errp, -ret,
2989cda83adcSHanna Czenczek "Failed to receive CHECK_DEVICE_STATE reply");
2990cda83adcSHanna Czenczek return ret;
2991cda83adcSHanna Czenczek }
2992cda83adcSHanna Czenczek
2993cda83adcSHanna Czenczek if (msg.hdr.request != VHOST_USER_CHECK_DEVICE_STATE) {
2994cda83adcSHanna Czenczek error_setg(errp,
2995cda83adcSHanna Czenczek "Received unexpected message type, expected %d, received %d",
2996cda83adcSHanna Czenczek VHOST_USER_CHECK_DEVICE_STATE, msg.hdr.request);
2997cda83adcSHanna Czenczek return -EPROTO;
2998cda83adcSHanna Czenczek }
2999cda83adcSHanna Czenczek
3000cda83adcSHanna Czenczek if (msg.hdr.size != sizeof(msg.payload.u64)) {
3001cda83adcSHanna Czenczek error_setg(errp,
3002cda83adcSHanna Czenczek "Received bad message size, expected %zu, received %" PRIu32,
3003cda83adcSHanna Czenczek sizeof(msg.payload.u64), msg.hdr.size);
3004cda83adcSHanna Czenczek return -EPROTO;
3005cda83adcSHanna Czenczek }
3006cda83adcSHanna Czenczek
3007cda83adcSHanna Czenczek if (msg.payload.u64 != 0) {
3008cda83adcSHanna Czenczek error_setg(errp, "Back-end failed to process its internal state");
3009cda83adcSHanna Czenczek return -EIO;
3010cda83adcSHanna Czenczek }
3011cda83adcSHanna Czenczek
3012cda83adcSHanna Czenczek return 0;
3013cda83adcSHanna Czenczek }
3014cda83adcSHanna Czenczek
/*
 * Backend operation table for vhost-user devices.  The generic vhost
 * layer calls through these pointers; each callback implements the
 * corresponding request of the vhost-user protocol in this file.
 */
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        /* Connection lifecycle */
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        /* Memory-slot capabilities */
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_backend_no_private_memslots = vhost_user_no_private_memslots,
        /* Dirty logging and memory table */
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        /* Virtqueue setup and control */
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_vring_err = vhost_user_set_vring_err,
        /* Feature negotiation and device ownership */
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        /* Migration support */
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        /* IOTLB (IOMMU) messaging */
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        /* Device config space access */
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        /* vhost-user-crypto session management */
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        /* Inflight-request tracking (reconnect support) */
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
        /* Device start/stop/reset */
        .vhost_dev_start = vhost_user_dev_start,
        .vhost_reset_status = vhost_user_reset_status,
        /* Migration device-state transfer (SET_DEVICE_STATE_FD et al.) */
        .vhost_supports_device_state = vhost_user_supports_device_state,
        .vhost_set_device_state_fd = vhost_user_set_device_state_fd,
        .vhost_check_device_state = vhost_user_check_device_state,
};
3054