/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-dmabuf.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-crypto.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/uuid.h"
#include "qemu/sockets.h"
#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"
#include "exec/ramblock.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_BASELINE_NREGIONS 8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_BACKEND_MAX_FDS 8

#if defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_BACKEND_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_SET_STATUS = 39,
    VHOST_USER_GET_STATUS = 40,
    VHOST_USER_GET_SHARED_OBJECT = 41,
    VHOST_USER_SET_DEVICE_STATE_FD = 42,
    VHOST_USER_CHECK_DEVICE_STATE = 43,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserBackendRequest {
    VHOST_USER_BACKEND_NONE = 0,
    VHOST_USER_BACKEND_IOTLB_MSG = 1,
    VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
    VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
    VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
    VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64
#define VHOST_CRYPTO_ASYM_MAX_KEY_LEN 1024

typedef struct VhostUserCryptoSession {
    uint64_t op_code;
    union {
        struct {
            CryptoDevBackendSymSessionInfo session_setup_data;
            uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
            uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
        } sym;
        struct {
            CryptoDevBackendAsymSessionInfo session_setup_data;
            uint8_t key[VHOST_CRYPTO_ASYM_MAX_KEY_LEN];
        } asym;
    } u;

    /* session id for success, -1 on errors */
    int64_t session_id;
} VhostUserCryptoSession;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                    + sizeof(c.size) \
                                    + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

typedef struct VhostUserShared {
    unsigned char uuid[16];
} VhostUserShared;

typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK (0x3)
#define VHOST_USER_REPLY_MASK (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

/* Request payload of VHOST_USER_SET_DEVICE_STATE_FD */
typedef struct VhostUserTransferDeviceState {
    uint32_t direction;
    uint32_t phase;
} VhostUserTransferDeviceState;

typedef union {
#define VHOST_USER_VRING_IDX_MASK (0xff)
#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
    VhostUserShared object;
    VhostUserTransferDeviceState transfer_state;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION (0x1)

struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *backend_ioc;
    GSource *backend_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};

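/*
 * Read the fixed-size vhost-user message header from the backend
 * chardev and validate the reply flags.
 */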
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return r < 0 ? -saved_errno : -EIO;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    trace_vhost_user_read(msg->hdr.request, msg->hdr.flags);

    return 0;
}

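/*
 * Read a complete vhost-user message: the header, then as many payload
 * bytes as the header's size field announces.
 */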
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    r = vhost_user_read_header(dev, msg);
    if (r < 0) {
        return r;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        return -EPROTO;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            return r < 0 ? -saved_errno : -EIO;
        }
    }

    return 0;
}

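/*
 * If the request was sent with VHOST_USER_NEED_REPLY_MASK set, wait for
 * the backend's ack and map a non-zero payload to -EIO.
 */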
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    int ret;
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    ret = vhost_user_read(dev, &msg_reply);
    if (ret < 0) {
        return ret;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -EPROTO;
    }

    return msg_reply.payload.u64 ? -EIO : 0;
}

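/*
 * Return true for requests that affect the whole device rather than a
 * single virtqueue; these are only sent on the vhost_dev with vq_index 0.
 */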
static bool vhost_user_per_device_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
    case VHOST_USER_RESET_DEVICE:
    case VHOST_USER_ADD_MEM_REG:
    case VHOST_USER_REM_MEM_REG:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * Some devices, like virtio-scsi, are implemented as a single vhost_dev,
     * while others, like virtio-net, contain multiple vhost_devs. For
     * operations such as configuring device memory mappings or issuing device
     * resets, which affect the whole device instead of individual VQs,
     * vhost-user messages should only be sent once.
     *
     * Devices with multiple vhost_devs are given an associated dev->vq_index
     * so per_device requests are only sent if vq_index is 0.
     */
    if (vhost_user_per_device_request(msg->hdr.request)
        && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -EINVAL;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        int saved_errno = errno;
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return ret < 0 ? -saved_errno : -EIO;
    }

    trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);

    return 0;
}

int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    /* Send only once with first queue pair */
    if (dev->vq_index != 0) {
        return 0;
    }

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    ret = vhost_user_write(dev, &msg, fds, fd_num);
    if (ret < 0) {
        return ret;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        ret = vhost_user_read(dev, &msg);
        if (ret < 0) {
            return ret;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -EPROTO;
        }
    }

    return 0;
}

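/*
 * Translate a process-local address back to its MemoryRegion, returning
 * the region's fd and the offset of the address within the backing file.
 */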
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);
    *offset += mr->ram_block->fd_offset;

    return mr;
}

static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}

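/*
 * Build a VHOST_USER_SET_MEM_TABLE message from the device's current
 * memory map, collecting one fd per fd-backed region. With
 * track_ramblocks, also record each region's RAMBlock for postcopy.
 */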
static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}

static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}

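/*
 * Diff the shadow table against the device's current memory state and
 * build the lists of regions that must be removed and added.
 */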
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;

    return;
}

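/*
 * Send one VHOST_USER_REM_MEM_REG message per stale region and drop each
 * region from the shadow table once the backend has unmapped it.
 */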
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, NULL, 0);
            if (ret < 0) {
                return ret;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}

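/*
 * Send one VHOST_USER_ADD_MEM_REG message per new region. In postcopy
 * mode (track_ramblocks) the backend's reply carries the client-side
 * mapping address, which is stashed in shadow_pcb.
 */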
static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, &fd, 1);
            if (ret < 0) {
                return ret;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                ret = vhost_user_read(dev, &msg_reply);
                if (ret < 0) {
                    return ret;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type."
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -EPROTO;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -EPROTO;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -EPROTO;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}

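/*
 * Update the backend's memory map incrementally: scrub the shadow table,
 * send the removals, then the additions, and finally ack the backend in
 * postcopy mode.
 */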
static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;
    int ret;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg) {
        ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                  reply_supported);
        if (ret < 0) {
            goto err;
        }
    }

    if (nr_add_reg) {
        ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
                               reply_supported, track_ramblocks);
        if (ret < 0) {
            goto err;
        }
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in a position to deal with any faults
         * it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return ret;
}

static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    int ret;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                true);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_read(dev, &msg_reply);
        if (ret < 0) {
            return ret;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type."
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -EPROTO;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -EPROTO;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fds.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -EIO;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in a position to deal with any faults
         * it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        ret = vhost_user_write(dev, &msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
    int ret;

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version.
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                false);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    return vhost_user_write(dev, &msg, NULL, 0);
}

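/*
 * Send a request and read back a single u64 payload, validating the
 * reply's type and size.
 */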
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -EPROTO;
    }

    return 0;
}

/* Note: "msg->hdr.flags" may be modified. */
static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg,
                                 bool wait_for_reply)
{
    int ret;

    if (wait_for_reply) {
        bool reply_supported = virtio_has_feature(dev->protocol_features,
                                                  VHOST_USER_PROTOCOL_F_REPLY_ACK);
        if (reply_supported) {
            msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
        }
    }

    ret = vhost_user_write(dev, msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        uint64_t dummy;

        if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
            return process_message_reply(dev, msg);
        }

        /*
         * We need to wait for a reply but the backend does not
         * support replies for the command we just sent.
         * Send VHOST_USER_GET_FEATURES which makes all backends
         * send a reply.
         */
        return vhost_user_get_features(dev, &dummy);
    }

    return 0;
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring,
                           bool wait_for_reply)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    return vhost_user_write_sync(dev, &msg, wait_for_reply);
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring, false);
}

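/* RCU callback: unmap the notifier page whose teardown was deferred. */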
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    assert(n && n->unmap_addr);
    munmap(n->unmap_addr, qemu_real_host_page_size());
    n->unmap_addr = NULL;
}

/*
 * Clean-up function for the notifier; the structure is finally freed
 * under RCU.
 */
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
                                            VirtIODevice *vdev)
{
    if (n->addr) {
        if (vdev) {
            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
        }
        assert(!n->unmap_addr);
        n->unmap_addr = n->addr;
        n->addr = NULL;
        call_rcu(n, vhost_user_host_notifier_free, rcu);
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring, false);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -EINVAL;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        int ret;
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = enable,
        };

        /*
         * SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend /
         * control plane thread via unix domain socket. Virtio requests travel
         * from guest to vhost-user backend / data plane thread via eventfd.
         * Even if the guest enables the ring first, and pushes its first virtio
         * request second (conforming to the virtio spec), the data plane thread
         * in the backend may see the virtio request before the control plane
         * thread sees the queue enablement. This causes (in fact, requires) the
         * data plane thread to discard the virtio request (it arrived on a
         * seemingly disabled queue). To prevent this out-of-order delivery,
         * don't let the guest proceed to pushing the virtio request until the
         * backend control plane acknowledges enabling the queue -- IOW, pass
         * wait_for_reply=true below.
         */
        ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state, true);
        if (ret < 0) {
            /*
             * Restoring the previous state is likely infeasible, as well as
             * proceeding regardless the error, so just bail out and hope for
             * the device-level recovery.
             */
            return ret;
        }
    }

    return 0;
}

static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
                                             int idx)
{
    if (idx >= u->notifiers->len) {
        return NULL;
    }
    return g_ptr_array_index(u->notifiers, idx);
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };
    struct vhost_user *u = dev->opaque;

    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
    if (n) {
        vhost_user_host_notifier_remove(n, dev->vdev);
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *ring = msg.payload.state;

    return 0;
}

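/*
 * Common helper for SET_VRING_KICK/CALL/ERR: pass the eventfd along, or
 * set the NOFD flag when there is none.
 */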
static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    return vhost_user_write(dev, &msg, fds, fd_num);
}

static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_vring_err(struct vhost_dev *dev,
                                    struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);

    return vhost_user_write_sync(dev, &msg, wait_for_reply);
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
                              bool wait_for_reply)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    return vhost_user_write_sync(dev, &msg, wait_for_reply);
}

static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
}

static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
{
    uint64_t value;
    int ret;

    ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
    if (ret < 0) {
        return ret;
    }
    *status = value;

    return 0;
}

static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    ret = vhost_user_get_status(dev, &s);
    if (ret < 0) {
        return ret;
    }

    if ((s & status) == status) {
        return 0;
    }
    s |= status;

    return vhost_user_set_status(dev, s);
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
    int ret;

    /*
     * We need to include any extra backend only feature bits that
     * might be needed by our device. Currently this includes the
     * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
     * features.
     */
    ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
                             features | dev->backend_features,
                             log_enabled);

    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_STATUS)) {
        if (!ret) {
            return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
        }
    }

    return ret;
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
                              false);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.request = VHOST_USER_RESET_DEVICE,
    };

    /*
     * Historically, reset was not implemented so only reset devices
     * that are expecting it.
     */
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_RESET_DEVICE)) {
        return -ENOSYS;
    }

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_backend_handle_config_change(struct vhost_dev *dev)
{
    if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
        return -ENOSYS;
    }

    return dev->config_ops->vhost_dev_config_notifier(dev);
}

/*
 * Fetch or create the notifier for a given idx. Newly created
 * notifiers are added to the pointer array that tracks them.
 */
static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
                                                       int idx)
{
    VhostUserHostNotifier *n = NULL;
    if (idx >= u->notifiers->len) {
        g_ptr_array_set_size(u->notifiers, idx + 1);
    }

    n = g_ptr_array_index(u->notifiers, idx);
    if (!n) {
        /*
         * In case notifications arrive out-of-order,
         * make room for the current index.
         */
        g_ptr_array_remove_index(u->notifiers, idx);
        n = g_new0(VhostUserHostNotifier, 1);
        n->idx = idx;
        g_ptr_array_insert(u->notifiers, idx, n);
        trace_vhost_user_create_notifier(idx, n);
    }

    return n;
}

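/*
 * Handle VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG: map the page the
 * backend passed and install it as the queue's host notifier region.
 */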
static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev,
                                                         VhostUserVringArea *area,
                                                         int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size();
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -EINVAL;
    }

    /*
     * Fetch notifier and invalidate any old data before setting up
     * new mapped address.
     */
    n = fetch_or_create_notifier(user, queue_idx);
    vhost_user_host_notifier_remove(n, vdev);

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -EINVAL;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -EFAULT;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    if (!n->mr.ram) { /* Don't init again after suspend. */
        memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                          page_size, addr);
    } else {
        n->mr.ram_block->host = addr;
    }
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -ENXIO;
    }

    n->addr = addr;

    return 0;
}

static int
vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev,
                                            VhostUserShared *object)
{
    QemuUUID uuid;

    memcpy(uuid.data, object->uuid, sizeof(object->uuid));
    return virtio_add_vhost_device(&uuid, dev);
}

static int
vhost_user_backend_handle_shared_object_remove(VhostUserShared *object)
{
    QemuUUID uuid;

    memcpy(uuid.data, object->uuid, sizeof(object->uuid));
    return virtio_remove_resource(&uuid);
}

static bool vhost_user_send_resp(QIOChannel *ioc, VhostUserHeader *hdr,
                                 VhostUserPayload *payload, Error **errp)
{
    struct iovec iov[] = {
        { .iov_base = hdr, .iov_len = VHOST_USER_HDR_SIZE },
        { .iov_base = payload, .iov_len = hdr->size },
    };

    hdr->flags &= ~VHOST_USER_NEED_REPLY_MASK;
    hdr->flags |= VHOST_USER_REPLY_MASK;

    return !qio_channel_writev_all(ioc, iov, ARRAY_SIZE(iov), errp);
}

static bool
vhost_user_backend_send_dmabuf_fd(QIOChannel *ioc, VhostUserHeader *hdr,
                                  VhostUserPayload *payload, Error **errp)
{
    hdr->size = sizeof(payload->u64);
    return vhost_user_send_resp(ioc, hdr, payload, errp);
}

int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
                                 int *dmabuf_fd)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_SHARED_OBJECT,
        .hdr.flags = VHOST_USER_VERSION,
    };
    memcpy(msg.payload.object.uuid, uuid, sizeof(msg.payload.object.uuid));

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_SHARED_OBJECT) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_SHARED_OBJECT, msg.hdr.request);
        return -EPROTO;
    }

    *dmabuf_fd = qemu_chr_fe_get_msgfd(chr);
    if (*dmabuf_fd < 0) {
        error_report("Failed to get dmabuf fd");
        return -EIO;
    }

    return 0;
}

static int
vhost_user_backend_handle_shared_object_lookup(struct vhost_user *u,
                                               QIOChannel *ioc,
                                               VhostUserHeader *hdr,
                                               VhostUserPayload *payload)
{
    QemuUUID uuid;
    CharBackend *chr = u->user->chr;
    Error *local_err = NULL;
    int dmabuf_fd = -1;
    int fd_num = 0;

    memcpy(uuid.data, payload->object.uuid, sizeof(payload->object.uuid));

    payload->u64 = 0;
    switch (virtio_object_type(&uuid)) {
    case TYPE_DMABUF:
        dmabuf_fd = virtio_lookup_dmabuf(&uuid);
        break;
    case TYPE_VHOST_DEV:
    {
        struct vhost_dev *dev = virtio_lookup_vhost_device(&uuid);
        if (dev == NULL) {
            payload->u64 = -EINVAL;
            break;
        }
        int ret = vhost_user_get_shared_object(dev, uuid.data, &dmabuf_fd);
        if (ret < 0) {
            payload->u64 = ret;
        }
        break;
    }
    case TYPE_INVALID:
        payload->u64 = -EINVAL;
        break;
    }

    if (dmabuf_fd != -1) {
        fd_num++;
    }

    if (qemu_chr_fe_set_msgfds(chr, &dmabuf_fd, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        payload->u64 = -EINVAL;
    }

    if (!vhost_user_backend_send_dmabuf_fd(ioc, hdr, payload, &local_err)) {
        error_report_err(local_err);
        return -EINVAL;
    }

    return 0;
}

static void close_backend_channel(struct vhost_user *u)
{
    g_source_destroy(u->backend_src);
    g_source_unref(u->backend_src);
    u->backend_src = NULL;
    object_unref(OBJECT(u->backend_ioc));
    u->backend_ioc = NULL;
}

static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
                             gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_BACKEND_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG:
        ret = vhost_user_backend_handle_config_change(dev);
        break;
    case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_backend_handle_vring_host_notifier(dev, &payload.area,
                                                            fd ? fd[0] : -1);
        break;
    case VHOST_USER_BACKEND_SHARED_OBJECT_ADD:
        ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object);
        break;
    case VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE:
        ret = vhost_user_backend_handle_shared_object_remove(&payload.object);
        break;
    case VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP:
        ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc,
                                                             &hdr, &payload);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        if (!vhost_user_send_resp(ioc, &hdr, &payload, &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    close_backend_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}

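/*
 * Create the backend-request channel: one end of a socketpair is handed
 * to the backend with VHOST_USER_SET_BACKEND_REQ_FD, the other end is
 * watched by backend_read().
 */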
static int vhost_setup_backend_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_BACKEND_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
        return 0;
    }

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        int saved_errno = errno;
        error_report("socketpair() failed");
        return -saved_errno;
    }

    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        return -ECONNREFUSED;
    }
    u->backend_ioc = ioc;
    u->backend_src = qio_channel_add_watch_source(u->backend_ioc,
                                                  G_IO_IN | G_IO_HUP,
                                                  backend_read, dev, NULL, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    close(sv[1]);
    if (ret) {
        close_backend_channel(u);
    }

    return ret;
}

#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
1899 static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
1900 void *ufd)
1901 {
1902 struct vhost_dev *dev = pcfd->data;
1903 struct vhost_user *u = dev->opaque;
1904 struct uffd_msg *msg = ufd;
1905 uint64_t faultaddr = msg->arg.pagefault.address;
1906 RAMBlock *rb = NULL;
1907 uint64_t rb_offset;
1908 int i;
1909
1910 trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
1911 dev->mem->nregions);
1912 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1913 trace_vhost_user_postcopy_fault_handler_loop(i,
1914 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
1915 if (faultaddr >= u->postcopy_client_bases[i]) {
1916             /* Offset of the fault address in the vhost region */
1917 uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1918 if (region_offset < dev->mem->regions[i].memory_size) {
1919 rb_offset = region_offset + u->region_rb_offset[i];
1920 trace_vhost_user_postcopy_fault_handler_found(i,
1921 region_offset, rb_offset);
1922 rb = u->region_rb[i];
1923 return postcopy_request_shared_page(pcfd, rb, faultaddr,
1924 rb_offset);
1925 }
1926 }
1927 }
1928 error_report("%s: Failed to find region for fault %" PRIx64,
1929 __func__, faultaddr);
1930 return -1;
1931 }
1932
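/*
 * Called once a page has arrived: translate its RAMBlock offset back
 * into the client's (backend's) address space and wake anything
 * blocked on that page.
 */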
1933 static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1934 uint64_t offset)
1935 {
1936 struct vhost_dev *dev = pcfd->data;
1937 struct vhost_user *u = dev->opaque;
1938 int i;
1939
1940 trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1941
1942 if (!u) {
1943 return 0;
1944 }
1945     /* Translate the offset into an address in the client's address space */
1946 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1947 if (u->region_rb[i] == rb &&
1948 offset >= u->region_rb_offset[i] &&
1949 offset < (u->region_rb_offset[i] +
1950 dev->mem->regions[i].memory_size)) {
1951 uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1952 u->postcopy_client_bases[i];
1953 trace_vhost_user_postcopy_waker_found(client_addr);
1954 return postcopy_wake_shared(pcfd, client_addr, rb);
1955 }
1956 }
1957
1958 trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1959 return 0;
1960 }
1961 #endif
1962
1963 /*
1964 * Called at the start of an inbound postcopy on reception of the
1965 * 'advise' command.
1966 */
1967 static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
1968 {
1969 #ifdef CONFIG_LINUX
1970 struct vhost_user *u = dev->opaque;
1971 CharBackend *chr = u->user->chr;
1972 int ufd;
1973 int ret;
1974 VhostUserMsg msg = {
1975 .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
1976 .hdr.flags = VHOST_USER_VERSION,
1977 };
1978
1979 ret = vhost_user_write(dev, &msg, NULL, 0);
1980 if (ret < 0) {
1981 error_setg(errp, "Failed to send postcopy_advise to vhost");
1982 return ret;
1983 }
1984
1985 ret = vhost_user_read(dev, &msg);
1986 if (ret < 0) {
1987 error_setg(errp, "Failed to get postcopy_advise reply from vhost");
1988 return ret;
1989 }
1990
1991 if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
1992 error_setg(errp, "Unexpected msg type. Expected %d received %d",
1993 VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
1994 return -EPROTO;
1995 }
1996
1997 if (msg.hdr.size) {
1998 error_setg(errp, "Received bad msg size.");
1999 return -EPROTO;
2000 }
2001 ufd = qemu_chr_fe_get_msgfd(chr);
2002 if (ufd < 0) {
2003 error_setg(errp, "%s: Failed to get ufd", __func__);
2004 return -EIO;
2005 }
2006 qemu_socket_set_nonblock(ufd);
2007
2008 /* register ufd with userfault thread */
2009 u->postcopy_fd.fd = ufd;
2010 u->postcopy_fd.data = dev;
2011 u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
2012 u->postcopy_fd.waker = vhost_user_postcopy_waker;
2013 u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
2014 postcopy_register_shared_ufd(&u->postcopy_fd);
2015 return 0;
2016 #else
2017 error_setg(errp, "Postcopy not supported on non-Linux systems");
2018 return -ENOSYS;
2019 #endif
2020 }
2021
2022 /*
2023 * Called at the switch to postcopy on reception of the 'listen' command.
2024 */
2025 static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
2026 {
2027 struct vhost_user *u = dev->opaque;
2028 int ret;
2029 VhostUserMsg msg = {
2030 .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
2031 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2032 };
2033 u->postcopy_listen = true;
2034
2035 trace_vhost_user_postcopy_listen();
2036
2037 ret = vhost_user_write(dev, &msg, NULL, 0);
2038 if (ret < 0) {
2039 error_setg(errp, "Failed to send postcopy_listen to vhost");
2040 return ret;
2041 }
2042
2043 ret = process_message_reply(dev, &msg);
2044 if (ret) {
2045 error_setg(errp, "Failed to receive reply to postcopy_listen");
2046 return ret;
2047 }
2048
2049 return 0;
2050 }
2051
2052 /*
2053 * Called at the end of postcopy
2054 */
2055 static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
2056 {
2057 VhostUserMsg msg = {
2058 .hdr.request = VHOST_USER_POSTCOPY_END,
2059 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2060 };
2061 int ret;
2062 struct vhost_user *u = dev->opaque;
2063
2064 trace_vhost_user_postcopy_end_entry();
2065
2066 ret = vhost_user_write(dev, &msg, NULL, 0);
2067 if (ret < 0) {
2068 error_setg(errp, "Failed to send postcopy_end to vhost");
2069 return ret;
2070 }
2071
2072 ret = process_message_reply(dev, &msg);
2073 if (ret) {
2074 error_setg(errp, "Failed to receive reply to postcopy_end");
2075 return ret;
2076 }
2077 postcopy_unregister_shared_ufd(&u->postcopy_fd);
2078 close(u->postcopy_fd.fd);
2079 u->postcopy_fd.handler = NULL;
2080
2081 trace_vhost_user_postcopy_end_exit();
2082
2083 return 0;
2084 }
2085
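/*
 * Postcopy notifier dispatch: refuse postcopy at probe time unless the
 * backend supports VHOST_USER_PROTOCOL_F_PAGEFAULT, and forward the
 * advise/listen/end stages to the handlers above.
 */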
2086 static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
2087 void *opaque)
2088 {
2089 struct PostcopyNotifyData *pnd = opaque;
2090 struct vhost_user *u = container_of(notifier, struct vhost_user,
2091 postcopy_notifier);
2092 struct vhost_dev *dev = u->dev;
2093
2094 switch (pnd->reason) {
2095 case POSTCOPY_NOTIFY_PROBE:
2096 if (!virtio_has_feature(dev->protocol_features,
2097 VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
2098 /* TODO: Get the device name into this error somehow */
2099 error_setg(pnd->errp,
2100 "vhost-user backend not capable of postcopy");
2101 return -ENOENT;
2102 }
2103 break;
2104
2105 case POSTCOPY_NOTIFY_INBOUND_ADVISE:
2106 return vhost_user_postcopy_advise(dev, pnd->errp);
2107
2108 case POSTCOPY_NOTIFY_INBOUND_LISTEN:
2109 return vhost_user_postcopy_listen(dev, pnd->errp);
2110
2111 case POSTCOPY_NOTIFY_INBOUND_END:
2112 return vhost_user_postcopy_end(dev, pnd->errp);
2113
2114 default:
2115         /* We ignore notifications we don't know about */
2116 break;
2117 }
2118
2119 return 0;
2120 }
2121
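/*
 * Backend initialisation: negotiate feature and protocol-feature bits,
 * validate the backend's queue and memory-slot limits, set up the
 * backend channel (for the first queue pair only) and register the
 * postcopy notifier.
 */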
2122 static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
2123 Error **errp)
2124 {
2125 uint64_t features, ram_slots;
2126 struct vhost_user *u;
2127 VhostUserState *vus = (VhostUserState *) opaque;
2128 int err;
2129
2130 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2131
2132 u = g_new0(struct vhost_user, 1);
2133 u->user = vus;
2134 u->dev = dev;
2135 dev->opaque = u;
2136
2137 err = vhost_user_get_features(dev, &features);
2138 if (err < 0) {
2139 error_setg_errno(errp, -err, "vhost_backend_init failed");
2140 return err;
2141 }
2142
2143 if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
2144 bool supports_f_config = vus->supports_config ||
2145 (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
2146 uint64_t protocol_features;
2147
2148 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
2149
2150 err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
2151 &protocol_features);
2152 if (err < 0) {
2153 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2154 return -EPROTO;
2155 }
2156
2157 /*
2158 * We will use all the protocol features we support - although
2159          * we suppress F_CONFIG if we know QEMU's internal code cannot support
2160 * it.
2161 */
2162 protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;
2163
2164 if (supports_f_config) {
2165 if (!virtio_has_feature(protocol_features,
2166 VHOST_USER_PROTOCOL_F_CONFIG)) {
2167 error_setg(errp, "vhost-user device expecting "
2168 "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
2169 "not support it.");
2170 return -EPROTO;
2171 }
2172 } else {
2173 if (virtio_has_feature(protocol_features,
2174 VHOST_USER_PROTOCOL_F_CONFIG)) {
2175 warn_report("vhost-user backend supports "
2176 "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
2177 protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
2178 }
2179 }
2180
2181 /* final set of protocol features */
2182 dev->protocol_features = protocol_features;
2183 err = vhost_user_set_protocol_features(dev, dev->protocol_features);
2184 if (err < 0) {
2185 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2186 return -EPROTO;
2187 }
2188
2189         /* query the max queues we support if the backend supports multiple queues */
2190 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
2191 err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
2192 &dev->max_queues);
2193 if (err < 0) {
2194 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2195 return -EPROTO;
2196 }
2197 } else {
2198 dev->max_queues = 1;
2199 }
2200
2201 if (dev->num_queues && dev->max_queues < dev->num_queues) {
2202 error_setg(errp, "The maximum number of queues supported by the "
2203 "backend is %" PRIu64, dev->max_queues);
2204 return -EINVAL;
2205 }
2206
2207 if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
2208 !(virtio_has_feature(dev->protocol_features,
2209 VHOST_USER_PROTOCOL_F_BACKEND_REQ) &&
2210 virtio_has_feature(dev->protocol_features,
2211 VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
2212 error_setg(errp, "IOMMU support requires reply-ack and "
2213 "backend-req protocol features.");
2214 return -EINVAL;
2215 }
2216
2217 /* get max memory regions if backend supports configurable RAM slots */
2218 if (!virtio_has_feature(dev->protocol_features,
2219 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
2220 u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
2221 } else {
2222 err = vhost_user_get_max_memslots(dev, &ram_slots);
2223 if (err < 0) {
2224 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2225 return -EPROTO;
2226 }
2227
2228 if (ram_slots < u->user->memory_slots) {
2229 error_setg(errp, "The backend specified a max ram slots limit "
2230 "of %" PRIu64", when the prior validated limit was "
2231 "%d. This limit should never decrease.", ram_slots,
2232 u->user->memory_slots);
2233 return -EINVAL;
2234 }
2235
2236 u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
2237 }
2238 }
2239
2240 if (dev->migration_blocker == NULL &&
2241 !virtio_has_feature(dev->protocol_features,
2242 VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
2243 error_setg(&dev->migration_blocker,
2244 "Migration disabled: vhost-user backend lacks "
2245 "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
2246 }
2247
2248 if (dev->vq_index == 0) {
2249 err = vhost_setup_backend_channel(dev);
2250 if (err < 0) {
2251 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
2252 return -EPROTO;
2253 }
2254 }
2255
2256 u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
2257 postcopy_add_notifier(&u->postcopy_notifier);
2258
2259 return 0;
2260 }
2261
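/* Undo everything vhost_user_backend_init() set up. */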
2262 static int vhost_user_backend_cleanup(struct vhost_dev *dev)
2263 {
2264 struct vhost_user *u;
2265
2266 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2267
2268 u = dev->opaque;
2269 if (u->postcopy_notifier.notify) {
2270 postcopy_remove_notifier(&u->postcopy_notifier);
2271 u->postcopy_notifier.notify = NULL;
2272 }
2273 u->postcopy_listen = false;
2274 if (u->postcopy_fd.handler) {
2275 postcopy_unregister_shared_ufd(&u->postcopy_fd);
2276 close(u->postcopy_fd.fd);
2277 u->postcopy_fd.handler = NULL;
2278 }
2279 if (u->backend_ioc) {
2280 close_backend_channel(u);
2281 }
2282 g_free(u->region_rb);
2283 u->region_rb = NULL;
2284 g_free(u->region_rb_offset);
2285 u->region_rb_offset = NULL;
2286 u->region_rb_len = 0;
2287 g_free(u);
2288 dev->opaque = 0;
2289
2290 return 0;
2291 }
2292
2293 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
2294 {
2295 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
2296
2297 return idx;
2298 }
2299
2300 static int vhost_user_memslots_limit(struct vhost_dev *dev)
2301 {
2302 struct vhost_user *u = dev->opaque;
2303
2304 return u->user->memory_slots;
2305 }
2306
2307 static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
2308 {
2309 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2310
2311 return virtio_has_feature(dev->protocol_features,
2312 VHOST_USER_PROTOCOL_F_LOG_SHMFD);
2313 }
2314
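/*
 * Post-migration announcement: if the guest cannot announce itself
 * (no VIRTIO_NET_F_GUEST_ANNOUNCE), ask the backend to broadcast a
 * RARP for the given MAC address instead.
 */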
2315 static int vhost_user_migration_done(struct vhost_dev *dev, char *mac_addr)
2316 {
2317 VhostUserMsg msg = { };
2318
2319 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2320
2321 /* If guest supports GUEST_ANNOUNCE do nothing */
2322 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
2323 return 0;
2324 }
2325
2326 /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
2327 if (virtio_has_feature(dev->protocol_features,
2328 VHOST_USER_PROTOCOL_F_RARP)) {
2329 msg.hdr.request = VHOST_USER_SEND_RARP;
2330 msg.hdr.flags = VHOST_USER_VERSION;
2331 memcpy((char *)&msg.payload.u64, mac_addr, 6);
2332 msg.hdr.size = sizeof(msg.payload.u64);
2333
2334 return vhost_user_write(dev, &msg, NULL, 0);
2335 }
2336 return -ENOTSUP;
2337 }
2338
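/*
 * Tell the backend the negotiated MTU. A no-op unless
 * VHOST_USER_PROTOCOL_F_NET_MTU was negotiated.
 */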
2339 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2340 {
2341 VhostUserMsg msg;
2342 bool reply_supported = virtio_has_feature(dev->protocol_features,
2343 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2344 int ret;
2345
2346 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2347 return 0;
2348 }
2349
2350 msg.hdr.request = VHOST_USER_NET_SET_MTU;
2351 msg.payload.u64 = mtu;
2352 msg.hdr.size = sizeof(msg.payload.u64);
2353 msg.hdr.flags = VHOST_USER_VERSION;
2354 if (reply_supported) {
2355 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2356 }
2357
2358 ret = vhost_user_write(dev, &msg, NULL, 0);
2359 if (ret < 0) {
2360 return ret;
2361 }
2362
2363     /* If reply_ack is supported, the backend has to ack that the specified MTU is valid */
2364 if (reply_supported) {
2365 return process_message_reply(dev, &msg);
2366 }
2367
2368 return 0;
2369 }
2370
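/*
 * Forward an IOTLB update/invalidate message to the backend and wait
 * for its REPLY_ACK.
 */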
2371 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2372 struct vhost_iotlb_msg *imsg)
2373 {
2374 int ret;
2375 VhostUserMsg msg = {
2376 .hdr.request = VHOST_USER_IOTLB_MSG,
2377 .hdr.size = sizeof(msg.payload.iotlb),
2378 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2379 .payload.iotlb = *imsg,
2380 };
2381
2382 ret = vhost_user_write(dev, &msg, NULL, 0);
2383 if (ret < 0) {
2384 return ret;
2385 }
2386
2387 return process_message_reply(dev, &msg);
2388 }
2389
2390
2391 static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
2392 {
2393 /* No-op as the receive channel is not dedicated to IOTLB messages. */
2394 }
2395
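/*
 * Read the device config space from the backend (requires
 * VHOST_USER_PROTOCOL_F_CONFIG). The reply must echo the request type
 * and carry exactly the requested length.
 */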
2396 static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
2397 uint32_t config_len, Error **errp)
2398 {
2399 int ret;
2400 VhostUserMsg msg = {
2401 .hdr.request = VHOST_USER_GET_CONFIG,
2402 .hdr.flags = VHOST_USER_VERSION,
2403 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
2404 };
2405
2406 if (!virtio_has_feature(dev->protocol_features,
2407 VHOST_USER_PROTOCOL_F_CONFIG)) {
2408 error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
2409 return -EINVAL;
2410 }
2411
2412 assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
2413
2414 msg.payload.config.offset = 0;
2415 msg.payload.config.size = config_len;
2416 ret = vhost_user_write(dev, &msg, NULL, 0);
2417 if (ret < 0) {
2418 error_setg_errno(errp, -ret, "vhost_get_config failed");
2419 return ret;
2420 }
2421
2422 ret = vhost_user_read(dev, &msg);
2423 if (ret < 0) {
2424 error_setg_errno(errp, -ret, "vhost_get_config failed");
2425 return ret;
2426 }
2427
2428 if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
2429 error_setg(errp,
2430 "Received unexpected msg type. Expected %d received %d",
2431 VHOST_USER_GET_CONFIG, msg.hdr.request);
2432 return -EPROTO;
2433 }
2434
2435 if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
2436 error_setg(errp, "Received bad msg size.");
2437 return -EPROTO;
2438 }
2439
2440 memcpy(config, msg.payload.config.region, config_len);
2441
2442 return 0;
2443 }
2444
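/*
 * Write (part of) the device config space: the payload carries an
 * offset/size/flags header followed by the raw bytes.
 */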
2445 static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2446 uint32_t offset, uint32_t size, uint32_t flags)
2447 {
2448 int ret;
2449 uint8_t *p;
2450 bool reply_supported = virtio_has_feature(dev->protocol_features,
2451 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2452
2453 VhostUserMsg msg = {
2454 .hdr.request = VHOST_USER_SET_CONFIG,
2455 .hdr.flags = VHOST_USER_VERSION,
2456 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
2457 };
2458
2459 if (!virtio_has_feature(dev->protocol_features,
2460 VHOST_USER_PROTOCOL_F_CONFIG)) {
2461 return -ENOTSUP;
2462 }
2463
2464 if (reply_supported) {
2465 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
2466 }
2467
2468 if (size > VHOST_USER_MAX_CONFIG_SIZE) {
2469 return -EINVAL;
2470 }
2471
2472     msg.payload.config.offset = offset;
2473     msg.payload.config.size = size;
2474     msg.payload.config.flags = flags;
2475 p = msg.payload.config.region;
2476 memcpy(p, data, size);
2477
2478 ret = vhost_user_write(dev, &msg, NULL, 0);
2479 if (ret < 0) {
2480 return ret;
2481 }
2482
2483 if (reply_supported) {
2484 return process_message_reply(dev, &msg);
2485 }
2486
2487 return 0;
2488 }
2489
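/*
 * Create a crypto session in the backend: copy the session parameters
 * (including any symmetric or asymmetric key material) into the
 * message payload and read back the session id the backend allocated.
 */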
2490 static int vhost_user_crypto_create_session(struct vhost_dev *dev,
2491 void *session_info,
2492 uint64_t *session_id)
2493 {
2494 int ret;
2495 bool crypto_session = virtio_has_feature(dev->protocol_features,
2496 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2497 CryptoDevBackendSessionInfo *backend_info = session_info;
2498 VhostUserMsg msg = {
2499 .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
2500 .hdr.flags = VHOST_USER_VERSION,
2501 .hdr.size = sizeof(msg.payload.session),
2502 };
2503
2504 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2505
2506 if (!crypto_session) {
2507 error_report("vhost-user trying to send unhandled ioctl");
2508 return -ENOTSUP;
2509 }
2510
2511 if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) {
2512 CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info;
2513 size_t keylen;
2514
2515 memcpy(&msg.payload.session.u.asym.session_setup_data, sess,
2516 sizeof(CryptoDevBackendAsymSessionInfo));
2517 if (sess->keylen) {
2518 keylen = sizeof(msg.payload.session.u.asym.key);
2519 if (sess->keylen > keylen) {
2520 error_report("Unsupported asymmetric key size");
2521 return -ENOTSUP;
2522 }
2523
2524 memcpy(&msg.payload.session.u.asym.key, sess->key,
2525 sess->keylen);
2526 }
2527 } else {
2528 CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info;
2529 size_t keylen;
2530
2531 memcpy(&msg.payload.session.u.sym.session_setup_data, sess,
2532 sizeof(CryptoDevBackendSymSessionInfo));
2533 if (sess->key_len) {
2534 keylen = sizeof(msg.payload.session.u.sym.key);
2535 if (sess->key_len > keylen) {
2536 error_report("Unsupported cipher key size");
2537 return -ENOTSUP;
2538 }
2539
2540 memcpy(&msg.payload.session.u.sym.key, sess->cipher_key,
2541 sess->key_len);
2542 }
2543
2544 if (sess->auth_key_len > 0) {
2545 keylen = sizeof(msg.payload.session.u.sym.auth_key);
2546 if (sess->auth_key_len > keylen) {
2547 error_report("Unsupported auth key size");
2548 return -ENOTSUP;
2549 }
2550
2551 memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key,
2552 sess->auth_key_len);
2553 }
2554 }
2555
2556 msg.payload.session.op_code = backend_info->op_code;
2557 msg.payload.session.session_id = backend_info->session_id;
2558 ret = vhost_user_write(dev, &msg, NULL, 0);
2559 if (ret < 0) {
2560 error_report("vhost_user_write() return %d, create session failed",
2561 ret);
2562 return ret;
2563 }
2564
2565 ret = vhost_user_read(dev, &msg);
2566 if (ret < 0) {
2567 error_report("vhost_user_read() return %d, create session failed",
2568 ret);
2569 return ret;
2570 }
2571
2572 if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
2573 error_report("Received unexpected msg type. Expected %d received %d",
2574 VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
2575 return -EPROTO;
2576 }
2577
2578 if (msg.hdr.size != sizeof(msg.payload.session)) {
2579 error_report("Received bad msg size.");
2580 return -EPROTO;
2581 }
2582
2583 if (msg.payload.session.session_id < 0) {
2584 error_report("Bad session id: %" PRId64 "",
2585 msg.payload.session.session_id);
2586 return -EINVAL;
2587 }
2588 *session_id = msg.payload.session.session_id;
2589
2590 return 0;
2591 }
2592
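/* Ask the backend to tear down the given crypto session. */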
2593 static int
2594 vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2595 {
2596 int ret;
2597 bool crypto_session = virtio_has_feature(dev->protocol_features,
2598 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2599 VhostUserMsg msg = {
2600 .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2601 .hdr.flags = VHOST_USER_VERSION,
2602 .hdr.size = sizeof(msg.payload.u64),
2603 };
2604 msg.payload.u64 = session_id;
2605
2606 if (!crypto_session) {
2607 error_report("vhost-user trying to send unhandled ioctl");
2608 return -ENOTSUP;
2609 }
2610
2611 ret = vhost_user_write(dev, &msg, NULL, 0);
2612 if (ret < 0) {
2613 error_report("vhost_user_write() return %d, close session failed",
2614 ret);
2615 return ret;
2616 }
2617
2618 return 0;
2619 }
2620
2621 static bool vhost_user_no_private_memslots(struct vhost_dev *dev)
2622 {
2623 return true;
2624 }
2625
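/*
 * Ask the backend for a shared memory region describing in-flight
 * requests, so they can be resubmitted after a reconnect. The fd
 * arrives as ancillary data and is mmap()ed locally.
 */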
2626 static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
2627 uint16_t queue_size,
2628 struct vhost_inflight *inflight)
2629 {
2630 void *addr;
2631 int fd;
2632 int ret;
2633 struct vhost_user *u = dev->opaque;
2634 CharBackend *chr = u->user->chr;
2635 VhostUserMsg msg = {
2636 .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
2637 .hdr.flags = VHOST_USER_VERSION,
2638 .payload.inflight.num_queues = dev->nvqs,
2639 .payload.inflight.queue_size = queue_size,
2640 .hdr.size = sizeof(msg.payload.inflight),
2641 };
2642
2643 if (!virtio_has_feature(dev->protocol_features,
2644 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2645 return 0;
2646 }
2647
2648 ret = vhost_user_write(dev, &msg, NULL, 0);
2649 if (ret < 0) {
2650 return ret;
2651 }
2652
2653 ret = vhost_user_read(dev, &msg);
2654 if (ret < 0) {
2655 return ret;
2656 }
2657
2658 if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
2659 error_report("Received unexpected msg type. "
2660 "Expected %d received %d",
2661 VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
2662 return -EPROTO;
2663 }
2664
2665 if (msg.hdr.size != sizeof(msg.payload.inflight)) {
2666 error_report("Received bad msg size.");
2667 return -EPROTO;
2668 }
2669
2670 if (!msg.payload.inflight.mmap_size) {
2671 return 0;
2672 }
2673
2674 fd = qemu_chr_fe_get_msgfd(chr);
2675 if (fd < 0) {
2676 error_report("Failed to get mem fd");
2677 return -EIO;
2678 }
2679
2680 addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
2681 MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
2682
2683 if (addr == MAP_FAILED) {
2684 error_report("Failed to mmap mem fd");
2685 close(fd);
2686 return -EFAULT;
2687 }
2688
2689 inflight->addr = addr;
2690 inflight->fd = fd;
2691 inflight->size = msg.payload.inflight.mmap_size;
2692 inflight->offset = msg.payload.inflight.mmap_offset;
2693 inflight->queue_size = queue_size;
2694
2695 return 0;
2696 }
2697
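/* Hand a previously obtained inflight region back to the backend. */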
2698 static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2699 struct vhost_inflight *inflight)
2700 {
2701 VhostUserMsg msg = {
2702 .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2703 .hdr.flags = VHOST_USER_VERSION,
2704 .payload.inflight.mmap_size = inflight->size,
2705 .payload.inflight.mmap_offset = inflight->offset,
2706 .payload.inflight.num_queues = dev->nvqs,
2707 .payload.inflight.queue_size = inflight->queue_size,
2708 .hdr.size = sizeof(msg.payload.inflight),
2709 };
2710
2711 if (!virtio_has_feature(dev->protocol_features,
2712 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2713 return 0;
2714 }
2715
2716 return vhost_user_write(dev, &msg, &inflight->fd, 1);
2717 }
2718
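/*
 * GDestroyNotify for the notifier array built in vhost_user_init():
 * detach the host notifier and free it once RCU readers are done.
 */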
2719 static void vhost_user_state_destroy(gpointer data)
2720 {
2721 VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
2722 if (n) {
2723 vhost_user_host_notifier_remove(n, NULL);
2724 object_unparent(OBJECT(&n->mr));
2725 /*
2726 * We can't free until vhost_user_host_notifier_remove has
2727          * done its thing, so schedule the free with RCU.
2728 */
2729 g_free_rcu(n, rcu);
2730 }
2731 }
2732
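/*
 * Bind a VhostUserState to its chardev and allocate the host-notifier
 * array. Fails if the state is already in use.
 */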
2733 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
2734 {
2735 if (user->chr) {
2736 error_setg(errp, "Cannot initialize vhost-user state");
2737 return false;
2738 }
2739 user->chr = chr;
2740 user->memory_slots = 0;
2741 user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
2742 &vhost_user_state_destroy);
2743 return true;
2744 }
2745
2746 void vhost_user_cleanup(VhostUserState *user)
2747 {
2748 if (!user->chr) {
2749 return;
2750 }
2751 memory_region_transaction_begin();
2752 user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
2753 memory_region_transaction_commit();
2754 user->chr = NULL;
2755 }
2756
2757
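/* State handed from vhost_user_async_close() to its bottom half. */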
2758 typedef struct {
2759 vu_async_close_fn cb;
2760 DeviceState *dev;
2761 CharBackend *cd;
2762 struct vhost_dev *vhost;
2763 IOEventHandler *event_cb;
2764 } VhostAsyncCallback;
2765
2766 static void vhost_user_async_close_bh(void *opaque)
2767 {
2768 VhostAsyncCallback *data = opaque;
2769 struct vhost_dev *vhost = data->vhost;
2770
2771 /*
2772 * If the vhost_dev has been cleared in the meantime there is
2773 * nothing left to do as some other path has completed the
2774 * cleanup.
2775 */
2776 if (vhost->vdev) {
2777 data->cb(data->dev);
2778 } else if (data->event_cb) {
2779 qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
2780 NULL, data->dev, NULL, true);
2781 }
2782
2783 g_free(data);
2784 }
2785
2786 /*
2787 * We only schedule the work if the machine is running. If suspended
2788 * we want to keep all the in-flight data as is for migration
2789 * purposes.
2790 */
2791 void vhost_user_async_close(DeviceState *d,
2792 CharBackend *chardev, struct vhost_dev *vhost,
2793 vu_async_close_fn cb,
2794 IOEventHandler *event_cb)
2795 {
2796 if (!runstate_check(RUN_STATE_SHUTDOWN)) {
2797 /*
2798 * A close event may happen during a read/write, but vhost
2799 * code assumes the vhost_dev remains setup, so delay the
2800 * stop & clear.
2801 */
2802 AioContext *ctx = qemu_get_current_aio_context();
2803 VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
2804
2805 /* Save data for the callback */
2806 data->cb = cb;
2807 data->dev = d;
2808 data->cd = chardev;
2809 data->vhost = vhost;
2810 data->event_cb = event_cb;
2811
2812 /* Disable any further notifications on the chardev */
2813 qemu_chr_fe_set_handlers(chardev,
2814 NULL, NULL, NULL, NULL, NULL, NULL,
2815 false);
2816
2817 aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
2818
2819 /*
2820          * Move the vhost device to the stopped state. The vhost-user device
2821          * will be cleaned up and disconnected in the BH. This can be useful
2822          * in the vhost migration code: if a disconnect is caught there, the
2823          * general vhost code can still get the device state without knowing
2824          * its type (in this case vhost-user).
2825 *
2826 * Note if the vhost device is fully cleared by the time we
2827 * execute the bottom half we won't continue with the cleanup.
2828 */
2829 vhost->started = false;
2830 }
2831 }
2832
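/*
 * Push VIRTIO_CONFIG_S_ACKNOWLEDGE/DRIVER/DRIVER_OK to the backend on
 * start; only acts once, for the last queue pair, and only if
 * VHOST_USER_PROTOCOL_F_STATUS was negotiated.
 */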
2833 static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
2834 {
2835 if (!virtio_has_feature(dev->protocol_features,
2836 VHOST_USER_PROTOCOL_F_STATUS)) {
2837 return 0;
2838 }
2839
2840 /* Set device status only for last queue pair */
2841 if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
2842 return 0;
2843 }
2844
2845 if (started) {
2846 return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
2847 VIRTIO_CONFIG_S_DRIVER |
2848 VIRTIO_CONFIG_S_DRIVER_OK);
2849 } else {
2850 return 0;
2851 }
2852 }
2853
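/* Clear the backend's device status; only acts for the last queue pair. */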
2854 static void vhost_user_reset_status(struct vhost_dev *dev)
2855 {
2856 /* Set device status only for last queue pair */
2857 if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
2858 return;
2859 }
2860
2861 if (virtio_has_feature(dev->protocol_features,
2862 VHOST_USER_PROTOCOL_F_STATUS)) {
2863 vhost_user_set_status(dev, 0);
2864 }
2865 }
2866
2867 static bool vhost_user_supports_device_state(struct vhost_dev *dev)
2868 {
2869 return virtio_has_feature(dev->protocol_features,
2870 VHOST_USER_PROTOCOL_F_DEVICE_STATE);
2871 }
2872
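/*
 * Hand the backend one end of a pipe over which device state is
 * transferred for migration. If the reply does not set
 * VHOST_USER_VRING_NOFD_MASK, the backend supplies a replacement fd
 * of its own as ancillary data.
 */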
2873 static int vhost_user_set_device_state_fd(struct vhost_dev *dev,
2874 VhostDeviceStateDirection direction,
2875 VhostDeviceStatePhase phase,
2876 int fd,
2877 int *reply_fd,
2878 Error **errp)
2879 {
2880 int ret;
2881 struct vhost_user *vu = dev->opaque;
2882 VhostUserMsg msg = {
2883 .hdr = {
2884 .request = VHOST_USER_SET_DEVICE_STATE_FD,
2885 .flags = VHOST_USER_VERSION,
2886 .size = sizeof(msg.payload.transfer_state),
2887 },
2888 .payload.transfer_state = {
2889 .direction = direction,
2890 .phase = phase,
2891 },
2892 };
2893
2894 *reply_fd = -1;
2895
2896 if (!vhost_user_supports_device_state(dev)) {
2897 close(fd);
2898 error_setg(errp, "Back-end does not support migration state transfer");
2899 return -ENOTSUP;
2900 }
2901
2902 ret = vhost_user_write(dev, &msg, &fd, 1);
2903 close(fd);
2904 if (ret < 0) {
2905 error_setg_errno(errp, -ret,
2906 "Failed to send SET_DEVICE_STATE_FD message");
2907 return ret;
2908 }
2909
2910 ret = vhost_user_read(dev, &msg);
2911 if (ret < 0) {
2912 error_setg_errno(errp, -ret,
2913 "Failed to receive SET_DEVICE_STATE_FD reply");
2914 return ret;
2915 }
2916
2917 if (msg.hdr.request != VHOST_USER_SET_DEVICE_STATE_FD) {
2918 error_setg(errp,
2919 "Received unexpected message type, expected %d, received %d",
2920 VHOST_USER_SET_DEVICE_STATE_FD, msg.hdr.request);
2921 return -EPROTO;
2922 }
2923
2924 if (msg.hdr.size != sizeof(msg.payload.u64)) {
2925 error_setg(errp,
2926 "Received bad message size, expected %zu, received %" PRIu32,
2927 sizeof(msg.payload.u64), msg.hdr.size);
2928 return -EPROTO;
2929 }
2930
2931 if ((msg.payload.u64 & 0xff) != 0) {
2932 error_setg(errp, "Back-end did not accept migration state transfer");
2933 return -EIO;
2934 }
2935
2936 if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
2937 *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr);
2938 if (*reply_fd < 0) {
2939 error_setg(errp,
2940 "Failed to get back-end-provided transfer pipe FD");
2941 *reply_fd = -1;
2942 return -EIO;
2943 }
2944 }
2945
2946 return 0;
2947 }
2948
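/*
 * After a state transfer, ask the backend whether it processed the
 * state successfully; a non-zero u64 reply means failure.
 */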
2949 static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp)
2950 {
2951 int ret;
2952 VhostUserMsg msg = {
2953 .hdr = {
2954 .request = VHOST_USER_CHECK_DEVICE_STATE,
2955 .flags = VHOST_USER_VERSION,
2956 .size = 0,
2957 },
2958 };
2959
2960 if (!vhost_user_supports_device_state(dev)) {
2961 error_setg(errp, "Back-end does not support migration state transfer");
2962 return -ENOTSUP;
2963 }
2964
2965 ret = vhost_user_write(dev, &msg, NULL, 0);
2966 if (ret < 0) {
2967 error_setg_errno(errp, -ret,
2968 "Failed to send CHECK_DEVICE_STATE message");
2969 return ret;
2970 }
2971
2972 ret = vhost_user_read(dev, &msg);
2973 if (ret < 0) {
2974 error_setg_errno(errp, -ret,
2975 "Failed to receive CHECK_DEVICE_STATE reply");
2976 return ret;
2977 }
2978
2979 if (msg.hdr.request != VHOST_USER_CHECK_DEVICE_STATE) {
2980 error_setg(errp,
2981 "Received unexpected message type, expected %d, received %d",
2982 VHOST_USER_CHECK_DEVICE_STATE, msg.hdr.request);
2983 return -EPROTO;
2984 }
2985
2986 if (msg.hdr.size != sizeof(msg.payload.u64)) {
2987 error_setg(errp,
2988 "Received bad message size, expected %zu, received %" PRIu32,
2989 sizeof(msg.payload.u64), msg.hdr.size);
2990 return -EPROTO;
2991 }
2992
2993 if (msg.payload.u64 != 0) {
2994 error_setg(errp, "Back-end failed to process its internal state");
2995 return -EIO;
2996 }
2997
2998 return 0;
2999 }
3000
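/* Backend ops table wired up for VHOST_BACKEND_TYPE_USER devices. */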
3001 const VhostOps user_ops = {
3002 .backend_type = VHOST_BACKEND_TYPE_USER,
3003 .vhost_backend_init = vhost_user_backend_init,
3004 .vhost_backend_cleanup = vhost_user_backend_cleanup,
3005 .vhost_backend_memslots_limit = vhost_user_memslots_limit,
3006 .vhost_backend_no_private_memslots = vhost_user_no_private_memslots,
3007 .vhost_set_log_base = vhost_user_set_log_base,
3008 .vhost_set_mem_table = vhost_user_set_mem_table,
3009 .vhost_set_vring_addr = vhost_user_set_vring_addr,
3010 .vhost_set_vring_endian = vhost_user_set_vring_endian,
3011 .vhost_set_vring_num = vhost_user_set_vring_num,
3012 .vhost_set_vring_base = vhost_user_set_vring_base,
3013 .vhost_get_vring_base = vhost_user_get_vring_base,
3014 .vhost_set_vring_kick = vhost_user_set_vring_kick,
3015 .vhost_set_vring_call = vhost_user_set_vring_call,
3016 .vhost_set_vring_err = vhost_user_set_vring_err,
3017 .vhost_set_features = vhost_user_set_features,
3018 .vhost_get_features = vhost_user_get_features,
3019 .vhost_set_owner = vhost_user_set_owner,
3020 .vhost_reset_device = vhost_user_reset_device,
3021 .vhost_get_vq_index = vhost_user_get_vq_index,
3022 .vhost_set_vring_enable = vhost_user_set_vring_enable,
3023 .vhost_requires_shm_log = vhost_user_requires_shm_log,
3024 .vhost_migration_done = vhost_user_migration_done,
3025 .vhost_net_set_mtu = vhost_user_net_set_mtu,
3026 .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
3027 .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
3028 .vhost_get_config = vhost_user_get_config,
3029 .vhost_set_config = vhost_user_set_config,
3030 .vhost_crypto_create_session = vhost_user_crypto_create_session,
3031 .vhost_crypto_close_session = vhost_user_crypto_close_session,
3032 .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
3033 .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
3034 .vhost_dev_start = vhost_user_dev_start,
3035 .vhost_reset_status = vhost_user_reset_status,
3036 .vhost_supports_device_state = vhost_user_supports_device_state,
3037 .vhost_set_device_state_fd = vhost_user_set_device_state_fd,
3038 .vhost_check_device_state = vhost_user_check_device_state,
3039 };
3040