/*
 * Virtio vsock device
 *
 * Copyright 2015 Red Hat, Inc.
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.  See the COPYING file in the
 * top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_vsock.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/vhost-vsock.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "monitor/monitor.h"

enum {
    VHOST_VSOCK_SAVEVM_VERSION = 0,

    VHOST_VSOCK_QUEUE_SIZE = 128,
};

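/* Fill the virtio config space with the configured guest CID */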
static void vhost_vsock_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);
    struct virtio_vsock_config vsockcfg = {};

    virtio_stq_p(vdev, &vsockcfg.guest_cid, vsock->conf.guest_cid);
    memcpy(config, &vsockcfg, sizeof(vsockcfg));
}

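/* Tell the vhost backend which CID this guest uses */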
static int vhost_vsock_set_guest_cid(VHostVSock *vsock)
{
    const VhostOps *vhost_ops = vsock->vhost_dev.vhost_ops;
    int ret;

    if (!vhost_ops->vhost_vsock_set_guest_cid) {
        return -ENOSYS;
    }

    ret = vhost_ops->vhost_vsock_set_guest_cid(&vsock->vhost_dev,
                                               vsock->conf.guest_cid);
    if (ret < 0) {
        return -errno;
    }
    return 0;
}

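/* Toggle the vhost backend's running state (1 = start, 0 = stop) */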
static int vhost_vsock_set_running(VHostVSock *vsock, int start)
{
    const VhostOps *vhost_ops = vsock->vhost_dev.vhost_ops;
    int ret;

    if (!vhost_ops->vhost_vsock_set_running) {
        return -ENOSYS;
    }

    ret = vhost_ops->vhost_vsock_set_running(&vsock->vhost_dev, start);
    if (ret < 0) {
        return -errno;
    }
    return 0;
}

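/*
 * Start the vhost backend: enable host notifiers, bind guest notifiers,
 * start the vhost device and mark it running.  Errors unwind in reverse
 * order through the labels at the end of the function.
 */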
static void vhost_vsock_start(VirtIODevice *vdev)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;
    int i;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return;
    }

    ret = vhost_dev_enable_notifiers(&vsock->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return;
    }

    ret = k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    vsock->vhost_dev.acked_features = vdev->guest_features;
    ret = vhost_dev_start(&vsock->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error starting vhost: %d", -ret);
        goto err_guest_notifiers;
    }

    ret = vhost_vsock_set_running(vsock, 1);
    if (ret < 0) {
        error_report("Error starting vhost vsock: %d", -ret);
        goto err_dev_start;
    }

    /* guest_notifier_mask/pending not used yet, so just unmask
     * everything here.  virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < vsock->vhost_dev.nvqs; i++) {
        vhost_virtqueue_mask(&vsock->vhost_dev, vdev, i, false);
    }

    return;

err_dev_start:
    vhost_dev_stop(&vsock->vhost_dev, vdev);
err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&vsock->vhost_dev, vdev);
}

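/*
 * Stop the vhost backend and release the notifiers acquired in
 * vhost_vsock_start()
 */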
static void vhost_vsock_stop(VirtIODevice *vdev)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!k->set_guest_notifiers) {
        return;
    }

    ret = vhost_vsock_set_running(vsock, 0);
    if (ret < 0) {
        error_report("vhost vsock set running failed: %d", ret);
        return;
    }

    vhost_dev_stop(&vsock->vhost_dev, vdev);

    ret = k->set_guest_notifiers(qbus->parent, vsock->vhost_dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&vsock->vhost_dev, vdev);
}

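/*
 * Start or stop the backend when the driver sets/clears DRIVER_OK or when
 * the VM is paused or resumed
 */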
static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);
    bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;

    if (!vdev->vm_running) {
        should_start = false;
    }

    if (vsock->vhost_dev.started == should_start) {
        return;
    }

    if (should_start) {
        vhost_vsock_start(vdev);
    } else {
        vhost_vsock_stop(vdev);
    }
}

static uint64_t vhost_vsock_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    /* No feature bits used yet */
    return requested_features;
}

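/*
 * The RX/TX queues are serviced by the vhost backend and the event queue is
 * only filled when a transport reset is sent, so guest kicks need no work
 * here
 */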
static void vhost_vsock_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Do nothing */
}

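/* Relay guest notifier mask/unmask requests to the vhost backend */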
static void vhost_vsock_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                            bool mask)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);

    vhost_virtqueue_mask(&vsock->vhost_dev, vdev, idx, mask);
}

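/* Query the vhost backend for a pending virtqueue notification */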
static bool vhost_vsock_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VHostVSock *vsock = VHOST_VSOCK(vdev);

    return vhost_virtqueue_pending(&vsock->vhost_dev, idx);
}

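/*
 * Queue a VIRTIO_VSOCK_EVENT_TRANSPORT_RESET event on the event virtqueue so
 * the guest knows its existing connections are gone, e.g. after migration
 */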
static void vhost_vsock_send_transport_reset(VHostVSock *vsock)
{
    VirtQueueElement *elem;
    VirtQueue *vq = vsock->event_vq;
    struct virtio_vsock_event event = {
        .id = cpu_to_le32(VIRTIO_VSOCK_EVENT_TRANSPORT_RESET),
    };

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (!elem) {
        error_report("vhost-vsock missed transport reset event");
        return;
    }

    if (elem->out_num) {
        error_report("invalid vhost-vsock event virtqueue element with "
                     "out buffers");
        goto out;
    }

    if (iov_from_buf(elem->in_sg, elem->in_num, 0,
                     &event, sizeof(event)) != sizeof(event)) {
        error_report("vhost-vsock event virtqueue element is too short");
        goto out;
    }

    virtqueue_push(vq, elem, sizeof(event));
    virtio_notify(VIRTIO_DEVICE(vsock), vq);

out:
    g_free(elem);
}

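/* Drop the post-load timer if one is still pending */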
static void vhost_vsock_post_load_timer_cleanup(VHostVSock *vsock)
{
    if (!vsock->post_load_timer) {
        return;
    }

    timer_del(vsock->post_load_timer);
    timer_free(vsock->post_load_timer);
    vsock->post_load_timer = NULL;
}

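/*
 * Runs shortly after an incoming migration completes; see
 * vhost_vsock_post_load()
 */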
static void vhost_vsock_post_load_timer_cb(void *opaque)
{
    VHostVSock *vsock = opaque;

    vhost_vsock_post_load_timer_cleanup(vsock);
    vhost_vsock_send_transport_reset(vsock);
}

static int vhost_vsock_pre_save(void *opaque)
{
    VHostVSock *vsock = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!vsock->vhost_dev.started);

    return 0;
}

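/*
 * The guest's vsock connections do not survive migration, so if the guest
 * has set up the event queue, schedule a transport reset event.  A timer is
 * used so the virtqueue is only touched once migration has fully completed.
 */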
static int vhost_vsock_post_load(void *opaque, int version_id)
{
    VHostVSock *vsock = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(vsock);

    if (virtio_queue_get_addr(vdev, 2)) {
        /* Defer transport reset event to a vm clock timer so that virtqueue
         * changes happen after migration has completed.
         */
        assert(!vsock->post_load_timer);
        vsock->post_load_timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL,
                         vhost_vsock_post_load_timer_cb,
                         vsock);
        timer_mod(vsock->post_load_timer, 1);
    }
    return 0;
}

static const VMStateDescription vmstate_virtio_vhost_vsock = {
    .name = "virtio-vhost_vsock",
    .minimum_version_id = VHOST_VSOCK_SAVEVM_VERSION,
    .version_id = VHOST_VSOCK_SAVEVM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = vhost_vsock_pre_save,
    .post_load = vhost_vsock_post_load,
};

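/*
 * Realize: validate the guest CID, obtain a vhost-vsock fd (from the vhostfd
 * property or by opening /dev/vhost-vsock), create the three virtqueues and
 * initialize the vhost backend
 */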
static void vhost_vsock_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostVSock *vsock = VHOST_VSOCK(dev);
    int vhostfd;
    int ret;

    /* Refuse to use reserved CID numbers */
    if (vsock->conf.guest_cid <= 2) {
        error_setg(errp, "guest-cid property must be greater than 2");
        return;
    }

    if (vsock->conf.guest_cid > UINT32_MAX) {
        error_setg(errp, "guest-cid property must be a 32-bit number");
        return;
    }

    if (vsock->conf.vhostfd) {
        vhostfd = monitor_fd_param(cur_mon, vsock->conf.vhostfd, errp);
        if (vhostfd == -1) {
            error_prepend(errp, "vhost-vsock: unable to parse vhostfd: ");
            return;
        }
    } else {
        vhostfd = open("/dev/vhost-vsock", O_RDWR);
        if (vhostfd < 0) {
            error_setg_errno(errp, errno,
                             "vhost-vsock: failed to open vhost device");
            return;
        }
    }

    virtio_init(vdev, "vhost-vsock", VIRTIO_ID_VSOCK,
                sizeof(struct virtio_vsock_config));

    /* Receive and transmit queues belong to vhost */
    vsock->recv_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                      vhost_vsock_handle_output);
    vsock->trans_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                       vhost_vsock_handle_output);

    /* The event queue belongs to QEMU */
    vsock->event_vq = virtio_add_queue(vdev, VHOST_VSOCK_QUEUE_SIZE,
                                       vhost_vsock_handle_output);

    vsock->vhost_dev.nvqs = ARRAY_SIZE(vsock->vhost_vqs);
    vsock->vhost_dev.vqs = vsock->vhost_vqs;
    ret = vhost_dev_init(&vsock->vhost_dev, (void *)(uintptr_t)vhostfd,
                         VHOST_BACKEND_TYPE_KERNEL, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost-vsock: vhost_dev_init failed");
        goto err_virtio;
    }

    ret = vhost_vsock_set_guest_cid(vsock);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "vhost-vsock: unable to set guest cid");
        goto err_vhost_dev;
    }

    vsock->post_load_timer = NULL;
    return;

err_vhost_dev:
    vhost_dev_cleanup(&vsock->vhost_dev);
err_virtio:
    virtio_delete_queue(vsock->recv_vq);
    virtio_delete_queue(vsock->trans_vq);
    virtio_delete_queue(vsock->event_vq);
    virtio_cleanup(vdev);
    close(vhostfd);
    return;
}

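/* Undo everything set up in vhost_vsock_device_realize() */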
static void vhost_vsock_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostVSock *vsock = VHOST_VSOCK(dev);

    vhost_vsock_post_load_timer_cleanup(vsock);

    /* This will stop vhost backend if appropriate. */
    vhost_vsock_set_status(vdev, 0);

    vhost_dev_cleanup(&vsock->vhost_dev);
    virtio_delete_queue(vsock->recv_vq);
    virtio_delete_queue(vsock->trans_vq);
    virtio_delete_queue(vsock->event_vq);
    virtio_cleanup(vdev);
}

static Property vhost_vsock_properties[] = {
    DEFINE_PROP_UINT64("guest-cid", VHostVSock, conf.guest_cid, 0),
    DEFINE_PROP_STRING("vhostfd", VHostVSock, conf.vhostfd),
    DEFINE_PROP_END_OF_LIST(),
};

static void vhost_vsock_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vhost_vsock_properties);
    dc->vmsd = &vmstate_virtio_vhost_vsock;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = vhost_vsock_device_realize;
    vdc->unrealize = vhost_vsock_device_unrealize;
    vdc->get_features = vhost_vsock_get_features;
    vdc->get_config = vhost_vsock_get_config;
    vdc->set_status = vhost_vsock_set_status;
    vdc->guest_notifier_mask = vhost_vsock_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_vsock_guest_notifier_pending;
}

static const TypeInfo vhost_vsock_info = {
    .name = TYPE_VHOST_VSOCK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostVSock),
    .class_init = vhost_vsock_class_init,
};

static void vhost_vsock_register_types(void)
{
    type_register_static(&vhost_vsock_info);
}

type_init(vhost_vsock_register_types)