/*
 * Vhost-user filesystem virtio device
 *
 * Copyright 2018-2019 Red Hat, Inc.
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.  See the COPYING file in the
 * top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_fs.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "qemu/error-report.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user-fs.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"

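/*
 * Feature bits that depend on the vhost-user back-end: vhost_get_features()
 * clears any of these bits that the back-end did not advertise, while bits
 * not listed here pass through to the guest unchanged.
 */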
static const int user_feature_bits[] = {
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_NOTIFICATION_DATA,
    VHOST_INVALID_FEATURE_BIT
};

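/*
 * Fill in the virtio-fs config space: the filesystem tag and the number of
 * request queues.  Note that the tag is not NUL-terminated when it occupies
 * the entire field.
 */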
static void vuf_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    struct virtio_fs_config fscfg = {};

    memcpy((char *)fscfg.tag, fs->conf.tag,
           MIN(strlen(fs->conf.tag) + 1, sizeof(fscfg.tag)));

    virtio_stl_p(vdev, &fscfg.num_request_queues, fs->conf.num_request_queues);

    memcpy(config, &fscfg, sizeof(fscfg));
}

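/*
 * Hand the virtqueues over to the vhost-user back-end: enable host and guest
 * notifiers, propagate the guest's acked features, and start the vhost
 * device.  Called from vuf_set_status() when the device should be running.
 */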
static void vuf_start(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;
    int i;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return;
    }

    ret = vhost_dev_enable_notifiers(&fs->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return;
    }

    ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    fs->vhost_dev.acked_features = vdev->guest_features;
    ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
    if (ret < 0) {
        error_report("Error starting vhost: %d", -ret);
        goto err_guest_notifiers;
    }

    /*
     * guest_notifier_mask/pending not used yet, so just unmask
     * everything here.  virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < fs->vhost_dev.nvqs; i++) {
        vhost_virtqueue_mask(&fs->vhost_dev, vdev, i, false);
    }

    return;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}

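/*
 * Stop the vhost device and return virtqueue control to QEMU by tearing down
 * the guest and host notifiers set up in vuf_start().
 */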
static void vuf_stop(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&fs->vhost_dev, vdev, true);

    ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}

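/*
 * Start or stop the vhost back-end when the device status (or the VM run
 * state) changes, but only if the desired state differs from the current one.
 */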
static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    bool should_start = virtio_device_should_start(vdev, status);

    if (vhost_dev_is_started(&fs->vhost_dev) == should_start) {
        return;
    }

    if (should_start) {
        vuf_start(vdev);
    } else {
        vuf_stop(vdev);
    }
}

static uint64_t vuf_get_features(VirtIODevice *vdev,
                                 uint64_t features,
                                 Error **errp)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    return vhost_get_features(&fs->vhost_dev, user_feature_bits, features);
}

static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /*
     * Not normally called; it's the daemon that handles the queue;
     * however virtio's cleanup path can call this.
     */
}

static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                    bool mask)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) denotes the config interrupt.  This
     * device does not support masking it, so just return.
     */

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }
    vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
}

static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) denotes the config interrupt.  This
     * device does not track a pending config interrupt, so return false.
     */

    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }
    return vhost_virtqueue_pending(&fs->vhost_dev, idx);
}

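/*
 * Realize the device: validate the tag and queue properties, connect to the
 * vhost-user back-end over the chardev, and create the hiprio queue plus the
 * configured number of request queues.
 */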
static void vuf_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserFS *fs = VHOST_USER_FS(dev);
    unsigned int i;
    size_t len;
    int ret;

    if (!fs->conf.chardev.chr) {
        error_setg(errp, "missing chardev");
        return;
    }

    if (!fs->conf.tag) {
        error_setg(errp, "missing tag property");
        return;
    }
    len = strlen(fs->conf.tag);
    if (len == 0) {
        error_setg(errp, "tag property cannot be empty");
        return;
    }
    if (len > sizeof_field(struct virtio_fs_config, tag)) {
        error_setg(errp, "tag property must be %zu bytes or less",
                   sizeof_field(struct virtio_fs_config, tag));
        return;
    }

    if (fs->conf.num_request_queues == 0) {
        error_setg(errp, "num-request-queues property must be larger than 0");
        return;
    }

    if (!is_power_of_2(fs->conf.queue_size)) {
        error_setg(errp, "queue-size property must be a power of 2");
        return;
    }

    if (fs->conf.queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "queue-size property must be %u or smaller",
                   VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!vhost_user_init(&fs->vhost_user, &fs->conf.chardev, errp)) {
        return;
    }

    virtio_init(vdev, VIRTIO_ID_FS, sizeof(struct virtio_fs_config));

    /* Hiprio queue */
    fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);

    /* Request queues */
    fs->req_vqs = g_new(VirtQueue *, fs->conf.num_request_queues);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        fs->req_vqs[i] = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);
    }

    /* 1 high prio queue, plus the number configured */
    fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues;
    fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs);
    ret = vhost_dev_init(&fs->vhost_dev, &fs->vhost_user,
                         VHOST_BACKEND_TYPE_USER, 0, errp);
    if (ret < 0) {
        goto err_virtio;
    }

    return;

err_virtio:
    vhost_user_cleanup(&fs->vhost_user);
    virtio_delete_queue(fs->hiprio_vq);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        virtio_delete_queue(fs->req_vqs[i]);
    }
    g_free(fs->req_vqs);
    virtio_cleanup(vdev);
    g_free(fs->vhost_dev.vqs);
    return;
}

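/*
 * Unrealize the device: stop the vhost back-end if it is still running and
 * release the resources allocated in vuf_device_realize().
 */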
static void vuf_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserFS *fs = VHOST_USER_FS(dev);
    struct vhost_virtqueue *vhost_vqs = fs->vhost_dev.vqs;
    int i;

    /* This will stop vhost backend if appropriate. */
    vuf_set_status(vdev, 0);

    vhost_dev_cleanup(&fs->vhost_dev);

    vhost_user_cleanup(&fs->vhost_user);

    virtio_delete_queue(fs->hiprio_vq);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        virtio_delete_queue(fs->req_vqs[i]);
    }
    g_free(fs->req_vqs);
    virtio_cleanup(vdev);
    g_free(vhost_vqs);
}

static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    return &fs->vhost_dev;
}

/**
 * Fetch the internal state from virtiofsd and save it to `f`.
 */
static int vuf_save_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    Error *local_error = NULL;
    int ret;

    ret = vhost_save_backend_state(&fs->vhost_dev, f, &local_error);
    if (ret < 0) {
        error_reportf_err(local_error,
                          "Error saving back-end state of %s device %s "
                          "(tag: \"%s\"): ",
                          vdev->name, vdev->parent_obj.canonical_path,
                          fs->conf.tag ?: "<none>");
        return ret;
    }

    return 0;
}

/**
 * Load virtiofsd's internal state from `f` and send it over to virtiofsd.
 */
static int vuf_load_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    Error *local_error = NULL;
    int ret;

    ret = vhost_load_backend_state(&fs->vhost_dev, f, &local_error);
    if (ret < 0) {
        error_reportf_err(local_error,
                          "Error loading back-end state of %s device %s "
                          "(tag: \"%s\"): ",
                          vdev->name, vdev->parent_obj.canonical_path,
                          fs->conf.tag ?: "<none>");
        return ret;
    }

    return 0;
}

static bool vuf_is_internal_migration(void *opaque)
{
    /* TODO: Return false when an external migration is requested */
    return true;
}

static int vuf_check_migration_support(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    if (!vhost_supports_device_state(&fs->vhost_dev)) {
        error_report("Back-end of %s device %s (tag: \"%s\") does not support "
                     "migration through qemu",
                     vdev->name, vdev->parent_obj.canonical_path,
                     fs->conf.tag ?: "<none>");
        return -ENOTSUP;
    }

    return 0;
}

static const VMStateDescription vuf_backend_vmstate;

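/*
 * Top-level vmstate: the generic virtio device state, with the vhost-user
 * back-end state attached as an optional subsection below.
 */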
static const VMStateDescription vuf_vmstate = {
    .name = "vhost-user-fs",
    .version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vuf_backend_vmstate,
        NULL,
    }
};

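/*
 * Subsection used for internal migration only: it verifies that the back-end
 * supports device state transfer and then migrates the back-end's internal
 * state via vuf_save_state()/vuf_load_state().
 */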
static const VMStateDescription vuf_backend_vmstate = {
    .name = "vhost-user-fs-backend",
    .version_id = 0,
    .needed = vuf_is_internal_migration,
    .pre_load = vuf_check_migration_support,
    .pre_save = vuf_check_migration_support,
    .fields = (const VMStateField[]) {
        {
            .name = "back-end",
            .info = &(const VMStateInfo) {
                .name = "virtio-fs back-end state",
                .get = vuf_load_state,
                .put = vuf_save_state,
            },
        },
        VMSTATE_END_OF_LIST()
    },
};

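/* User-configurable properties; defaults: one request queue of size 128. */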
static Property vuf_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserFS, conf.chardev),
    DEFINE_PROP_STRING("tag", VHostUserFS, conf.tag),
    DEFINE_PROP_UINT16("num-request-queues", VHostUserFS,
                       conf.num_request_queues, 1),
    DEFINE_PROP_UINT16("queue-size", VHostUserFS, conf.queue_size, 128),
    DEFINE_PROP_END_OF_LIST(),
};

static void vuf_instance_init(Object *obj)
{
    VHostUserFS *fs = VHOST_USER_FS(obj);

    device_add_bootindex_property(obj, &fs->bootindex, "bootindex",
                                  "/filesystem@0", DEVICE(obj));
}

static void vuf_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vuf_properties);
    dc->vmsd = &vuf_vmstate;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vuf_device_realize;
    vdc->unrealize = vuf_device_unrealize;
    vdc->get_features = vuf_get_features;
    vdc->get_config = vuf_get_config;
    vdc->set_status = vuf_set_status;
    vdc->guest_notifier_mask = vuf_guest_notifier_mask;
    vdc->guest_notifier_pending = vuf_guest_notifier_pending;
    vdc->get_vhost = vuf_get_vhost;
}

static const TypeInfo vuf_info = {
    .name = TYPE_VHOST_USER_FS,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserFS),
    .instance_init = vuf_instance_init,
    .class_init = vuf_class_init,
};

static void vuf_register_types(void)
{
    type_register_static(&vuf_info);
}

type_init(vuf_register_types)