xref: /openbmc/qemu/hw/block/vhost-user-blk.c (revision a6caeee8)
1 /*
2  * vhost-user-blk host device
3  *
4  * Copyright(C) 2017 Intel Corporation.
5  *
6  * Authors:
7  *  Changpeng Liu <changpeng.liu@intel.com>
8  *
9  * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by:
10  * Felipe Franciosi <felipe@nutanix.com>
11  * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
12  * Nicholas Bellinger <nab@risingtidesystems.com>
13  *
14  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
15  * See the COPYING.LIB file in the top-level directory.
16  *
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #include "qemu/error-report.h"
22 #include "qemu/cutils.h"
23 #include "hw/qdev-core.h"
24 #include "hw/qdev-properties.h"
25 #include "hw/qdev-properties-system.h"
26 #include "hw/virtio/vhost.h"
27 #include "hw/virtio/vhost-user-blk.h"
28 #include "hw/virtio/virtio.h"
29 #include "hw/virtio/virtio-bus.h"
30 #include "hw/virtio/virtio-access.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/runstate.h"
33 
34 #define REALIZE_CONNECTION_RETRIES 3
35 
/*
 * Feature bits that are negotiated with the vhost-user backend.
 * This list is passed to vhost_get_features() in
 * vhost_user_blk_get_features(); bits not listed here are not taken
 * from the backend.  VHOST_INVALID_FEATURE_BIT terminates the array.
 */
static const int user_feature_bits[] = {
    VIRTIO_BLK_F_SIZE_MAX,
    VIRTIO_BLK_F_SEG_MAX,
    VIRTIO_BLK_F_GEOMETRY,
    VIRTIO_BLK_F_BLK_SIZE,
    VIRTIO_BLK_F_TOPOLOGY,
    VIRTIO_BLK_F_MQ,
    VIRTIO_BLK_F_RO,
    VIRTIO_BLK_F_FLUSH,
    VIRTIO_BLK_F_CONFIG_WCE,
    VIRTIO_BLK_F_DISCARD,
    VIRTIO_BLK_F_WRITE_ZEROES,
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_IOMMU_PLATFORM,
    VHOST_INVALID_FEATURE_BIT
};
56 
57 static void vhost_user_blk_event(void *opaque, QEMUChrEvent event);
58 
59 static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config)
60 {
61     VHostUserBlk *s = VHOST_USER_BLK(vdev);
62 
63     /* Our num_queues overrides the device backend */
64     virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues);
65 
66     memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config));
67 }
68 
69 static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
70 {
71     VHostUserBlk *s = VHOST_USER_BLK(vdev);
72     struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config;
73     int ret;
74 
75     if (blkcfg->wce == s->blkcfg.wce) {
76         return;
77     }
78 
79     ret = vhost_dev_set_config(&s->dev, &blkcfg->wce,
80                                offsetof(struct virtio_blk_config, wce),
81                                sizeof(blkcfg->wce),
82                                VHOST_SET_CONFIG_TYPE_MASTER);
83     if (ret) {
84         error_report("set device config space failed");
85         return;
86     }
87 
88     s->blkcfg.wce = blkcfg->wce;
89 }
90 
91 static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
92 {
93     int ret;
94     struct virtio_blk_config blkcfg;
95     VHostUserBlk *s = VHOST_USER_BLK(dev->vdev);
96     Error *local_err = NULL;
97 
98     ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg,
99                                sizeof(struct virtio_blk_config),
100                                &local_err);
101     if (ret < 0) {
102         error_report_err(local_err);
103         return ret;
104     }
105 
106     /* valid for resize only */
107     if (blkcfg.capacity != s->blkcfg.capacity) {
108         s->blkcfg.capacity = blkcfg.capacity;
109         memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config));
110         virtio_notify_config(dev->vdev);
111     }
112 
113     return 0;
114 }
115 
/*
 * Ops table registered via vhost_dev_set_config_notifier() in
 * vhost_user_blk_connect(); routes backend config-space change
 * notifications to vhost_user_blk_handle_config_change().
 */
const VhostDevConfigOps blk_ops = {
    .vhost_dev_config_notifier = vhost_user_blk_handle_config_change,
};
119 
/*
 * Bring up the vhost-user backend: enable host and guest notifiers,
 * hand over the acked feature set, set up the inflight region, and
 * start the vhost device.  Returns 0 on success; on failure returns a
 * negative errno, sets @errp, and unwinds whatever was already set up.
 */
static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i, ret;

    /* Guest notifiers are required to route backend interrupts. */
    if (!k->set_guest_notifiers) {
        error_setg(errp, "binding does not support guest notifiers");
        return -ENOSYS;
    }

    ret = vhost_dev_enable_notifiers(&s->dev, vdev);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error enabling host notifiers");
        return ret;
    }

    ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error binding guest notifier");
        goto err_host_notifiers;
    }

    /* Pass along exactly the features the guest acked. */
    s->dev.acked_features = vdev->guest_features;

    ret = vhost_dev_prepare_inflight(&s->dev, vdev);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error setting inflight format");
        goto err_guest_notifiers;
    }

    /*
     * Only fetch a fresh inflight region when none is mapped yet; an
     * existing one is preserved (presumably so in-flight requests can
     * be resumed across a backend reconnect — see the vhost-user spec).
     */
    if (!s->inflight->addr) {
        ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Error getting inflight");
            goto err_guest_notifiers;
        }
    }

    ret = vhost_dev_set_inflight(&s->dev, s->inflight);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error setting inflight");
        goto err_guest_notifiers;
    }

    ret = vhost_dev_start(&s->dev, vdev);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error starting vhost");
        goto err_guest_notifiers;
    }
    /* Tracks that *we* started the backend; checked by _stop(). */
    s->started_vu = true;

    /* guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < s->dev.nvqs; i++) {
        vhost_virtqueue_mask(&s->dev, vdev, i, false);
    }

    return ret;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&s->dev, vdev);
    return ret;
}
189 
/*
 * Stop the vhost-user backend and release notifiers.  Safe to call
 * when the device was never started (guarded by started_vu).
 */
static void vhost_user_blk_stop(VirtIODevice *vdev)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    /* Only undo what vhost_user_blk_start() actually did. */
    if (!s->started_vu) {
        return;
    }
    s->started_vu = false;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&s->dev, vdev);

    ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
    if (ret < 0) {
        /*
         * NOTE(review): on this failure host notifiers are deliberately
         * left enabled (early return) — we just report and bail out.
         */
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&s->dev, vdev);
}
216 
217 static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
218 {
219     VHostUserBlk *s = VHOST_USER_BLK(vdev);
220     bool should_start = virtio_device_started(vdev, status);
221     Error *local_err = NULL;
222     int ret;
223 
224     if (!vdev->vm_running) {
225         should_start = false;
226     }
227 
228     if (!s->connected) {
229         return;
230     }
231 
232     if (s->dev.started == should_start) {
233         return;
234     }
235 
236     if (should_start) {
237         ret = vhost_user_blk_start(vdev, &local_err);
238         if (ret < 0) {
239             error_reportf_err(local_err, "vhost-user-blk: vhost start failed: ");
240             qemu_chr_fe_disconnect(&s->chardev);
241         }
242     } else {
243         vhost_user_blk_stop(vdev);
244     }
245 
246 }
247 
248 static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
249                                             uint64_t features,
250                                             Error **errp)
251 {
252     VHostUserBlk *s = VHOST_USER_BLK(vdev);
253 
254     /* Turn on pre-defined features */
255     virtio_add_feature(&features, VIRTIO_BLK_F_SIZE_MAX);
256     virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
257     virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
258     virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
259     virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
260     virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH);
261     virtio_add_feature(&features, VIRTIO_BLK_F_RO);
262     virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD);
263     virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES);
264 
265     if (s->config_wce) {
266         virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
267     }
268     if (s->num_queues > 1) {
269         virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
270     }
271 
272     return vhost_get_features(&s->dev, user_feature_bits, features);
273 }
274 
275 static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
276 {
277     VHostUserBlk *s = VHOST_USER_BLK(vdev);
278     Error *local_err = NULL;
279     int i, ret;
280 
281     if (!vdev->start_on_kick) {
282         return;
283     }
284 
285     if (!s->connected) {
286         return;
287     }
288 
289     if (s->dev.started) {
290         return;
291     }
292 
293     /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
294      * vhost here instead of waiting for .set_status().
295      */
296     ret = vhost_user_blk_start(vdev, &local_err);
297     if (ret < 0) {
298         error_reportf_err(local_err, "vhost-user-blk: vhost start failed: ");
299         qemu_chr_fe_disconnect(&s->chardev);
300         return;
301     }
302 
303     /* Kick right away to begin processing requests already in vring */
304     for (i = 0; i < s->dev.nvqs; i++) {
305         VirtQueue *kick_vq = virtio_get_queue(vdev, i);
306 
307         if (!virtio_queue_get_desc_addr(vdev, i)) {
308             continue;
309         }
310         event_notifier_set(virtio_queue_get_host_notifier(kick_vq));
311     }
312 }
313 
/*
 * virtio .reset callback: drop the inflight tracking region so a
 * subsequent start begins clean — presumably this clears the mapping
 * so the !s->inflight->addr check in vhost_user_blk_start() re-fetches
 * it; confirm against vhost_dev_free_inflight().
 */
static void vhost_user_blk_reset(VirtIODevice *vdev)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    vhost_dev_free_inflight(s->inflight);
}
320 
321 static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
322 {
323     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
324     VHostUserBlk *s = VHOST_USER_BLK(vdev);
325     int ret = 0;
326 
327     if (s->connected) {
328         return 0;
329     }
330     s->connected = true;
331 
332     s->dev.num_queues = s->num_queues;
333     s->dev.nvqs = s->num_queues;
334     s->dev.vqs = s->vhost_vqs;
335     s->dev.vq_index = 0;
336     s->dev.backend_features = 0;
337 
338     vhost_dev_set_config_notifier(&s->dev, &blk_ops);
339 
340     s->vhost_user.supports_config = true;
341     ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
342                          errp);
343     if (ret < 0) {
344         return ret;
345     }
346 
347     /* restore vhost state */
348     if (virtio_device_started(vdev, vdev->status)) {
349         ret = vhost_user_blk_start(vdev, errp);
350         if (ret < 0) {
351             return ret;
352         }
353     }
354 
355     return 0;
356 }
357 
/*
 * Tear down the vhost-user connection: stop the backend (no-op if it
 * was never started) and release the vhost_dev resources.  Idempotent
 * via the connected flag, which is cleared first so re-entry is safe.
 */
static void vhost_user_blk_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    if (!s->connected) {
        return;
    }
    s->connected = false;

    vhost_user_blk_stop(vdev);

    vhost_dev_cleanup(&s->dev);
}
372 
/*
 * Bottom half scheduled from the CHR_EVENT_CLOSED handler: performs
 * the deferred disconnect/cleanup and then re-installs the chardev
 * event handler that was removed when the close was detected, so a
 * later reconnect is noticed.
 */
static void vhost_user_blk_chr_closed_bh(void *opaque)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    vhost_user_blk_disconnect(dev);
    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
                             NULL, opaque, NULL, true);
}
383 
/*
 * Chardev event handler driving connect/disconnect of the vhost-user
 * backend.  OPEN connects immediately; CLOSE defers the actual
 * teardown to a bottom half (see comment below).
 */
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    Error *local_err = NULL;

    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_blk_connect(dev, &local_err) < 0) {
            /* Drop the link so another OPEN event can retry. */
            error_report_err(local_err);
            qemu_chr_fe_disconnect(&s->chardev);
            return;
        }
        break;
    case CHR_EVENT_CLOSED:
        /* During shutdown there is nothing to recover; skip cleanup. */
        if (!runstate_check(RUN_STATE_SHUTDOWN)) {
            /*
             * A close event may happen during a read/write, but vhost
             * code assumes the vhost_dev remains setup, so delay the
             * stop & clear.
             */
            AioContext *ctx = qemu_get_current_aio_context();

            /* Mute further events until the BH reinstalls the handler. */
            qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
                    NULL, NULL, false);
            aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);

            /*
             * Move vhost device to the stopped state. The vhost-user device
             * will be clean up and disconnected in BH. This can be useful in
             * the vhost migration code. If disconnect was caught there is an
             * option for the general vhost code to get the dev state without
             * knowing its type (in this case vhost-user).
             */
            s->dev.started = false;
        }
        break;
    case CHR_EVENT_BREAK:
    case CHR_EVENT_MUX_IN:
    case CHR_EVENT_MUX_OUT:
        /* Ignore */
        break;
    }
}
429 
/*
 * One realize-time connection attempt: wait for the chardev to be
 * connected, initialize the vhost-user device, and read the initial
 * config space from the backend.  Returns 0 on success, negative errno
 * (with @errp set) on failure; called in a retry loop by realize.
 */
static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp)
{
    DeviceState *dev = &s->parent_obj.parent_obj;
    int ret;

    s->connected = false;

    ret = qemu_chr_fe_wait_connected(&s->chardev, errp);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_blk_connect(dev, errp);
    if (ret < 0) {
        qemu_chr_fe_disconnect(&s->chardev);
        return ret;
    }
    assert(s->connected);

    /* Seed the cached config (s->blkcfg) from the backend. */
    ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
                               sizeof(struct virtio_blk_config), errp);
    if (ret < 0) {
        /* Undo the successful connect before reporting failure. */
        qemu_chr_fe_disconnect(&s->chardev);
        vhost_dev_cleanup(&s->dev);
        return ret;
    }

    return 0;
}
459 
/*
 * qdev realize: validate properties, set up the virtio device and its
 * queues, then connect to the vhost-user backend with a bounded number
 * of retries.  On failure everything allocated here is torn down and
 * @errp carries the last connection error.
 */
static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
{
    ERRP_GUARD();
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int retries;
    int i, ret;

    if (!s->chardev.chr) {
        error_setg(errp, "chardev is mandatory");
        return;
    }

    /* "auto" queue count resolves to a single queue for this device. */
    if (s->num_queues == VHOST_USER_BLK_AUTO_NUM_QUEUES) {
        s->num_queues = 1;
    }
    if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "invalid number of IO queues");
        return;
    }

    if (!s->queue_size) {
        error_setg(errp, "queue size must be non-zero");
        return;
    }
    if (s->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "queue size must not exceed %d",
                   VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!vhost_user_init(&s->vhost_user, &s->chardev, errp)) {
        return;
    }

    virtio_init(vdev, VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->virtqs = g_new(VirtQueue *, s->num_queues);
    for (i = 0; i < s->num_queues; i++) {
        s->virtqs[i] = virtio_add_queue(vdev, s->queue_size,
                                        vhost_user_blk_handle_output);
    }

    s->inflight = g_new0(struct vhost_inflight, 1);
    s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);

    /*
     * Retry the initial connection a few times; each failed attempt is
     * reported and *errp reset before the next try.
     */
    retries = REALIZE_CONNECTION_RETRIES;
    assert(!*errp);
    do {
        if (*errp) {
            error_prepend(errp, "Reconnecting after error: ");
            error_report_err(*errp);
            *errp = NULL;
        }
        ret = vhost_user_blk_realize_connect(s, errp);
    } while (ret < 0 && retries--);

    if (ret < 0) {
        goto virtio_err;
    }

    /* we're fully initialized, now we can operate, so add the handler */
    qemu_chr_fe_set_handlers(&s->chardev,  NULL, NULL,
                             vhost_user_blk_event, NULL, (void *)dev,
                             NULL, true);
    return;

virtio_err:
    /* Unwind in reverse order of the allocations above. */
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(s->inflight);
    s->inflight = NULL;
    for (i = 0; i < s->num_queues; i++) {
        virtio_delete_queue(s->virtqs[i]);
    }
    g_free(s->virtqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&s->vhost_user);
}
540 
/*
 * qdev unrealize: stop the device and free everything realize set up,
 * roughly in reverse order of allocation.
 */
static void vhost_user_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(dev);
    int i;

    /* Drive the device to the stopped state via the set_status path. */
    virtio_set_status(vdev, 0);
    /* Remove chardev callbacks so no events fire during teardown. */
    qemu_chr_fe_set_handlers(&s->chardev,  NULL, NULL, NULL,
                             NULL, NULL, NULL, false);
    vhost_dev_cleanup(&s->dev);
    vhost_dev_free_inflight(s->inflight);
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(s->inflight);
    s->inflight = NULL;

    for (i = 0; i < s->num_queues; i++) {
        virtio_delete_queue(s->virtqs[i]);
    }
    g_free(s->virtqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&s->vhost_user);
}
564 
/*
 * Instance init: expose a "bootindex" property so this disk can be
 * placed in the boot order ("/disk@0,0" is its boot device path).
 */
static void vhost_user_blk_instance_init(Object *obj)
{
    VHostUserBlk *s = VHOST_USER_BLK(obj);

    device_add_bootindex_property(obj, &s->bootindex, "bootindex",
                                  "/disk@0,0", DEVICE(obj));
}
572 
573 static struct vhost_dev *vhost_user_blk_get_vhost(VirtIODevice *vdev)
574 {
575     VHostUserBlk *s = VHOST_USER_BLK(vdev);
576     return &s->dev;
577 }
578 
/*
 * Migration state: only the generic virtio device state is saved; the
 * backend-side state lives in the external vhost-user process.
 */
static const VMStateDescription vmstate_vhost_user_blk = {
    .name = "vhost-user-blk",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
588 
/*
 * User-configurable properties:
 *  - chardev:    vhost-user socket chardev (mandatory, checked in realize)
 *  - num-queues: virtqueue count (default "auto" -> 1, see realize)
 *  - queue-size: per-queue depth (default 128)
 *  - config-wce: whether to offer VIRTIO_BLK_F_CONFIG_WCE (default on)
 */
static Property vhost_user_blk_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserBlk, chardev),
    DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues,
                       VHOST_USER_BLK_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("queue-size", VHostUserBlk, queue_size, 128),
    DEFINE_PROP_BIT("config-wce", VHostUserBlk, config_wce, 0, true),
    DEFINE_PROP_END_OF_LIST(),
};
597 
/*
 * Class init: wire up qdev properties/vmstate and the VirtioDeviceClass
 * callbacks implemented above.
 */
static void vhost_user_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vhost_user_blk_properties);
    dc->vmsd = &vmstate_vhost_user_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vhost_user_blk_device_realize;
    vdc->unrealize = vhost_user_blk_device_unrealize;
    vdc->get_config = vhost_user_blk_update_config;
    vdc->set_config = vhost_user_blk_set_config;
    vdc->get_features = vhost_user_blk_get_features;
    vdc->set_status = vhost_user_blk_set_status;
    vdc->reset = vhost_user_blk_reset;
    vdc->get_vhost = vhost_user_blk_get_vhost;
}
615 
/* QOM type registration data for the vhost-user-blk device. */
static const TypeInfo vhost_user_blk_info = {
    .name = TYPE_VHOST_USER_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserBlk),
    .instance_init = vhost_user_blk_instance_init,
    .class_init = vhost_user_blk_class_init,
};
623 
/* Register the type with QOM at module-init time. */
static void virtio_register_types(void)
{
    type_register_static(&vhost_user_blk_info);
}

type_init(virtio_register_types)
630