xref: /openbmc/qemu/hw/block/vhost-user-blk.c (revision eba61056)
1 /*
2  * vhost-user-blk host device
3  *
4  * Copyright(C) 2017 Intel Corporation.
5  *
6  * Authors:
7  *  Changpeng Liu <changpeng.liu@intel.com>
8  *
9  * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by:
10  * Felipe Franciosi <felipe@nutanix.com>
11  * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
12  * Nicholas Bellinger <nab@risingtidesystems.com>
13  *
14  * This work is licensed under the terms of the GNU LGPL, version 2 or later.
15  * See the COPYING.LIB file in the top-level directory.
16  *
17  */
18 
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #include "qemu/error-report.h"
22 #include "qemu/cutils.h"
23 #include "hw/qdev-core.h"
24 #include "hw/qdev-properties.h"
25 #include "hw/qdev-properties-system.h"
26 #include "hw/virtio/vhost.h"
27 #include "hw/virtio/vhost-user-blk.h"
28 #include "hw/virtio/virtio.h"
29 #include "hw/virtio/virtio-bus.h"
30 #include "hw/virtio/virtio-access.h"
31 #include "sysemu/sysemu.h"
32 #include "sysemu/runstate.h"
33 
34 #define REALIZE_CONNECTION_RETRIES 3
35 
/*
 * Feature bits that must be negotiated with (and may be restricted by)
 * the vhost-user backend rather than being handled purely in QEMU.
 * vhost_get_features() masks the frontend's offered features against
 * this list; the array is terminated by VHOST_INVALID_FEATURE_BIT.
 */
static const int user_feature_bits[] = {
    VIRTIO_BLK_F_SIZE_MAX,
    VIRTIO_BLK_F_SEG_MAX,
    VIRTIO_BLK_F_GEOMETRY,
    VIRTIO_BLK_F_BLK_SIZE,
    VIRTIO_BLK_F_TOPOLOGY,
    VIRTIO_BLK_F_MQ,
    VIRTIO_BLK_F_RO,
    VIRTIO_BLK_F_FLUSH,
    VIRTIO_BLK_F_CONFIG_WCE,
    VIRTIO_BLK_F_DISCARD,
    VIRTIO_BLK_F_WRITE_ZEROES,
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_IOMMU_PLATFORM,
    VHOST_INVALID_FEATURE_BIT
};
56 
57 static void vhost_user_blk_event(void *opaque, QEMUChrEvent event);
58 
59 static void vhost_user_blk_update_config(VirtIODevice *vdev, uint8_t *config)
60 {
61     VHostUserBlk *s = VHOST_USER_BLK(vdev);
62 
63     /* Our num_queues overrides the device backend */
64     virtio_stw_p(vdev, &s->blkcfg.num_queues, s->num_queues);
65 
66     memcpy(config, &s->blkcfg, sizeof(struct virtio_blk_config));
67 }
68 
69 static void vhost_user_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
70 {
71     VHostUserBlk *s = VHOST_USER_BLK(vdev);
72     struct virtio_blk_config *blkcfg = (struct virtio_blk_config *)config;
73     int ret;
74 
75     if (blkcfg->wce == s->blkcfg.wce) {
76         return;
77     }
78 
79     ret = vhost_dev_set_config(&s->dev, &blkcfg->wce,
80                                offsetof(struct virtio_blk_config, wce),
81                                sizeof(blkcfg->wce),
82                                VHOST_SET_CONFIG_TYPE_MASTER);
83     if (ret) {
84         error_report("set device config space failed");
85         return;
86     }
87 
88     s->blkcfg.wce = blkcfg->wce;
89 }
90 
/*
 * vhost-user config-change notifier: the backend signalled that its
 * config space changed.  Re-read it and propagate a capacity change
 * (disk resize) to the guest; every other field is deliberately
 * ignored here.  Returns 0 on success, -1 if the config read failed.
 */
static int vhost_user_blk_handle_config_change(struct vhost_dev *dev)
{
    int ret;
    struct virtio_blk_config blkcfg;
    VHostUserBlk *s = VHOST_USER_BLK(dev->vdev);
    Error *local_err = NULL;

    ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg,
                               sizeof(struct virtio_blk_config),
                               &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        return -1;
    }

    /* valid for resize only */
    if (blkcfg.capacity != s->blkcfg.capacity) {
        s->blkcfg.capacity = blkcfg.capacity;
        /* Refresh the live config window and raise a config interrupt. */
        memcpy(dev->vdev->config, &s->blkcfg, sizeof(struct virtio_blk_config));
        virtio_notify_config(dev->vdev);
    }

    return 0;
}
115 
116 const VhostDevConfigOps blk_ops = {
117     .vhost_dev_config_notifier = vhost_user_blk_handle_config_change,
118 };
119 
/*
 * Bring the vhost-user backend up for the running guest: enable host
 * and guest notifiers, hand over negotiated features and the inflight
 * region, then start the vhost device.  Requires a connected backend.
 * Returns 0 on success or a negative errno (with @errp set); on error
 * all notifiers set up so far are torn down again.
 */
static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i, ret;

    if (!k->set_guest_notifiers) {
        error_setg(errp, "binding does not support guest notifiers");
        return -ENOSYS;
    }

    ret = vhost_dev_enable_notifiers(&s->dev, vdev);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error enabling host notifiers");
        return ret;
    }

    ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error binding guest notifier");
        goto err_host_notifiers;
    }

    /* Tell the backend exactly what the guest negotiated. */
    s->dev.acked_features = vdev->guest_features;

    ret = vhost_dev_prepare_inflight(&s->dev, vdev);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error setting inflight format");
        goto err_guest_notifiers;
    }

    /*
     * Fetch an inflight region from the backend only on first start;
     * afterwards the cached region is resubmitted unchanged, which is
     * what lets in-flight requests be recovered across reconnects.
     */
    if (!s->inflight->addr) {
        ret = vhost_dev_get_inflight(&s->dev, s->queue_size, s->inflight);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Error getting inflight");
            goto err_guest_notifiers;
        }
    }

    ret = vhost_dev_set_inflight(&s->dev, s->inflight);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error setting inflight");
        goto err_guest_notifiers;
    }

    ret = vhost_dev_start(&s->dev, vdev);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Error starting vhost");
        goto err_guest_notifiers;
    }
    s->started_vu = true;

    /* guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < s->dev.nvqs; i++) {
        vhost_virtqueue_mask(&s->dev, vdev, i, false);
    }

    return ret;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&s->dev, vdev);
    return ret;
}
189 
/*
 * Stop the vhost device and release notifiers.  Idempotent: guarded by
 * s->started_vu so a stop after the backend already disconnected (or a
 * double stop) is a no-op.
 */
static void vhost_user_blk_stop(VirtIODevice *vdev)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!s->started_vu) {
        return;
    }
    s->started_vu = false;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&s->dev, vdev);

    ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        /* NOTE(review): host notifiers stay enabled on this path. */
        return;
    }

    vhost_dev_disable_notifiers(&s->dev, vdev);
}
216 
/*
 * virtio .set_status: start or stop the backend so it tracks the
 * guest's DRIVER_OK state.  If starting fails we force a chardev
 * disconnect so the reconnect machinery can attempt recovery.
 */
static void vhost_user_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    bool should_start = virtio_device_started(vdev, status);
    Error *local_err = NULL;
    int ret;

    /* Never run the backend while the VM itself is stopped. */
    if (!vdev->vm_running) {
        should_start = false;
    }

    /* Backend away: vhost_user_blk_connect() restarts us on reconnect. */
    if (!s->connected) {
        return;
    }

    /* Already in the desired state. */
    if (s->dev.started == should_start) {
        return;
    }

    if (should_start) {
        ret = vhost_user_blk_start(vdev, &local_err);
        if (ret < 0) {
            error_reportf_err(local_err, "vhost-user-blk: vhost start failed: ");
            qemu_chr_fe_disconnect(&s->chardev);
        }
    } else {
        vhost_user_blk_stop(vdev);
    }

}
247 
248 static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev,
249                                             uint64_t features,
250                                             Error **errp)
251 {
252     VHostUserBlk *s = VHOST_USER_BLK(vdev);
253 
254     /* Turn on pre-defined features */
255     virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
256     virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
257     virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
258     virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
259     virtio_add_feature(&features, VIRTIO_BLK_F_FLUSH);
260     virtio_add_feature(&features, VIRTIO_BLK_F_RO);
261     virtio_add_feature(&features, VIRTIO_BLK_F_DISCARD);
262     virtio_add_feature(&features, VIRTIO_BLK_F_WRITE_ZEROES);
263 
264     if (s->config_wce) {
265         virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
266     }
267     if (s->num_queues > 1) {
268         virtio_add_feature(&features, VIRTIO_BLK_F_MQ);
269     }
270 
271     return vhost_get_features(&s->dev, user_feature_bits, features);
272 }
273 
/*
 * Virtqueue kick handler, only reached while vhost is NOT running (once
 * started, kicks go straight to the backend via the host notifiers).
 * Used to start the device for guests that kick before DRIVER_OK.
 */
static void vhost_user_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    Error *local_err = NULL;
    int i, ret;

    if (!vdev->start_on_kick) {
        return;
    }

    if (!s->connected) {
        return;
    }

    if (s->dev.started) {
        return;
    }

    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * vhost here instead of waiting for .set_status().
     */
    ret = vhost_user_blk_start(vdev, &local_err);
    if (ret < 0) {
        error_reportf_err(local_err, "vhost-user-blk: vhost start failed: ");
        qemu_chr_fe_disconnect(&s->chardev);
        return;
    }

    /* Kick right away to begin processing requests already in vring */
    for (i = 0; i < s->dev.nvqs; i++) {
        VirtQueue *kick_vq = virtio_get_queue(vdev, i);

        /* Skip queues the guest never enabled. */
        if (!virtio_queue_get_desc_addr(vdev, i)) {
            continue;
        }
        event_notifier_set(virtio_queue_get_host_notifier(kick_vq));
    }
}
312 
313 static void vhost_user_blk_reset(VirtIODevice *vdev)
314 {
315     VHostUserBlk *s = VHOST_USER_BLK(vdev);
316 
317     vhost_dev_free_inflight(s->inflight);
318 }
319 
/*
 * Chardev-open path: (re-)initialise the vhost device for a freshly
 * connected backend and, if the guest already started the device,
 * restore the running vhost state.  Idempotent via s->connected.
 * Returns 0 on success or a negative errno with @errp set.
 */
static int vhost_user_blk_connect(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int ret = 0;

    if (s->connected) {
        return 0;
    }
    s->connected = true;

    /* Reset the per-connection vhost_dev description. */
    s->dev.num_queues = s->num_queues;
    s->dev.nvqs = s->num_queues;
    s->dev.vqs = s->vhost_vqs;
    s->dev.vq_index = 0;
    s->dev.backend_features = 0;

    /* Get notified when the backend's config space changes. */
    vhost_dev_set_config_notifier(&s->dev, &blk_ops);

    ret = vhost_dev_init(&s->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
                         errp);
    if (ret < 0) {
        return ret;
    }

    /* restore vhost state */
    if (virtio_device_started(vdev, vdev->status)) {
        ret = vhost_user_blk_start(vdev, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
355 
/*
 * Chardev-close path (run from a BH, see vhost_user_blk_chr_closed_bh):
 * stop the device and release the vhost_dev resources.  Idempotent via
 * s->connected; the paired setup is vhost_user_blk_connect().
 */
static void vhost_user_blk_disconnect(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    if (!s->connected) {
        return;
    }
    s->connected = false;

    vhost_user_blk_stop(vdev);

    vhost_dev_cleanup(&s->dev);
}
370 
/*
 * Bottom half scheduled from the CHR_EVENT_CLOSED handler: perform the
 * deferred disconnect, then re-register the chardev event handler that
 * was detached while the BH was pending (so a later reconnect is seen).
 */
static void vhost_user_blk_chr_closed_bh(void *opaque)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);

    vhost_user_blk_disconnect(dev);
    qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, vhost_user_blk_event,
                             NULL, opaque, NULL, true);
}
381 
/*
 * Chardev event handler driving the backend connect/disconnect state
 * machine.  OPEN connects immediately; CLOSE is deferred to a bottom
 * half because the event can arrive in the middle of vhost I/O.
 */
static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
{
    DeviceState *dev = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    Error *local_err = NULL;

    switch (event) {
    case CHR_EVENT_OPENED:
        if (vhost_user_blk_connect(dev, &local_err) < 0) {
            error_report_err(local_err);
            /* Force a reconnect attempt rather than limp along. */
            qemu_chr_fe_disconnect(&s->chardev);
            return;
        }
        break;
    case CHR_EVENT_CLOSED:
        if (!runstate_check(RUN_STATE_SHUTDOWN)) {
            /*
             * A close event may happen during a read/write, but vhost
             * code assumes the vhost_dev remains setup, so delay the
             * stop & clear.
             */
            AioContext *ctx = qemu_get_current_aio_context();

            /* Mute chardev events until the BH has run. */
            qemu_chr_fe_set_handlers(&s->chardev, NULL, NULL, NULL, NULL,
                    NULL, NULL, false);
            aio_bh_schedule_oneshot(ctx, vhost_user_blk_chr_closed_bh, opaque);

            /*
             * Move vhost device to the stopped state. The vhost-user device
             * will be clean up and disconnected in BH. This can be useful in
             * the vhost migration code. If disconnect was caught there is an
             * option for the general vhost code to get the dev state without
             * knowing its type (in this case vhost-user).
             */
            s->dev.started = false;
        }
        break;
    case CHR_EVENT_BREAK:
    case CHR_EVENT_MUX_IN:
    case CHR_EVENT_MUX_OUT:
        /* Ignore */
        break;
    }
}
427 
/*
 * One realize-time connection attempt: wait (synchronously) for the
 * backend, connect, and read the initial virtio-blk config space into
 * s->blkcfg.  On failure the connection is torn down so the caller can
 * retry (see the retry loop in vhost_user_blk_device_realize()).
 * Returns 0 on success or a negative errno with @errp set.
 */
static int vhost_user_blk_realize_connect(VHostUserBlk *s, Error **errp)
{
    DeviceState *dev = &s->parent_obj.parent_obj;
    int ret;

    s->connected = false;

    ret = qemu_chr_fe_wait_connected(&s->chardev, errp);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_blk_connect(dev, errp);
    if (ret < 0) {
        qemu_chr_fe_disconnect(&s->chardev);
        return ret;
    }
    assert(s->connected);

    ret = vhost_dev_get_config(&s->dev, (uint8_t *)&s->blkcfg,
                               sizeof(struct virtio_blk_config), errp);
    if (ret < 0) {
        qemu_chr_fe_disconnect(&s->chardev);
        vhost_dev_cleanup(&s->dev);
        return ret;
    }

    return 0;
}
457 
/*
 * qdev realize: validate properties, create the virtio device and its
 * queues, then connect to the vhost-user backend, retrying a few times
 * on protocol errors (-EPROTO) before giving up.  Only after a
 * successful connect is the async chardev event handler installed.
 */
static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
{
    ERRP_GUARD();
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(vdev);
    int retries;
    int i, ret;

    if (!s->chardev.chr) {
        error_setg(errp, "chardev is mandatory");
        return;
    }

    /* "auto" queue count resolves to a single queue. */
    if (s->num_queues == VHOST_USER_BLK_AUTO_NUM_QUEUES) {
        s->num_queues = 1;
    }
    if (!s->num_queues || s->num_queues > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "invalid number of IO queues");
        return;
    }

    if (!s->queue_size) {
        error_setg(errp, "queue size must be non-zero");
        return;
    }
    if (s->queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "queue size must not exceed %d",
                   VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!vhost_user_init(&s->vhost_user, &s->chardev, errp)) {
        return;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->virtqs = g_new(VirtQueue *, s->num_queues);
    for (i = 0; i < s->num_queues; i++) {
        s->virtqs[i] = virtio_add_queue(vdev, s->queue_size,
                                        vhost_user_blk_handle_output);
    }

    s->inflight = g_new0(struct vhost_inflight, 1);
    s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);

    /* Retry only on -EPROTO; other errors are considered fatal. */
    retries = REALIZE_CONNECTION_RETRIES;
    assert(!*errp);
    do {
        if (*errp) {
            /* Report the previous attempt's error before retrying. */
            error_prepend(errp, "Reconnecting after error: ");
            error_report_err(*errp);
            *errp = NULL;
        }
        ret = vhost_user_blk_realize_connect(s, errp);
    } while (ret == -EPROTO && retries--);

    if (ret < 0) {
        goto virtio_err;
    }

    /* we're fully initialized, now we can operate, so add the handler */
    qemu_chr_fe_set_handlers(&s->chardev,  NULL, NULL,
                             vhost_user_blk_event, NULL, (void *)dev,
                             NULL, true);
    return;

virtio_err:
    /* Undo everything allocated above, in reverse order. */
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(s->inflight);
    s->inflight = NULL;
    for (i = 0; i < s->num_queues; i++) {
        virtio_delete_queue(s->virtqs[i]);
    }
    g_free(s->virtqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&s->vhost_user);
}
538 
/*
 * qdev unrealize: mirror of realize.  Stop the device (status 0),
 * detach chardev handlers so no events race with teardown, then free
 * vhost state, queues and the virtio device.
 */
static void vhost_user_blk_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserBlk *s = VHOST_USER_BLK(dev);
    int i;

    /* Drives .set_status -> vhost_user_blk_stop() if still running. */
    virtio_set_status(vdev, 0);
    qemu_chr_fe_set_handlers(&s->chardev,  NULL, NULL, NULL,
                             NULL, NULL, NULL, false);
    vhost_dev_cleanup(&s->dev);
    vhost_dev_free_inflight(s->inflight);
    g_free(s->vhost_vqs);
    s->vhost_vqs = NULL;
    g_free(s->inflight);
    s->inflight = NULL;

    for (i = 0; i < s->num_queues; i++) {
        virtio_delete_queue(s->virtqs[i]);
    }
    g_free(s->virtqs);
    virtio_cleanup(vdev);
    vhost_user_cleanup(&s->vhost_user);
}
562 
563 static void vhost_user_blk_instance_init(Object *obj)
564 {
565     VHostUserBlk *s = VHOST_USER_BLK(obj);
566 
567     device_add_bootindex_property(obj, &s->bootindex, "bootindex",
568                                   "/disk@0,0", DEVICE(obj));
569 }
570 
/*
 * Migration description: only the generic virtio device state goes
 * into the migration stream; no vhost-user-blk specific fields.
 */
static const VMStateDescription vmstate_vhost_user_blk = {
    .name = "vhost-user-blk",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
580 
static Property vhost_user_blk_properties[] = {
    /* Socket to the vhost-user backend (mandatory, checked at realize). */
    DEFINE_PROP_CHR("chardev", VHostUserBlk, chardev),
    /* Defaults to "auto", which realize resolves to a single queue. */
    DEFINE_PROP_UINT16("num-queues", VHostUserBlk, num_queues,
                       VHOST_USER_BLK_AUTO_NUM_QUEUES),
    /* Virtqueue depth; must be 1..VIRTQUEUE_MAX_SIZE. */
    DEFINE_PROP_UINT32("queue-size", VHostUserBlk, queue_size, 128),
    /* Whether to offer VIRTIO_BLK_F_CONFIG_WCE (on by default). */
    DEFINE_PROP_BIT("config-wce", VHostUserBlk, config_wce, 0, true),
    DEFINE_PROP_END_OF_LIST(),
};
589 
/* QOM class init: hook up qdev properties and the VirtioDeviceClass ops. */
static void vhost_user_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vhost_user_blk_properties);
    dc->vmsd = &vmstate_vhost_user_blk;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vhost_user_blk_device_realize;
    vdc->unrealize = vhost_user_blk_device_unrealize;
    vdc->get_config = vhost_user_blk_update_config;
    vdc->set_config = vhost_user_blk_set_config;
    vdc->get_features = vhost_user_blk_get_features;
    vdc->set_status = vhost_user_blk_set_status;
    vdc->reset = vhost_user_blk_reset;
}
606 
/* QOM type registration data for TYPE_VHOST_USER_BLK. */
static const TypeInfo vhost_user_blk_info = {
    .name = TYPE_VHOST_USER_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserBlk),
    .instance_init = vhost_user_blk_instance_init,
    .class_init = vhost_user_blk_class_init,
};
614 
/* Register the device type with QOM at module-load time. */
static void virtio_register_types(void)
{
    type_register_static(&vhost_user_blk_info);
}

type_init(virtio_register_types)
621