/*
 * Vhost-user filesystem virtio device
 *
 * Copyright 2018-2019 Red Hat, Inc.
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version. See the COPYING file in the
 * top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include "standard-headers/linux/virtio_fs.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "qemu/error-report.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user-fs.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"

static const int user_feature_bits[] = {
    VIRTIO_F_VERSION_1,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_RESET,

    VHOST_INVALID_FEATURE_BIT
};

static void vuf_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    struct virtio_fs_config fscfg = {};

    memcpy((char *)fscfg.tag, fs->conf.tag,
           MIN(strlen(fs->conf.tag) + 1, sizeof(fscfg.tag)));

    virtio_stl_p(vdev, &fscfg.num_request_queues, fs->conf.num_request_queues);

    memcpy(config, &fscfg, sizeof(fscfg));
}

static void vuf_start(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;
    int i;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return;
    }

    ret = vhost_dev_enable_notifiers(&fs->vhost_dev, vdev);
    if (ret < 0) {
        error_report("Error enabling host notifiers: %d", -ret);
        return;
    }

    ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, true);
    if (ret < 0) {
        error_report("Error binding guest notifier: %d", -ret);
        goto err_host_notifiers;
    }

    fs->vhost_dev.acked_features = vdev->guest_features;
    ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
    if (ret < 0) {
        error_report("Error starting vhost: %d", -ret);
        goto err_guest_notifiers;
    }

    /*
     * guest_notifier_mask/pending not used yet, so just unmask
     * everything here. virtio-pci will do the right thing by
     * enabling/disabling irqfd.
     */
    for (i = 0; i < fs->vhost_dev.nvqs; i++) {
        vhost_virtqueue_mask(&fs->vhost_dev, vdev, i, false);
    }

    return;

err_guest_notifiers:
    k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
err_host_notifiers:
    vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}

static void vuf_stop(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int ret;

    if (!k->set_guest_notifiers) {
        return;
    }

    vhost_dev_stop(&fs->vhost_dev, vdev, true);

    ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
    if (ret < 0) {
        error_report("vhost guest notifier cleanup failed: %d", ret);
        return;
    }

    vhost_dev_disable_notifiers(&fs->vhost_dev, vdev);
}

static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    bool should_start = virtio_device_should_start(vdev, status);

    if (vhost_dev_is_started(&fs->vhost_dev) == should_start) {
        return;
    }

    if (should_start) {
        vuf_start(vdev);
    } else {
        vuf_stop(vdev);
    }
}

static uint64_t vuf_get_features(VirtIODevice *vdev,
                                 uint64_t features,
                                 Error **errp)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    return vhost_get_features(&fs->vhost_dev, user_feature_bits, features);
}

static void vuf_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    /*
     * Not normally called; it's the daemon that handles the queue;
     * however virtio's cleanup path can call this.
     */
}

static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                    bool mask)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    /*
     * The configuration change interrupt uses VIRTIO_CONFIG_IRQ_IDX (-1)
     * as its index. This device does not support masking it through
     * vhost, so simply return.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }
    vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
}

static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    /*
     * The configuration change interrupt uses VIRTIO_CONFIG_IRQ_IDX (-1)
     * as its index. This device does not support it through vhost, so
     * report it as not pending.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }
    return vhost_virtqueue_pending(&fs->vhost_dev, idx);
}

static void vuf_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserFS *fs = VHOST_USER_FS(dev);
    unsigned int i;
    size_t len;
    int ret;

    if (!fs->conf.chardev.chr) {
        error_setg(errp, "missing chardev");
        return;
    }

    if (!fs->conf.tag) {
        error_setg(errp, "missing tag property");
        return;
    }
    len = strlen(fs->conf.tag);
    if (len == 0) {
        error_setg(errp, "tag property cannot be empty");
        return;
    }
    if (len > sizeof_field(struct virtio_fs_config, tag)) {
        error_setg(errp, "tag property must be %zu bytes or less",
                   sizeof_field(struct virtio_fs_config, tag));
        return;
    }

    if (fs->conf.num_request_queues == 0) {
        error_setg(errp, "num-request-queues property must be larger than 0");
        return;
    }

    if (!is_power_of_2(fs->conf.queue_size)) {
        error_setg(errp, "queue-size property must be a power of 2");
        return;
    }

    if (fs->conf.queue_size > VIRTQUEUE_MAX_SIZE) {
        error_setg(errp, "queue-size property must be %u or smaller",
                   VIRTQUEUE_MAX_SIZE);
        return;
    }

    if (!vhost_user_init(&fs->vhost_user, &fs->conf.chardev, errp)) {
        return;
    }

    virtio_init(vdev, VIRTIO_ID_FS, sizeof(struct virtio_fs_config));

    /* Hiprio queue */
    fs->hiprio_vq = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);

    /* Request queues */
    fs->req_vqs = g_new(VirtQueue *, fs->conf.num_request_queues);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        fs->req_vqs[i] = virtio_add_queue(vdev, fs->conf.queue_size, vuf_handle_output);
    }

    /* 1 high prio queue, plus the number configured */
    fs->vhost_dev.nvqs = 1 + fs->conf.num_request_queues;
    fs->vhost_dev.vqs = g_new0(struct vhost_virtqueue, fs->vhost_dev.nvqs);
    ret = vhost_dev_init(&fs->vhost_dev, &fs->vhost_user,
                         VHOST_BACKEND_TYPE_USER, 0, errp);
    if (ret < 0) {
        goto err_virtio;
    }

    return;

err_virtio:
    vhost_user_cleanup(&fs->vhost_user);
    virtio_delete_queue(fs->hiprio_vq);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        virtio_delete_queue(fs->req_vqs[i]);
    }
    g_free(fs->req_vqs);
    virtio_cleanup(vdev);
    g_free(fs->vhost_dev.vqs);
    return;
}

static void vuf_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserFS *fs = VHOST_USER_FS(dev);
    struct vhost_virtqueue *vhost_vqs = fs->vhost_dev.vqs;
    int i;

    /* This will stop vhost backend if appropriate. */
    vuf_set_status(vdev, 0);

    vhost_dev_cleanup(&fs->vhost_dev);

    vhost_user_cleanup(&fs->vhost_user);

    virtio_delete_queue(fs->hiprio_vq);
    for (i = 0; i < fs->conf.num_request_queues; i++) {
        virtio_delete_queue(fs->req_vqs[i]);
    }
    g_free(fs->req_vqs);
    virtio_cleanup(vdev);
    g_free(vhost_vqs);
}

static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev)
{
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    return &fs->vhost_dev;
}

/**
 * Fetch the internal state from virtiofsd and save it to `f`.
 */
static int vuf_save_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIODevice *vdev = pv;
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    Error *local_error = NULL;
    int ret;

    ret = vhost_save_backend_state(&fs->vhost_dev, f, &local_error);
    if (ret < 0) {
        error_reportf_err(local_error,
                          "Error saving back-end state of %s device %s "
                          "(tag: \"%s\"): ",
                          vdev->name, vdev->parent_obj.canonical_path,
                          fs->conf.tag ?: "<none>");
        return ret;
    }

    return 0;
}

/**
 * Load virtiofsd's internal state from `f` and send it over to virtiofsd.
 */
static int vuf_load_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    VirtIODevice *vdev = pv;
    VHostUserFS *fs = VHOST_USER_FS(vdev);
    Error *local_error = NULL;
    int ret;

    ret = vhost_load_backend_state(&fs->vhost_dev, f, &local_error);
    if (ret < 0) {
        error_reportf_err(local_error,
                          "Error loading back-end state of %s device %s "
                          "(tag: \"%s\"): ",
                          vdev->name, vdev->parent_obj.canonical_path,
                          fs->conf.tag ?: "<none>");
        return ret;
    }

    return 0;
}

static bool vuf_is_internal_migration(void *opaque)
{
    /* TODO: Return false when an external migration is requested */
    return true;
}

static int vuf_check_migration_support(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VHostUserFS *fs = VHOST_USER_FS(vdev);

    if (!vhost_supports_device_state(&fs->vhost_dev)) {
        error_report("Back-end of %s device %s (tag: \"%s\") does not support "
                     "migration through qemu",
                     vdev->name, vdev->parent_obj.canonical_path,
                     fs->conf.tag ?: "<none>");
        return -ENOTSUP;
    }

    return 0;
}

static const VMStateDescription vuf_backend_vmstate;

static const VMStateDescription vuf_vmstate = {
    .name = "vhost-user-fs",
    .version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vuf_backend_vmstate,
        NULL,
    }
};

static const VMStateDescription vuf_backend_vmstate = {
    .name = "vhost-user-fs-backend",
    .version_id = 0,
    .needed = vuf_is_internal_migration,
    .pre_load = vuf_check_migration_support,
    .pre_save = vuf_check_migration_support,
    .fields = (const VMStateField[]) {
        {
            .name = "back-end",
            .info = &(const VMStateInfo) {
                .name = "virtio-fs back-end state",
                .get = vuf_load_state,
                .put = vuf_save_state,
            },
        },
        VMSTATE_END_OF_LIST()
    },
};

static Property vuf_properties[] = {
    DEFINE_PROP_CHR("chardev", VHostUserFS, conf.chardev),
    DEFINE_PROP_STRING("tag", VHostUserFS, conf.tag),
    DEFINE_PROP_UINT16("num-request-queues", VHostUserFS,
                       conf.num_request_queues, 1),
    DEFINE_PROP_UINT16("queue-size", VHostUserFS, conf.queue_size, 128),
    DEFINE_PROP_END_OF_LIST(),
};

static void vuf_instance_init(Object *obj)
{
    VHostUserFS *fs = VHOST_USER_FS(obj);

    device_add_bootindex_property(obj, &fs->bootindex, "bootindex",
                                  "/filesystem@0", DEVICE(obj));
}

static void vuf_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, vuf_properties);
    dc->vmsd = &vuf_vmstate;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vuf_device_realize;
    vdc->unrealize = vuf_device_unrealize;
    vdc->get_features = vuf_get_features;
    vdc->get_config = vuf_get_config;
    vdc->set_status = vuf_set_status;
    vdc->guest_notifier_mask = vuf_guest_notifier_mask;
    vdc->guest_notifier_pending = vuf_guest_notifier_pending;
    vdc->get_vhost = vuf_get_vhost;
}

static const TypeInfo vuf_info = {
    .name = TYPE_VHOST_USER_FS,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VHostUserFS),
    .instance_init = vuf_instance_init,
    .class_init = vuf_class_init,
};

static void vuf_register_types(void)
{
    type_register_static(&vuf_info);
}

type_init(vuf_register_types)
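
/*
 * Usage sketch (illustrative only, not part of the upstream file): this
 * device is served by an external vhost-user daemon such as virtiofsd,
 * reached through the "chardev" socket, and it requires shareable guest
 * RAM. The socket path, shared directory, tag and queue-size below are
 * example values; virtiofsd option spellings vary between versions.
 *
 *   # daemon exporting a host directory over the vhost-user socket
 *   virtiofsd --socket-path=/tmp/vhostqemu --shared-dir=/srv/share
 *
 *   # QEMU with shared memory and the vhost-user-fs device (PCI proxy)
 *   qemu-system-x86_64 \
 *       -object memory-backend-memfd,id=mem,size=4G,share=on \
 *       -numa node,memdev=mem \
 *       -chardev socket,id=char0,path=/tmp/vhostqemu \
 *       -device vhost-user-fs-pci,chardev=char0,tag=myfs,queue-size=1024 \
 *       ...
 *
 *   # inside the guest, the filesystem is mounted by its tag
 *   mount -t virtiofs myfs /mnt
 */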