#ifndef VHOST_H
#define VHOST_H

#include "net/vhost_net.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "system/memory.h"

#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30

#define VU_REALIZE_CONN_RETRIES 3

/* Generic structures common for any vhost based device. */

struct vhost_inflight {
    int fd;
    void *addr;
    uint64_t size;
    uint64_t offset;
    uint16_t queue_size;
};

struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    EventNotifier masked_notifier;
    EventNotifier error_notifier;
    EventNotifier masked_config_notifier;
    struct vhost_dev *dev;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT (0xff)
#define VHOST_QUEUE_NUM_CONFIG_INR 0

struct vhost_log {
    unsigned long long size;
    int refcnt;
    int fd;
    vhost_log_chunk_t *log;
};

struct vhost_dev;
struct vhost_iommu {
    struct vhost_dev *hdev;
    MemoryRegion *mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vhost_iommu) iommu_next;
};

typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback
     */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;

struct vhost_memory;

/**
 * struct vhost_dev - common vhost_dev structure
 * @vhost_ops: backend specific ops
 * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
 */
struct vhost_dev {
    VirtIODevice *vdev;
    MemoryListener memory_listener;
    MemoryListener iommu_listener;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* one past the last vq index for the virtio device (not vhost) */
    int vq_index_end;
    /* if non-zero, minimum required value for max_queues */
    int num_queues;
    /**
     * vhost feature handling requires matching the feature set
     * offered by a backend which may be a subset of the total
     * features eventually offered to the guest.
     *
     * @features: available features provided by the backend
     * @acked_features: final negotiated features with front-end driver
     *
     * @backend_features: this is used in a couple of places to either
     * store VHOST_USER_F_PROTOCOL_FEATURES to apply to
     * VHOST_USER_SET_FEATURES or VHOST_NET_F_VIRTIO_NET_HDR. Its
     * future use should be discouraged and the variable retired as
     * it's easy to confuse with the VirtIO backend_features.
     */
    uint64_t features;
    uint64_t acked_features;
    uint64_t backend_features;

    /**
     * @protocol_features: is the vhost-user only feature set by
     * VHOST_USER_SET_PROTOCOL_FEATURES. Protocol features are only
     * negotiated if VHOST_USER_F_PROTOCOL_FEATURES has been offered
     * by the backend (see @features).
     */
    uint64_t protocol_features;

    uint64_t max_queues;
    uint64_t backend_cap;
    /* @started: is the vhost device started? */
    bool started;
    bool log_enabled;
    uint64_t log_size;
    Error *migration_blocker;
    const VhostOps *vhost_ops;
    void *opaque;
    struct vhost_log *log;
    QLIST_ENTRY(vhost_dev) entry;
    QLIST_ENTRY(vhost_dev) logdev_entry;
    QLIST_HEAD(, vhost_iommu) iommu_list;
    IOMMUNotifier n;
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
    int backend;
    const int *feature_bits;
    int max_tx_queue_size;
    SaveAcketFeatures *save_acked_features;
    NetClientState *nc;
};

/**
 * vhost_dev_init() - initialise the vhost interface
 * @hdev: the common vhost_dev structure
 * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
 * @backend_type: type of backend
 * @busyloop_timeout: timeout for polling virtqueue
 * @errp: error handle
 *
 * The initialisation of the vhost device will trigger the
 * initialisation of the backend and potentially capability
 * negotiation of backend interface. Configuration of the VirtIO
 * itself won't happen until the interface is started.
 *
 * Return: 0 on success, non-zero on error while setting errp.
 */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type,
                   uint32_t busyloop_timeout, Error **errp);

/**
 * vhost_dev_cleanup() - tear down and cleanup vhost interface
 * @hdev: the common vhost_dev structure
 */
void vhost_dev_cleanup(struct vhost_dev *hdev);

void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
                                      VirtIODevice *vdev,
                                      unsigned int nvqs);

/**
 * vhost_dev_enable_notifiers() - enable event notifiers
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Enable notifications directly to the vhost device rather than being
 * triggered by QEMU itself. Notifications should be enabled before
 * the vhost device is started via @vhost_dev_start.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_disable_notifiers() - disable event notifications
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Disable direct notifications to vhost device.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
bool vhost_config_pending(struct vhost_dev *hdev);
void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);

/**
 * vhost_dev_is_started() - report status of vhost device
 * @hdev: common vhost_dev structure
 *
 * Return the started status of the vhost device
 */
static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
{
    return hdev->started;
}

/**
 * vhost_dev_start() - start the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 * @vrings: true to have vrings enabled in this call
 *
 * Starts the vhost device. From this point VirtIO feature negotiation
 * can start and the device can start processing VirtIO transactions.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);

/**
 * vhost_dev_stop() - stop the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 * @vrings: true to have vrings disabled in this call
 *
 * Stop the vhost device. After the device is stopped the notifiers
 * can be disabled (@vhost_dev_disable_notifiers) and the device can
 * be torn down (@vhost_dev_cleanup).
 *
 * Return: 0 on success, != 0 on error when stopping dev.
 */
int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);

/**
 * DOC: vhost device configuration handling
 *
 * The VirtIO device configuration space is used for rarely changing
 * or initialisation time parameters. The configuration can be updated
 * by either the guest driver or the device itself. If the device can
 * change the configuration over time the vhost handler should
 * register a @VhostDevConfigOps structure with
 * @vhost_dev_set_config_notifier so the guest can be notified. Some
 * devices register a handler anyway and will signal an error if an
 * unexpected config change happens.
 */

/**
 * vhost_dev_get_config() - fetch device configuration
 * @hdev: common vhost_dev structure
 * @config: pointer to device appropriate config structure
 * @config_len: size of device appropriate config structure
 * @errp: error handle
 *
 * Return: 0 on success, < 0 on error while setting errp
 */
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len, Error **errp);

/**
 * vhost_dev_set_config() - set device configuration
 * @dev: common vhost_dev structure
 * @data: pointer to data to set
 * @offset: offset into configuration space
 * @size: length of set
 * @flags: @VhostSetConfigType flags
 *
 * By use of @offset/@size a subset of the configuration space can be
 * written to. The @flags are used to indicate if it is a normal
 * transaction or related to migration.
 *
 * Return: 0 on success, non-zero on error
 */
int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags);

/**
 * vhost_dev_set_config_notifier() - register VhostDevConfigOps
 * @dev: common vhost_dev structure
 * @ops: notifier ops
 *
 * If the device is expected to change configuration a notifier can be
 * setup to handle the case.
 */
void vhost_dev_set_config_notifier(struct vhost_dev *dev,
                                   const VhostDevConfigOps *ops);


/* Test and clear masked event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/* Mask/unmask events from this vq.
 */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);

/**
 * vhost_get_features() - return a sanitised set of feature bits
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to terminated table of feature bits
 * @features: original feature set
 *
 * This returns a set of features bits that is an intersection of what
 * is supported by the vhost backend (hdev->features), the supported
 * feature_bits and the requested feature set.
 */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features);

/**
 * vhost_ack_features() - set vhost acked_features
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to terminated table of feature bits
 * @features: requested feature set
 *
 * This sets the internal hdev->acked_features to the intersection of
 * the backends advertised features and the supported feature_bits.
 */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features);
unsigned int vhost_get_max_memslots(void);
unsigned int vhost_get_free_memslots(void);

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

void vhost_toggle_device_iotlb(VirtIODevice *vdev);
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);

int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq, unsigned idx);
int vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
                         struct vhost_virtqueue *vq, unsigned idx);

void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
                           struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                           struct vhost_inflight *inflight);
bool vhost_dev_has_iommu(struct vhost_dev *dev);

#ifdef CONFIG_VHOST
int vhost_reset_device(struct vhost_dev *hdev);
#else
static inline int vhost_reset_device(struct vhost_dev *hdev)
{
    return -ENOSYS;
}
#endif /* CONFIG_VHOST */

/**
 * vhost_supports_device_state(): Checks whether the back-end supports
 * transferring internal device state for the purpose of migration.
 * Support for this feature is required for vhost_set_device_state_fd()
 * and vhost_check_device_state().
 *
 * @dev: The vhost device
 *
 * Returns true if the device supports these commands, and false if it
 * does not.
 */
#ifdef CONFIG_VHOST
bool vhost_supports_device_state(struct vhost_dev *dev);
#else
static inline bool vhost_supports_device_state(struct vhost_dev *dev)
{
    return false;
}
#endif

/**
 * vhost_set_device_state_fd(): Begin transfer of internal state from/to
 * the back-end for the purpose of migration. Data is to be transferred
 * over a pipe according to @direction and @phase. The sending end must
 * only write to the pipe, and the receiving end must only read from it.
 * Once the sending end is done, it closes its FD. The receiving end
 * must take this as the end-of-transfer signal and close its FD, too.
 *
 * @fd is the back-end's end of the pipe: The write FD for SAVE, and the
 * read FD for LOAD. This function transfers ownership of @fd to the
 * back-end, i.e. closes it in the front-end.
 *
 * The back-end may optionally reply with an FD of its own, if this
 * improves efficiency on its end. In this case, the returned FD is
 * stored in *reply_fd. The back-end will discard the FD sent to it,
 * and the front-end must use *reply_fd for transferring state to/from
 * the back-end.
 *
 * @dev: The vhost device
 * @direction: The direction in which the state is to be transferred.
 *             For outgoing migrations, this is SAVE, and data is read
 *             from the back-end and stored by the front-end in the
 *             migration stream.
 *             For incoming migrations, this is LOAD, and data is read
 *             by the front-end from the migration stream and sent to
 *             the back-end to restore the saved state.
 * @phase: Which migration phase we are in. Currently, there is only
 *         STOPPED (device and all vrings are stopped), in the future,
 *         more phases such as PRE_COPY or POST_COPY may be added.
 * @fd: Back-end's end of the pipe through which to transfer state; note
 *      that ownership is transferred to the back-end, so this function
 *      closes @fd in the front-end.
 * @reply_fd: If the back-end wishes to use a different pipe for state
 *            transfer, this will contain an FD for the front-end to
 *            use. Otherwise, -1 is stored here.
 * @errp: Potential error description
 *
 * Returns 0 on success, and -errno on failure.
 */
int vhost_set_device_state_fd(struct vhost_dev *dev,
                              VhostDeviceStateDirection direction,
                              VhostDeviceStatePhase phase,
                              int fd,
                              int *reply_fd,
                              Error **errp);

/**
 * vhost_check_device_state(): After transferring state from/to the
 * back-end via vhost_set_device_state_fd(), i.e. once the sending end
 * has closed the pipe, inquire the back-end to report any potential
 * errors that have occurred on its side. This allows to sense errors
 * like:
 * - During outgoing migration, when the source side had already started
 *   to produce its state, something went wrong and it failed to finish
 * - During incoming migration, when the received state is somehow
 *   invalid and cannot be processed by the back-end
 *
 * @dev: The vhost device
 * @errp: Potential error description
 *
 * Returns 0 when the back-end reports successful state transfer and
 * processing, and -errno when an error occurred somewhere.
 */
int vhost_check_device_state(struct vhost_dev *dev, Error **errp);

/**
 * vhost_save_backend_state(): High-level function to receive a vhost
 * back-end's state, and save it in @f. Uses
 * `vhost_set_device_state_fd()` to get the data from the back-end, and
 * stores it in consecutive chunks that are each prefixed by their
 * respective length (be32). The end is marked by a 0-length chunk.
 *
 * Must only be called while the device and all its vrings are stopped
 * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
 *
 * @dev: The vhost device from which to save the state
 * @f: Migration stream in which to save the state
 * @errp: Potential error message
 *
 * Returns 0 on success, and -errno otherwise.
 */
#ifdef CONFIG_VHOST
int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
#else
static inline int vhost_save_backend_state(struct vhost_dev *dev, QEMUFile *f,
                                           Error **errp)
{
    return -ENOSYS;
}
#endif

/**
 * vhost_load_backend_state(): High-level function to load a vhost
 * back-end's state from @f, and send it over to the back-end. Reads
 * the data from @f in the format used by `vhost_save_backend_state()`,
 * and uses `vhost_set_device_state_fd()` to transfer it to the
 * back-end.
 *
 * Must only be called while the device and all its vrings are stopped
 * (`VHOST_TRANSFER_STATE_PHASE_STOPPED`).
 *
 * @dev: The vhost device to which to send the state
 * @f: Migration stream from which to load the state
 * @errp: Potential error message
 *
 * Returns 0 on success, and -errno otherwise.
 */
#ifdef CONFIG_VHOST
int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f, Error **errp);
#else
static inline int vhost_load_backend_state(struct vhost_dev *dev, QEMUFile *f,
                                           Error **errp)
{
    return -ENOSYS;
}
#endif

#endif /* VHOST_H */