#ifndef VHOST_H
#define VHOST_H

#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "exec/memory.h"

#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30

/* Generic structures common for any vhost based device. */

struct vhost_inflight {
    int fd;
    void *addr;
    uint64_t size;
    uint64_t offset;
    uint16_t queue_size;
};

struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    EventNotifier masked_notifier;
    EventNotifier error_notifier;
    struct vhost_dev *dev;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT (0xff)

struct vhost_log {
    unsigned long long size;
    int refcnt;
    int fd;
    vhost_log_chunk_t *log;
};

struct vhost_dev;
struct vhost_iommu {
    struct vhost_dev *hdev;
    MemoryRegion *mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vhost_iommu) iommu_next;
};

typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;
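/*
 * Example (illustrative sketch only, not part of this API): a device whose
 * config space can change at runtime supplies a VhostDevConfigOps and
 * registers it with vhost_dev_set_config_notifier() (declared below).
 * The callback and ops names here are hypothetical.
 *
 *     static int my_dev_config_notifier(struct vhost_dev *dev)
 *     {
 *         // re-read the device config and propagate the change to the guest
 *         return 0;
 *     }
 *
 *     static const VhostDevConfigOps my_dev_config_ops = {
 *         .vhost_dev_config_notifier = my_dev_config_notifier,
 *     };
 *
 *     vhost_dev_set_config_notifier(hdev, &my_dev_config_ops);
 */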
struct vhost_memory;

/**
 * struct vhost_dev - common vhost_dev structure
 * @vhost_ops: backend specific ops
 * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
 */
struct vhost_dev {
    VirtIODevice *vdev;
    MemoryListener memory_listener;
    MemoryListener iommu_listener;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* one past the last vq index for the virtio device (not vhost) */
    int vq_index_end;
    /* if non-zero, minimum required value for max_queues */
    int num_queues;
    uint64_t features;
    /** @acked_features: final set of negotiated features */
    uint64_t acked_features;
    /** @backend_features: backend specific feature bits */
    uint64_t backend_features;
    /** @protocol_features: final negotiated protocol features */
    uint64_t protocol_features;
    uint64_t max_queues;
    uint64_t backend_cap;
    /* @started: is the vhost device started? */
    bool started;
    bool log_enabled;
    uint64_t log_size;
    Error *migration_blocker;
    const VhostOps *vhost_ops;
    void *opaque;
    struct vhost_log *log;
    QLIST_ENTRY(vhost_dev) entry;
    QLIST_HEAD(, vhost_iommu) iommu_list;
    IOMMUNotifier n;
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
    int backend;
    NetClientState *nc;
};

/**
 * vhost_dev_init() - initialise the vhost interface
 * @hdev: the common vhost_dev structure
 * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
 * @backend_type: type of backend
 * @busyloop_timeout: timeout for polling virtqueue
 * @errp: error handle
 *
 * The initialisation of the vhost device will trigger the
 * initialisation of the backend and potentially capability
 * negotiation of the backend interface. Configuration of the VirtIO
 * device itself won't happen until the interface is started.
 *
 * Return: 0 on success, non-zero on error while setting errp.
 */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type,
                   uint32_t busyloop_timeout, Error **errp);

/**
 * vhost_dev_cleanup() - tear down and cleanup vhost interface
 * @hdev: the common vhost_dev structure
 */
void vhost_dev_cleanup(struct vhost_dev *hdev);

/**
 * vhost_dev_enable_notifiers() - enable event notifiers
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Enable notifications directly to the vhost device rather than being
 * triggered by QEMU itself. Notifications should be enabled before
 * the vhost device is started via @vhost_dev_start.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_disable_notifiers() - disable event notifications
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Disable direct notifications to the vhost device.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_is_started() - report status of vhost device
 * @hdev: common vhost_dev structure
 *
 * Return: the started status of the vhost device.
 */
static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
{
    return hdev->started;
}

/**
 * vhost_dev_start() - start the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Starts the vhost device. From this point VirtIO feature negotiation
 * can start and the device can start processing VirtIO transactions.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_stop() - stop the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Stop the vhost device. After the device is stopped the notifiers
 * can be disabled (@vhost_dev_disable_notifiers) and the device can
 * be torn down (@vhost_dev_cleanup).
 */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
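/*
 * Typical lifecycle, as implied by the documentation above (illustrative
 * sketch only; error handling omitted, and hdev/vdev/opaque/errp are assumed
 * to be set up by the calling device model):
 *
 *     vhost_dev_init(&hdev, opaque, VHOST_BACKEND_TYPE_KERNEL, 0, &errp);
 *     vhost_dev_enable_notifiers(&hdev, vdev);   // before starting
 *     vhost_dev_start(&hdev, vdev);
 *     ...
 *     vhost_dev_stop(&hdev, vdev);
 *     vhost_dev_disable_notifiers(&hdev, vdev);  // after stopping
 *     vhost_dev_cleanup(&hdev);
 */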
/**
 * DOC: vhost device configuration handling
 *
 * The VirtIO device configuration space is used for rarely changing
 * or initialisation time parameters. The configuration can be updated
 * by either the guest driver or the device itself. If the device can
 * change the configuration over time the vhost handler should
 * register a @VhostDevConfigOps structure with
 * @vhost_dev_set_config_notifier so the guest can be notified. Some
 * devices register a handler anyway and will signal an error if an
 * unexpected config change happens.
 */

/**
 * vhost_dev_get_config() - fetch device configuration
 * @hdev: common vhost_dev structure
 * @config: pointer to device appropriate config structure
 * @config_len: size of device appropriate config structure
 * @errp: error handle
 *
 * Return: 0 on success, < 0 on error while setting errp
 */
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len, Error **errp);

/**
 * vhost_dev_set_config() - set device configuration
 * @dev: common vhost_dev structure
 * @data: pointer to data to set
 * @offset: offset into configuration space
 * @size: length of set
 * @flags: @VhostSetConfigType flags
 *
 * By use of @offset/@size a subset of the configuration space can be
 * written to. The @flags are used to indicate if it is a normal
 * transaction or related to migration.
 *
 * Return: 0 on success, non-zero on error
 */
int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags);

/**
 * vhost_dev_set_config_notifier() - register VhostDevConfigOps
 * @dev: common vhost_dev structure
 * @ops: notifier ops
 *
 * If the device is expected to change its configuration a notifier
 * can be set up to handle the case.
 */
void vhost_dev_set_config_notifier(struct vhost_dev *dev,
                                   const VhostDevConfigOps *ops);


/* Test and clear masked event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/* Mask/unmask events from this vq.
 */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);

/**
 * vhost_get_features() - return a sanitised set of feature bits
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to terminated table of feature bits
 * @features: original feature set
 *
 * This returns a set of feature bits that is the intersection of what
 * is supported by the vhost backend (hdev->features), the supported
 * feature_bits and the requested feature set.
 */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features);

/**
 * vhost_ack_features() - set vhost acked_features
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to terminated table of feature bits
 * @features: requested feature set
 *
 * This sets the internal hdev->acked_features to the intersection of
 * the backend's advertised features and the supported feature_bits.
 */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features);
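/*
 * Example (illustrative sketch): @feature_bits is a table of VIRTIO feature
 * bit numbers terminated by VHOST_INVALID_FEATURE_BIT. The table below is
 * hypothetical; a real device lists the bits it actually supports.
 *
 *     static const int example_feature_bits[] = {
 *         VIRTIO_F_VERSION_1,
 *         VIRTIO_RING_F_INDIRECT_DESC,
 *         VIRTIO_RING_F_EVENT_IDX,
 *         VHOST_INVALID_FEATURE_BIT
 *     };
 *
 *     // offer only bits supported by guest, device model and backend alike
 *     features = vhost_get_features(hdev, example_feature_bits, features);
 *     // record what the guest acknowledged after negotiation
 *     vhost_ack_features(hdev, example_feature_bits, guest_features);
 */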
bool vhost_has_free_slot(void);

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);

int vhost_virtqueue_start(struct vhost_dev *dev, struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq, unsigned idx);
void vhost_virtqueue_stop(struct vhost_dev *dev, struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq, unsigned idx);

void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
                           struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                           struct vhost_inflight *inflight);

#endif