#ifndef VHOST_H
#define VHOST_H

#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "exec/memory.h"

/* Generic structures common for any vhost based device. */

struct vhost_inflight {
    int fd;
    void *addr;
    uint64_t size;
    uint64_t offset;
    uint16_t queue_size;
};

struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    EventNotifier masked_notifier;
    EventNotifier error_notifier;
    struct vhost_dev *dev;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT (0xff)

struct vhost_log {
    unsigned long long size;
    int refcnt;
    int fd;
    vhost_log_chunk_t *log;
};
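
/*
 * Illustrative sketch, not part of the original header: how the dirty
 * log macros above fit together. Each vhost_log_chunk_t in
 * vhost_log.log covers VHOST_LOG_CHUNK bytes of guest memory, one bit
 * per VHOST_LOG_PAGE. The helper name below is hypothetical.
 */
static inline bool vhost_log_example_page_dirty(const struct vhost_log *log,
                                                uint64_t addr)
{
    uint64_t chunk = addr / VHOST_LOG_CHUNK;                  /* index into log->log */
    unsigned bit = (addr % VHOST_LOG_CHUNK) / VHOST_LOG_PAGE; /* bit within that chunk */

    return (log->log[chunk] >> bit) & 1;
}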

struct vhost_dev;
struct vhost_iommu {
    struct vhost_dev *hdev;
    MemoryRegion *mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vhost_iommu) iommu_next;
};

typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;

struct vhost_memory;

/**
 * struct vhost_dev - common vhost_dev structure
 * @vhost_ops: backend specific ops
 * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
 */
struct vhost_dev {
    VirtIODevice *vdev;
    MemoryListener memory_listener;
    MemoryListener iommu_listener;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* one past the last vq index for the virtio device (not vhost) */
    int vq_index_end;
    /* if non-zero, minimum required value for max_queues */
    int num_queues;
    uint64_t features;
    /** @acked_features: final set of negotiated features */
    uint64_t acked_features;
    /** @backend_features: backend specific feature bits */
    uint64_t backend_features;
    /** @protocol_features: final negotiated protocol features */
    uint64_t protocol_features;
    uint64_t max_queues;
    uint64_t backend_cap;
    /* @started: is the vhost device started? */
    bool started;
    bool log_enabled;
    uint64_t log_size;
    Error *migration_blocker;
    const VhostOps *vhost_ops;
    void *opaque;
    struct vhost_log *log;
    QLIST_ENTRY(vhost_dev) entry;
    QLIST_HEAD(, vhost_iommu) iommu_list;
    IOMMUNotifier n;
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
    int backend;
    NetClientState *nc;
};

/**
 * vhost_dev_init() - initialise the vhost interface
 * @hdev: the common vhost_dev structure
 * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa)
 * @backend_type: type of backend
 * @busyloop_timeout: timeout for polling virtqueue
 * @errp: error handle
 *
 * The initialisation of the vhost device will trigger the
 * initialisation of the backend and potentially capability
 * negotiation of the backend interface. Configuration of the VirtIO
 * device itself won't happen until the interface is started.
 *
 * Return: 0 on success, non-zero on error while setting errp.
 */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type,
                   uint32_t busyloop_timeout, Error **errp);

/**
 * vhost_dev_cleanup() - tear down and cleanup vhost interface
 * @hdev: the common vhost_dev structure
 */
void vhost_dev_cleanup(struct vhost_dev *hdev);

/**
 * vhost_dev_enable_notifiers() - enable event notifiers
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Enable notifications directly to the vhost device rather than being
 * triggered by QEMU itself. Notifications should be enabled before
 * the vhost device is started via @vhost_dev_start.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_disable_notifiers() - disable event notifications
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Disable direct notifications to the vhost device.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_is_started() - report status of vhost device
 * @hdev: common vhost_dev structure
 *
 * Return the started status of the vhost device.
 */
static inline bool vhost_dev_is_started(struct vhost_dev *hdev)
{
    return hdev->started;
}

/**
 * vhost_dev_start() - start the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Starts the vhost device. From this point VirtIO feature negotiation
 * can start and the device can start processing VirtIO transactions.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_stop() - stop the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Stop the vhost device. After the device is stopped the notifiers
 * can be disabled (@vhost_dev_disable_notifiers) and the device can
 * be torn down (@vhost_dev_cleanup).
 */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
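
/*
 * Illustrative sketch, not part of the original header: the calling
 * sequence the comments above describe for bringing a vhost device up.
 * Teardown follows the reverse order: vhost_dev_stop(),
 * vhost_dev_disable_notifiers(), vhost_dev_cleanup(). The function
 * name is hypothetical, the kernel backend type is only an example
 * value, and error handling is kept minimal.
 */
static inline int vhost_dev_example_bringup(struct vhost_dev *hdev,
                                            VirtIODevice *vdev,
                                            void *backend_opaque,
                                            Error **errp)
{
    int r;

    /* Initialise the backend; VirtIO configuration happens later. */
    r = vhost_dev_init(hdev, backend_opaque, VHOST_BACKEND_TYPE_KERNEL, 0, errp);
    if (r < 0) {
        return r;
    }

    /* Route guest notifications directly to the backend... */
    r = vhost_dev_enable_notifiers(hdev, vdev);
    if (r < 0) {
        goto fail_cleanup;
    }

    /* ...and only then start processing virtqueues. */
    r = vhost_dev_start(hdev, vdev);
    if (r < 0) {
        goto fail_notifiers;
    }
    return 0;

fail_notifiers:
    vhost_dev_disable_notifiers(hdev, vdev);
fail_cleanup:
    vhost_dev_cleanup(hdev);
    return r;
}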

/**
 * DOC: vhost device configuration handling
 *
 * The VirtIO device configuration space is used for rarely changing
 * or initialisation time parameters. The configuration can be updated
 * by either the guest driver or the device itself. If the device can
 * change the configuration over time, the vhost handler should
 * register a @VhostDevConfigOps structure with
 * @vhost_dev_set_config_notifier so the guest can be notified. Some
 * devices register a handler anyway and will signal an error if an
 * unexpected config change happens.
 */

/**
 * vhost_dev_get_config() - fetch device configuration
 * @hdev: common vhost_dev structure
 * @config: pointer to device appropriate config structure
 * @config_len: size of device appropriate config structure
 * @errp: error handle
 *
 * Return: 0 on success, < 0 on error while setting errp
 */
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len, Error **errp);

/**
 * vhost_dev_set_config() - set device configuration
 * @dev: common vhost_dev structure
 * @data: pointer to data to set
 * @offset: offset into configuration space
 * @size: length of the data to set
 * @flags: @VhostSetConfigType flags
 *
 * By use of @offset/@size a subset of the configuration space can be
 * written to. The @flags are used to indicate if it is a normal
 * transaction or related to migration.
 *
 * Return: 0 on success, non-zero on error
 */
int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags);

/**
 * vhost_dev_set_config_notifier() - register VhostDevConfigOps
 * @dev: common vhost_dev structure
 * @ops: notifier ops
 *
 * If the device is expected to change its configuration a notifier
 * can be set up to handle the case.
 */
void vhost_dev_set_config_notifier(struct vhost_dev *dev,
                                   const VhostDevConfigOps *ops);

/* Test and clear masked event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/* Mask/unmask events from this vq.
 */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);

/**
 * vhost_get_features() - return a sanitised set of feature bits
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to VHOST_INVALID_FEATURE_BIT-terminated table
 *                of feature bits
 * @features: original feature set
 *
 * This returns a set of feature bits that is an intersection of what
 * is supported by the vhost backend (hdev->features), the supported
 * feature_bits and the requested feature set.
 */
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features);

/**
 * vhost_ack_features() - set vhost acked_features
 * @hdev: common vhost_dev structure
 * @feature_bits: pointer to VHOST_INVALID_FEATURE_BIT-terminated table
 *                of feature bits
 * @features: requested feature set
 *
 * This sets the internal hdev->acked_features to the intersection of
 * the backend's advertised features and the supported feature_bits.
 */
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features);
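
/*
 * Illustrative sketch, not part of the original header: feature bit
 * tables passed to vhost_get_features()/vhost_ack_features() are int
 * arrays terminated by VHOST_INVALID_FEATURE_BIT. The helper name is
 * hypothetical and the bits listed are just examples, assuming the
 * standard VIRTIO feature defines are visible via "hw/virtio/virtio.h".
 */
static inline uint64_t vhost_example_negotiate(struct vhost_dev *hdev,
                                               uint64_t requested_features)
{
    static const int example_feature_bits[] = {
        VIRTIO_F_VERSION_1,
        VIRTIO_RING_F_INDIRECT_DESC,
        VHOST_INVALID_FEATURE_BIT
    };
    /* Intersect the backend's features, the table and the request... */
    uint64_t features = vhost_get_features(hdev, example_feature_bits,
                                           requested_features);

    /* ...and record the result as the device's acked feature set. */
    vhost_ack_features(hdev, example_feature_bits, features);
    return features;
}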

bool vhost_has_free_slot(void);

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);

void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
                           struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                           struct vhost_inflight *inflight);

#endif