#ifndef VHOST_H
#define VHOST_H

#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "exec/memory.h"

/* Generic structures common to any vhost-based device. */

struct vhost_inflight {
    int fd;
    void *addr;
    uint64_t size;
    uint64_t offset;
    uint16_t queue_size;
};

struct vhost_virtqueue {
    int kick;
    int call;
    void *desc;
    void *avail;
    void *used;
    int num;
    unsigned long long desc_phys;
    unsigned desc_size;
    unsigned long long avail_phys;
    unsigned avail_size;
    unsigned long long used_phys;
    unsigned used_size;
    EventNotifier masked_notifier;
    struct vhost_dev *dev;
};

typedef unsigned long vhost_log_chunk_t;
#define VHOST_LOG_PAGE 0x1000
#define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t))
#define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS)
#define VHOST_INVALID_FEATURE_BIT   (0xff)

struct vhost_log {
    unsigned long long size;
    int refcnt;
    int fd;
    vhost_log_chunk_t *log;
};

struct vhost_dev;
struct vhost_iommu {
    struct vhost_dev *hdev;
    MemoryRegion *mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vhost_iommu) iommu_next;
};

typedef struct VhostDevConfigOps {
    /* Vhost device config space changed callback */
    int (*vhost_dev_config_notifier)(struct vhost_dev *dev);
} VhostDevConfigOps;

struct vhost_memory;

/**
 * struct vhost_dev - common vhost_dev structure
 * @vhost_ops: backend specific ops
 * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier)
 */
struct vhost_dev {
    VirtIODevice *vdev;
    MemoryListener memory_listener;
    MemoryListener iommu_listener;
    struct vhost_memory *mem;
    int n_mem_sections;
    MemoryRegionSection *mem_sections;
    int n_tmp_sections;
    MemoryRegionSection *tmp_sections;
    struct vhost_virtqueue *vqs;
    unsigned int nvqs;
    /* the first virtqueue which would be used by this vhost dev */
    int vq_index;
    /* one past the last vq index for the virtio device (not vhost) */
    int vq_index_end;
    /* if non-zero, minimum required value for max_queues */
    int num_queues;
    uint64_t features;
    uint64_t acked_features;
    uint64_t backend_features;
    uint64_t protocol_features;
    uint64_t max_queues;
    uint64_t backend_cap;
    bool started;
    bool log_enabled;
    uint64_t log_size;
    Error *migration_blocker;
    const VhostOps *vhost_ops;
    void *opaque;
    struct vhost_log *log;
    QLIST_ENTRY(vhost_dev) entry;
    QLIST_HEAD(, vhost_iommu) iommu_list;
    IOMMUNotifier n;
    const VhostDevConfigOps *config_ops;
};

extern const VhostOps kernel_ops;
extern const VhostOps user_ops;
extern const VhostOps vdpa_ops;

struct vhost_net {
    struct vhost_dev dev;
    struct vhost_virtqueue vqs[2];
    int backend;
    NetClientState *nc;
};

/**
 * vhost_dev_init() - initialise the vhost interface
 * @hdev: the common vhost_dev structure
 * @opaque: opaque pointer passed to the backend (vhost/vhost-user/vdpa)
 * @backend_type: type of backend
 * @busyloop_timeout: timeout for busy polling of the virtqueue
 * @errp: error handle
 *
 * Initialising the vhost device triggers initialisation of the
 * backend and, potentially, capability negotiation of the backend
 * interface. Configuration of the VirtIO device itself does not
 * happen until the interface is started.
 *
 * Return: 0 on success, non-zero on error, in which case @errp is set.
 */
int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type,
                   uint32_t busyloop_timeout, Error **errp);
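
/*
 * Illustrative sketch, not part of the original header: a minimal
 * vhost_dev_init() call as it might appear in a device realize path.
 * The vhost_example_ name, the kernel backend type and the zero
 * busyloop timeout are assumptions made for this example only.
 */
static inline int vhost_example_init(struct vhost_dev *hdev, void *opaque,
                                     Error **errp)
{
    /* VHOST_BACKEND_TYPE_KERNEL comes from hw/virtio/vhost-backend.h; a
     * busyloop timeout of 0 leaves busy polling disabled. */
    int r = vhost_dev_init(hdev, opaque, VHOST_BACKEND_TYPE_KERNEL, 0, errp);
    if (r < 0) {
        /* On failure @errp has been set and hdev must not be used. */
        return r;
    }
    return 0;
}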

/**
 * vhost_dev_cleanup() - tear down and clean up the vhost interface
 * @hdev: the common vhost_dev structure
 */
void vhost_dev_cleanup(struct vhost_dev *hdev);

/**
 * vhost_dev_enable_notifiers() - enable event notifiers
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Enable notifications to go directly to the vhost device rather than
 * being handled by QEMU itself. Notifications should be enabled before
 * the vhost device is started via @vhost_dev_start.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_disable_notifiers() - disable event notifications
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Disable direct notifications to the vhost device.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);

/**
 * vhost_dev_start() - start the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Starts the vhost device. From this point VirtIO feature negotiation
 * can begin and the device can start processing VirtIO transactions.
 *
 * Return: 0 on success, < 0 on error.
 */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
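
/*
 * Illustrative sketch, not part of the original header: the bring-up
 * ordering described above, i.e. enable the notifiers before starting
 * the device, and roll them back if the start fails. The
 * vhost_example_ name is hypothetical.
 */
static inline int vhost_example_bringup(struct vhost_dev *hdev,
                                        VirtIODevice *vdev)
{
    int r = vhost_dev_enable_notifiers(hdev, vdev);
    if (r < 0) {
        return r;
    }
    r = vhost_dev_start(hdev, vdev);
    if (r < 0) {
        /* Undo the notifier setup if the device failed to start. */
        vhost_dev_disable_notifiers(hdev, vdev);
    }
    return r;
}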

/**
 * vhost_dev_stop() - stop the vhost device
 * @hdev: common vhost_dev structure
 * @vdev: the VirtIODevice structure
 *
 * Stop the vhost device. After the device is stopped the notifiers
 * can be disabled (@vhost_dev_disable_notifiers) and the device can
 * be torn down (@vhost_dev_cleanup).
 */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
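
/*
 * Illustrative sketch, not part of the original header: the teardown
 * ordering described above: stop the device, then disable direct
 * notifications, then release the vhost resources.
 */
static inline void vhost_example_teardown(struct vhost_dev *hdev,
                                          VirtIODevice *vdev)
{
    vhost_dev_stop(hdev, vdev);
    vhost_dev_disable_notifiers(hdev, vdev);
    vhost_dev_cleanup(hdev);
}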

/**
 * DOC: vhost device configuration handling
 *
 * The VirtIO device configuration space is used for rarely-changing
 * or initialisation-time parameters. The configuration can be updated
 * by either the guest driver or the device itself. If the device can
 * change the configuration over time, the vhost handler should
 * register a @VhostDevConfigOps structure with
 * @vhost_dev_set_config_notifier so the guest can be notified. Some
 * devices register a handler anyway and will signal an error if an
 * unexpected config change happens.
 */

/**
 * vhost_dev_get_config() - fetch device configuration
 * @hdev: common vhost_dev structure
 * @config: pointer to device appropriate config structure
 * @config_len: size of device appropriate config structure
 * @errp: error handle
 *
 * Return: 0 on success, < 0 on error, in which case @errp is set.
 */
int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
                         uint32_t config_len, Error **errp);
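
/*
 * Illustrative sketch, not part of the original header: fetching the
 * backend's view of the config space into a caller-provided buffer.
 * The vhost_example_ name is hypothetical; @buf would normally be a
 * device-specific config structure.
 */
static inline int vhost_example_read_config(struct vhost_dev *hdev,
                                            void *buf, uint32_t len,
                                            Error **errp)
{
    /* The backend copies @len bytes of config space into @buf; on
     * error @errp is set and a negative value is returned. */
    return vhost_dev_get_config(hdev, (uint8_t *)buf, len, errp);
}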

/**
 * vhost_dev_set_config() - set device configuration
 * @dev: common vhost_dev structure
 * @data: pointer to data to set
 * @offset: offset into configuration space
 * @size: length of the data to set
 * @flags: @VhostSetConfigType flags
 *
 * By use of @offset/@size a subset of the configuration space can be
 * written to. The @flags are used to indicate if it is a normal
 * transaction or related to migration.
 *
 * Return: 0 on success, non-zero on error
 */
int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data,
                         uint32_t offset, uint32_t size, uint32_t flags);
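
/*
 * Illustrative sketch, not part of the original header: updating a
 * single field inside the config space via @offset/@size. The 16-bit
 * payload is an arbitrary example; @flags would be one of the
 * VhostSetConfigType values from hw/virtio/vhost-backend.h.
 */
static inline int vhost_example_write_config_field(struct vhost_dev *dev,
                                                   uint32_t offset,
                                                   uint16_t value,
                                                   uint32_t flags)
{
    /* Only sizeof(value) bytes starting at @offset are written; the
     * rest of the config space is left untouched. */
    return vhost_dev_set_config(dev, (const uint8_t *)&value, offset,
                                sizeof(value), flags);
}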

/**
 * vhost_dev_set_config_notifier() - register VhostDevConfigOps
 * @dev: common vhost_dev structure
 * @ops: notifier ops
 *
 * If the device is expected to change its configuration a notifier
 * can be set up to handle the case.
 */
void vhost_dev_set_config_notifier(struct vhost_dev *dev,
                                   const VhostDevConfigOps *ops);
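
/*
 * Illustrative sketch, not part of the original header: a config-change
 * notifier as described in the DOC block above. The vhost_example_
 * names are hypothetical.
 */
static inline int vhost_example_config_notifier(struct vhost_dev *dev)
{
    /* A real handler typically re-reads the config space with
     * vhost_dev_get_config() and forwards the change to the guest,
     * e.g. via virtio_notify_config(dev->vdev). */
    return 0;
}

/* Wired up once after vhost_dev_init(), e.g.:
 *
 *     static const VhostDevConfigOps ops = {
 *         .vhost_dev_config_notifier = vhost_example_config_notifier,
 *     };
 *     vhost_dev_set_config_notifier(dev, &ops);
 */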

/* Test and clear masked event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask);
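
/*
 * Illustrative sketch, not part of the original header: the
 * unmask-then-check pattern the comments above describe. How the
 * pending event is re-delivered is an assumption; here it is signalled
 * on the guest notifier of virtqueue @n.
 */
static inline void vhost_example_unmask_vq(struct vhost_dev *hdev,
                                           VirtIODevice *vdev, int n)
{
    vhost_virtqueue_mask(hdev, vdev, n, false);
    if (vhost_virtqueue_pending(hdev, n)) {
        /* Forward the event that fired while the vq was masked so it
         * is not lost. */
        event_notifier_set(virtio_queue_get_guest_notifier(
                               virtio_get_queue(vdev, n)));
    }
}
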
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
                            uint64_t features);
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
                        uint64_t features);
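
/*
 * Illustrative sketch, not part of the original header: typical use of
 * the feature helpers. The bit list is a made-up example (real devices
 * list the bits they support, terminated by VHOST_INVALID_FEATURE_BIT,
 * and the VIRTIO_* macros are assumed to be visible via the existing
 * includes). In a real device the two calls happen at different times:
 * the masking when features are offered, the ack once the guest has
 * accepted them.
 */
static inline uint64_t vhost_example_features(struct vhost_dev *hdev,
                                              uint64_t proposed_features)
{
    static const int example_feature_bits[] = {
        VIRTIO_F_VERSION_1,
        VIRTIO_RING_F_INDIRECT_DESC,
        VHOST_INVALID_FEATURE_BIT
    };

    /* Drop any listed bit the vhost backend does not support. */
    uint64_t features = vhost_get_features(hdev, example_feature_bits,
                                           proposed_features);
    /* Record the accepted bits in hdev->acked_features. */
    vhost_ack_features(hdev, example_feature_bits, features);
    return features;
}
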
bool vhost_has_free_slot(void);

int vhost_net_set_backend(struct vhost_dev *hdev,
                          struct vhost_vring_file *file);

int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write);

void vhost_dev_reset_inflight(struct vhost_inflight *inflight);
void vhost_dev_free_inflight(struct vhost_inflight *inflight);
void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f);
int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_set_inflight(struct vhost_dev *dev,
                           struct vhost_inflight *inflight);
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                           struct vhost_inflight *inflight);
#endif