/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
struct vhost_task;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	node;
	vhost_work_fn_t		fn;
	unsigned long		flags;
};

struct vhost_worker {
	struct vhost_task	*vtsk;
	struct vhost_dev	*dev;
	/* Used to serialize device wide flushing with worker swapping. */
	struct mutex		mutex;
	struct llist_head	work_list;
	u64			kcov_handle;
	u32			id;
	int			attachment_cnt;
	bool			killed;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table		table;
	wait_queue_head_t	*wqh;
	wait_queue_entry_t	wait;
	struct vhost_work	work;
	__poll_t		mask;
	struct vhost_dev	*dev;
	struct vhost_virtqueue	*vq;
};

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
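
/*
 * Usage sketch (illustrative, not part of this API): a backend wires a
 * virtqueue's kick eventfd to its handler through vhost_poll. The
 * handle_kick callback, kick_file and err label below are assumptions
 * for the example.
 *
 *	vhost_poll_init(&vq->poll, handle_kick, EPOLLIN, dev, vq);
 *	if (vhost_poll_start(&vq->poll, kick_file))
 *		goto err;
 *	...
 *	vhost_poll_stop(&vq->poll);
 */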

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_dev_flush(struct vhost_dev *dev);
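
/*
 * Usage sketch (assumed driver-side code): embed a vhost_work in a
 * backend structure, initialize it once, queue it with
 * vhost_vq_work_queue() (declared below), and use vhost_dev_flush()
 * to wait for all queued work on the device to finish. "struct
 * my_backend" and my_work_fn are hypothetical names.
 *
 *	static void my_work_fn(struct vhost_work *work)
 *	{
 *		struct my_backend *b = container_of(work,
 *						    struct my_backend, work);
 *		...
 *	}
 *
 *	vhost_work_init(&b->work, my_work_fn);
 *	vhost_vq_work_queue(vq, &b->work);
 *	vhost_dev_flush(dev);
 */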

struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid (cleared to force a signal) */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg));
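
/*
 * Setup sketch (hypothetical driver; handle_my_kick, MY_WEIGHT and
 * MY_BYTE_WEIGHT are illustrative names): a driver allocates its
 * virtqueues, points each vq's handle_kick at its handler, then
 * registers everything with the core.
 *
 *	vqs[0] = &n->vq;
 *	n->vq.handle_kick = handle_my_kick;
 *	vhost_dev_init(&n->dev, vqs, 1, UIO_MAXIOV,
 *		       MY_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
 */
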
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
void vhost_clear_msg(struct vhost_dev *dev);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

void vhost_vq_flush(struct vhost_virtqueue *vq);
bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work);
bool vhost_vq_has_work(struct vhost_virtqueue *vq);
bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
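
/*
 * Datapath sketch (assumed kick handler, loosely following the pattern
 * of in-tree vhost drivers): pop available descriptors, consume them,
 * mark them used, and re-enable guest notifications before exiting.
 * vhost_enable_notify() returning true means new buffers raced in, so
 * notifications are disabled again and the loop restarts. process() is
 * a stand-in for backend-specific work.
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
 *				vhost_disable_notify(vq->dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		process(vq->iov, out, in);
 *		vhost_add_used_and_signal(vq->dev, vq, head, 0);
 *	}
 */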

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d);

void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                          \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);      \
		if ((vq)->error_ctx)                       \
			eventfd_signal((vq)->error_ctx, 1);\
	} while (0)
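
/*
 * Example (illustrative): report a malformed ring to the debug log and
 * signal the error eventfd, if userspace registered one.
 *
 *	vq_err(vq, "Failed to access avail idx at %p\n", &vq->avail->idx);
 */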

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};
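
/*
 * Usage sketch (hypothetical feature bit, illustrative name): drivers
 * typically OR their device-specific bits into this base mask when
 * reporting features to userspace.
 *
 *	enum {
 *		VHOST_MY_FEATURES = VHOST_FEATURES |
 *				    (1ULL << VIRTIO_MY_F_SOMETHING),
 *	};
 */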

/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:            Virtqueue.
 * @private_data:  The private data.
 *
 * Context: Need to call with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:            Virtqueue.
 *
 * Context: Need to call with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}
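
/*
 * Usage sketch (assumed backend code; "sock" is illustrative): the
 * private data is published and read under vq->mutex, e.g. while
 * handling a backend-set ioctl.
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_vq_set_backend(vq, sock);
 *	...
 *	sock = vhost_vq_get_backend(vq);
 *	mutex_unlock(&vq->mutex);
 */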

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}
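
/*
 * Example (illustrative): gate a code path on a negotiated feature,
 * here the event-index notification suppression machinery.
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		...
 */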

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
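
/*
 * Example (assumed caller): ring fields live in guest memory in virtio
 * endianness; convert after copying them in.
 *
 *	__virtio16 idx;
 *
 *	if (!get_user(idx, &vq->avail->idx))
 *		vq->avail_idx = vhost16_to_cpu(vq, idx);
 */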
#endif /* _VHOST_H */
346