/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
	unsigned long		  flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost-specific about this structure. */
struct vhost_poll {
	poll_table		  table;
	wait_queue_head_t	 *wqh;
	wait_queue_entry_t	  wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

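/*
 * Illustrative sketch (not part of this header): deferring work to the
 * device's worker thread. The handler name is hypothetical; the calls
 * are the ones declared in this header.
 *
 *	static void my_handler(struct vhost_work *work)
 *	{
 *		... runs on the vhost worker kthread ...
 *	}
 *
 *	vhost_work_init(&work, my_handler);
 *	vhost_work_queue(dev, &work);
 *	vhost_work_flush(dev, &work);
 *
 * vhost_work_queue() is a no-op while VHOST_WORK_QUEUED is still set,
 * and vhost_work_flush() waits for a previously queued handler to
 * complete.
 */
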
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);

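/*
 * Illustrative sketch (not part of this header): tying a pollable file
 * to a work item, the way vhost_net does for its backend socket. The
 * handler name is hypothetical; the mask is a standard poll mask.
 *
 *	vhost_poll_init(&poll, my_handle_rx, POLLIN, dev);
 *	err = vhost_poll_start(&poll, sock_file);
 *	...
 *	vhost_poll_stop(&poll);
 *	vhost_poll_flush(&poll);
 *
 * vhost_poll_start() registers on the file's waitqueue so that
 * matching wakeups queue the work; vhost_poll_flush() waits for any
 * queued instance to finish running.
 */
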
struct vhost_log {
	u64 addr;
	u64 len;
};

/* Interval-tree accessors for struct vhost_umem_node (see vhost.c). */
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};

struct vhost_umem {
	struct rb_root_cached umem_tree;
	struct list_head umem_list;
	int numem;
};

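/*
 * How the pieces above fit together: vhost.c instantiates an interval
 * tree over vhost_umem_node using the START/LAST macros, roughly:
 *
 *	INTERVAL_TREE_DEFINE(struct vhost_umem_node,
 *			     rb, __u64, __subtree_last,
 *			     START, LAST, static inline,
 *			     vhost_umem_interval_tree);
 *
 * Lookups then walk the generated vhost_umem_interval_tree_iter_first()
 * and _iter_next() helpers to translate a guest range into the
 * userspace_addr mappings that cover it.
 */
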
enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid (false forces the next signal). */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

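/*
 * Illustrative sketch (not part of this header): the canonical kick
 * handler loop used by vhost drivers (compare drivers/vhost/test.c);
 * the function name is hypothetical and the device-specific handling
 * of vq->iov is elided. A negative head means a bad descriptor;
 * head == vq->num means the ring is empty, in which case notifications
 * are re-enabled and the ring re-checked to close the race with a
 * concurrent guest kick.
 *
 *	static void my_handle_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq = container_of(work,
 *				struct vhost_virtqueue, poll.work);
 *		unsigned out, in;
 *		int head;
 *
 *		mutex_lock(&vq->mutex);
 *		vhost_disable_notify(vq->dev, vq);
 *		for (;;) {
 *			head = vhost_get_vq_desc(vq, vq->iov,
 *						 ARRAY_SIZE(vq->iov),
 *						 &out, &in, NULL, NULL);
 *			if (head < 0)
 *				break;
 *			if (head == vq->num) {
 *				if (unlikely(vhost_enable_notify(vq->dev,
 *								 vq))) {
 *					vhost_disable_notify(vq->dev, vq);
 *					continue;
 *				}
 *				break;
 *			}
 *			vhost_add_used_and_signal(vq->dev, vq, head, 0);
 *		}
 *		mutex_unlock(&vq->mutex);
 *	}
 */
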
struct vhost_msg_node {
	struct vhost_msg msg;
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
};

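/*
 * Illustrative sketch (not part of this header): a device driver embeds
 * vhost_dev and its virtqueues in a private structure and wires them up
 * on open (compare drivers/vhost/test.c; names are hypothetical):
 *
 *	struct my_vhost {
 *		struct vhost_dev dev;
 *		struct vhost_virtqueue vq;
 *		struct vhost_virtqueue *vqs[1];
 *	};
 *
 *	n->vqs[0] = &n->vq;
 *	n->vq.handle_kick = my_handle_kick;
 *	vhost_dev_init(&n->dev, n->vqs, 1);
 */
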
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

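/*
 * Illustrative sketch (not part of this header): the usual ioctl
 * dispatch in a vhost device driver. Device-private ioctls are handled
 * first; everything else falls through to the generic handlers, with
 * -ENOIOCTLCMD from vhost_dev_ioctl() routed on to vhost_vring_ioctl()
 * (compare vhost_net_ioctl() in drivers/vhost/net.c):
 *
 *	default:
 *		mutex_lock(&n->dev.mutex);
 *		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
 *		if (r == -ENOIOCTLCMD)
 *			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
 *		mutex_unlock(&n->dev.mutex);
 */
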
/* Returns the descriptor head on success, vq->num if the ring is
 * empty, or a negative errno on failure.
 */
int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

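/*
 * Note on the notification handshake: vhost_enable_notify() returns
 * true if new buffers became available while notifications were
 * disabled, in which case the caller must disable again and re-poll
 * the ring rather than sleep; see the kick-handler sketch above.
 */
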
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)

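/*
 * Example (hypothetical message text): report a malformed ring entry;
 * this logs via pr_debug() and, if userspace registered an error
 * eventfd, signals it:
 *
 *	vq_err(vq, "Failed to read head at index %u\n", last_avail_idx);
 */
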
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

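/*
 * Example: feature-conditional behaviour, as done throughout vhost.c;
 * event-index signalling applies only when the guest acked it:
 *
 *	if (vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX))
 *		... compare against the ring's used_event threshold ...
 */
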
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
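
/*
 * Example: ring fields live in guest byte order, so every userspace
 * load or store goes through these helpers. A minimal sketch of
 * fetching the available index (vhost.c does this through its own
 * access wrappers, which also handle the IOTLB path):
 *
 *	__virtio16 idx;
 *
 *	if (__get_user(idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, idx);
 */
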
#endif