#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
	unsigned long		  flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost-specific about this structure. */
struct vhost_poll {
	poll_table                table;
	wait_queue_head_t        *wqh;
	wait_queue_t              wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
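
/*
 * Example (illustrative sketch, not taken from any one driver): bind a
 * handler to a work item once, then queue the item on the device's
 * worker thread; "my_handler" and "work" are hypothetical names.
 *
 *	static void my_handler(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		...
 *	}
 *
 *	vhost_work_init(&work, my_handler);
 *	vhost_work_queue(dev, &work);
 */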

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
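
/*
 * Example (sketch of the pattern vhost.c itself uses when starting a
 * virtqueue): arm the poll instance on the guest's kick eventfd so the
 * handler runs on the worker thread whenever the file becomes readable.
 *
 *	vhost_poll_init(&vq->poll, vq->handle_kick, POLLIN, dev);
 *	err = vhost_poll_start(&vq->poll, vq->kick);
 *	...
 *	vhost_poll_stop(&vq->poll);
 *	vhost_poll_flush(&vq->poll);
 */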

struct vhost_log {
	u64 addr;
	u64 len;
};

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};
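
/*
 * START()/LAST() are the accessors consumed by the generic interval
 * tree; vhost.c (not this header) instantiates the tree over
 * vhost_umem_node roughly as follows:
 *
 *	INTERVAL_TREE_DEFINE(struct vhost_umem_node, rb, __u64,
 *			     __subtree_last, START, LAST, static inline,
 *			     vhost_umem_interval_tree);
 */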

struct vhost_umem {
	struct rb_root umem_tree;
	struct list_head umem_list;
	int numem;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the above signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

struct vhost_msg_node {
	struct vhost_msg msg;
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
};

void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
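
/*
 * Example lifecycle (sketch modeled on the open()/ioctl()/release()
 * paths of the in-tree vhost drivers; "d" and its containing structure
 * are hypothetical):
 *
 *	vhost_dev_init(&d->dev, vqs, nvqs);		// on open()
 *	...
 *	r = vhost_dev_ioctl(&d->dev, ioctl, argp);	// device ioctls
 *	if (r == -ENOIOCTLCMD)
 *		r = vhost_vring_ioctl(&d->dev, ioctl, argp);
 *	...
 *	vhost_dev_stop(&d->dev);			// on release()
 *	vhost_dev_cleanup(&d->dev, false);
 */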

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
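
/*
 * Typical kick-handler loop (sketch following drivers/vhost/test.c,
 * with error handling trimmed; "dev" is the owning vhost_dev):
 *
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;
 *		if (head == vq->num) {
 *			// Ring empty: re-enable notifications, then
 *			// re-check to close the race with a new kick.
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		// ... consume iov[0..out+in) ...
 *		vhost_add_used_and_signal(dev, vq, head, 0);
 *	}
 */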

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
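
/*
 * Example (sketch of how vhost.c reports an IOTLB miss to userspace;
 * the iotlb field layout follows struct vhost_msg in <linux/vhost.h>):
 *
 *	struct vhost_msg_node *node;
 *
 *	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
 *	if (!node)
 *		return -ENOMEM;
 *	node->msg.iotlb.iova = iova;
 *	node->msg.iotlb.perm = access;
 *	vhost_enqueue_msg(dev, &dev->read_list, node);
 */
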
unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)
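
/*
 * Example use (message text borrowed from vhost.c): log the problem
 * and signal the error eventfd, if userspace registered one.
 *
 *	vq_err(vq, "Guest moved used index from %u to %u",
 *	       last_avail_idx, vq->avail_idx);
 */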

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif
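
/*
 * How is_le gets set (sketch of vhost_init_is_le() in vhost.c): a
 * device that negotiated VIRTIO_F_VERSION_1 is always little-endian;
 * otherwise legacy native (or, under CONFIG_VHOST_CROSS_ENDIAN_LEGACY,
 * the userspace-requested) byte order applies.
 *
 *	if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
 *		vq->is_le = true;
 */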

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
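
/*
 * Example (the pattern used throughout vhost.c): ring fields live in
 * __virtioNN byte order, so every load/store goes through the
 * accessors above.
 *
 *	__virtio16 avail_idx;
 *
 *	if (__get_user(avail_idx, &vq->avail->idx))
 *		return -EFAULT;
 *	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 */
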
#endif