/* drivers/vhost/vhost.h (revision 0d456bad) */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_dev;

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head	  node;
	vhost_work_fn_t		  fn;
	wait_queue_head_t	  done;
	int			  flushing;
	unsigned		  queue_seq;
	unsigned		  done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table                table;
	wait_queue_head_t        *wqh;
	wait_queue_t              wait;
	struct vhost_work	  work;
	unsigned long		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
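
/*
 * Example (a sketch, not part of this header): a backend embeds a
 * vhost_work in its own state, binds a handler with vhost_work_init(),
 * and defers processing to the device's worker thread with
 * vhost_work_queue(). The handler then runs in the worker's context,
 * serialized with every other work item of the same device.
 * "my_state" and "my_handler" below are hypothetical names.
 *
 *	struct my_state {
 *		struct vhost_dev dev;
 *		struct vhost_work work;
 *	};
 *
 *	static void my_handler(struct vhost_work *work)
 *	{
 *		struct my_state *s = container_of(work, struct my_state, work);
 *		// Runs on s->dev.worker; never concurrently with itself.
 *	}
 *
 *	vhost_work_init(&s->work, my_handler);
 *	vhost_work_queue(&s->dev, &s->work);
 */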

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
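
/*
 * Typical vhost_poll lifecycle, as a sketch ("handle_rx" and the
 * socket file are illustrative; POLLIN is the readiness mask):
 *
 *	vhost_poll_init(&poll, handle_rx, POLLIN, &dev);
 *	vhost_poll_start(&poll, sock_file);	// arm: wait on the file
 *	// When the file becomes readable, poll.work is queued on the
 *	// device's worker and handle_rx() runs there.
 *	vhost_poll_stop(&poll);			// disarm
 *	vhost_poll_flush(&poll);		// wait out a queued handler
 */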

struct vhost_log {
	u64 addr;
	u64 len;
};

struct vhost_virtqueue;

struct vhost_ubuf_ref {
	struct kref kref;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
void vhost_ubuf_put(struct vhost_ubuf_ref *);
void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
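
/*
 * Sketch of the zerocopy reference lifecycle (the surrounding steps
 * are illustrative, not prescribed by this header):
 *
 *	ubufs = vhost_ubuf_alloc(vq, zcopy);	// one reference held by the vq
 *	// Each in-flight zerocopy buffer takes a kref_get(&ubufs->kref)
 *	// and releases it from its completion path via vhost_ubuf_put().
 *	vhost_ubuf_put_and_wait(ubufs);		// drop our reference, then
 *						// sleep on ubufs->wait until
 *						// all outstanding DMA is done
 */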

struct ubuf_info;

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or when a timeout fires. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid. */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	struct iovec *indirect;
	size_t vhost_hlen;
	size_t sock_hlen;
	struct vring_used_elem *heads;
	/* We use a kind of RCU to access the private pointer.
	 * All readers access it from the worker, which makes it possible to
	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * vhost_work execution acts instead of rcu_read_lock() and the end of
	 * vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use the virtqueue mutex. */
	void __rcu *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_ubuf_ref *ubufs;
};
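
/*
 * Sketch of the private_data convention described above, mirroring
 * how the net backend uses it (the "sock" pointer is illustrative):
 *
 *	// Writer (ioctl path), under the virtqueue mutex:
 *	mutex_lock(&vq->mutex);
 *	rcu_assign_pointer(vq->private_data, sock);
 *	mutex_unlock(&vq->mutex);
 *
 *	// Reader, only ever called from the worker thread:
 *	sock = rcu_dereference_check(vq->private_data, 1);
 *
 *	// Instead of synchronize_rcu(), the writer flushes the worker
 *	// so no reader can still be using the old pointer:
 *	vhost_poll_flush(&vq->poll);
 */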

struct vhost_dev {
	/* Readers use RCU to access the memory table pointer,
	 * the log base pointer and the features.
	 * Writers use the mutex below. */
	struct vhost_memory __rcu *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue *vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);
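
/*
 * Typical device lifecycle, as a sketch modeled on the net backend
 * ("N_VQS" and the kick handlers are placeholders):
 *
 *	// open():
 *	for (i = 0; i < N_VQS; i++)
 *		vqs[i].handle_kick = my_handle_kick;
 *	vhost_dev_init(&dev, vqs, N_VQS);
 *
 *	// unlocked_ioctl(): try device-level ioctls first, then fall
 *	// back to the per-ring ones:
 *	r = vhost_dev_ioctl(&dev, ioctl, argp);
 *	if (r == -ENOIOCTLCMD)
 *		r = vhost_vring_ioctl(&dev, ioctl, argp);
 *
 *	// release():
 *	vhost_dev_stop(&dev);
 *	vhost_dev_cleanup(&dev, false);
 */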

int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
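
/*
 * Canonical descriptor loop inside a handle_kick handler, condensed
 * from the pattern the in-tree backends use (error handling trimmed):
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_disable_notify(&dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(&dev, vq, vq->iov,
 *					 ARRAY_SIZE(vq->iov),
 *					 &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;			// bad descriptor
 *		if (head == vq->num) {		// ring empty
 *			if (unlikely(vhost_enable_notify(&dev, vq))) {
 *				vhost_disable_notify(&dev, vq);
 *				continue;	// raced with new buffers
 *			}
 *			break;			// sleep until next kick
 *		}
 *		// ... consume the out/in iovecs ...
 *		vhost_add_used_and_signal(&dev, vq, head, 0);
 *	}
 *	mutex_unlock(&vq->mutex);
 */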

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)
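
/*
 * vq_err() both logs through pr_debug() and, when userspace has
 * registered an error eventfd, signals it so the VMM can react.
 * Example use (the out/in counts are illustrative):
 *
 *	if (unlikely(out || in <= 0))
 *		vq_err(vq, "unexpected descriptor: out %d in %d\n", out, in);
 */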

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL),
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features;

	/* TODO: check that we are running from vhost_worker or dev mutex is
	 * held? */
	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
	return acked_features & (1 << bit);
}
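
/*
 * Example: branching on a negotiated feature bit, similar to what
 * the net backend does when sizing the virtio header (illustrative):
 *
 *	if (vhost_has_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
 *		hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	else
 *		hdr_len = sizeof(struct virtio_net_hdr);
 */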

void vhost_enable_zcopy(int vq);

#endif