/* drivers/vhost/vhost.h (revision 1fa6ac37) */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

struct vhost_device;

enum {
	/* Enough room for all fragments, the head, and the virtio net header. */
	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table                table;
	wait_queue_head_t        *wqh;
	wait_queue_t              wait;
	/* struct which will handle all actual work. */
	struct work_struct        work;
	unsigned long		  mask;
};

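/*
 * vhost_poll ties a file's wakeups to a vhost work item: vhost_poll_init()
 * binds the handler and the poll event mask, vhost_poll_start() registers on
 * the file's wait queue, vhost_poll_queue() schedules the handler directly,
 * vhost_poll_flush() waits for queued work to finish, and vhost_poll_stop()
 * unregisters from the wait queue.
 *
 * Minimal sketch (illustrative only, loosely modeled on the vhost-net TX
 * socket poll; "handle_tx_net" and "sock" stand in for the caller's own
 * work function and backend file):
 *
 *	vhost_poll_init(&poll, handle_tx_net, POLLOUT);
 *	vhost_poll_start(&poll, sock->file);
 *	...
 *	vhost_poll_stop(&poll);
 *	vhost_poll_flush(&poll);
 */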
void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
		     unsigned long mask);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

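/*
 * A single log entry: a range of guest memory the host wrote into. Entries
 * gathered while processing a request are handed to vhost_log_write() so
 * userspace can track dirty pages when VHOST_F_LOG_ALL is acked (e.g. during
 * live migration).
 */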
struct vhost_log {
	u64 addr;
	u64 len;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	work_func_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec indirect[VHOST_NET_MAX_SG];
	struct iovec iov[VHOST_NET_MAX_SG];
	struct iovec hdr[VHOST_NET_MAX_SG];
	size_t hdr_size;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from a work item, which makes it possible to
	 * flush the workqueue instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * work item execution acts instead of rcu_read_lock() and the end of
	 * work item execution acts instead of rcu_read_unlock().
	 * Writers use the virtqueue mutex (see the example below this struct). */
	void *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log log[VHOST_NET_MAX_SG];
};
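
/*
 * Illustrative sketch only of the private_data publish/consume pattern
 * described above, loosely modeled on how vhost-net swaps its backend
 * socket ("sock" stands in for the backend pointer).
 *
 * Writer (ioctl path) publishes under the virtqueue mutex, then flushes to
 * wait out any reader still running:
 *
 *	mutex_lock(&vq->mutex);
 *	rcu_assign_pointer(vq->private_data, sock);
 *	mutex_unlock(&vq->mutex);
 *	vhost_poll_flush(&vq->poll);
 *
 * Reader picks it up from inside the work item, with no rcu_read_lock():
 *
 *	struct socket *sock = rcu_dereference(vq->private_data);
 */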

struct vhost_dev {
	/* Readers use RCU to access the memory table pointer,
	 * log base pointer and features.
	 * Writers use the mutex below. */
	struct vhost_memory *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue *vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
};

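/*
 * Device-level lifecycle shared by vhost backends (e.g. vhost-net):
 * vhost_dev_init() attaches the backend's virtqueue array, vhost_dev_ioctl()
 * handles the generic VHOST_* ioctls (owner, memory table, dirty log, vring
 * setup), vhost_dev_check_owner() verifies the caller is the owning process,
 * vhost_dev_reset_owner() returns the device to a pristine state, and
 * vhost_dev_cleanup() releases everything. The *_access_ok() helpers check
 * that the userspace ring and log pointers are safe to use.
 */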
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

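/*
 * vhost_get_vq_desc() converts the next available descriptor chain into an
 * iovec: the first *out_num entries are buffers the guest filled for us to
 * read, the following *in_num entries are buffers for us to write. It returns
 * the head index to hand back via vhost_add_used(), or vq->num if nothing is
 * available. vhost_discard_vq_desc() rewinds the last fetch so the descriptor
 * will be seen again.
 *
 * Rough shape of a consumer loop (illustrative sketch; "head", "out", "in"
 * and "len" are the caller's own locals):
 *
 *	head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head == vq->num)
 *		break;
 *	... use vq->iov[0 .. out + in) ...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 */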
unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
			   struct iovec iov[], unsigned int iov_count,
			   unsigned int *out_num, unsigned int *in_num,
			   struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *);

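/*
 * Completion side: vhost_add_used() posts one finished buffer (head index and
 * written length) to the used ring, and vhost_signal() notifies the guest
 * through the call eventfd unless the guest asked not to be interrupted.
 * vhost_disable_notify()/vhost_enable_notify() control whether the guest
 * kicks us for new buffers; vhost_enable_notify() returns true if buffers
 * appeared while notifications were off, so the caller should poll once more.
 */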
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int head, int len);
void vhost_disable_notify(struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_virtqueue *);

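/*
 * Flush the collected log entries out to the userspace dirty log; len is the
 * total number of bytes the request wrote. Only meaningful when dirty logging
 * (VHOST_F_LOG_ALL) is in use, e.g. while memory is being migrated.
 */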
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

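/*
 * Module init/exit for the vhost core: at this revision they create and
 * destroy the shared workqueue that runs all poll work.
 */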
int vhost_init(void);
void vhost_cleanup(void);

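/*
 * Report a virtqueue error: emit a debug message and, if userspace registered
 * an error eventfd for this queue, signal it.
 */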
#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)

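/* Feature bits the vhost core is prepared to ack via VHOST_SET_FEATURES. */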
enum {
	VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1 << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1 << VHOST_F_LOG_ALL) |
			 (1 << VHOST_NET_F_VIRTIO_NET_HDR),
};

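/* Has userspace acked the given feature bit (VHOST_SET_FEATURES)? */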
static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features = rcu_dereference(dev->acked_features);
	return acked_features & (1 << bit);
}

#endif /* _VHOST_H */