/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node node;
	vhost_work_fn_t fn;
	unsigned long flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_entry_t wait;
	struct vhost_work work;
	__poll_t mask;
	struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);

struct vhost_log {
	u64 addr;
	u64 len;
};

/* Accessors used by the umem interval tree. */
#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

struct vhost_umem_node {
	struct rb_node rb;
	struct list_head link;
	__u64 start;
	__u64 last;
	__u64 size;
	__u64 userspace_addr;
	__u32 perm;
	__u32 flags_padding;
	__u64 __subtree_last;
};

struct vhost_umem {
	struct rb_root_cached umem_tree;
	struct list_head umem_list;
	int numem;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	/* Cached IOTLB mappings of the ring metadata (desc/avail/used). */
	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;		/* Eventfd the guest kicks to notify us */
	struct eventfd_ctx *call_ctx;	/* Eventfd we signal to interrupt the guest */
	struct eventfd_ctx *error_ctx;	/* Eventfd signalled on vq errors (see vq_err) */
	struct eventfd_ctx *log_ctx;	/* Eventfd signalled on log updates */

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;	/* Busy-poll timeout, in microseconds */
};
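/*
 * Illustrative sketch, not part of this header: how a backend wires up
 * kick handling for a queue.  vhost_dev_init() calls
 * vhost_poll_init(&vq->poll, vq->handle_kick, EPOLLIN, dev) for every
 * queue that has a handle_kick, and once userspace installs the kick
 * eventfd (VHOST_SET_VRING_KICK) the poll is armed on it, so the
 * handler runs on the device's worker thread:
 *
 *	// my_handle_kick is a hypothetical backend handler
 *	static void my_handle_kick(struct vhost_work *work)
 *	{
 *		struct vhost_virtqueue *vq =
 *			container_of(work, struct vhost_virtqueue, poll.work);
 *		// drain the available ring under vq->mutex ...
 *	}
 *
 *	vhost_poll_start(&vq->poll, vq->kick);	// arm on the kick eventfd
 */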
struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;	/* Lockless list of pending vhost_work */
	struct task_struct *worker;	/* Kernel thread that runs queued work */
	struct vhost_umem *umem;
	struct vhost_umem *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
};

void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);
int vq_iotlb_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
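/*
 * Illustrative drain loop, a minimal sketch of how backends such as
 * vhost-net combine the accessors above inside a kick handler.  Actual
 * I/O and error handling are elided; the 0 passed as len suits a
 * TX-style queue (an RX-style queue would pass the number of bytes
 * written into the buffer):
 *
 *	mutex_lock(&vq->mutex);
 *	vhost_disable_notify(vq->dev, vq);
 *	for (;;) {
 *		unsigned int out, in;
 *		int head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *					     &out, &in, NULL, NULL);
 *		if (head < 0)
 *			break;			// error, e.g. -EFAULT
 *		if (head == vq->num) {		// ring is empty
 *			// Re-enable guest notification, then re-check to
 *			// close the race with a concurrent avail update.
 *			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
 *				vhost_disable_notify(vq->dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		// ... consume the out + in iovecs in vq->iov here ...
 *		vhost_add_used_and_signal(vq->dev, vq, head, 0);
 *	}
 *	mutex_unlock(&vq->mutex);
 */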
#define vq_err(vq, fmt, ...) do {				\
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);		\
		if ((vq)->error_ctx)				\
			eventfd_signal((vq)->error_ctx, 1);	\
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
#endif /* _VHOST_H */