// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh: ");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}
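
/*
 * Worked example (illustrative): *last_avail_idx is a free-running 16-bit
 * counter, while the slot in the avail ring is picked with a power-of-2
 * mask.  With vring.num == 8:
 *
 *	*last_avail_idx == 0xfffe  ->  i = 0xfffe & 7 == 6
 *
 * After the increment it becomes 0xffff, then wraps to 0, matching the
 * guest's own mod-2^16 arithmetic on avail->idx.
 */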

/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}
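
/*
 * Consumption sketch (illustrative): the iov state persists across calls,
 * so a second transfer continues where the first stopped.  E.g. a single
 * 1000-byte element, pulled in a 400-byte then a 600-byte call:
 *
 *	1st call: iov_base += 400, iov_len = 600, consumed = 400
 *	2nd call: the element is drained, so it is restored to its original
 *		  base/len (1000 bytes) and iov->i advances to the next one.
 */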

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
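
/*
 * Truncation example (illustrative): with a cached range covering
 * [0x1000, 0x1fff] (end_incl == 0x1fff), checking addr == 0x1f00 with
 * *len == 0x200 overshoots the range, so *len is reduced to
 * 0x1fff + 1 - 0x1f00 == 0x100; the caller then re-checks the remainder
 * against the next range.
 */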

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = 0;
	if (wiov)
		wiov->i = wiov->used = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}

static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);
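
/*
 * Setup sketch (illustrative only, not part of this API): a host-side
 * driver wiring a vringh to a ring whose parts live in user memory.  The
 * u_desc/u_avail/u_used pointers and the feature choice are hypothetical.
 *
 *	struct vringh vrh;
 *	u64 features = (1ULL << VIRTIO_F_VERSION_1) |
 *		       (1ULL << VIRTIO_RING_F_EVENT_IDX);
 *	int err;
 *
 *	err = vringh_init_user(&vrh, features, 256, true,
 *			       u_desc, u_avail, u_used);
 *	if (err)
 *		return err;	(only num is validated here; the pointers
 *				 are the caller's responsibility)
 */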

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
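
/*
 * Consume-loop sketch (illustrative only): how a caller might drain the
 * ring.  'my_getrange' is a hypothetical range-lookup callback; the iov
 * init/cleanup helpers are assumed from <linux/vringh.h>.
 *
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *
 *	while ((err = vringh_getdesc_user(&vrh, &riov, &wiov,
 *					  my_getrange, &head)) == 1) {
 *		... pull requests from riov, push replies into wiov,
 *		    then vringh_complete_user(&vrh, head, written) ...
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */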

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 *	 vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
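
/*
 * Completion/notification sketch (illustrative only): after finishing one
 * or more buffers, only signal the guest when it asked to be told.  The
 * 'call_ctx' eventfd is a hypothetical stand-in for however the caller
 * reaches the guest's interrupt.
 *
 *	vringh_complete_user(&vrh, head, written);
 *	if (vringh_need_notify_user(&vrh) > 0)
 *		eventfd_signal(call_ctx, 1);
 */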

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 *	 vringh_getdesc_kern() calls to undo).
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
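
/*
 * Service-loop sketch (illustrative only): a kernelspace consumer tying
 * the pieces together.  'struct my_req', 'struct my_reply' and
 * handle_request() are hypothetical; error handling is elided.
 *
 *	struct vringh_kiov riov, wiov;
 *	struct my_req req;
 *	struct my_reply reply;
 *	ssize_t len;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1) {
 *		len = vringh_iov_pull_kern(&riov, &req, sizeof(req));
 *		handle_request(&req, len, &reply);
 *		vringh_iov_push_kern(&wiov, &reply, sizeof(reply));
 *		vringh_complete_kern(&vrh, head, sizeof(reply));
 *	}
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		... kick the other side ...
 */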

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0, last = addr + len - 1;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		/* The requested region keeps its original end as addr advances. */
		map = vhost_iotlb_itree_first(iotlb, addr, last);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}
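
/*
 * Translation example (illustrative): given an IOTLB entry mapping the
 * region [0x1000, 0x2fff] to physical address 0x80000, a request for
 * addr == 0x1800, len == 0x100 yields
 *
 *	pa = 0x80000 + 0x1800 - 0x1000 = 0x80800
 *	bv_page = pfn_to_page(0x80800 >> PAGE_SHIFT)
 *	bv_offset = 0x80800 & (PAGE_SIZE - 1), bv_len = 0x100
 *
 * Requests spanning several entries produce one bio_vec per entry.
 */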

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the descriptor pointer (an IOVA, translated via the IOTLB).
 * @avail: the avail ring pointer (likewise an IOVA).
 * @used: the used ring pointer (likewise an IOVA).
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - set the IOTLB to be used by a vringh.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);
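
/*
 * IOTLB setup sketch (illustrative only): a vDPA-style backend binding a
 * translation table to the ring.  The limit, iova/pa/size values and ring
 * layout here are hypothetical placeholders.
 *
 *	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
 *
 *	vhost_iotlb_add_range(iotlb, iova, iova + size - 1,
 *			      pa, VHOST_MAP_RW);
 *	vringh_init_iotlb(&vrh, features, 256, true, desc, avail, used);
 *	vringh_set_iotlb(&vrh, iotlb);
 */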

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with
 * IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 *	 vringh_getdesc_iotlb() calls to undo).
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);

#endif

MODULE_LICENSE("GPL");