// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
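	/*
	 * (addr + *len == 0 was handled above as the to-end-of-memory case,
	 * so any other wrap means the descriptor length overflows the
	 * 64-bit address space.)
	 */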
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", desc->len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}
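
/*
 * Core descriptor-chain walk: starting from head index @i, copy each
 * descriptor (following indirect tables via move_to_indirect()), range-check
 * it with @rcheck/@getrange, and append the resulting address/length pairs
 * to @riov (readable descriptors) and @wiov (writable descriptors), growing
 * them with resize_iovec() as needed.
 */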
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	if (riov)
		riov->i = riov->used = 0;
	else if (wiov)
		wiov->i = wiov->used = 0;
	else
		/* You must want something! */
		BUG();

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
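			/*
			 * up_next is the outer table's 'next' index saved by
			 * move_to_indirect(): -1 if we were never in an
			 * indirect table, -2 if the indirect descriptor ended
			 * the chain.
			 */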
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}

static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
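		/*
		 * Writing 0 clears VRING_USED_F_NO_NOTIFY, telling the
		 * other side to resume notifying us.
		 */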
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used)
{
	/* Sane power of 2 please! */
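	/*
	 * (num & (num - 1)) is non-zero unless num is a power of 2; the
	 * ring is also capped at 0xffff entries.
	 */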
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy to.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
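 *
 * The riov is consumed in place: the current element's iov_base/iov_len are
 * advanced, so repeated calls continue from where the previous one stopped.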
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
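 *
 * A typical completion path (sketch; the eventfd used for signalling below
 * is an assumption of the example, not something this file provides) is:
 *
 *	vringh_complete_user(vrh, head, len);
 *	if (vringh_need_notify_user(vrh) == 1)
 *		eventfd_signal(call_ctx, 1);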
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
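 *
 * A minimal consumption loop (sketch; handle_buf(), written and
 * notify_other() are placeholders, and error handling is elided) might be:
 *
 *	while (vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL) == 1) {
 *		written = handle_buf(&riov, &wiov);
 *		vringh_complete_kern(vrh, head, written);
 *	}
 *	if (vringh_need_notify_kern(vrh) > 0)
 *		notify_other();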
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy to.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_kern() to undo).
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
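 *
 * The usual pattern (sketch) is to disable notifications while draining the
 * ring, then re-enable and re-check before going idle:
 *
 *	vringh_notify_disable_kern(vrh);
 *	... drain the ring ...
 *	if (!vringh_notify_enable_kern(vrh))
 *		... more buffers arrived, keep draining ...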
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}
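
/*
 * The helpers below mirror the _user/_kern variants but bounce descriptor,
 * payload and used-ring copies through copy_{from,to}_iotlb(), returning
 * 0 on success or -EFAULT on a short copy.
 */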
static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the descriptor pointer (an address translated via the IOTLB).
 * @avail: the avail pointer (an address translated via the IOTLB).
 * @used: the used pointer (an address translated via the IOTLB).
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - set the IOTLB used to translate a vringh's accesses.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy to.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_iotlb() to undo).
 *
 * The next vringh_get_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);

MODULE_LICENSE("GPL");