// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
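
/*
 * Illustrative sketch (not used by this file): a minimal getrange()
 * callback of the kind range_check() above expects.  It must fill in an
 * inclusive [start, end_incl] range containing @addr, plus the offset to
 * add to a guest address to get a pointer the caller's accessors can use.
 * The single flat region and the MY_GUEST_SIZE / MY_GUEST_BASE_UADDR names
 * are assumptions for the example only.
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		if (addr >= MY_GUEST_SIZE)
 *			return false;
 *		r->start = 0;
 *		r->end_incl = MY_GUEST_SIZE - 1;
 *		r->offset = MY_GUEST_BASE_UADDR;
 *		return true;
 *	}
 */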

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", desc->len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	if (riov)
		riov->i = riov->used = 0;
	else if (wiov)
		wiov->i = wiov->used = 0;
	else
		/* You must want something! */
		BUG();

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}
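
/*
 * Worked example for __vringh_iov() above (the chain itself is
 * hypothetical): given a 3-descriptor chain of a 12-byte readable header,
 * a 1500-byte readable payload and a 4096-byte writable response buffer,
 * the function leaves riov->used == 2 (the two readable elements) and
 * wiov->used == 1 (the writable one), with each iov_base translated via
 * the range offset, ready for the vringh_iov_pull_*()/push_*() helpers
 * further down.
 */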

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}
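
/*
 * Worked example for the wrap-around split above (numbers are purely
 * illustrative): with vring.num == 8, off == 6 and num_used == 4, part
 * is 2, so the first two used elements land in ring[6] and ring[7] and
 * the remaining two in ring[0] and ring[1]; used->idx still advances by
 * the full num_used.
 */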

static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
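
/*
 * Usage sketch for the userspace API above (purely illustrative; the
 * my_getrange() callback, the req/resp buffers and the "kick the guest"
 * step are assumptions, not part of this file).  vringh_iov_init() and
 * vringh_iov_cleanup() come from <linux/vringh.h>.
 *
 *	struct vringh vrh;
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_init_user(&vrh, features, num, true, desc, avail, used);
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *
 *	err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
 *	if (err == 1) {
 *		vringh_iov_pull_user(&riov, &req, sizeof(req));
 *		... service the request, fill resp ...
 *		vringh_iov_push_user(&wiov, &resp, resp_len);
 *		vringh_complete_user(&vrh, head, resp_len);
 *		if (vringh_need_notify_user(&vrh) > 0)
 *			... kick the guest (e.g. signal an eventfd) ...
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */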

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_kern() to undo).
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
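
/*
 * Usage sketch for the kernelspace API above (illustrative only; the vrh
 * setup, the req/resp buffers and the notification hook are assumptions).
 * vringh_kiov_init() and vringh_kiov_cleanup() come from <linux/vringh.h>.
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1) {
 *		vringh_iov_pull_kern(&riov, &req, sizeof(req));
 *		... handle the request, fill resp ...
 *		vringh_iov_push_kern(&wiov, &resp, resp_len);
 *		vringh_complete_kern(&vrh, head, resp_len);
 *	}
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		... notify the driver side ...
 *
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */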

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - set the IOTLB used to translate a vring's addresses.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with
 * IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_iotlb() to undo).
 *
 * The next vringh_get_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);
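
/*
 * Usage sketch for the IOTLB variant above (illustrative only; the
 * vdpa-style device, its *_iova values, the iotlb limit of 2048 and the
 * buf/written names are assumptions).  The ring addresses are IOVAs cast
 * to pointers; every access goes through the IOTLB set below.
 *
 *	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_init_iotlb(&vrh, features, num, false,
 *			  (struct vring_desc *)(uintptr_t)desc_iova,
 *			  (struct vring_avail *)(uintptr_t)avail_iova,
 *			  (struct vring_used *)(uintptr_t)used_iova);
 *	vringh_set_iotlb(&vrh, iotlb);
 *
 *	while (vringh_getdesc_iotlb(&vrh, &riov, &wiov, &head,
 *				    GFP_ATOMIC) == 1) {
 *		vringh_iov_pull_iotlb(&vrh, &riov, buf, sizeof(buf));
 *		... process, optionally push a response via &wiov ...
 *		vringh_complete_iotlb(&vrh, head, written);
 *	}
 */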

#endif

MODULE_LICENSE("GPL");