// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}
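
/*
 * Example (illustrative sketch, not part of the driver): because
 * vringh_iov_xfer() restores each iov element to its original base and
 * length once it is fully consumed, a caller may pull a descriptor chain
 * in several partial reads.  The read_all() helper below is hypothetical;
 * vringh_iov_pull_kern() is defined later in this file:
 *
 *	static ssize_t read_all(struct vringh_kiov *riov, void *buf,
 *				size_t room)
 *	{
 *		size_t got = 0;
 *
 *		while (got < room) {
 *			ssize_t r = vringh_iov_pull_kern(riov, buf + got,
 *							 room - got);
 *			if (r < 0)
 *				return r;
 *			if (r == 0)
 *				break;
 *			got += r;
 *		}
 *		return got;
 *	}
 */
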
/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	/* struct iovec and struct kvec share a layout here; see the
	 * BUILD_BUG_ONs in vringh_getdesc_user().
	 */
	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc_array(iov->iov, new_num,
				     sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}
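
/*
 * Example (illustrative sketch): a getrange callback, as consumed by
 * range_check() and slow_copy() above, reports the translatable region
 * containing @addr and the offset that turns a guest address into one of
 * our own pointers.  For a single contiguous region it could look like
 * this (guest_base, guest_size and host_base are hypothetical):
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		if (addr - guest_base >= guest_size)
 *			return false;
 *		r->start = guest_base;
 *		r->end_incl = guest_base + guest_size - 1;
 *		r->offset = host_base - guest_base;
 *		return true;
 *	}
 */
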
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, indirect_count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = riov->consumed = 0;
	if (wiov)
		wiov->i = wiov->used = wiov->consumed = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		/* Count main-table and indirect-table descriptors
		 * separately: an indirect table may legitimately hold
		 * more entries than the vring itself.
		 */
		if (up_next == -1)
			count++;
		else
			indirect_count++;

		if (count > vrh->vring.num || indirect_count > desc_max) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
				indirect_count = 0;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}
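
/*
 * Worked example: for an available head of 3 and a chain
 *	desc[3] = 4096 bytes, readable,  next -> 7
 *	desc[7] = 4096 bytes, readable,  next -> 2
 *	desc[2] = 1024 bytes, writable,  end of chain
 * __vringh_iov() leaves riov with two entries (the 8192 readable bytes)
 * and wiov with one entry (the 1024 writable bytes).  A readable
 * descriptor appearing after a writable one would instead fail with
 * -EINVAL, matching the virtio rule that device-writable descriptors
 * follow device-readable ones.
 */
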
static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}
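
/*
 * Worked example of the wrap-around split above: with vring.num == 8,
 * last_used_idx + completed == 14 and num_used == 4, off is 6, so two
 * used elements are written at ring[6..7] and the remaining two wrap to
 * ring[0..1]; only then is used->idx advanced by 4 in a single update.
 */
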
static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}
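
/*
 * Example (illustrative sketch): the usual service pattern keeps
 * notifications disabled while draining the ring, then re-enables them
 * and relies on the re-check inside __vringh_notify_enable() to close
 * the race with buffers that "slipped one in".  process_one_request()
 * is hypothetical:
 *
 *	for (;;) {
 *		vringh_notify_disable_kern(vrh);
 *		while (process_one_request(vrh) > 0)
 *			;
 *		if (vringh_notify_enable_kern(vrh))
 *			break;
 *	}
 *
 * A false return from vringh_notify_enable_kern() means more buffers
 * arrived meanwhile, so the loop simply goes around again.
 */
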
/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 * When you don't need riov and wiov anymore, you should clean them up by
 * calling vringh_iov_cleanup() to release the memory, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work. */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
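
/*
 * Example (illustrative sketch): a typical consumer of a userspace ring.
 * my_getrange() is the kind of callback sketched near range_check();
 * buf and reply_len are hypothetical:
 *
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *	err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
 *	if (err == 1) {
 *		ssize_t got = vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *
 *		... handle the request, push the reply into wiov ...
 *		vringh_complete_user(&vrh, head, reply_len);
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */
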
/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 *	 vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);
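
/*
 * Example (illustrative sketch): after one or more completions, ask
 * whether the other side needs a notification.  In vhost-style setups
 * the kick is typically an eventfd; call_ctx here is hypothetical:
 *
 *	if (vringh_complete_user(&vrh, head, written) == 0 &&
 *	    vringh_need_notify_user(&vrh) > 0)
 *		eventfd_signal(call_ctx, 1);
 */
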
/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);
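
/*
 * Example (illustrative sketch): one way to wire a vringh to ring memory
 * we allocate ourselves.  vring_init() lays desc/avail/used out in a
 * single buffer exactly as a guest would:
 *
 *	struct vring vring;
 *	void *mem = kzalloc(vring_size(num, PAGE_SIZE), GFP_KERNEL);
 *
 *	if (!mem)
 *		return -ENOMEM;
 *	vring_init(&vring, num, mem, PAGE_SIZE);
 *	err = vringh_init_kern(&vrh, features, num, true,
 *			       vring.desc, vring.avail, vring.used);
 */
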
/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 * When you don't need riov and wiov anymore, you should clean them up by
 * calling vringh_kiov_cleanup() to release the memory, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 *	 vringh_getdesc_kern() calls to undo).
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);
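
/*
 * Example (illustrative sketch): a minimal in-kernel request loop built
 * from the helpers above; handle_request() is hypothetical:
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1) {
 *		u32 wlen = handle_request(&riov, &wiov);
 *
 *		vringh_complete_kern(&vrh, head, wlen);
 *	}
 *	vringh_kiov_cleanup(&riov);
 *	vringh_kiov_cleanup(&wiov);
 */
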
/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	spin_lock(vrh->iotlb_lock);

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	spin_unlock(vrh->iotlb_lock);

	return ret;
}
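
/*
 * Worked example: a request for 8192 bytes at IOVA 0x1000, with the
 * IOTLB mapping [0x0000, 0x1fff] and [0x2000, 0x3fff] to two unrelated
 * host pages (installed beforehand with vhost_iotlb_add_range()),
 * yields two bio_vecs: the 4096 bytes remaining in the first mapping,
 * then the first 4096 bytes of the second.  A range with no mapping
 * fails with -EINVAL, and one mapped without the needed permission
 * fails with -EPERM, as above.
 */
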
static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, size);
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - set the IOTLB used to translate a vringh's addresses.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 * @iotlb_lock: spinlock to synchronize the iotlb accesses
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb,
		      spinlock_t *iotlb_lock)
{
	vrh->iotlb = iotlb;
	vrh->iotlb_lock = iotlb_lock;
}
EXPORT_SYMBOL(vringh_set_iotlb);
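
/*
 * Example (illustrative sketch): a vDPA-style backend pairs the two
 * calls above; the iotlb and its lock normally live in the parent
 * device ('dev' here is hypothetical):
 *
 *	err = vringh_init_iotlb(&vrh, features, num, false,
 *				desc, avail, used);
 *	if (err)
 *		return err;
 *	vringh_set_iotlb(&vrh, dev->iotlb, &dev->iotlb_lock);
 */
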
/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with
 * IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you can reuse riov and wiov with subsequent calls. Content is
 * overwritten and memory reallocated if more space is needed.
 * When you don't need riov and wiov anymore, you should clean them up by
 * calling vringh_kiov_cleanup() to release the memory, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. how many
 *	 vringh_getdesc_iotlb() calls to undo).
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);

#endif

MODULE_LICENSE("GPL");