// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);
	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}
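/*
 * Worked example (illustrative, not from the original source): with a
 * range covering 0x1000-0x1fff inclusive, checking addr 0x1800 with
 * *len = 0x1000 would run past end_incl, so range_check() truncates
 * *len to 0x1fff + 1 - 0x1800 = 0x800 and still returns true; the
 * caller notices the shortened length and re-checks the remainder.
 */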
static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", desc->len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}
static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}
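/*
 * Illustrative note (not from the original source): slow_copy() only
 * runs when an indirect descriptor table straddles a range boundary,
 * so a single 16-byte struct vring_desc may itself cross ranges.
 * E.g. with ranges [..., 0x1007] and [0x1008, ...], a descriptor at
 * 0x1000 is fetched as two 8-byte copies, re-running the range check
 * (and thus the address translation) for the second half.
 */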
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	if (riov)
		riov->i = riov->used = 0;
	else if (wiov)
		wiov->i = wiov->used = 0;
	else
		/* You must want something! */
		BUG();

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (count++ == vrh->vring.num) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->i)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}
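/*
 * Worked example (illustrative, not from the original source): for a
 * ring with num = 8, off = 6 and num_used = 4, __vringh_complete()
 * splits the writes: two used elements go to ring[6] and ring[7],
 * the remaining two wrap to ring[0] and ring[1], and the single
 * used->idx update after the write barrier publishes all four at once.
 */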
static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}
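/*
 * Worked example (illustrative, not from the original source): with
 * event indices, vring_need_event(used_event, new, old) fires when
 * used_event lies in the window [old, new) mod 2^16.  So if
 * last_used_idx = 10 and completed = 3, new = 13, and a used_event of
 * 10, 11 or 12 makes __vringh_need_notify() return 1.
 */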
static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}
static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);
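/*
 * Example setup (an illustrative sketch, not taken from the original
 * source; my_getrange and its 1:1 mapping are hypothetical):
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		// accept the whole address space, mapped 1:1
 *		r->start = 0;
 *		r->end_incl = -1ULL;
 *		r->offset = 0;
 *		return true;
 *	}
 *
 *	err = vringh_init_user(&vrh, features, 256, true,
 *			       desc, avail, used);
 */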
/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;
	/* We need the layouts to be the identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
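/*
 * Example service loop (an illustrative sketch, not taken from the
 * original source; my_getrange, buf and bytes_written are
 * hypothetical, and vringh_iov_init() comes from linux/vringh.h):
 *
 *	struct vringh_iov riov, wiov;
 *	char buf[256];
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, NULL, 0);
 *	vringh_iov_init(&wiov, NULL, 0);
 *
 *	err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
 *	if (err == 1) {
 *		ssize_t got = vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *		// ... handle got bytes, push any reply into wiov ...
 *		vringh_complete_user(&vrh, head, bytes_written);
 *	}
 */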
/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_user() to undo).
 *
 * The next vringh_get_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);
/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
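/*
 * Typical completion pattern (an illustrative sketch, not taken from
 * the original source; notify_guest() stands in for however the
 * caller signals the other side, e.g. an eventfd):
 *
 *	vringh_complete_user(&vrh, head, bytes_written);
 *	if (vringh_need_notify_user(&vrh) > 0)
 *		notify_guest();
 */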
/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}
	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
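/*
 * Example kernel-side loop (an illustrative sketch, not taken from
 * the original source; vringh_kiov_init() comes from linux/vringh.h,
 * while process() and notify_other_side() are hypothetical):
 *
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, NULL, 0);
 *	vringh_kiov_init(&wiov, NULL, 0);
 *
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1) {
 *		u32 written = process(&riov, &wiov);
 *		vringh_complete_kern(&vrh, head, written);
 *	}
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		notify_other_side();
 */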
/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_get_kern() to undo).
 *
 * The next vringh_get_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);
/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}
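
/*
 * Editorial note (added commentary): getu16_iotlb()/putu16_iotlb() above
 * deliberately bypass copy_{from,to}_iotlb() so the 16-bit index moves in
 * a single READ_ONCE()/WRITE_ONCE() of the mapped location; a byte-wise
 * iov_iter copy could tear while the other side updates the index
 * concurrently.  The core of the read, spelled out (assuming "iov"
 * already holds the single translated bio_vec):
 *
 *	void *kaddr = kmap_atomic(iov.bv_page);
 *	__virtio16 raw = READ_ONCE(*(__virtio16 *)(kaddr + iov.bv_offset));
 *	kunmap_atomic(kaddr);
 *	*val = vringh16_to_cpu(vrh, raw);
 */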

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, size);
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - associate an IOTLB with a vringh.
 * @vrh: the vring
 * @iotlb: the iotlb used to translate addresses for this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);
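
/*
 * Editorial sketch (assumed setup code; the "vdev" fields are
 * hypothetical): a backend typically initializes the vringh once and
 * then attaches the IOTLB that will translate all ring and buffer
 * addresses:
 *
 *	err = vringh_init_iotlb(&vdev->vrh, vdev->features, vdev->num,
 *				false, vdev->desc, vdev->avail, vdev->used);
 *	if (err)
 *		return err;
 *	vringh_set_iotlb(&vdev->vrh, vdev->iotlb);
 */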

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy to.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);
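
/*
 * Editorial sketch of a full request cycle (assumed usage; "req"/"resp"
 * are hypothetical buffers, and freeing of any iov arrays allocated by
 * vringh_getdesc_iotlb() is omitted):
 *
 *	struct vringh_kiov riov = {}, wiov = {};
 *	ssize_t n;
 *	u16 head;
 *	int err;
 *
 *	err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_ATOMIC);
 *	if (err <= 0)
 *		return err;
 *	(err == 0 means the ring was empty, < 0 an invalid ring)
 *	n = vringh_iov_pull_iotlb(vrh, &riov, &req, sizeof(req));
 *	(... handle the request, fill resp ...)
 *	n = vringh_iov_push_iotlb(vrh, &wiov, &resp, sizeof(resp));
 *	vringh_complete_iotlb(vrh, head, n);
 */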

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. num
 *	 vringh_getdesc_iotlb() calls to undo).
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with a descriptor: publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb().
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno on error, 0 if we don't need to tell the other side, or
 * 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);
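
/*
 * Editorial sketch (hypothetical "kick_guest()" callback): after one or
 * more completions, only signal the other side if it asked for it:
 *
 *	ret = vringh_need_notify_iotlb(vrh);
 *	if (ret < 0)
 *		return ret;
 *	if (ret)
 *		kick_guest(vdev);
 */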

#endif

MODULE_LICENSE("GPL");