Lines matching references to "vrh" in the Linux kernel's host-side virtio ring helpers (drivers/vhost/vringh.c)

38 static inline int __vringh_get_head(const struct vringh *vrh,  in __vringh_get_head()  argument
39 int (*getu16)(const struct vringh *vrh, in __vringh_get_head() argument
46 err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx); in __vringh_get_head()
49 &vrh->vring.avail->idx); in __vringh_get_head()
54 return vrh->vring.num; in __vringh_get_head()
57 virtio_rmb(vrh->weak_barriers); in __vringh_get_head()
59 i = *last_avail_idx & (vrh->vring.num - 1); in __vringh_get_head()
61 err = getu16(vrh, &head, &vrh->vring.avail->ring[i]); in __vringh_get_head()
64 *last_avail_idx, &vrh->vring.avail->ring[i]); in __vringh_get_head()
68 if (head >= vrh->vring.num) { in __vringh_get_head()
70 head, vrh->vring.num); in __vringh_get_head()
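
The __vringh_get_head() matches above show the split-ring head fetch: read avail->idx through the access-method callback, use vrh->vring.num as the "ring empty" sentinel, order the index read before the slot read with virtio_rmb(), mask the running index by (num - 1) (split rings are power-of-two sized), and reject out-of-range heads. A freestanding sketch reconstructed from these matches; fetch_avail_head() is a hypothetical name:

#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/errno.h>

static int fetch_avail_head(const struct vringh *vrh, u16 *last_avail_idx,
                            int (*getu16)(const struct vringh *vrh,
                                          u16 *val, const __virtio16 *p))
{
        u16 avail_idx, head, i;
        int err;

        err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
        if (err)
                return err;

        if (*last_avail_idx == avail_idx)
                return vrh->vring.num;  /* empty: num doubles as the sentinel */

        /* Only read the ring slot after we have seen the index update. */
        virtio_rmb(vrh->weak_barriers);

        i = *last_avail_idx & (vrh->vring.num - 1);
        err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
        if (err)
                return err;

        if (head >= vrh->vring.num)
                return -EINVAL;         /* guest supplied a bogus head */

        (*last_avail_idx)++;
        return head;
}
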
107 static inline ssize_t vringh_iov_xfer(struct vringh *vrh, in vringh_iov_xfer() argument
110 int (*xfer)(const struct vringh *vrh, in vringh_iov_xfer() argument
120 err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen); in vringh_iov_xfer()
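
vringh_iov_xfer() then drains the iov array element by element, copying min(element length, remaining) per step through the xfer() callback. A simplified sketch; the real function additionally tracks iov->consumed so a partially drained element can be restored for reuse:

#include <linux/vringh.h>
#include <linux/minmax.h>

static ssize_t iov_xfer_sketch(struct vringh *vrh, struct vringh_kiov *iov,
                               void *ptr, size_t len,
                               int (*xfer)(const struct vringh *vrh,
                                           void *addr, void *ptr, size_t len))
{
        ssize_t done = 0;

        while (len && iov->i < iov->used) {
                size_t partlen = min(iov->iov[iov->i].iov_len, len);
                int err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);

                if (err)
                        return err;

                done += partlen;
                len -= partlen;
                ptr += partlen;

                /* Shrink the current element; move on once it is empty. */
                iov->iov[iov->i].iov_base += partlen;
                iov->iov[iov->i].iov_len -= partlen;
                if (!iov->iov[iov->i].iov_len)
                        iov->i++;
        }
        return done;
}
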
143 static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len, in range_check() argument
149 if (!getrange(vrh, addr, range)) in range_check()
177 static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len, in no_range_check() argument
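
range_check() defers to a caller-supplied getrange() callback (the same one vringh_getdesc_user() takes), while no_range_check() is the trivial variant used by the kernel and IOTLB flavours. A hedged example of such a callback, assuming a hypothetical single contiguous guest region described by guest_base/guest_size/hva_offset:

#include <linux/vringh.h>

static u64 guest_base, guest_size, hva_offset;  /* hypothetical memory map */

/* Fill *r with the contiguous region containing addr, or return false
 * to make range_check() fail the descriptor. */
static bool my_getrange(struct vringh *vrh, u64 addr, struct vringh_range *r)
{
        if (addr < guest_base || addr >= guest_base + guest_size)
                return false;

        r->start = guest_base;
        r->end_incl = guest_base + guest_size - 1;
        r->offset = hva_offset;         /* guest addr + offset = user VA */
        return true;
}
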
186 static int move_to_indirect(const struct vringh *vrh, in move_to_indirect() argument
199 len = vringh32_to_cpu(vrh, desc->len); in move_to_indirect()
206 if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) in move_to_indirect()
207 *up_next = vringh16_to_cpu(vrh, desc->next); in move_to_indirect()
245 static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next, in return_from_indirect() argument
251 *descs = vrh->vring.desc; in return_from_indirect()
252 *desc_max = vrh->vring.num; in return_from_indirect()
256 static int slow_copy(struct vringh *vrh, void *dst, const void *src, in slow_copy() argument
257 bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len, in slow_copy() argument
259 bool (*getrange)(struct vringh *vrh, in slow_copy() argument
262 bool (*getrange)(struct vringh *vrh, in slow_copy()
266 int (*copy)(const struct vringh *vrh, in slow_copy() argument
278 if (!rcheck(vrh, addr, &part, range, getrange)) in slow_copy()
281 err = copy(vrh, dst, src, part); in slow_copy()
293 __vringh_iov(struct vringh *vrh, u16 i, in __vringh_iov() argument
296 bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len, in __vringh_iov() argument
302 int (*copy)(const struct vringh *vrh, in __vringh_iov() argument
311 descs = vrh->vring.desc; in __vringh_iov()
312 desc_max = vrh->vring.num; in __vringh_iov()
330 err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange, in __vringh_iov()
333 err = copy(vrh, &desc, &descs[i], sizeof(desc)); in __vringh_iov()
338 cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) { in __vringh_iov()
339 u64 a = vringh64_to_cpu(vrh, desc.addr); in __vringh_iov()
342 len = vringh32_to_cpu(vrh, desc.len); in __vringh_iov()
343 if (!rcheck(vrh, a, &len, &range, getrange)) { in __vringh_iov()
348 if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) { in __vringh_iov()
355 err = move_to_indirect(vrh, &up_next, &i, addr, &desc, in __vringh_iov()
367 if (count > vrh->vring.num || indirect_count > desc_max) { in __vringh_iov()
373 if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE)) in __vringh_iov()
394 len = vringh32_to_cpu(vrh, desc.len); in __vringh_iov()
395 if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range, in __vringh_iov()
400 addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) + in __vringh_iov()
413 if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) { in __vringh_iov()
414 desc.len = cpu_to_vringh32(vrh, in __vringh_iov()
415 vringh32_to_cpu(vrh, desc.len) - len); in __vringh_iov()
416 desc.addr = cpu_to_vringh64(vrh, in __vringh_iov()
417 vringh64_to_cpu(vrh, desc.addr) + len); in __vringh_iov()
421 if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) { in __vringh_iov()
422 i = vringh16_to_cpu(vrh, desc.next); in __vringh_iov()
426 i = return_from_indirect(vrh, &up_next, in __vringh_iov()
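
__vringh_iov() is the descriptor-chain walk: each descriptor is fetched via copy() (or slow_copy() when it straddles a range boundary), VRING_DESC_F_INDIRECT diverts into an indirect table, VRING_DESC_F_WRITE routes the buffer into wiov rather than riov, and the count/indirect_count guards catch looping chains. A minimal direct-access sketch of just the flag handling and loop guard, ignoring indirect tables and range splitting:

#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/errno.h>

/* The real code fetches each descriptor through the copy()/slow_copy()
 * callbacks; this sketch reads the table directly. */
static int walk_chain_sketch(const struct vringh *vrh, u16 head,
                             void (*emit)(u64 addr, u32 len, bool writable))
{
        unsigned int count = 0;
        u16 i = head;

        for (;;) {
                struct vring_desc desc = vrh->vring.desc[i];

                if (++count > vrh->vring.num)
                        return -ELOOP;  /* chain longer than the ring: loop */

                emit(vringh64_to_cpu(vrh, desc.addr),
                     vringh32_to_cpu(vrh, desc.len),
                     !!(desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE)));

                if (!(desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)))
                        return 0;

                i = vringh16_to_cpu(vrh, desc.next);
                if (i >= vrh->vring.num)
                        return -EINVAL; /* next points outside the table */
        }
}
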
447 static inline int __vringh_complete(struct vringh *vrh, in __vringh_complete() argument
450 int (*putu16)(const struct vringh *vrh, in __vringh_complete() argument
452 int (*putused)(const struct vringh *vrh, in __vringh_complete() argument
461 used_ring = vrh->vring.used; in __vringh_complete()
462 used_idx = vrh->last_used_idx + vrh->completed; in __vringh_complete()
464 off = used_idx % vrh->vring.num; in __vringh_complete()
467 if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) { in __vringh_complete()
468 u16 part = vrh->vring.num - off; in __vringh_complete()
469 err = putused(vrh, &used_ring->ring[off], used, part); in __vringh_complete()
471 err = putused(vrh, &used_ring->ring[0], used + part, in __vringh_complete()
474 err = putused(vrh, &used_ring->ring[off], used, num_used); in __vringh_complete()
483 virtio_wmb(vrh->weak_barriers); in __vringh_complete()
485 err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used); in __vringh_complete()
488 &vrh->vring.used->idx); in __vringh_complete()
492 vrh->completed += num_used; in __vringh_complete()
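
__vringh_complete() publishes used entries at off = used_idx % num, splitting a batch that would run past the end of the ring into two putused() calls, and only bumps used->idx after a virtio_wmb(). Reconstructed as a standalone sketch, with the access-method callbacks passed in as in the original:

#include <linux/vringh.h>
#include <linux/virtio_ring.h>

static int publish_used_sketch(struct vringh *vrh,
                               const struct vring_used_elem *used, u16 n,
                               int (*putu16)(const struct vringh *vrh,
                                             __virtio16 *p, u16 val),
                               int (*putused)(const struct vringh *vrh,
                                              struct vring_used_elem *dst,
                                              const struct vring_used_elem *src,
                                              unsigned num))
{
        struct vring_used *used_ring = vrh->vring.used;
        u16 used_idx = vrh->last_used_idx + vrh->completed;
        u16 off = used_idx % vrh->vring.num;
        int err;

        if (n > 1 && off + n >= vrh->vring.num) {
                /* The batch wraps past the end of the ring: split it. */
                u16 part = vrh->vring.num - off;

                err = putused(vrh, &used_ring->ring[off], used, part);
                if (!err)
                        err = putused(vrh, &used_ring->ring[0],
                                      used + part, n - part);
        } else {
                err = putused(vrh, &used_ring->ring[off], used, n);
        }
        if (err)
                return err;

        /* All entries must land before the index that publishes them. */
        virtio_wmb(vrh->weak_barriers);

        err = putu16(vrh, &used_ring->idx, used_idx + n);
        if (!err)
                vrh->completed += n;
        return err;
}
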
497 static inline int __vringh_need_notify(struct vringh *vrh, in __vringh_need_notify() argument
498 int (*getu16)(const struct vringh *vrh, in __vringh_need_notify() argument
509 virtio_mb(vrh->weak_barriers); in __vringh_need_notify()
512 if (!vrh->event_indices) { in __vringh_need_notify()
514 err = getu16(vrh, &flags, &vrh->vring.avail->flags); in __vringh_need_notify()
517 &vrh->vring.avail->flags); in __vringh_need_notify()
524 err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring)); in __vringh_need_notify()
527 &vring_used_event(&vrh->vring)); in __vringh_need_notify()
532 if (unlikely(vrh->completed > 0xffff)) in __vringh_need_notify()
536 vrh->last_used_idx + vrh->completed, in __vringh_need_notify()
537 vrh->last_used_idx); in __vringh_need_notify()
539 vrh->last_used_idx += vrh->completed; in __vringh_need_notify()
540 vrh->completed = 0; in __vringh_need_notify()
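
Two notification policies meet in __vringh_need_notify(): without event indices it honours VRING_AVAIL_F_NO_INTERRUPT in avail->flags; with VIRTIO_RING_F_EVENT_IDX it reads used_event and calls vring_need_event(used_event, last_used_idx + completed, last_used_idx), i.e. notify only if the guest's event index falls inside the window of entries completed since the last notification (the completed > 0xffff guard forces a notify when that window would overflow 16 bits). The helper it relies on, as defined in include/uapi/linux/virtio_ring.h:

static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
        /* Nonzero when event_idx lies in the half-open window
         * (old, new_idx], computed modulo 2^16 so wrap-around of the
         * free-running indices is handled for free. */
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
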
544 static inline bool __vringh_notify_enable(struct vringh *vrh, in __vringh_notify_enable() argument
545 int (*getu16)(const struct vringh *vrh, in __vringh_notify_enable() argument
547 int (*putu16)(const struct vringh *vrh, in __vringh_notify_enable() argument
552 if (!vrh->event_indices) { in __vringh_notify_enable()
554 if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) { in __vringh_notify_enable()
556 &vrh->vring.used->flags); in __vringh_notify_enable()
560 if (putu16(vrh, &vring_avail_event(&vrh->vring), in __vringh_notify_enable()
561 vrh->last_avail_idx) != 0) { in __vringh_notify_enable()
563 &vring_avail_event(&vrh->vring)); in __vringh_notify_enable()
570 virtio_mb(vrh->weak_barriers); in __vringh_notify_enable()
572 if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) { in __vringh_notify_enable()
574 &vrh->vring.avail->idx); in __vringh_notify_enable()
581 return avail == vrh->last_avail_idx; in __vringh_notify_enable()
584 static inline void __vringh_notify_disable(struct vringh *vrh, in __vringh_notify_disable() argument
585 int (*putu16)(const struct vringh *vrh, in __vringh_notify_disable() argument
588 if (!vrh->event_indices) { in __vringh_notify_disable()
590 if (putu16(vrh, &vrh->vring.used->flags, in __vringh_notify_disable()
593 &vrh->vring.used->flags); in __vringh_notify_disable()
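
__vringh_notify_enable() arms notifications (clearing used->flags or writing last_avail_idx into avail_event), issues a full barrier, then re-reads avail->idx; it returns true only if the ring is still empty, so callers must re-poll on false. Together with __vringh_notify_disable() this supports the usual race-free idle pattern; a hedged sketch using the user-side wrappers, where process_ring() is a hypothetical consumer:

#include <linux/vringh.h>

/* Hypothetical: drain every available buffer currently visible. */
static void process_ring(struct vringh *vrh) { }

static void drain_then_idle(struct vringh *vrh)
{
        for (;;) {
                process_ring(vrh);
                if (vringh_notify_enable_user(vrh))
                        break;          /* still empty after arming: sleep */
                /* A buffer raced in between draining and arming; disarm
                 * and go around again instead of sleeping through it. */
                vringh_notify_disable_user(vrh);
        }
}
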
599 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p) in getu16_user() argument
603 *val = vringh16_to_cpu(vrh, v); in getu16_user()
607 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val) in putu16_user() argument
609 __virtio16 v = cpu_to_vringh16(vrh, val); in putu16_user()
613 static inline int copydesc_user(const struct vringh *vrh, in copydesc_user() argument
620 static inline int putused_user(const struct vringh *vrh, in putused_user() argument
629 static inline int xfer_from_user(const struct vringh *vrh, void *src, in xfer_from_user() argument
636 static inline int xfer_to_user(const struct vringh *vrh, in xfer_to_user() argument
656 int vringh_init_user(struct vringh *vrh, u64 features, in vringh_init_user() argument
668 vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1)); in vringh_init_user()
669 vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX)); in vringh_init_user()
670 vrh->weak_barriers = weak_barriers; in vringh_init_user()
671 vrh->completed = 0; in vringh_init_user()
672 vrh->last_avail_idx = 0; in vringh_init_user()
673 vrh->last_used_idx = 0; in vringh_init_user()
674 vrh->vring.num = num; in vringh_init_user()
676 vrh->vring.desc = (__force struct vring_desc *)desc; in vringh_init_user()
677 vrh->vring.avail = (__force struct vring_avail *)avail; in vringh_init_user()
678 vrh->vring.used = (__force struct vring_used *)used; in vringh_init_user()
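
vringh_init_user() caches the feature-dependent endianness (VIRTIO_F_VERSION_1) and event-index mode, then stores the userspace ring pointers; the __force casts above are where the __user pointers land in the kernel-typed struct vring. A hedged setup sketch, where the *_uaddr parameters are addresses the caller obtained elsewhere (e.g. from a VHOST_SET_VRING_ADDR-style ioctl):

#include <linux/vringh.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

static int setup_user_ring(struct vringh *vrh, unsigned int num,
                           vring_desc_t __user *desc_uaddr,
                           vring_avail_t __user *avail_uaddr,
                           vring_used_t __user *used_uaddr)
{
        /* Feature bits decide ring endianness (VERSION_1 means a
         * little-endian layout) and whether event indices are used. */
        u64 features = (1ULL << VIRTIO_F_VERSION_1) |
                       (1ULL << VIRTIO_RING_F_EVENT_IDX);

        /* weak_barriers = true: the other side is a CPU peer, so SMP
         * barriers suffice. */
        return vringh_init_user(vrh, features, num, true,
                                desc_uaddr, avail_uaddr, used_uaddr);
}
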
703 int vringh_getdesc_user(struct vringh *vrh, in vringh_getdesc_user() argument
706 bool (*getrange)(struct vringh *vrh, in vringh_getdesc_user() argument
712 *head = vrh->vring.num; in vringh_getdesc_user()
713 err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx); in vringh_getdesc_user()
718 if (err == vrh->vring.num) in vringh_getdesc_user()
742 err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov, in vringh_getdesc_user()
791 void vringh_abandon_user(struct vringh *vrh, unsigned int num) in vringh_abandon_user() argument
795 vrh->last_avail_idx -= num; in vringh_abandon_user()
808 int vringh_complete_user(struct vringh *vrh, u16 head, u32 len) in vringh_complete_user() argument
812 used.id = cpu_to_vringh32(vrh, head); in vringh_complete_user()
813 used.len = cpu_to_vringh32(vrh, len); in vringh_complete_user()
814 return __vringh_complete(vrh, &used, 1, putu16_user, putused_user); in vringh_complete_user()
827 int vringh_complete_multi_user(struct vringh *vrh, in vringh_complete_multi_user() argument
831 return __vringh_complete(vrh, used, num_used, in vringh_complete_multi_user()
843 bool vringh_notify_enable_user(struct vringh *vrh) in vringh_notify_enable_user() argument
845 return __vringh_notify_enable(vrh, getu16_user, putu16_user); in vringh_notify_enable_user()
856 void vringh_notify_disable_user(struct vringh *vrh) in vringh_notify_disable_user() argument
858 __vringh_notify_disable(vrh, putu16_user); in vringh_notify_disable_user()
868 int vringh_need_notify_user(struct vringh *vrh) in vringh_need_notify_user() argument
870 return __vringh_need_notify(vrh, getu16_user); in vringh_need_notify_user()
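
Taken together, the user-side entry points above form a service loop: fetch a chain, pull/push its payload, complete it, and decide whether to kick. A hedged sketch; MAX_IOV is a hypothetical cap and my_getrange() is the callback sketched earlier:

#include <linux/vringh.h>
#include <linux/uio.h>

#define MAX_IOV 16      /* hypothetical per-request element cap */

static bool my_getrange(struct vringh *vrh, u64 addr,
                        struct vringh_range *r);        /* sketched earlier */

static int service_user_ring(struct vringh *vrh)
{
        struct iovec rvec[MAX_IOV], wvec[MAX_IOV];
        struct vringh_iov riov, wiov;
        u16 head;
        int err;

        /* getdesc resets the iov indices on each call, so init once. */
        vringh_iov_init(&riov, rvec, MAX_IOV);
        vringh_iov_init(&wiov, wvec, MAX_IOV);

        while ((err = vringh_getdesc_user(vrh, &riov, &wiov,
                                          my_getrange, &head)) == 1) {
                /* vringh_iov_pull_user(&riov, req, len) reads the guest's
                 * request; vringh_iov_push_user(&wiov, resp, len) writes
                 * the reply into the writable part of the chain. */

                err = vringh_complete_user(vrh, head, 0 /* bytes written */);
                if (err)
                        return err;
        }
        if (err < 0)
                return err;             /* malformed descriptor chain */

        if (vringh_need_notify_user(vrh) > 0) {
                /* kick the guest (eventfd, irq: transport-specific) */
        }
        return 0;
}
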
875 static inline int getu16_kern(const struct vringh *vrh, in getu16_kern() argument
878 *val = vringh16_to_cpu(vrh, READ_ONCE(*p)); in getu16_kern()
882 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) in putu16_kern() argument
884 WRITE_ONCE(*p, cpu_to_vringh16(vrh, val)); in putu16_kern()
888 static inline int copydesc_kern(const struct vringh *vrh, in copydesc_kern() argument
895 static inline int putused_kern(const struct vringh *vrh, in putused_kern() argument
904 static inline int xfer_kern(const struct vringh *vrh, void *src, in xfer_kern() argument
911 static inline int kern_xfer(const struct vringh *vrh, void *dst, in kern_xfer() argument
930 int vringh_init_kern(struct vringh *vrh, u64 features, in vringh_init_kern() argument
942 vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1)); in vringh_init_kern()
943 vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX)); in vringh_init_kern()
944 vrh->weak_barriers = weak_barriers; in vringh_init_kern()
945 vrh->completed = 0; in vringh_init_kern()
946 vrh->last_avail_idx = 0; in vringh_init_kern()
947 vrh->last_used_idx = 0; in vringh_init_kern()
948 vrh->vring.num = num; in vringh_init_kern()
949 vrh->vring.desc = desc; in vringh_init_kern()
950 vrh->vring.avail = avail; in vringh_init_kern()
951 vrh->vring.used = used; in vringh_init_kern()
976 int vringh_getdesc_kern(struct vringh *vrh, in vringh_getdesc_kern() argument
984 err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx); in vringh_getdesc_kern()
989 if (err == vrh->vring.num) in vringh_getdesc_kern()
993 err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL, in vringh_getdesc_kern()
1039 void vringh_abandon_kern(struct vringh *vrh, unsigned int num) in vringh_abandon_kern() argument
1043 vrh->last_avail_idx -= num; in vringh_abandon_kern()
1056 int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len) in vringh_complete_kern() argument
1060 used.id = cpu_to_vringh32(vrh, head); in vringh_complete_kern()
1061 used.len = cpu_to_vringh32(vrh, len); in vringh_complete_kern()
1063 return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern); in vringh_complete_kern()
1074 bool vringh_notify_enable_kern(struct vringh *vrh) in vringh_notify_enable_kern() argument
1076 return __vringh_notify_enable(vrh, getu16_kern, putu16_kern); in vringh_notify_enable_kern()
1087 void vringh_notify_disable_kern(struct vringh *vrh) in vringh_notify_disable_kern() argument
1089 __vringh_notify_disable(vrh, putu16_kern); in vringh_notify_disable_kern()
1099 int vringh_need_notify_kern(struct vringh *vrh) in vringh_need_notify_kern() argument
1101 return __vringh_need_notify(vrh, getu16_kern); in vringh_need_notify_kern()
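
The kernel-side flavour mirrors that loop over struct kvec, with getdesc taking a gfp_t so the kiov arrays can be grown on demand. A hedged sketch; struct my_req is a hypothetical request layout:

#include <linux/vringh.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct my_req { __le32 type; __le32 len; };     /* hypothetical layout */

static int service_kern_ring(struct vringh *vrh)
{
        struct kvec rkv[8], wkv[8];
        struct vringh_kiov riov, wiov;
        struct my_req req;
        u16 head;
        int err;

        vringh_kiov_init(&riov, rkv, ARRAY_SIZE(rkv));
        vringh_kiov_init(&wiov, wkv, ARRAY_SIZE(wkv));

        while ((err = vringh_getdesc_kern(vrh, &riov, &wiov, &head,
                                          GFP_KERNEL)) == 1) {
                ssize_t got = vringh_iov_pull_kern(&riov, &req, sizeof(req));

                if (got < (ssize_t)sizeof(req)) {
                        /* Short or failed read: give the buffer back. */
                        vringh_abandon_kern(vrh, 1);
                        return got < 0 ? got : -EINVAL;
                }

                /* ... handle req, vringh_iov_push_kern() any response ... */

                err = vringh_complete_kern(vrh, head, 0);
                if (err)
                        return err;
        }
        return err;     /* 0 when drained, -errno on bad descriptors */
}
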
1115 static int iotlb_translate(const struct vringh *vrh, in iotlb_translate() argument
1120 struct vhost_iotlb *iotlb = vrh->iotlb; in iotlb_translate()
1124 spin_lock(vrh->iotlb_lock); in iotlb_translate()
1149 if (vrh->use_va) { in iotlb_translate()
1167 spin_unlock(vrh->iotlb_lock); in iotlb_translate()
1177 static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, in copy_from_iotlb() argument
1195 ret = iotlb_translate(vrh, (u64)(uintptr_t)src, in copy_from_iotlb()
1203 if (vrh->use_va) { in copy_from_iotlb()
1223 static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, in copy_to_iotlb() argument
1241 ret = iotlb_translate(vrh, (u64)(uintptr_t)dst, in copy_to_iotlb()
1249 if (vrh->use_va) { in copy_to_iotlb()
1269 static inline int getu16_iotlb(const struct vringh *vrh, in getu16_iotlb() argument
1284 ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), in getu16_iotlb()
1289 if (vrh->use_va) { in getu16_iotlb()
1301 *val = vringh16_to_cpu(vrh, tmp); in getu16_iotlb()
1306 static inline int putu16_iotlb(const struct vringh *vrh, in putu16_iotlb() argument
1321 ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), in putu16_iotlb()
1326 tmp = cpu_to_vringh16(vrh, val); in putu16_iotlb()
1328 if (vrh->use_va) { in putu16_iotlb()
1343 static inline int copydesc_iotlb(const struct vringh *vrh, in copydesc_iotlb() argument
1348 ret = copy_from_iotlb(vrh, dst, (void *)src, len); in copydesc_iotlb()
1355 static inline int xfer_from_iotlb(const struct vringh *vrh, void *src, in xfer_from_iotlb() argument
1360 ret = copy_from_iotlb(vrh, dst, src, len); in xfer_from_iotlb()
1367 static inline int xfer_to_iotlb(const struct vringh *vrh, in xfer_to_iotlb() argument
1372 ret = copy_to_iotlb(vrh, dst, src, len); in xfer_to_iotlb()
1379 static inline int putused_iotlb(const struct vringh *vrh, in putused_iotlb() argument
1387 ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst)); in putused_iotlb()
1406 int vringh_init_iotlb(struct vringh *vrh, u64 features, in vringh_init_iotlb() argument
1412 vrh->use_va = false; in vringh_init_iotlb()
1414 return vringh_init_kern(vrh, features, num, weak_barriers, in vringh_init_iotlb()
1432 int vringh_init_iotlb_va(struct vringh *vrh, u64 features, in vringh_init_iotlb_va() argument
1438 vrh->use_va = true; in vringh_init_iotlb_va()
1440 return vringh_init_kern(vrh, features, num, weak_barriers, in vringh_init_iotlb_va()
1451 void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, in vringh_set_iotlb() argument
1454 vrh->iotlb = iotlb; in vringh_set_iotlb()
1455 vrh->iotlb_lock = iotlb_lock; in vringh_set_iotlb()
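
For vDPA-style devices, vringh_init_iotlb() reuses vringh_init_kern() but with use_va = false, so the desc/avail/used pointers hold untranslated guest addresses that iotlb_translate() resolves on every access (vringh_init_iotlb_va() is the use_va = true variant for userspace VAs). A hedged setup sketch; struct my_vq is hypothetical and its iotlb/iotlb_lock are assumed initialised elsewhere:

#include <linux/vringh.h>
#include <linux/vhost_iotlb.h>
#include <linux/spinlock.h>

struct my_vq {                          /* hypothetical device state */
        struct vringh vrh;
        struct vhost_iotlb *iotlb;
        spinlock_t iotlb_lock;
};

static int my_vq_setup(struct my_vq *vq, u64 features, unsigned int num,
                       u64 desc_gpa, u64 avail_gpa, u64 used_gpa)
{
        int err;

        /* The ring pointers carry untranslated guest addresses here;
         * they are resolved through the IOTLB on every access. */
        err = vringh_init_iotlb(&vq->vrh, features, num, false,
                                (struct vring_desc *)(uintptr_t)desc_gpa,
                                (struct vring_avail *)(uintptr_t)avail_gpa,
                                (struct vring_used *)(uintptr_t)used_gpa);
        if (err)
                return err;

        vringh_set_iotlb(&vq->vrh, vq->iotlb, &vq->iotlb_lock);
        return 0;
}
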
1480 int vringh_getdesc_iotlb(struct vringh *vrh, in vringh_getdesc_iotlb() argument
1488 err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx); in vringh_getdesc_iotlb()
1493 if (err == vrh->vring.num) in vringh_getdesc_iotlb()
1497 err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL, in vringh_getdesc_iotlb()
1515 ssize_t vringh_iov_pull_iotlb(struct vringh *vrh, in vringh_iov_pull_iotlb() argument
1519 return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb); in vringh_iov_pull_iotlb()
1532 ssize_t vringh_iov_push_iotlb(struct vringh *vrh, in vringh_iov_push_iotlb() argument
1536 return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb); in vringh_iov_push_iotlb()
1548 void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num) in vringh_abandon_iotlb() argument
1553 vrh->last_avail_idx -= num; in vringh_abandon_iotlb()
1566 int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len) in vringh_complete_iotlb() argument
1570 used.id = cpu_to_vringh32(vrh, head); in vringh_complete_iotlb()
1571 used.len = cpu_to_vringh32(vrh, len); in vringh_complete_iotlb()
1573 return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb); in vringh_complete_iotlb()
1584 bool vringh_notify_enable_iotlb(struct vringh *vrh) in vringh_notify_enable_iotlb() argument
1586 return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb); in vringh_notify_enable_iotlb()
1597 void vringh_notify_disable_iotlb(struct vringh *vrh) in vringh_notify_disable_iotlb() argument
1599 __vringh_notify_disable(vrh, putu16_iotlb); in vringh_notify_disable_iotlb()
1609 int vringh_need_notify_iotlb(struct vringh *vrh) in vringh_need_notify_iotlb() argument
1611 return __vringh_need_notify(vrh, getu16_iotlb); in vringh_need_notify_iotlb()
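
The IOTLB variants round out the same triad; note that, unlike the user and kern flavours, the pull/push helpers here take the vringh so they can translate through the IOTLB. A final hedged loop sketch with a hypothetical fixed-size request buffer:

#include <linux/vringh.h>
#include <linux/kernel.h>
#include <linux/gfp.h>

static int service_iotlb_ring(struct vringh *vrh)
{
        struct kvec rkv[8], wkv[8];
        struct vringh_kiov riov, wiov;
        u8 buf[64];                     /* hypothetical request buffer */
        u16 head;
        int err;

        vringh_kiov_init(&riov, rkv, ARRAY_SIZE(rkv));
        vringh_kiov_init(&wiov, wkv, ARRAY_SIZE(wkv));

        while ((err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head,
                                           GFP_ATOMIC)) == 1) {
                ssize_t got = vringh_iov_pull_iotlb(vrh, &riov,
                                                    buf, sizeof(buf));
                if (got < 0)
                        return got;

                /* ... vringh_iov_push_iotlb(vrh, &wiov, resp, len) ... */

                err = vringh_complete_iotlb(vrh, head, 0);
                if (err)
                        return err;
        }
        if (err < 0)
                return err;

        if (vringh_need_notify_iotlb(vrh) > 0) {
                /* raise the device interrupt (transport-specific) */
        }
        return 0;
}
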