1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
20a8a69ddSRusty Russell #ifndef _LINUX_VIRTIO_RING_H
30a8a69ddSRusty Russell #define _LINUX_VIRTIO_RING_H
40a8a69ddSRusty Russell
5c5610a5dSMichael S. Tsirkin #include <asm/barrier.h>
60a8a69ddSRusty Russell #include <linux/irqreturn.h>
7607ca46eSDavid Howells #include <uapi/linux/virtio_ring.h>
8607ca46eSDavid Howells
9a9a0fef7SRusty Russell /*
10a9a0fef7SRusty Russell * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
11a9a0fef7SRusty Russell * they're not on an SMP host system, so they need to assume real
12a9a0fef7SRusty Russell * barriers. Non-SMP virtio hosts could skip the barriers, but does
13a9a0fef7SRusty Russell * anyone care?
14a9a0fef7SRusty Russell *
15a9a0fef7SRusty Russell * For virtio_pci on SMP, we don't need to order with respect to MMIO
16a6596127SMichael S. Tsirkin * accesses through relaxed memory I/O windows, so virt_mb() et al are
17a9a0fef7SRusty Russell * sufficient.
18a9a0fef7SRusty Russell *
19a9a0fef7SRusty Russell * For using virtio to talk to real devices (eg. other heterogeneous
20a9a0fef7SRusty Russell * CPUs) we do need real barriers. In theory, we could be using both
21a9a0fef7SRusty Russell * kinds of virtio, so it's a runtime decision, and the branch is
22a9a0fef7SRusty Russell * actually quite cheap.
23a9a0fef7SRusty Russell */
24a9a0fef7SRusty Russell
/*
 * Full memory barrier for virtio ring accesses.  When weak_barriers is
 * set we only need to order against other CPUs, so the cheap virt_mb()
 * is enough; otherwise fall back to a mandatory full barrier (see the
 * comment at the top of this file).
 */
static inline void virtio_mb(bool weak_barriers)
{
	if (!weak_barriers) {
		mb();
		return;
	}
	virt_mb();
}
329e1a27eaSAlexander Duyck
/*
 * Read memory barrier for virtio ring accesses.  virt_rmb() when only
 * CPU-vs-CPU ordering is required (weak_barriers), dma_rmb() when we
 * must order against a real device as well.
 */
static inline void virtio_rmb(bool weak_barriers)
{
	if (!weak_barriers) {
		dma_rmb();
		return;
	}
	virt_rmb();
}
409e1a27eaSAlexander Duyck
/*
 * Write memory barrier for virtio ring accesses.  virt_wmb() when only
 * CPU-vs-CPU ordering is required (weak_barriers), dma_wmb() when we
 * must order against a real device as well.
 */
static inline void virtio_wmb(bool weak_barriers)
{
	if (!weak_barriers) {
		dma_wmb();
		return;
	}
	virt_wmb();
}
48a9a0fef7SRusty Russell
/*
 * Store @v to *@p and then execute a full memory barrier.  With
 * weak_barriers this maps to virt_store_mb(); otherwise the store is
 * done with WRITE_ONCE() followed by a mandatory mb().
 *
 * Note: the previous definition ended in a stray line-continuation
 * backslash after "} while (0)", which silently spliced whatever line
 * followed the definition into the macro.  It only worked because the
 * next line happened to be blank; the backslash is dropped here.
 */
#define virtio_store_mb(weak_barriers, p, v) \
do { \
	if (weak_barriers) { \
		virt_store_mb(*p, v); \
	} else { \
		WRITE_ONCE(*p, v); \
		mb(); \
	} \
} while (0)
58788e5b3aSMichael S. Tsirkin
/* Opaque types used by the APIs below; full definitions live elsewhere. */
struct virtio_device;
struct virtqueue;
struct device;
620a8a69ddSRusty Russell
/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 *
 * weak_barriers selects the cheaper virt_*() barriers (see the comment
 * at the top of this file).  NOTE(review): notify/callback appear to be
 * the device-kick and completion hooks respectively, and ctx looks like
 * a per-buffer-context toggle — confirm against the vring
 * implementation before relying on this.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
792a2d1382SAndy Lutomirski
/*
 * Creates a virtqueue and allocates the descriptor ring with a per
 * virtqueue DMA device.  Identical to vring_create_virtqueue() except
 * that dma_dev names the device used for the ring's DMA mappings
 * instead of the transport's default.
 */
struct virtqueue *vring_create_virtqueue_dma(unsigned int index,
					     unsigned int num,
					     unsigned int vring_align,
					     struct virtio_device *vdev,
					     bool weak_barriers,
					     bool may_reduce_num,
					     bool ctx,
					     bool (*notify)(struct virtqueue *vq),
					     void (*callback)(struct virtqueue *vq),
					     const char *name,
					     struct device *dma_dev);
952713ea3cSJason Wang
/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring (passed in via @pages).  Because the ring memory is owned by the
 * caller, vring_del_virtqueue() will not free it — only rings created
 * by vring_create_virtqueue() are freed there (see comment below).
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
1102a2d1382SAndy Lutomirski
/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring; rings handed in via vring_new_virtqueue remain
 * the caller's to free.
 */
void vring_del_virtqueue(struct virtqueue *vq);
1162a2d1382SAndy Lutomirski
/*
 * Filter out transport-specific feature bits, i.e. strip from @vdev the
 * feature bits that belong to the ring transport rather than the device.
 */
void vring_transport_features(struct virtio_device *vdev);
1190a8a69ddSRusty Russell
/* Interrupt handler for a virtqueue; @_vq is the struct virtqueue. */
irqreturn_t vring_interrupt(int irq, void *_vq);

/*
 * NOTE(review): presumably builds the VIRTIO_F_NOTIFICATION_DATA value
 * for kicking @_vq — confirm against the virtio spec and implementation.
 */
u32 vring_notification_data(struct virtqueue *_vq);
1230a8a69ddSRusty Russell #endif /* _LINUX_VIRTIO_RING_H */
124