#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM,
 * but hopefully others soon.  Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */
#include <stdint.h>
#include "standard-headers/linux/types.h"
#include "standard-headers/linux/virtio_types.h"

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT	1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE	2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT	4

/*
 * Mark a descriptor as available or used in packed ring.
 * Notice: they are defined as shifts instead of shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL	7
#define VRING_PACKED_DESC_F_USED	15

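/*
 * Illustrative sketch, not part of the original header: because the two
 * values above are bit positions rather than ready-made masks, a packed
 * ring implementation typically builds descriptor flags along these lines,
 * with "avail" and "used" derived from the ring's wrap counters.
 */
static inline uint16_t vring_example_packed_desc_flags(uint16_t flags,
							unsigned int avail,
							unsigned int used)
{
	/* Clear, then set, the AVAIL and USED bits. */
	flags &= (uint16_t)~((1 << VRING_PACKED_DESC_F_AVAIL) |
			     (1 << VRING_PACKED_DESC_F_USED));
	if (avail)
		flags |= 1 << VRING_PACKED_DESC_F_AVAIL;
	if (used)
		flags |= 1 << VRING_PACKED_DESC_F_USED;
	return flags;
}
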
/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer.  It's unreliable, so it's simply an optimization.  Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY	1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer.  It's unreliable, so it's simply an
 * optimization.  */
#define VRING_AVAIL_F_NO_INTERRUPT	1

/* Enable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE	0x0
/* Disable events in packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE	0x1
/*
 * Enable events for a specific descriptor in packed ring.
 * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC	0x2

/*
 * Wrap counter bit shift in event suppression structure
 * of packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR	15

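/*
 * Illustrative sketch, not part of the original header: when a driver or
 * device uses VRING_PACKED_EVENT_FLAG_DESC, the 16-bit off_wrap value of
 * the event suppression structure carries the descriptor ring offset in
 * the low 15 bits and the expected wrap counter in the bit selected by
 * VRING_PACKED_EVENT_F_WRAP_CTR, roughly as follows.
 */
static inline uint16_t vring_example_packed_off_wrap(uint16_t desc_offset,
						     unsigned int wrap_counter)
{
	return (uint16_t)((desc_offset & 0x7fff) |
			  ((wrap_counter & 1) << VRING_PACKED_EVENT_F_WRAP_CTR));
}
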
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX		29

/* Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16

/* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical). */
	__virtio64 addr;
	/* Length. */
	__virtio32 len;
	/* The flags as indicated above. */
	__virtio16 flags;
	/* We chain unused descriptors via this, too */
	__virtio16 next;
};

struct vring_avail {
	__virtio16 flags;
	__virtio16 idx;
	__virtio16 ring[];
};

/* uint32_t is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	__virtio32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__virtio32 len;
};

typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_elem_t;

struct vring_used {
	__virtio16 flags;
	__virtio16 idx;
	vring_used_elem_t ring[];
};

/*
 * The ring element addresses are passed between components with different
 * alignment assumptions. Thus, we might need to decrease the compiler-selected
 * alignment, and so must use a typedef to make sure the aligned attribute
 * actually takes hold:
 *
 * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
 *
 * When used on a struct, or struct member, the aligned attribute can only
 * increase the alignment; in order to decrease it, the packed attribute must
 * be specified as well. When used as part of a typedef, the aligned attribute
 * can both increase and decrease alignment, and specifying the packed
 * attribute generates a warning.
 */
typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
	vring_desc_t;
typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
	vring_avail_t;
typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_t;

struct vring {
	unsigned int num;

	vring_desc_t *desc;

	vring_avail_t *avail;

	vring_used_t *used;
};

#ifndef VIRTIO_RING_NO_LEGACY

/* The standard layout for the ring is a contiguous chunk of memory which looks
 * like this.  We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__virtio16 avail_flags;
 *	__virtio16 avail_idx;
 *	__virtio16 available[num];
 *	__virtio16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__virtio16 used_flags;
 *	__virtio16 used_idx;
 *	struct vring_used_elem used[num];
 *	__virtio16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
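
/*
 * Illustrative sketch, not part of the original header: with
 * VIRTIO_RING_F_EVENT_IDX negotiated, a driver asks for an interrupt once
 * the device's used->idx advances past "wanted" by publishing that value
 * through vring_used_event() (byte-order conversion and memory barriers
 * omitted for brevity).
 */
static inline void vring_example_set_used_event(struct vring *vr, uint16_t wanted)
{
	vring_used_event(vr) = wanted;
}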

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
	vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
		+ align-1) & ~(align - 1));
}

static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}

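/*
 * Illustrative sketch, not part of the original header: a typical legacy
 * setup sizes the ring with vring_size(), has the caller allocate and zero
 * that many bytes of guest-shared memory, and then lays out the three parts
 * with vring_init().  The alignment is transport-specific (commonly 4096
 * for legacy PCI).
 */
static inline int vring_example_init_checked(struct vring *vr, unsigned int num,
					     void *ring_mem, unsigned long ring_mem_size,
					     unsigned long align)
{
	if (ring_mem_size < vring_size(num, align))
		return -1;
	vring_init(vr, num, ring_mem, align);
	return 0;
}
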
#endif /* VIRTIO_RING_NO_LEGACY */

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	/* Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0. */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

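/*
 * Illustrative sketch, not part of the original header: on a split ring
 * with VIRTIO_RING_F_EVENT_IDX negotiated, the device combines the
 * used_event published by the driver with vring_need_event() to decide
 * whether to interrupt after moving used->idx from old_used to new_used;
 * the driver performs the symmetric check against avail_event before
 * kicking.  Byte-order conversion is omitted for brevity.
 */
#ifndef VIRTIO_RING_NO_LEGACY
static inline int vring_example_should_interrupt(const struct vring *vr,
						 uint16_t new_used, uint16_t old_used)
{
	return vring_need_event(vring_used_event(vr), new_used, old_used);
}

static inline int vring_example_should_kick(const struct vring *vr,
					    uint16_t new_avail, uint16_t old_avail)
{
	return vring_need_event(vring_avail_event(vr), new_avail, old_avail);
}
#endif
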
struct vring_packed_desc_event {
	/* Descriptor Ring Change Event Offset/Wrap Counter. */
	uint16_t off_wrap;
	/* Descriptor Ring Change Event Flags. */
	uint16_t flags;
};

struct vring_packed_desc {
	/* Buffer Address. */
	uint64_t addr;
	/* Buffer Length. */
	uint32_t len;
	/* Buffer ID. */
	uint16_t id;
	/* The flags depending on descriptor type. */
	uint16_t flags;
};

#endif /* _LINUX_VIRTIO_RING_H */