xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision 8daafe9ebbd21a54bf91f9ff81decf215c203edd)
1fd534e9bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
20a8a69ddSRusty Russell /* Virtio ring implementation.
30a8a69ddSRusty Russell  *
40a8a69ddSRusty Russell  *  Copyright 2007 Rusty Russell IBM Corporation
50a8a69ddSRusty Russell  */
60a8a69ddSRusty Russell #include <linux/virtio.h>
70a8a69ddSRusty Russell #include <linux/virtio_ring.h>
8e34f8725SRusty Russell #include <linux/virtio_config.h>
90a8a69ddSRusty Russell #include <linux/device.h>
105a0e3ad6STejun Heo #include <linux/slab.h>
11b5a2c4f1SPaul Gortmaker #include <linux/module.h>
12e93300b1SRusty Russell #include <linux/hrtimer.h>
13780bc790SAndy Lutomirski #include <linux/dma-mapping.h>
1488938359SAlexander Potapenko #include <linux/kmsan.h>
15f8ce7263SMichael S. Tsirkin #include <linux/spinlock.h>
1678fe3987SAndy Lutomirski #include <xen/xen.h>
170a8a69ddSRusty Russell 
180a8a69ddSRusty Russell #ifdef DEBUG
190a8a69ddSRusty Russell /* For development, we want to crash whenever the ring is screwed. */
209499f5e7SRusty Russell #define BAD_RING(_vq, fmt, args...)				\
219499f5e7SRusty Russell 	do {							\
229499f5e7SRusty Russell 		dev_err(&(_vq)->vq.vdev->dev,			\
239499f5e7SRusty Russell 			"%s:"fmt, (_vq)->vq.name, ##args);	\
249499f5e7SRusty Russell 		BUG();						\
259499f5e7SRusty Russell 	} while (0)
26c5f841f1SRusty Russell /* Caller is supposed to guarantee no reentry. */
273a35ce7dSRoel Kluin #define START_USE(_vq)						\
28c5f841f1SRusty Russell 	do {							\
29c5f841f1SRusty Russell 		if ((_vq)->in_use)				\
309499f5e7SRusty Russell 			panic("%s:in_use = %i\n",		\
319499f5e7SRusty Russell 			      (_vq)->vq.name, (_vq)->in_use);	\
32c5f841f1SRusty Russell 		(_vq)->in_use = __LINE__;			\
33c5f841f1SRusty Russell 	} while (0)
343a35ce7dSRoel Kluin #define END_USE(_vq) \
3597a545abSRusty Russell 	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
364d6a105eSTiwei Bie #define LAST_ADD_TIME_UPDATE(_vq)				\
374d6a105eSTiwei Bie 	do {							\
384d6a105eSTiwei Bie 		ktime_t now = ktime_get();			\
394d6a105eSTiwei Bie 								\
404d6a105eSTiwei Bie 		/* No kick or get, with 0.1 seconds between?  Warn. */ \
414d6a105eSTiwei Bie 		if ((_vq)->last_add_time_valid)			\
424d6a105eSTiwei Bie 			WARN_ON(ktime_to_ms(ktime_sub(now,	\
434d6a105eSTiwei Bie 				(_vq)->last_add_time)) > 100);	\
444d6a105eSTiwei Bie 		(_vq)->last_add_time = now;			\
454d6a105eSTiwei Bie 		(_vq)->last_add_time_valid = true;		\
464d6a105eSTiwei Bie 	} while (0)
474d6a105eSTiwei Bie #define LAST_ADD_TIME_CHECK(_vq)				\
484d6a105eSTiwei Bie 	do {							\
494d6a105eSTiwei Bie 		if ((_vq)->last_add_time_valid) {		\
504d6a105eSTiwei Bie 			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
514d6a105eSTiwei Bie 				      (_vq)->last_add_time)) > 100); \
524d6a105eSTiwei Bie 		}						\
534d6a105eSTiwei Bie 	} while (0)
544d6a105eSTiwei Bie #define LAST_ADD_TIME_INVALID(_vq)				\
554d6a105eSTiwei Bie 	((_vq)->last_add_time_valid = false)
560a8a69ddSRusty Russell #else
579499f5e7SRusty Russell #define BAD_RING(_vq, fmt, args...)				\
589499f5e7SRusty Russell 	do {							\
599499f5e7SRusty Russell 		dev_err(&_vq->vq.vdev->dev,			\
609499f5e7SRusty Russell 			"%s:"fmt, (_vq)->vq.name, ##args);	\
619499f5e7SRusty Russell 		(_vq)->broken = true;				\
629499f5e7SRusty Russell 	} while (0)
630a8a69ddSRusty Russell #define START_USE(vq)
640a8a69ddSRusty Russell #define END_USE(vq)
654d6a105eSTiwei Bie #define LAST_ADD_TIME_UPDATE(vq)
664d6a105eSTiwei Bie #define LAST_ADD_TIME_CHECK(vq)
674d6a105eSTiwei Bie #define LAST_ADD_TIME_INVALID(vq)
680a8a69ddSRusty Russell #endif
690a8a69ddSRusty Russell 
70cbeedb72STiwei Bie struct vring_desc_state_split {
71780bc790SAndy Lutomirski 	void *data;			/* Data for callback. */
72780bc790SAndy Lutomirski 	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
73780bc790SAndy Lutomirski };
74780bc790SAndy Lutomirski 
751ce9e605STiwei Bie struct vring_desc_state_packed {
761ce9e605STiwei Bie 	void *data;			/* Data for callback. */
771ce9e605STiwei Bie 	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
781ce9e605STiwei Bie 	u16 num;			/* Descriptor list length. */
791ce9e605STiwei Bie 	u16 last;			/* The last desc state in a list. */
801ce9e605STiwei Bie };
811ce9e605STiwei Bie 
821f28750fSJason Wang struct vring_desc_extra {
83ef5c366fSJason Wang 	dma_addr_t addr;		/* Descriptor DMA addr. */
84ef5c366fSJason Wang 	u32 len;			/* Descriptor length. */
851ce9e605STiwei Bie 	u16 flags;			/* Descriptor flags. */
86aeef9b47SJason Wang 	u16 next;			/* The next desc state in a list. */
871ce9e605STiwei Bie };
881ce9e605STiwei Bie 
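/*
 * Illustrative sketch, not part of the driver: desc_extra shadows the
 * ring descriptors so the DMA address, length and flags stay in
 * guest-private memory (a misbehaving device can rewrite the ring, but
 * not this array), while ->next threads the free list.  Assuming a
 * split-ring vq with use_dma_api set (the extra flags are only kept up
 * to date in that case), walking a chain by hand would look roughly
 * like this hypothetical helper:
 *
 *	static unsigned int count_chain(struct vring_virtqueue *vq,
 *					unsigned int head)
 *	{
 *		struct vring_desc_extra *extra = vq->split.desc_extra;
 *		unsigned int i = head, n = 1;
 *
 *		while (extra[i].flags & VRING_DESC_F_NEXT) {
 *			i = extra[i].next;
 *			n++;
 *		}
 *		return n;
 *	}
 *
 * count_chain() is hypothetical and only meant to show how ->next and
 * ->flags cooperate.
 */
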
89d76136e4SXuan Zhuo struct vring_virtqueue_split {
90d76136e4SXuan Zhuo 	/* Actual memory layout for this queue. */
91d76136e4SXuan Zhuo 	struct vring vring;
92d76136e4SXuan Zhuo 
93d76136e4SXuan Zhuo 	/* Last written value to avail->flags */
94d76136e4SXuan Zhuo 	u16 avail_flags_shadow;
95d76136e4SXuan Zhuo 
96d76136e4SXuan Zhuo 	/*
97d76136e4SXuan Zhuo 	 * Last written value to avail->idx in
98d76136e4SXuan Zhuo 	 * guest byte order.
99d76136e4SXuan Zhuo 	 */
100d76136e4SXuan Zhuo 	u16 avail_idx_shadow;
101d76136e4SXuan Zhuo 
102d76136e4SXuan Zhuo 	/* Per-descriptor state. */
103d76136e4SXuan Zhuo 	struct vring_desc_state_split *desc_state;
104d76136e4SXuan Zhuo 	struct vring_desc_extra *desc_extra;
105d76136e4SXuan Zhuo 
106d76136e4SXuan Zhuo 	/* DMA address and size information */
107d76136e4SXuan Zhuo 	dma_addr_t queue_dma_addr;
108d76136e4SXuan Zhuo 	size_t queue_size_in_bytes;
109af36b16fSXuan Zhuo 
110af36b16fSXuan Zhuo 	/*
111af36b16fSXuan Zhuo 	 * The vring creation parameters are saved here so that a new
112af36b16fSXuan Zhuo 	 * vring can be created with the same constraints on resize.
113af36b16fSXuan Zhuo 	 */
114af36b16fSXuan Zhuo 	u32 vring_align;
115af36b16fSXuan Zhuo 	bool may_reduce_num;
116d76136e4SXuan Zhuo };
117d76136e4SXuan Zhuo 
118d76136e4SXuan Zhuo struct vring_virtqueue_packed {
119d76136e4SXuan Zhuo 	/* Actual memory layout for this queue. */
120d76136e4SXuan Zhuo 	struct {
121d76136e4SXuan Zhuo 		unsigned int num;
122d76136e4SXuan Zhuo 		struct vring_packed_desc *desc;
123d76136e4SXuan Zhuo 		struct vring_packed_desc_event *driver;
124d76136e4SXuan Zhuo 		struct vring_packed_desc_event *device;
125d76136e4SXuan Zhuo 	} vring;
126d76136e4SXuan Zhuo 
127d76136e4SXuan Zhuo 	/* Driver ring wrap counter. */
128d76136e4SXuan Zhuo 	bool avail_wrap_counter;
129d76136e4SXuan Zhuo 
130d76136e4SXuan Zhuo 	/* Avail used flags. */
131d76136e4SXuan Zhuo 	u16 avail_used_flags;
132d76136e4SXuan Zhuo 
133d76136e4SXuan Zhuo 	/* Index of the next avail descriptor. */
134d76136e4SXuan Zhuo 	u16 next_avail_idx;
135d76136e4SXuan Zhuo 
136d76136e4SXuan Zhuo 	/*
137d76136e4SXuan Zhuo 	 * Last written value to driver->flags in
138d76136e4SXuan Zhuo 	 * guest byte order.
139d76136e4SXuan Zhuo 	 */
140d76136e4SXuan Zhuo 	u16 event_flags_shadow;
141d76136e4SXuan Zhuo 
142d76136e4SXuan Zhuo 	/* Per-descriptor state. */
143d76136e4SXuan Zhuo 	struct vring_desc_state_packed *desc_state;
144d76136e4SXuan Zhuo 	struct vring_desc_extra *desc_extra;
145d76136e4SXuan Zhuo 
146d76136e4SXuan Zhuo 	/* DMA address and size information */
147d76136e4SXuan Zhuo 	dma_addr_t ring_dma_addr;
148d76136e4SXuan Zhuo 	dma_addr_t driver_event_dma_addr;
149d76136e4SXuan Zhuo 	dma_addr_t device_event_dma_addr;
150d76136e4SXuan Zhuo 	size_t ring_size_in_bytes;
151d76136e4SXuan Zhuo 	size_t event_size_in_bytes;
152d76136e4SXuan Zhuo };
153d76136e4SXuan Zhuo 
15443b4f721SMichael S. Tsirkin struct vring_virtqueue {
1550a8a69ddSRusty Russell 	struct virtqueue vq;
1560a8a69ddSRusty Russell 
1571ce9e605STiwei Bie 	/* Is this a packed ring? */
1581ce9e605STiwei Bie 	bool packed_ring;
1591ce9e605STiwei Bie 
160fb3fba6bSTiwei Bie 	/* Is DMA API used? */
161fb3fba6bSTiwei Bie 	bool use_dma_api;
162fb3fba6bSTiwei Bie 
1637b21e34fSRusty Russell 	/* Can we use weak barriers? */
1647b21e34fSRusty Russell 	bool weak_barriers;
1657b21e34fSRusty Russell 
1660a8a69ddSRusty Russell 	/* Other side has made a mess, don't try any more. */
1670a8a69ddSRusty Russell 	bool broken;
1680a8a69ddSRusty Russell 
1699fa29b9dSMark McLoughlin 	/* Host supports indirect buffers */
1709fa29b9dSMark McLoughlin 	bool indirect;
1719fa29b9dSMark McLoughlin 
172a5c262c5SMichael S. Tsirkin 	/* Host publishes avail event idx */
173a5c262c5SMichael S. Tsirkin 	bool event;
174a5c262c5SMichael S. Tsirkin 
175*8daafe9eSXuan Zhuo 	/* Do DMA mapping by driver */
176*8daafe9eSXuan Zhuo 	bool premapped;
177*8daafe9eSXuan Zhuo 
1780a8a69ddSRusty Russell 	/* Head of free buffer list. */
1790a8a69ddSRusty Russell 	unsigned int free_head;
1800a8a69ddSRusty Russell 	/* Number we've added since last sync. */
1810a8a69ddSRusty Russell 	unsigned int num_added;
1820a8a69ddSRusty Russell 
183a7722890Shuangjie.albert 	/* Last used index we've seen (see the worked example below this struct).
184a7722890Shuangjie.albert 	 * For the split ring, it simply holds the last used index.
185a7722890Shuangjie.albert 	 * For the packed ring:
186a7722890Shuangjie.albert 	 * bits below VRING_PACKED_EVENT_F_WRAP_CTR hold the last used index;
187a7722890Shuangjie.albert 	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR up hold the used wrap counter.
188a7722890Shuangjie.albert 	 */
1891bc4953eSAnthony Liguori 	u16 last_used_idx;
1900a8a69ddSRusty Russell 
1918d622d21SMichael S. Tsirkin 	/* Hint for event idx: already triggered no need to disable. */
1928d622d21SMichael S. Tsirkin 	bool event_triggered;
1938d622d21SMichael S. Tsirkin 
1941ce9e605STiwei Bie 	union {
1951ce9e605STiwei Bie 		/* Available for split ring */
196d76136e4SXuan Zhuo 		struct vring_virtqueue_split split;
197f277ec42SVenkatesh Srinivas 
1981ce9e605STiwei Bie 		/* Available for packed ring */
199d76136e4SXuan Zhuo 		struct vring_virtqueue_packed packed;
2001ce9e605STiwei Bie 	};
2011ce9e605STiwei Bie 
2020a8a69ddSRusty Russell 	/* How to notify other side. FIXME: commonalize hcalls! */
20346f9c2b9SHeinz Graalfs 	bool (*notify)(struct virtqueue *vq);
2040a8a69ddSRusty Russell 
2052a2d1382SAndy Lutomirski 	/* DMA, allocation, and size information */
2062a2d1382SAndy Lutomirski 	bool we_own_ring;
2072a2d1382SAndy Lutomirski 
2082713ea3cSJason Wang 	/* Device used for doing DMA */
2092713ea3cSJason Wang 	struct device *dma_dev;
2102713ea3cSJason Wang 
2110a8a69ddSRusty Russell #ifdef DEBUG
2120a8a69ddSRusty Russell 	/* They're supposed to lock for us. */
2130a8a69ddSRusty Russell 	unsigned int in_use;
214e93300b1SRusty Russell 
215e93300b1SRusty Russell 	/* Figure out if their kicks are too delayed. */
216e93300b1SRusty Russell 	bool last_add_time_valid;
217e93300b1SRusty Russell 	ktime_t last_add_time;
2180a8a69ddSRusty Russell #endif
2190a8a69ddSRusty Russell };
2200a8a69ddSRusty Russell 
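/*
 * Worked example of the packed-ring last_used_idx encoding described
 * above, assuming VRING_PACKED_EVENT_F_WRAP_CTR == 15 as in the UAPI
 * headers: last_used_idx == 0x8003 decodes to used index 3 with the
 * used wrap counter set, since (0x8003 & ~(-(1 << 15))) == 3 and
 * !!(0x8003 & (1 << 15)) == 1.  The packed_last_used() and
 * packed_used_wrap_counter() helpers further down implement exactly
 * this split.
 */
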
22107d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
222cd4c812aSXuan Zhuo 					       struct vring_virtqueue_split *vring_split,
22307d9629dSXuan Zhuo 					       struct virtio_device *vdev,
22407d9629dSXuan Zhuo 					       bool weak_barriers,
22507d9629dSXuan Zhuo 					       bool context,
22607d9629dSXuan Zhuo 					       bool (*notify)(struct virtqueue *),
22707d9629dSXuan Zhuo 					       void (*callback)(struct virtqueue *),
2282713ea3cSJason Wang 					       const char *name,
2292713ea3cSJason Wang 					       struct device *dma_dev);
230a2b36c8dSXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
2316fea20e5SXuan Zhuo static void vring_free(struct virtqueue *_vq);
232e6f633e5STiwei Bie 
233e6f633e5STiwei Bie /*
234e6f633e5STiwei Bie  * Helpers.
235e6f633e5STiwei Bie  */
236e6f633e5STiwei Bie 
2374b6ec919SFeng Liu #define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)
2380a8a69ddSRusty Russell 
2394b6ec919SFeng Liu static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
2402f18c2d1STiwei Bie 				   unsigned int total_sg)
2412f18c2d1STiwei Bie {
2422f18c2d1STiwei Bie 	/*
2432f18c2d1STiwei Bie 	 * If the host supports indirect descriptor tables, and we have multiple
2442f18c2d1STiwei Bie 	 * buffers, then go indirect. FIXME: tune this threshold
2452f18c2d1STiwei Bie 	 */
2462f18c2d1STiwei Bie 	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
2472f18c2d1STiwei Bie }
2482f18c2d1STiwei Bie 
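/*
 * Example, not from the source: a single-element request (total_sg ==
 * 1) always goes direct; a four-element request goes indirect provided
 * the device negotiated VIRTIO_RING_F_INDIRECT_DESC (vq->indirect) and
 * at least one free ring entry remains to carry the indirect table.
 */
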
249d26c96c8SAndy Lutomirski /*
2501a937693SMichael S. Tsirkin  * Modern virtio devices have feature bits to specify whether they need a
2511a937693SMichael S. Tsirkin  * quirk and bypass the IOMMU. If not there, just use the DMA API.
2521a937693SMichael S. Tsirkin  *
2531a937693SMichael S. Tsirkin  * If there, the interaction between virtio and DMA API is messy.
254d26c96c8SAndy Lutomirski  *
255d26c96c8SAndy Lutomirski  * On most systems with virtio, physical addresses match bus addresses,
256d26c96c8SAndy Lutomirski  * and it doesn't particularly matter whether we use the DMA API.
257d26c96c8SAndy Lutomirski  *
258d26c96c8SAndy Lutomirski  * On some systems, including Xen and any system with a physical device
259d26c96c8SAndy Lutomirski  * that speaks virtio behind a physical IOMMU, we must use the DMA API
260d26c96c8SAndy Lutomirski  * for virtio DMA to work at all.
261d26c96c8SAndy Lutomirski  *
262d26c96c8SAndy Lutomirski  * On other systems, including SPARC and PPC64, virtio-pci devices are
263d26c96c8SAndy Lutomirski  * enumerated as though they are behind an IOMMU, but the virtio host
264d26c96c8SAndy Lutomirski  * ignores the IOMMU, so we must either pretend that the IOMMU isn't
265d26c96c8SAndy Lutomirski  * there or somehow map everything as the identity.
266d26c96c8SAndy Lutomirski  *
267d26c96c8SAndy Lutomirski  * For the time being, we preserve historic behavior and bypass the DMA
268d26c96c8SAndy Lutomirski  * API.
2691a937693SMichael S. Tsirkin  *
2701a937693SMichael S. Tsirkin  * TODO: install a per-device DMA ops structure that does the right thing
2711a937693SMichael S. Tsirkin  * taking into account all the above quirks, and use the DMA API
2721a937693SMichael S. Tsirkin  * unconditionally on data path.
273d26c96c8SAndy Lutomirski  */
274d26c96c8SAndy Lutomirski 
2754b6ec919SFeng Liu static bool vring_use_dma_api(const struct virtio_device *vdev)
276d26c96c8SAndy Lutomirski {
27724b6842aSMichael S. Tsirkin 	if (!virtio_has_dma_quirk(vdev))
2781a937693SMichael S. Tsirkin 		return true;
2791a937693SMichael S. Tsirkin 
2801a937693SMichael S. Tsirkin 	/* Otherwise, we are left to guess. */
28178fe3987SAndy Lutomirski 	/*
28278fe3987SAndy Lutomirski 	 * In theory, it's possible to have a buggy QEMU-supplied
28378fe3987SAndy Lutomirski 	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
28478fe3987SAndy Lutomirski 	 * such a configuration, virtio has never worked and will
28578fe3987SAndy Lutomirski 	 * not work without an even larger kludge.  Instead, enable
28678fe3987SAndy Lutomirski 	 * the DMA API if we're a Xen guest, which at least allows
28778fe3987SAndy Lutomirski 	 * all of the sensible Xen configurations to work correctly.
28878fe3987SAndy Lutomirski 	 */
28978fe3987SAndy Lutomirski 	if (xen_domain())
29078fe3987SAndy Lutomirski 		return true;
29178fe3987SAndy Lutomirski 
292d26c96c8SAndy Lutomirski 	return false;
293d26c96c8SAndy Lutomirski }
294d26c96c8SAndy Lutomirski 
2954b6ec919SFeng Liu size_t virtio_max_dma_size(const struct virtio_device *vdev)
296e6d6dd6cSJoerg Roedel {
297e6d6dd6cSJoerg Roedel 	size_t max_segment_size = SIZE_MAX;
298e6d6dd6cSJoerg Roedel 
299e6d6dd6cSJoerg Roedel 	if (vring_use_dma_api(vdev))
300817fc978SWill Deacon 		max_segment_size = dma_max_mapping_size(vdev->dev.parent);
301e6d6dd6cSJoerg Roedel 
302e6d6dd6cSJoerg Roedel 	return max_segment_size;
303e6d6dd6cSJoerg Roedel }
304e6d6dd6cSJoerg Roedel EXPORT_SYMBOL_GPL(virtio_max_dma_size);
305e6d6dd6cSJoerg Roedel 
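/*
 * Hedged usage sketch (hypothetical caller, not from this file):
 *
 *	max_seg = virtio_max_dma_size(vdev);
 *
 * i.e. a driver clamps its per-segment transfer size to what the DMA
 * layer of the parent device can actually map; virtio-blk, for
 * instance, feeds a value like this into blk_queue_max_segment_size().
 */
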
306d79dca75STiwei Bie static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
3072713ea3cSJason Wang 			       dma_addr_t *dma_handle, gfp_t flag,
3082713ea3cSJason Wang 			       struct device *dma_dev)
309d79dca75STiwei Bie {
310d79dca75STiwei Bie 	if (vring_use_dma_api(vdev)) {
3112713ea3cSJason Wang 		return dma_alloc_coherent(dma_dev, size,
312d79dca75STiwei Bie 					  dma_handle, flag);
313d79dca75STiwei Bie 	} else {
314d79dca75STiwei Bie 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
315d79dca75STiwei Bie 
316d79dca75STiwei Bie 		if (queue) {
317d79dca75STiwei Bie 			phys_addr_t phys_addr = virt_to_phys(queue);
318d79dca75STiwei Bie 			*dma_handle = (dma_addr_t)phys_addr;
319d79dca75STiwei Bie 
320d79dca75STiwei Bie 			/*
321d79dca75STiwei Bie 			 * Sanity check: make sure we didn't truncate
322d79dca75STiwei Bie 			 * the address.  The only arches I can find that
323d79dca75STiwei Bie 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
324d79dca75STiwei Bie 			 * are certain non-highmem MIPS and x86
325d79dca75STiwei Bie 			 * configurations, but these configurations
326d79dca75STiwei Bie 			 * should never allocate physical pages above 32
327d79dca75STiwei Bie 			 * bits, so this is fine.  Just in case, throw a
328d79dca75STiwei Bie 			 * warning and abort if we end up with an
329d79dca75STiwei Bie 			 * unrepresentable address.
330d79dca75STiwei Bie 			 */
331d79dca75STiwei Bie 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
332d79dca75STiwei Bie 				free_pages_exact(queue, PAGE_ALIGN(size));
333d79dca75STiwei Bie 				return NULL;
334d79dca75STiwei Bie 			}
335d79dca75STiwei Bie 		}
336d79dca75STiwei Bie 		return queue;
337d79dca75STiwei Bie 	}
338d79dca75STiwei Bie }
339d79dca75STiwei Bie 
340d79dca75STiwei Bie static void vring_free_queue(struct virtio_device *vdev, size_t size,
3412713ea3cSJason Wang 			     void *queue, dma_addr_t dma_handle,
3422713ea3cSJason Wang 			     struct device *dma_dev)
343d79dca75STiwei Bie {
344d79dca75STiwei Bie 	if (vring_use_dma_api(vdev))
3452713ea3cSJason Wang 		dma_free_coherent(dma_dev, size, queue, dma_handle);
346d79dca75STiwei Bie 	else
347d79dca75STiwei Bie 		free_pages_exact(queue, PAGE_ALIGN(size));
348d79dca75STiwei Bie }
349d79dca75STiwei Bie 
350780bc790SAndy Lutomirski /*
351780bc790SAndy Lutomirski  * The DMA ops on various arches are rather gnarly right now, and
352780bc790SAndy Lutomirski  * making all of the arch DMA ops work on the vring device itself
3532713ea3cSJason Wang  * is a mess.
354780bc790SAndy Lutomirski  */
3551adbd6b2SFeng Liu static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
356780bc790SAndy Lutomirski {
3572713ea3cSJason Wang 	return vq->dma_dev;
358780bc790SAndy Lutomirski }
359780bc790SAndy Lutomirski 
360780bc790SAndy Lutomirski /* Map one sg entry. */
3610e27fa6dSXuan Zhuo static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
3620e27fa6dSXuan Zhuo 			    enum dma_data_direction direction, dma_addr_t *addr)
363780bc790SAndy Lutomirski {
36488938359SAlexander Potapenko 	if (!vq->use_dma_api) {
36588938359SAlexander Potapenko 		/*
36688938359SAlexander Potapenko 		 * If DMA is not used, KMSAN doesn't know that the scatterlist
36788938359SAlexander Potapenko 		 * is initialized by the hardware. Explicitly check/unpoison it
36888938359SAlexander Potapenko 		 * depending on the direction.
36988938359SAlexander Potapenko 		 */
37088938359SAlexander Potapenko 		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
3710e27fa6dSXuan Zhuo 		*addr = (dma_addr_t)sg_phys(sg);
3720e27fa6dSXuan Zhuo 		return 0;
37388938359SAlexander Potapenko 	}
374780bc790SAndy Lutomirski 
375780bc790SAndy Lutomirski 	/*
376780bc790SAndy Lutomirski 	 * We can't use dma_map_sg, because we don't use scatterlists in
377780bc790SAndy Lutomirski 	 * the way it expects (we don't guarantee that the scatterlist
378780bc790SAndy Lutomirski 	 * will exist for the lifetime of the mapping).
379780bc790SAndy Lutomirski 	 */
3800e27fa6dSXuan Zhuo 	*addr = dma_map_page(vring_dma_dev(vq),
381780bc790SAndy Lutomirski 			    sg_page(sg), sg->offset, sg->length,
382780bc790SAndy Lutomirski 			    direction);
3830e27fa6dSXuan Zhuo 
3840e27fa6dSXuan Zhuo 	if (dma_mapping_error(vring_dma_dev(vq), *addr))
3850e27fa6dSXuan Zhuo 		return -ENOMEM;
3860e27fa6dSXuan Zhuo 
3870e27fa6dSXuan Zhuo 	return 0;
388780bc790SAndy Lutomirski }
389780bc790SAndy Lutomirski 
390780bc790SAndy Lutomirski static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
391780bc790SAndy Lutomirski 				   void *cpu_addr, size_t size,
392780bc790SAndy Lutomirski 				   enum dma_data_direction direction)
393780bc790SAndy Lutomirski {
394fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
395780bc790SAndy Lutomirski 		return (dma_addr_t)virt_to_phys(cpu_addr);
396780bc790SAndy Lutomirski 
397780bc790SAndy Lutomirski 	return dma_map_single(vring_dma_dev(vq),
398780bc790SAndy Lutomirski 			      cpu_addr, size, direction);
399780bc790SAndy Lutomirski }
400780bc790SAndy Lutomirski 
401e6f633e5STiwei Bie static int vring_mapping_error(const struct vring_virtqueue *vq,
402e6f633e5STiwei Bie 			       dma_addr_t addr)
403e6f633e5STiwei Bie {
404fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
405e6f633e5STiwei Bie 		return 0;
406e6f633e5STiwei Bie 
407e6f633e5STiwei Bie 	return dma_mapping_error(vring_dma_dev(vq), addr);
408e6f633e5STiwei Bie }
409e6f633e5STiwei Bie 
4103a897128SXuan Zhuo static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
4113a897128SXuan Zhuo {
4123a897128SXuan Zhuo 	vq->vq.num_free = num;
4133a897128SXuan Zhuo 
4143a897128SXuan Zhuo 	if (vq->packed_ring)
4153a897128SXuan Zhuo 		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
4163a897128SXuan Zhuo 	else
4173a897128SXuan Zhuo 		vq->last_used_idx = 0;
4183a897128SXuan Zhuo 
4193a897128SXuan Zhuo 	vq->event_triggered = false;
4203a897128SXuan Zhuo 	vq->num_added = 0;
4213a897128SXuan Zhuo 
4223a897128SXuan Zhuo #ifdef DEBUG
4233a897128SXuan Zhuo 	vq->in_use = false;
4243a897128SXuan Zhuo 	vq->last_add_time_valid = false;
4253a897128SXuan Zhuo #endif
4263a897128SXuan Zhuo }
4273a897128SXuan Zhuo 
428e6f633e5STiwei Bie 
429e6f633e5STiwei Bie /*
430e6f633e5STiwei Bie  * Split ring specific functions - *_split().
431e6f633e5STiwei Bie  */
432e6f633e5STiwei Bie 
43372b5e895SJason Wang static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
4344b6ec919SFeng Liu 					   const struct vring_desc *desc)
435780bc790SAndy Lutomirski {
436780bc790SAndy Lutomirski 	u16 flags;
437780bc790SAndy Lutomirski 
438fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
439780bc790SAndy Lutomirski 		return;
440780bc790SAndy Lutomirski 
441780bc790SAndy Lutomirski 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
442780bc790SAndy Lutomirski 
443780bc790SAndy Lutomirski 	dma_unmap_page(vring_dma_dev(vq),
444780bc790SAndy Lutomirski 		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
445780bc790SAndy Lutomirski 		       virtio32_to_cpu(vq->vq.vdev, desc->len),
446780bc790SAndy Lutomirski 		       (flags & VRING_DESC_F_WRITE) ?
447780bc790SAndy Lutomirski 		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
448780bc790SAndy Lutomirski }
449780bc790SAndy Lutomirski 
45072b5e895SJason Wang static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
45172b5e895SJason Wang 					  unsigned int i)
45272b5e895SJason Wang {
45372b5e895SJason Wang 	struct vring_desc_extra *extra = vq->split.desc_extra;
45472b5e895SJason Wang 	u16 flags;
45572b5e895SJason Wang 
45672b5e895SJason Wang 	if (!vq->use_dma_api)
45772b5e895SJason Wang 		goto out;
45872b5e895SJason Wang 
45972b5e895SJason Wang 	flags = extra[i].flags;
46072b5e895SJason Wang 
46172b5e895SJason Wang 	if (flags & VRING_DESC_F_INDIRECT) {
46272b5e895SJason Wang 		dma_unmap_single(vring_dma_dev(vq),
46372b5e895SJason Wang 				 extra[i].addr,
46472b5e895SJason Wang 				 extra[i].len,
46572b5e895SJason Wang 				 (flags & VRING_DESC_F_WRITE) ?
46672b5e895SJason Wang 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
46772b5e895SJason Wang 	} else {
46872b5e895SJason Wang 		dma_unmap_page(vring_dma_dev(vq),
46972b5e895SJason Wang 			       extra[i].addr,
47072b5e895SJason Wang 			       extra[i].len,
47172b5e895SJason Wang 			       (flags & VRING_DESC_F_WRITE) ?
47272b5e895SJason Wang 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
47372b5e895SJason Wang 	}
47472b5e895SJason Wang 
47572b5e895SJason Wang out:
47672b5e895SJason Wang 	return extra[i].next;
47772b5e895SJason Wang }
47872b5e895SJason Wang 
479138fd251STiwei Bie static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
480138fd251STiwei Bie 					       unsigned int total_sg,
481138fd251STiwei Bie 					       gfp_t gfp)
4829fa29b9dSMark McLoughlin {
4839fa29b9dSMark McLoughlin 	struct vring_desc *desc;
484b25bd251SRusty Russell 	unsigned int i;
4859fa29b9dSMark McLoughlin 
486b92b1b89SWill Deacon 	/*
487b92b1b89SWill Deacon 	 * We require lowmem mappings for the descriptors because
488b92b1b89SWill Deacon 	 * otherwise virt_to_phys will give us bogus addresses in the
489b92b1b89SWill Deacon 	 * virtqueue.
490b92b1b89SWill Deacon 	 */
49182107539SMichal Hocko 	gfp &= ~__GFP_HIGHMEM;
492b92b1b89SWill Deacon 
4936da2ec56SKees Cook 	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
4949fa29b9dSMark McLoughlin 	if (!desc)
495b25bd251SRusty Russell 		return NULL;
4969fa29b9dSMark McLoughlin 
497b25bd251SRusty Russell 	for (i = 0; i < total_sg; i++)
49800e6f3d9SMichael S. Tsirkin 		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
499b25bd251SRusty Russell 	return desc;
5009fa29b9dSMark McLoughlin }
5019fa29b9dSMark McLoughlin 
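/*
 * Note on the chaining above: after allocation desc[i].next == i + 1
 * for every entry, so desc[total_sg - 1].next points one past the end
 * of the table.  That dangling link is harmless: the caller clears
 * VRING_DESC_F_NEXT on the final descriptor before the table is
 * exposed, so the device never follows it.
 */
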
502fe4c3862SJason Wang static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
503fe4c3862SJason Wang 						    struct vring_desc *desc,
504fe4c3862SJason Wang 						    unsigned int i,
505fe4c3862SJason Wang 						    dma_addr_t addr,
506fe4c3862SJason Wang 						    unsigned int len,
50772b5e895SJason Wang 						    u16 flags,
50872b5e895SJason Wang 						    bool indirect)
509fe4c3862SJason Wang {
51072b5e895SJason Wang 	struct vring_virtqueue *vring = to_vvq(vq);
51172b5e895SJason Wang 	struct vring_desc_extra *extra = vring->split.desc_extra;
51272b5e895SJason Wang 	u16 next;
51372b5e895SJason Wang 
514fe4c3862SJason Wang 	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
515fe4c3862SJason Wang 	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
516fe4c3862SJason Wang 	desc[i].len = cpu_to_virtio32(vq->vdev, len);
517fe4c3862SJason Wang 
51872b5e895SJason Wang 	if (!indirect) {
51972b5e895SJason Wang 		next = extra[i].next;
52072b5e895SJason Wang 		desc[i].next = cpu_to_virtio16(vq->vdev, next);
52172b5e895SJason Wang 
52272b5e895SJason Wang 		extra[i].addr = addr;
52372b5e895SJason Wang 		extra[i].len = len;
52472b5e895SJason Wang 		extra[i].flags = flags;
52572b5e895SJason Wang 	} else
52672b5e895SJason Wang 		next = virtio16_to_cpu(vq->vdev, desc[i].next);
52772b5e895SJason Wang 
52872b5e895SJason Wang 	return next;
529fe4c3862SJason Wang }
530fe4c3862SJason Wang 
531138fd251STiwei Bie static inline int virtqueue_add_split(struct virtqueue *_vq,
53213816c76SRusty Russell 				      struct scatterlist *sgs[],
533eeebf9b1SRusty Russell 				      unsigned int total_sg,
53413816c76SRusty Russell 				      unsigned int out_sgs,
53513816c76SRusty Russell 				      unsigned int in_sgs,
536bbd603efSMichael S. Tsirkin 				      void *data,
5375a08b04fSMichael S. Tsirkin 				      void *ctx,
538bbd603efSMichael S. Tsirkin 				      gfp_t gfp)
5390a8a69ddSRusty Russell {
5400a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
54113816c76SRusty Russell 	struct scatterlist *sg;
542b25bd251SRusty Russell 	struct vring_desc *desc;
5433f649ab7SKees Cook 	unsigned int i, n, avail, descs_used, prev, err_idx;
5441fe9b6feSMichael S. Tsirkin 	int head;
545b25bd251SRusty Russell 	bool indirect;
5460a8a69ddSRusty Russell 
5479fa29b9dSMark McLoughlin 	START_USE(vq);
5489fa29b9dSMark McLoughlin 
5490a8a69ddSRusty Russell 	BUG_ON(data == NULL);
5505a08b04fSMichael S. Tsirkin 	BUG_ON(ctx && vq->indirect);
5519fa29b9dSMark McLoughlin 
55270670444SRusty Russell 	if (unlikely(vq->broken)) {
55370670444SRusty Russell 		END_USE(vq);
55470670444SRusty Russell 		return -EIO;
55570670444SRusty Russell 	}
55670670444SRusty Russell 
5574d6a105eSTiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
558e93300b1SRusty Russell 
55913816c76SRusty Russell 	BUG_ON(total_sg == 0);
5600a8a69ddSRusty Russell 
561b25bd251SRusty Russell 	head = vq->free_head;
562b25bd251SRusty Russell 
56335c51e09SXianting Tian 	if (virtqueue_use_indirect(vq, total_sg))
564138fd251STiwei Bie 		desc = alloc_indirect_split(_vq, total_sg, gfp);
56544ed8089SRichard W.M. Jones 	else {
566b25bd251SRusty Russell 		desc = NULL;
567e593bf97STiwei Bie 		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
56844ed8089SRichard W.M. Jones 	}
569b25bd251SRusty Russell 
570b25bd251SRusty Russell 	if (desc) {
571b25bd251SRusty Russell 		/* Use a single buffer which doesn't continue */
572780bc790SAndy Lutomirski 		indirect = true;
573b25bd251SRusty Russell 		/* Set up rest to use this indirect table. */
574b25bd251SRusty Russell 		i = 0;
575b25bd251SRusty Russell 		descs_used = 1;
576b25bd251SRusty Russell 	} else {
577780bc790SAndy Lutomirski 		indirect = false;
578e593bf97STiwei Bie 		desc = vq->split.vring.desc;
579b25bd251SRusty Russell 		i = head;
580b25bd251SRusty Russell 		descs_used = total_sg;
581b25bd251SRusty Russell 	}
582b25bd251SRusty Russell 
583b4b4ff73SXianting Tian 	if (unlikely(vq->vq.num_free < descs_used)) {
5840a8a69ddSRusty Russell 		pr_debug("Can't add buf len %i - avail = %i\n",
585b25bd251SRusty Russell 			 descs_used, vq->vq.num_free);
58644653eaeSRusty Russell 		/* FIXME: for historical reasons, we force a notify here if
58744653eaeSRusty Russell 		 * there are outgoing parts to the buffer.  Presumably the
58844653eaeSRusty Russell 		 * host should service the ring ASAP. */
58913816c76SRusty Russell 		if (out_sgs)
590426e3e0aSRusty Russell 			vq->notify(&vq->vq);
59158625edfSWei Yongjun 		if (indirect)
59258625edfSWei Yongjun 			kfree(desc);
5930a8a69ddSRusty Russell 		END_USE(vq);
5940a8a69ddSRusty Russell 		return -ENOSPC;
5950a8a69ddSRusty Russell 	}
5960a8a69ddSRusty Russell 
59713816c76SRusty Russell 	for (n = 0; n < out_sgs; n++) {
598eeebf9b1SRusty Russell 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
5990e27fa6dSXuan Zhuo 			dma_addr_t addr;
6000e27fa6dSXuan Zhuo 
6010e27fa6dSXuan Zhuo 			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
602780bc790SAndy Lutomirski 				goto unmap_release;
603780bc790SAndy Lutomirski 
6040a8a69ddSRusty Russell 			prev = i;
60572b5e895SJason Wang 			/* Note that we trust the indirect descriptor
60672b5e895SJason Wang 			 * table since it uses stream DMA mappings.
60772b5e895SJason Wang 			 */
608fe4c3862SJason Wang 			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
60972b5e895SJason Wang 						     VRING_DESC_F_NEXT,
61072b5e895SJason Wang 						     indirect);
6110a8a69ddSRusty Russell 		}
61213816c76SRusty Russell 	}
61313816c76SRusty Russell 	for (; n < (out_sgs + in_sgs); n++) {
614eeebf9b1SRusty Russell 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
6150e27fa6dSXuan Zhuo 			dma_addr_t addr;
6160e27fa6dSXuan Zhuo 
6170e27fa6dSXuan Zhuo 			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
618780bc790SAndy Lutomirski 				goto unmap_release;
619780bc790SAndy Lutomirski 
6200a8a69ddSRusty Russell 			prev = i;
62172b5e895SJason Wang 			/* Note that we trust the indirect descriptor
62272b5e895SJason Wang 			 * table since it uses stream DMA mappings.
62372b5e895SJason Wang 			 */
624fe4c3862SJason Wang 			i = virtqueue_add_desc_split(_vq, desc, i, addr,
625fe4c3862SJason Wang 						     sg->length,
626fe4c3862SJason Wang 						     VRING_DESC_F_NEXT |
62772b5e895SJason Wang 						     VRING_DESC_F_WRITE,
62872b5e895SJason Wang 						     indirect);
62913816c76SRusty Russell 		}
6300a8a69ddSRusty Russell 	}
6310a8a69ddSRusty Russell 	/* Last one doesn't continue. */
63200e6f3d9SMichael S. Tsirkin 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
63372b5e895SJason Wang 	if (!indirect && vq->use_dma_api)
634890d3356SVincent Whitchurch 		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
63572b5e895SJason Wang 			~VRING_DESC_F_NEXT;
6360a8a69ddSRusty Russell 
637780bc790SAndy Lutomirski 	if (indirect) {
638780bc790SAndy Lutomirski 		/* Now that the indirect table is filled in, map it. */
639780bc790SAndy Lutomirski 		dma_addr_t addr = vring_map_single(
640780bc790SAndy Lutomirski 			vq, desc, total_sg * sizeof(struct vring_desc),
641780bc790SAndy Lutomirski 			DMA_TO_DEVICE);
642780bc790SAndy Lutomirski 		if (vring_mapping_error(vq, addr))
643780bc790SAndy Lutomirski 			goto unmap_release;
644780bc790SAndy Lutomirski 
645fe4c3862SJason Wang 		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
646fe4c3862SJason Wang 					 head, addr,
647fe4c3862SJason Wang 					 total_sg * sizeof(struct vring_desc),
64872b5e895SJason Wang 					 VRING_DESC_F_INDIRECT,
64972b5e895SJason Wang 					 false);
650780bc790SAndy Lutomirski 	}
651780bc790SAndy Lutomirski 
652780bc790SAndy Lutomirski 	/* We're using some buffers from the free list. */
653780bc790SAndy Lutomirski 	vq->vq.num_free -= descs_used;
654780bc790SAndy Lutomirski 
6550a8a69ddSRusty Russell 	/* Update free pointer */
656b25bd251SRusty Russell 	if (indirect)
65772b5e895SJason Wang 		vq->free_head = vq->split.desc_extra[head].next;
658b25bd251SRusty Russell 	else
6590a8a69ddSRusty Russell 		vq->free_head = i;
6600a8a69ddSRusty Russell 
661780bc790SAndy Lutomirski 	/* Store token and indirect buffer state. */
662cbeedb72STiwei Bie 	vq->split.desc_state[head].data = data;
663780bc790SAndy Lutomirski 	if (indirect)
664cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = desc;
66587646a34SJason Wang 	else
666cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = ctx;
6670a8a69ddSRusty Russell 
6680a8a69ddSRusty Russell 	/* Put entry in available array (but don't update avail->idx until they
6693b720b8cSRusty Russell 	 * do sync). */
670e593bf97STiwei Bie 	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
671e593bf97STiwei Bie 	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
6720a8a69ddSRusty Russell 
673ee7cd898SRusty Russell 	/* Descriptors and available array need to be set before we expose the
674ee7cd898SRusty Russell 	 * new available array entries. */
675a9a0fef7SRusty Russell 	virtio_wmb(vq->weak_barriers);
676e593bf97STiwei Bie 	vq->split.avail_idx_shadow++;
677e593bf97STiwei Bie 	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
678e593bf97STiwei Bie 						vq->split.avail_idx_shadow);
679ee7cd898SRusty Russell 	vq->num_added++;
680ee7cd898SRusty Russell 
6815e05bf58STetsuo Handa 	pr_debug("Added buffer head %i to %p\n", head, vq);
6825e05bf58STetsuo Handa 	END_USE(vq);
6835e05bf58STetsuo Handa 
684ee7cd898SRusty Russell 	/* This is very unlikely, but theoretically possible.  Kick
685ee7cd898SRusty Russell 	 * just in case. */
686ee7cd898SRusty Russell 	if (unlikely(vq->num_added == (1 << 16) - 1))
687ee7cd898SRusty Russell 		virtqueue_kick(_vq);
688ee7cd898SRusty Russell 
68998e8c6bcSRusty Russell 	return 0;
690780bc790SAndy Lutomirski 
691780bc790SAndy Lutomirski unmap_release:
692780bc790SAndy Lutomirski 	err_idx = i;
693cf8f1696SMatthias Lange 
694cf8f1696SMatthias Lange 	if (indirect)
695cf8f1696SMatthias Lange 		i = 0;
696cf8f1696SMatthias Lange 	else
697780bc790SAndy Lutomirski 		i = head;
698780bc790SAndy Lutomirski 
699780bc790SAndy Lutomirski 	for (n = 0; n < total_sg; n++) {
700780bc790SAndy Lutomirski 		if (i == err_idx)
701780bc790SAndy Lutomirski 			break;
70272b5e895SJason Wang 		if (indirect) {
70372b5e895SJason Wang 			vring_unmap_one_split_indirect(vq, &desc[i]);
704cf8f1696SMatthias Lange 			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
70572b5e895SJason Wang 		} else
70672b5e895SJason Wang 			i = vring_unmap_one_split(vq, i);
707780bc790SAndy Lutomirski 	}
708780bc790SAndy Lutomirski 
709780bc790SAndy Lutomirski 	if (indirect)
710780bc790SAndy Lutomirski 		kfree(desc);
711780bc790SAndy Lutomirski 
7123cc36f6eSMichael S. Tsirkin 	END_USE(vq);
713f7728002SHalil Pasic 	return -ENOMEM;
7140a8a69ddSRusty Russell }
71513816c76SRusty Russell 
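/*
 * Illustrative caller sketch, assuming the public wrappers
 * (virtqueue_add_sgs() and friends funnel into this function for split
 * rings).  A driver queueing one driver-to-device buffer followed by
 * one device-to-driver buffer might do:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *
 * Here out_sgs == 1 and in_sgs == 1, so total_sg == 2; req and resp
 * are hypothetical driver-owned buffers.
 */
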
716138fd251STiwei Bie static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
7170a8a69ddSRusty Russell {
7180a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
719a5c262c5SMichael S. Tsirkin 	u16 new, old;
72041f0377fSRusty Russell 	bool needs_kick;
72141f0377fSRusty Russell 
7220a8a69ddSRusty Russell 	START_USE(vq);
723a72caae2SJason Wang 	/* We need to expose available array entries before checking avail
724a72caae2SJason Wang 	 * event. */
725a9a0fef7SRusty Russell 	virtio_mb(vq->weak_barriers);
7260a8a69ddSRusty Russell 
727e593bf97STiwei Bie 	old = vq->split.avail_idx_shadow - vq->num_added;
728e593bf97STiwei Bie 	new = vq->split.avail_idx_shadow;
7290a8a69ddSRusty Russell 	vq->num_added = 0;
7300a8a69ddSRusty Russell 
7314d6a105eSTiwei Bie 	LAST_ADD_TIME_CHECK(vq);
7324d6a105eSTiwei Bie 	LAST_ADD_TIME_INVALID(vq);
733e93300b1SRusty Russell 
73441f0377fSRusty Russell 	if (vq->event) {
735e593bf97STiwei Bie 		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
736e593bf97STiwei Bie 					vring_avail_event(&vq->split.vring)),
73741f0377fSRusty Russell 					      new, old);
73841f0377fSRusty Russell 	} else {
739e593bf97STiwei Bie 		needs_kick = !(vq->split.vring.used->flags &
740e593bf97STiwei Bie 					cpu_to_virtio16(_vq->vdev,
741e593bf97STiwei Bie 						VRING_USED_F_NO_NOTIFY));
74241f0377fSRusty Russell 	}
7430a8a69ddSRusty Russell 	END_USE(vq);
74441f0377fSRusty Russell 	return needs_kick;
74541f0377fSRusty Russell }
746138fd251STiwei Bie 
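/*
 * Worked example of the event-idx test above: vring_need_event(ev,
 * new, old) evaluates (u16)(new - ev - 1) < (u16)(new - old).  With
 * old == 10 and new == 12, an avail event of 10 or 11 falls inside the
 * freshly published window [old, new), so the device must be kicked;
 * an avail event of 12 or beyond lies outside the window and the kick
 * is skipped.
 */
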
747138fd251STiwei Bie static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
7485a08b04fSMichael S. Tsirkin 			     void **ctx)
7490a8a69ddSRusty Russell {
750780bc790SAndy Lutomirski 	unsigned int i, j;
751c60923cbSGonglei 	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
7520a8a69ddSRusty Russell 
7530a8a69ddSRusty Russell 	/* Clear data ptr. */
754cbeedb72STiwei Bie 	vq->split.desc_state[head].data = NULL;
7550a8a69ddSRusty Russell 
756780bc790SAndy Lutomirski 	/* Put back on free list: unmap first-level descriptors and find end */
7570a8a69ddSRusty Russell 	i = head;
7589fa29b9dSMark McLoughlin 
759e593bf97STiwei Bie 	while (vq->split.vring.desc[i].flags & nextflag) {
76072b5e895SJason Wang 		vring_unmap_one_split(vq, i);
76172b5e895SJason Wang 		i = vq->split.desc_extra[i].next;
76206ca287dSRusty Russell 		vq->vq.num_free++;
7630a8a69ddSRusty Russell 	}
7640a8a69ddSRusty Russell 
76572b5e895SJason Wang 	vring_unmap_one_split(vq, i);
76672b5e895SJason Wang 	vq->split.desc_extra[i].next = vq->free_head;
7670a8a69ddSRusty Russell 	vq->free_head = head;
768780bc790SAndy Lutomirski 
7690a8a69ddSRusty Russell 	/* Plus final descriptor */
77006ca287dSRusty Russell 	vq->vq.num_free++;
771780bc790SAndy Lutomirski 
7725a08b04fSMichael S. Tsirkin 	if (vq->indirect) {
773cbeedb72STiwei Bie 		struct vring_desc *indir_desc =
774cbeedb72STiwei Bie 				vq->split.desc_state[head].indir_desc;
7755a08b04fSMichael S. Tsirkin 		u32 len;
7765a08b04fSMichael S. Tsirkin 
7775a08b04fSMichael S. Tsirkin 		/* Free the indirect table, if any, now that it's unmapped. */
7785a08b04fSMichael S. Tsirkin 		if (!indir_desc)
7795a08b04fSMichael S. Tsirkin 			return;
7805a08b04fSMichael S. Tsirkin 
78172b5e895SJason Wang 		len = vq->split.desc_extra[head].len;
782780bc790SAndy Lutomirski 
78372b5e895SJason Wang 		BUG_ON(!(vq->split.desc_extra[head].flags &
78472b5e895SJason Wang 				VRING_DESC_F_INDIRECT));
785780bc790SAndy Lutomirski 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
786780bc790SAndy Lutomirski 
787610c708bSXuan Zhuo 		if (vq->use_dma_api) {
788780bc790SAndy Lutomirski 			for (j = 0; j < len / sizeof(struct vring_desc); j++)
78972b5e895SJason Wang 				vring_unmap_one_split_indirect(vq, &indir_desc[j]);
790610c708bSXuan Zhuo 		}
791780bc790SAndy Lutomirski 
7925a08b04fSMichael S. Tsirkin 		kfree(indir_desc);
793cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = NULL;
7945a08b04fSMichael S. Tsirkin 	} else if (ctx) {
795cbeedb72STiwei Bie 		*ctx = vq->split.desc_state[head].indir_desc;
796780bc790SAndy Lutomirski 	}
7970a8a69ddSRusty Russell }
7980a8a69ddSRusty Russell 
7991adbd6b2SFeng Liu static bool more_used_split(const struct vring_virtqueue *vq)
8000a8a69ddSRusty Russell {
801e593bf97STiwei Bie 	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
802e593bf97STiwei Bie 			vq->split.vring.used->idx);
8030a8a69ddSRusty Russell }
8040a8a69ddSRusty Russell 
805138fd251STiwei Bie static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
806138fd251STiwei Bie 					 unsigned int *len,
8075a08b04fSMichael S. Tsirkin 					 void **ctx)
8080a8a69ddSRusty Russell {
8090a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
8100a8a69ddSRusty Russell 	void *ret;
8110a8a69ddSRusty Russell 	unsigned int i;
8123b720b8cSRusty Russell 	u16 last_used;
8130a8a69ddSRusty Russell 
8140a8a69ddSRusty Russell 	START_USE(vq);
8150a8a69ddSRusty Russell 
8165ef82752SRusty Russell 	if (unlikely(vq->broken)) {
8175ef82752SRusty Russell 		END_USE(vq);
8185ef82752SRusty Russell 		return NULL;
8195ef82752SRusty Russell 	}
8205ef82752SRusty Russell 
821138fd251STiwei Bie 	if (!more_used_split(vq)) {
8220a8a69ddSRusty Russell 		pr_debug("No more buffers in queue\n");
8230a8a69ddSRusty Russell 		END_USE(vq);
8240a8a69ddSRusty Russell 		return NULL;
8250a8a69ddSRusty Russell 	}
8260a8a69ddSRusty Russell 
8272d61ba95SMichael S. Tsirkin 	/* Only get used array entries after they have been exposed by host. */
828a9a0fef7SRusty Russell 	virtio_rmb(vq->weak_barriers);
8292d61ba95SMichael S. Tsirkin 
830e593bf97STiwei Bie 	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
831e593bf97STiwei Bie 	i = virtio32_to_cpu(_vq->vdev,
832e593bf97STiwei Bie 			vq->split.vring.used->ring[last_used].id);
833e593bf97STiwei Bie 	*len = virtio32_to_cpu(_vq->vdev,
834e593bf97STiwei Bie 			vq->split.vring.used->ring[last_used].len);
8350a8a69ddSRusty Russell 
836e593bf97STiwei Bie 	if (unlikely(i >= vq->split.vring.num)) {
8370a8a69ddSRusty Russell 		BAD_RING(vq, "id %u out of range\n", i);
8380a8a69ddSRusty Russell 		return NULL;
8390a8a69ddSRusty Russell 	}
840cbeedb72STiwei Bie 	if (unlikely(!vq->split.desc_state[i].data)) {
8410a8a69ddSRusty Russell 		BAD_RING(vq, "id %u is not a head!\n", i);
8420a8a69ddSRusty Russell 		return NULL;
8430a8a69ddSRusty Russell 	}
8440a8a69ddSRusty Russell 
845138fd251STiwei Bie 	/* detach_buf_split clears data, so grab it now. */
846cbeedb72STiwei Bie 	ret = vq->split.desc_state[i].data;
847138fd251STiwei Bie 	detach_buf_split(vq, i, ctx);
8480a8a69ddSRusty Russell 	vq->last_used_idx++;
849a5c262c5SMichael S. Tsirkin 	/* If we expect an interrupt for the next entry, tell host
850a5c262c5SMichael S. Tsirkin 	 * by writing event index and flush out the write before
851a5c262c5SMichael S. Tsirkin 	 * the read in the next get_buf call. */
852e593bf97STiwei Bie 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
853788e5b3aSMichael S. Tsirkin 		virtio_store_mb(vq->weak_barriers,
854e593bf97STiwei Bie 				&vring_used_event(&vq->split.vring),
855788e5b3aSMichael S. Tsirkin 				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
856a5c262c5SMichael S. Tsirkin 
8574d6a105eSTiwei Bie 	LAST_ADD_TIME_INVALID(vq);
858e93300b1SRusty Russell 
8590a8a69ddSRusty Russell 	END_USE(vq);
8600a8a69ddSRusty Russell 	return ret;
8610a8a69ddSRusty Russell }
862138fd251STiwei Bie 
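/*
 * Hedged usage sketch (driver side, not from this file): completions
 * are typically drained in a loop from the vq callback via the public
 * wrapper, e.g.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);
 *
 * where handle_completion() is a hypothetical per-driver handler and
 * len is the number of bytes the device wrote into the buffer.
 */
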
863138fd251STiwei Bie static void virtqueue_disable_cb_split(struct virtqueue *_vq)
864138fd251STiwei Bie {
865138fd251STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
866138fd251STiwei Bie 
867e593bf97STiwei Bie 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
868e593bf97STiwei Bie 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
8696c0b057cSAlbert Huang 
8706c0b057cSAlbert Huang 		/*
8716c0b057cSAlbert Huang 		 * If the device already triggered an event it won't trigger one again:
8726c0b057cSAlbert Huang 		 * no need to disable.
8736c0b057cSAlbert Huang 		 */
8746c0b057cSAlbert Huang 		if (vq->event_triggered)
8756c0b057cSAlbert Huang 			return;
8766c0b057cSAlbert Huang 
8778d622d21SMichael S. Tsirkin 		if (vq->event)
8788d622d21SMichael S. Tsirkin 			/* TODO: this is a hack. Figure out a cleaner value to write. */
8798d622d21SMichael S. Tsirkin 			vring_used_event(&vq->split.vring) = 0x0;
8808d622d21SMichael S. Tsirkin 		else
881e593bf97STiwei Bie 			vq->split.vring.avail->flags =
882e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
883e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
884138fd251STiwei Bie 	}
885138fd251STiwei Bie }
886138fd251STiwei Bie 
88731532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
888cc229884SMichael S. Tsirkin {
889cc229884SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
890cc229884SMichael S. Tsirkin 	u16 last_used_idx;
891cc229884SMichael S. Tsirkin 
892cc229884SMichael S. Tsirkin 	START_USE(vq);
893cc229884SMichael S. Tsirkin 
894cc229884SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
895cc229884SMichael S. Tsirkin 	 * more to do. */
896cc229884SMichael S. Tsirkin 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
897cc229884SMichael S. Tsirkin 	 * either clear the flags bit or point the event index at the next
898cc229884SMichael S. Tsirkin 	 * entry. Always do both to keep code simple. */
899e593bf97STiwei Bie 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
900e593bf97STiwei Bie 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
9010ea1e4a6SLadi Prosek 		if (!vq->event)
902e593bf97STiwei Bie 			vq->split.vring.avail->flags =
903e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
904e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
905f277ec42SVenkatesh Srinivas 	}
906e593bf97STiwei Bie 	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
907e593bf97STiwei Bie 			last_used_idx = vq->last_used_idx);
908cc229884SMichael S. Tsirkin 	END_USE(vq);
909cc229884SMichael S. Tsirkin 	return last_used_idx;
910cc229884SMichael S. Tsirkin }
911138fd251STiwei Bie 
91231532340SSolomon Tan static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
913138fd251STiwei Bie {
914138fd251STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
915138fd251STiwei Bie 
916138fd251STiwei Bie 	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
917e593bf97STiwei Bie 			vq->split.vring.used->idx);
918138fd251STiwei Bie }
919138fd251STiwei Bie 
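/*
 * The prepare/poll pair above backs the standard race-free re-enable
 * pattern (sketch, using the public wrappers):
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (unlikely(virtqueue_poll(vq, opaque))) {
 *		virtqueue_disable_cb(vq);
 *		goto again;	// more buffers arrived meanwhile
 *	}
 *
 * i.e. interrupts are re-armed first, then the ring is re-checked, so
 * a buffer that slipped in between is never left unprocessed.
 */
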
920138fd251STiwei Bie static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
9217ab358c2SMichael S. Tsirkin {
9227ab358c2SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
9237ab358c2SMichael S. Tsirkin 	u16 bufs;
9247ab358c2SMichael S. Tsirkin 
9257ab358c2SMichael S. Tsirkin 	START_USE(vq);
9267ab358c2SMichael S. Tsirkin 
9277ab358c2SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
9287ab358c2SMichael S. Tsirkin 	 * more to do. */
9297ab358c2SMichael S. Tsirkin 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
9307ab358c2SMichael S. Tsirkin 	 * either clear the flags bit or point the event index at the next
9310ea1e4a6SLadi Prosek 	 * entry. Always update the event index to keep code simple. */
932e593bf97STiwei Bie 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
933e593bf97STiwei Bie 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
9340ea1e4a6SLadi Prosek 		if (!vq->event)
935e593bf97STiwei Bie 			vq->split.vring.avail->flags =
936e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
937e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
938f277ec42SVenkatesh Srinivas 	}
9397ab358c2SMichael S. Tsirkin 	/* TODO: tune this threshold */
940e593bf97STiwei Bie 	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
941788e5b3aSMichael S. Tsirkin 
942788e5b3aSMichael S. Tsirkin 	virtio_store_mb(vq->weak_barriers,
943e593bf97STiwei Bie 			&vring_used_event(&vq->split.vring),
944788e5b3aSMichael S. Tsirkin 			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
945788e5b3aSMichael S. Tsirkin 
946e593bf97STiwei Bie 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
947e593bf97STiwei Bie 					- vq->last_used_idx) > bufs)) {
9487ab358c2SMichael S. Tsirkin 		END_USE(vq);
9497ab358c2SMichael S. Tsirkin 		return false;
9507ab358c2SMichael S. Tsirkin 	}
9517ab358c2SMichael S. Tsirkin 
9527ab358c2SMichael S. Tsirkin 	END_USE(vq);
9537ab358c2SMichael S. Tsirkin 	return true;
9547ab358c2SMichael S. Tsirkin }
9557ab358c2SMichael S. Tsirkin 
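/*
 * Worked example of the 3/4 threshold above: with avail_idx_shadow ==
 * 100 and last_used_idx == 20, 80 buffers are outstanding and bufs ==
 * 60, so the used event is written as 20 + 60 == 80 and the device
 * only interrupts once roughly three quarters of the pending buffers
 * have been consumed.
 */
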
956138fd251STiwei Bie static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
957c021eac4SShirley Ma {
958c021eac4SShirley Ma 	struct vring_virtqueue *vq = to_vvq(_vq);
959c021eac4SShirley Ma 	unsigned int i;
960c021eac4SShirley Ma 	void *buf;
961c021eac4SShirley Ma 
962c021eac4SShirley Ma 	START_USE(vq);
963c021eac4SShirley Ma 
964e593bf97STiwei Bie 	for (i = 0; i < vq->split.vring.num; i++) {
965cbeedb72STiwei Bie 		if (!vq->split.desc_state[i].data)
966c021eac4SShirley Ma 			continue;
967138fd251STiwei Bie 		/* detach_buf_split clears data, so grab it now. */
968cbeedb72STiwei Bie 		buf = vq->split.desc_state[i].data;
969138fd251STiwei Bie 		detach_buf_split(vq, i, NULL);
970e593bf97STiwei Bie 		vq->split.avail_idx_shadow--;
971e593bf97STiwei Bie 		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
972e593bf97STiwei Bie 				vq->split.avail_idx_shadow);
973c021eac4SShirley Ma 		END_USE(vq);
974c021eac4SShirley Ma 		return buf;
975c021eac4SShirley Ma 	}
976c021eac4SShirley Ma 	/* That should have freed everything. */
977e593bf97STiwei Bie 	BUG_ON(vq->vq.num_free != vq->split.vring.num);
978c021eac4SShirley Ma 
979c021eac4SShirley Ma 	END_USE(vq);
980c021eac4SShirley Ma 	return NULL;
981c021eac4SShirley Ma }
982138fd251STiwei Bie 
983198fa7beSXuan Zhuo static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
984198fa7beSXuan Zhuo 				       struct vring_virtqueue *vq)
985198fa7beSXuan Zhuo {
986198fa7beSXuan Zhuo 	struct virtio_device *vdev;
987198fa7beSXuan Zhuo 
988198fa7beSXuan Zhuo 	vdev = vq->vq.vdev;
989198fa7beSXuan Zhuo 
990198fa7beSXuan Zhuo 	vring_split->avail_flags_shadow = 0;
991198fa7beSXuan Zhuo 	vring_split->avail_idx_shadow = 0;
992198fa7beSXuan Zhuo 
993198fa7beSXuan Zhuo 	/* No callback?  Tell other side not to bother us. */
994198fa7beSXuan Zhuo 	if (!vq->vq.callback) {
995198fa7beSXuan Zhuo 		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
996198fa7beSXuan Zhuo 		if (!vq->event)
997198fa7beSXuan Zhuo 			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
998198fa7beSXuan Zhuo 					vring_split->avail_flags_shadow);
999198fa7beSXuan Zhuo 	}
1000198fa7beSXuan Zhuo }
1001198fa7beSXuan Zhuo 
1002e5175b41SXuan Zhuo static void virtqueue_reinit_split(struct vring_virtqueue *vq)
1003e5175b41SXuan Zhuo {
1004e5175b41SXuan Zhuo 	int num;
1005e5175b41SXuan Zhuo 
1006e5175b41SXuan Zhuo 	num = vq->split.vring.num;
1007e5175b41SXuan Zhuo 
1008e5175b41SXuan Zhuo 	vq->split.vring.avail->flags = 0;
1009e5175b41SXuan Zhuo 	vq->split.vring.avail->idx = 0;
1010e5175b41SXuan Zhuo 
1011e5175b41SXuan Zhuo 	/* reset avail event */
1012e5175b41SXuan Zhuo 	vq->split.vring.avail->ring[num] = 0;
1013e5175b41SXuan Zhuo 
1014e5175b41SXuan Zhuo 	vq->split.vring.used->flags = 0;
1015e5175b41SXuan Zhuo 	vq->split.vring.used->idx = 0;
1016e5175b41SXuan Zhuo 
1017e5175b41SXuan Zhuo 	/* reset used event */
1018e5175b41SXuan Zhuo 	*(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
1019e5175b41SXuan Zhuo 
1020e5175b41SXuan Zhuo 	virtqueue_init(vq, num);
1021e5175b41SXuan Zhuo 
1022e5175b41SXuan Zhuo 	virtqueue_vring_init_split(&vq->split, vq);
1023e5175b41SXuan Zhuo }
1024e5175b41SXuan Zhuo 
1025e1d6a423SXuan Zhuo static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
1026e1d6a423SXuan Zhuo 					 struct vring_virtqueue_split *vring_split)
1027e1d6a423SXuan Zhuo {
1028e1d6a423SXuan Zhuo 	vq->split = *vring_split;
1029e1d6a423SXuan Zhuo 
1030e1d6a423SXuan Zhuo 	/* Put everything in free lists. */
1031e1d6a423SXuan Zhuo 	vq->free_head = 0;
1032e1d6a423SXuan Zhuo }
1033e1d6a423SXuan Zhuo 
1034a2b36c8dSXuan Zhuo static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
1035a2b36c8dSXuan Zhuo {
1036a2b36c8dSXuan Zhuo 	struct vring_desc_state_split *state;
1037a2b36c8dSXuan Zhuo 	struct vring_desc_extra *extra;
1038a2b36c8dSXuan Zhuo 	u32 num = vring_split->vring.num;
1039a2b36c8dSXuan Zhuo 
1040a2b36c8dSXuan Zhuo 	state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
1041a2b36c8dSXuan Zhuo 	if (!state)
1042a2b36c8dSXuan Zhuo 		goto err_state;
1043a2b36c8dSXuan Zhuo 
1044a2b36c8dSXuan Zhuo 	extra = vring_alloc_desc_extra(num);
1045a2b36c8dSXuan Zhuo 	if (!extra)
1046a2b36c8dSXuan Zhuo 		goto err_extra;
1047a2b36c8dSXuan Zhuo 
1048a2b36c8dSXuan Zhuo 	memset(state, 0, num * sizeof(struct vring_desc_state_split));
1049a2b36c8dSXuan Zhuo 
1050a2b36c8dSXuan Zhuo 	vring_split->desc_state = state;
1051a2b36c8dSXuan Zhuo 	vring_split->desc_extra = extra;
1052a2b36c8dSXuan Zhuo 	return 0;
1053a2b36c8dSXuan Zhuo 
1054a2b36c8dSXuan Zhuo err_extra:
1055a2b36c8dSXuan Zhuo 	kfree(state);
1056a2b36c8dSXuan Zhuo err_state:
1057a2b36c8dSXuan Zhuo 	return -ENOMEM;
1058a2b36c8dSXuan Zhuo }
1059a2b36c8dSXuan Zhuo 
106089f05d94SXuan Zhuo static void vring_free_split(struct vring_virtqueue_split *vring_split,
10612713ea3cSJason Wang 			     struct virtio_device *vdev, struct device *dma_dev)
106289f05d94SXuan Zhuo {
106389f05d94SXuan Zhuo 	vring_free_queue(vdev, vring_split->queue_size_in_bytes,
106489f05d94SXuan Zhuo 			 vring_split->vring.desc,
10652713ea3cSJason Wang 			 vring_split->queue_dma_addr,
10662713ea3cSJason Wang 			 dma_dev);
106789f05d94SXuan Zhuo 
106889f05d94SXuan Zhuo 	kfree(vring_split->desc_state);
106989f05d94SXuan Zhuo 	kfree(vring_split->desc_extra);
107089f05d94SXuan Zhuo }
107189f05d94SXuan Zhuo 
1072c2d87fe6SXuan Zhuo static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
1073c2d87fe6SXuan Zhuo 				   struct virtio_device *vdev,
1074c2d87fe6SXuan Zhuo 				   u32 num,
1075c2d87fe6SXuan Zhuo 				   unsigned int vring_align,
10762713ea3cSJason Wang 				   bool may_reduce_num,
10772713ea3cSJason Wang 				   struct device *dma_dev)
1078c2d87fe6SXuan Zhuo {
1079c2d87fe6SXuan Zhuo 	void *queue = NULL;
1080c2d87fe6SXuan Zhuo 	dma_addr_t dma_addr;
1081c2d87fe6SXuan Zhuo 
1082c2d87fe6SXuan Zhuo 	/* num must be a power of 2; bail out otherwise. */
1083b9d978a8SShaoqin Huang 	if (!is_power_of_2(num)) {
1084c2d87fe6SXuan Zhuo 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1085c2d87fe6SXuan Zhuo 		return -EINVAL;
1086c2d87fe6SXuan Zhuo 	}
1087c2d87fe6SXuan Zhuo 
1088c2d87fe6SXuan Zhuo 	/* TODO: allocate each queue chunk individually */
1089c2d87fe6SXuan Zhuo 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1090c2d87fe6SXuan Zhuo 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1091c2d87fe6SXuan Zhuo 					  &dma_addr,
10922713ea3cSJason Wang 					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
10932713ea3cSJason Wang 					  dma_dev);
1094c2d87fe6SXuan Zhuo 		if (queue)
1095c2d87fe6SXuan Zhuo 			break;
1096c2d87fe6SXuan Zhuo 		if (!may_reduce_num)
1097c2d87fe6SXuan Zhuo 			return -ENOMEM;
1098c2d87fe6SXuan Zhuo 	}
1099c2d87fe6SXuan Zhuo 
1100c2d87fe6SXuan Zhuo 	if (!num)
1101c2d87fe6SXuan Zhuo 		return -ENOMEM;
1102c2d87fe6SXuan Zhuo 
1103c2d87fe6SXuan Zhuo 	if (!queue) {
1104c2d87fe6SXuan Zhuo 		/* Try to get a single page. You are my only hope! */
1105c2d87fe6SXuan Zhuo 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
11062713ea3cSJason Wang 					  &dma_addr, GFP_KERNEL | __GFP_ZERO,
11072713ea3cSJason Wang 					  dma_dev);
1108c2d87fe6SXuan Zhuo 	}
1109c2d87fe6SXuan Zhuo 	if (!queue)
1110c2d87fe6SXuan Zhuo 		return -ENOMEM;
1111c2d87fe6SXuan Zhuo 
1112c2d87fe6SXuan Zhuo 	vring_init(&vring_split->vring, num, queue, vring_align);
1113c2d87fe6SXuan Zhuo 
1114c2d87fe6SXuan Zhuo 	vring_split->queue_dma_addr = dma_addr;
1115c2d87fe6SXuan Zhuo 	vring_split->queue_size_in_bytes = vring_size(num, vring_align);
1116c2d87fe6SXuan Zhuo 
1117af36b16fSXuan Zhuo 	vring_split->vring_align = vring_align;
1118af36b16fSXuan Zhuo 	vring_split->may_reduce_num = may_reduce_num;
1119af36b16fSXuan Zhuo 
1120c2d87fe6SXuan Zhuo 	return 0;
1121c2d87fe6SXuan Zhuo }
1122c2d87fe6SXuan Zhuo 
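/*
 * Example of the sizing loop above, assuming 4 KiB pages: a request
 * for num == 1024 split-ring entries needs a multi-page contiguous
 * allocation, so on failure num is halved (1024, 512, ...) while
 * vring_size(num, vring_align) still exceeds PAGE_SIZE, bailing out
 * early unless may_reduce_num is set.  Once the ring fits in a single
 * page, the single-page fallback below is the last resort.
 */
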
1123d79dca75STiwei Bie static struct virtqueue *vring_create_virtqueue_split(
1124d79dca75STiwei Bie 	unsigned int index,
1125d79dca75STiwei Bie 	unsigned int num,
1126d79dca75STiwei Bie 	unsigned int vring_align,
1127d79dca75STiwei Bie 	struct virtio_device *vdev,
1128d79dca75STiwei Bie 	bool weak_barriers,
1129d79dca75STiwei Bie 	bool may_reduce_num,
1130d79dca75STiwei Bie 	bool context,
1131d79dca75STiwei Bie 	bool (*notify)(struct virtqueue *),
1132d79dca75STiwei Bie 	void (*callback)(struct virtqueue *),
11332713ea3cSJason Wang 	const char *name,
11342713ea3cSJason Wang 	struct device *dma_dev)
1135d79dca75STiwei Bie {
1136cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
1137d79dca75STiwei Bie 	struct virtqueue *vq;
1138c2d87fe6SXuan Zhuo 	int err;
1139d79dca75STiwei Bie 
1140c2d87fe6SXuan Zhuo 	err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
11412713ea3cSJason Wang 				      may_reduce_num, dma_dev);
1142c2d87fe6SXuan Zhuo 	if (err)
1143d79dca75STiwei Bie 		return NULL;
1144d79dca75STiwei Bie 
1145cd4c812aSXuan Zhuo 	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
11462713ea3cSJason Wang 				   context, notify, callback, name, dma_dev);
1147d79dca75STiwei Bie 	if (!vq) {
11482713ea3cSJason Wang 		vring_free_split(&vring_split, vdev, dma_dev);
1149d79dca75STiwei Bie 		return NULL;
1150d79dca75STiwei Bie 	}
1151d79dca75STiwei Bie 
1152d79dca75STiwei Bie 	to_vvq(vq)->we_own_ring = true;
1153d79dca75STiwei Bie 
1154d79dca75STiwei Bie 	return vq;
1155d79dca75STiwei Bie }
1156d79dca75STiwei Bie 
11576fea20e5SXuan Zhuo static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
11586fea20e5SXuan Zhuo {
11596fea20e5SXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
11606fea20e5SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
11616fea20e5SXuan Zhuo 	struct virtio_device *vdev = _vq->vdev;
11626fea20e5SXuan Zhuo 	int err;
11636fea20e5SXuan Zhuo 
11646fea20e5SXuan Zhuo 	err = vring_alloc_queue_split(&vring_split, vdev, num,
11656fea20e5SXuan Zhuo 				      vq->split.vring_align,
11662713ea3cSJason Wang 				      vq->split.may_reduce_num,
11672713ea3cSJason Wang 				      vring_dma_dev(vq));
11686fea20e5SXuan Zhuo 	if (err)
11696fea20e5SXuan Zhuo 		goto err;
11706fea20e5SXuan Zhuo 
11716fea20e5SXuan Zhuo 	err = vring_alloc_state_extra_split(&vring_split);
11726fea20e5SXuan Zhuo 	if (err)
11736fea20e5SXuan Zhuo 		goto err_state_extra;
11746fea20e5SXuan Zhuo 
11756fea20e5SXuan Zhuo 	vring_free(&vq->vq);
11766fea20e5SXuan Zhuo 
11776fea20e5SXuan Zhuo 	virtqueue_vring_init_split(&vring_split, vq);
11786fea20e5SXuan Zhuo 
11796fea20e5SXuan Zhuo 	virtqueue_init(vq, vring_split.vring.num);
11806fea20e5SXuan Zhuo 	virtqueue_vring_attach_split(vq, &vring_split);
11816fea20e5SXuan Zhuo 
11826fea20e5SXuan Zhuo 	return 0;
11836fea20e5SXuan Zhuo 
11846fea20e5SXuan Zhuo err_state_extra:
11852713ea3cSJason Wang 	vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
11866fea20e5SXuan Zhuo err:
11876fea20e5SXuan Zhuo 	virtqueue_reinit_split(vq);
11886fea20e5SXuan Zhuo 	return -ENOMEM;
11896fea20e5SXuan Zhuo }
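
/*
 * Editor's note: ordering matters in the resize above.  The new ring
 * and its state arrays are allocated before vring_free() releases the
 * old ones, so if either allocation fails the error path can fall back
 * to virtqueue_reinit_split() and leave the virtqueue usable on the
 * still-intact old ring.  virtqueue_resize_packed() below follows the
 * same pattern.
 */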
11906fea20e5SXuan Zhuo 
1192e6f633e5STiwei Bie /*
11931ce9e605STiwei Bie  * Packed ring specific functions - *_packed().
11941ce9e605STiwei Bie  */
11951adbd6b2SFeng Liu static bool packed_used_wrap_counter(u16 last_used_idx)
1196a7722890Shuangjie.albert {
1197a7722890Shuangjie.albert 	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1198a7722890Shuangjie.albert }
1199a7722890Shuangjie.albert 
12001adbd6b2SFeng Liu static u16 packed_last_used(u16 last_used_idx)
1201a7722890Shuangjie.albert {
1202a7722890Shuangjie.albert 	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1203a7722890Shuangjie.albert }
12041ce9e605STiwei Bie 
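/*
 * Editor's note: last_used_idx packs two values into one u16 so that
 * readers can snapshot both atomically with READ_ONCE().  Bit 15
 * (VRING_PACKED_EVENT_F_WRAP_CTR) holds the used wrap counter and
 * bits 0-14 hold the index; the mask ~(-(1 << 15)) is simply 0x7fff.
 * For example, last_used_idx == 0x8003 decodes to wrap counter 1,
 * index 3.
 */
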
1205d80dc15bSXuan Zhuo static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
12064b6ec919SFeng Liu 				     const struct vring_desc_extra *extra)
12071ce9e605STiwei Bie {
12081ce9e605STiwei Bie 	u16 flags;
12091ce9e605STiwei Bie 
12101ce9e605STiwei Bie 	if (!vq->use_dma_api)
12111ce9e605STiwei Bie 		return;
12121ce9e605STiwei Bie 
1213d80dc15bSXuan Zhuo 	flags = extra->flags;
12141ce9e605STiwei Bie 
12151ce9e605STiwei Bie 	if (flags & VRING_DESC_F_INDIRECT) {
12161ce9e605STiwei Bie 		dma_unmap_single(vring_dma_dev(vq),
1217d80dc15bSXuan Zhuo 				 extra->addr, extra->len,
12181ce9e605STiwei Bie 				 (flags & VRING_DESC_F_WRITE) ?
12191ce9e605STiwei Bie 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
12201ce9e605STiwei Bie 	} else {
12211ce9e605STiwei Bie 		dma_unmap_page(vring_dma_dev(vq),
1222d80dc15bSXuan Zhuo 			       extra->addr, extra->len,
12231ce9e605STiwei Bie 			       (flags & VRING_DESC_F_WRITE) ?
12241ce9e605STiwei Bie 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
12251ce9e605STiwei Bie 	}
12261ce9e605STiwei Bie }
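
/*
 * Editor's note: unmapping works from desc_extra rather than from the
 * ring itself because the device overwrites packed-ring descriptor
 * slots with used elements; desc_extra preserves the addr/len/flags
 * recorded at map time (see virtqueue_add_packed() below).
 */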
12271ce9e605STiwei Bie 
12281ce9e605STiwei Bie static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
12294b6ec919SFeng Liu 				    const struct vring_packed_desc *desc)
12301ce9e605STiwei Bie {
12311ce9e605STiwei Bie 	u16 flags;
12321ce9e605STiwei Bie 
12331ce9e605STiwei Bie 	if (!vq->use_dma_api)
12341ce9e605STiwei Bie 		return;
12351ce9e605STiwei Bie 
12361ce9e605STiwei Bie 	flags = le16_to_cpu(desc->flags);
12371ce9e605STiwei Bie 
12381ce9e605STiwei Bie 	dma_unmap_page(vring_dma_dev(vq),
12391ce9e605STiwei Bie 		       le64_to_cpu(desc->addr),
12401ce9e605STiwei Bie 		       le32_to_cpu(desc->len),
12411ce9e605STiwei Bie 		       (flags & VRING_DESC_F_WRITE) ?
12421ce9e605STiwei Bie 		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
12431ce9e605STiwei Bie }
12441ce9e605STiwei Bie 
12451ce9e605STiwei Bie static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
12461ce9e605STiwei Bie 						       gfp_t gfp)
12471ce9e605STiwei Bie {
12481ce9e605STiwei Bie 	struct vring_packed_desc *desc;
12491ce9e605STiwei Bie 
12501ce9e605STiwei Bie 	/*
12511ce9e605STiwei Bie 	 * We require lowmem mappings for the descriptors because
12521ce9e605STiwei Bie 	 * otherwise virt_to_phys will give us bogus addresses in the
12531ce9e605STiwei Bie 	 * virtqueue.
12541ce9e605STiwei Bie 	 */
12551ce9e605STiwei Bie 	gfp &= ~__GFP_HIGHMEM;
12561ce9e605STiwei Bie 
12571ce9e605STiwei Bie 	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
12581ce9e605STiwei Bie 
12591ce9e605STiwei Bie 	return desc;
12601ce9e605STiwei Bie }
12611ce9e605STiwei Bie 
12621ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
12631ce9e605STiwei Bie 					 struct scatterlist *sgs[],
12641ce9e605STiwei Bie 					 unsigned int total_sg,
12651ce9e605STiwei Bie 					 unsigned int out_sgs,
12661ce9e605STiwei Bie 					 unsigned int in_sgs,
12671ce9e605STiwei Bie 					 void *data,
12681ce9e605STiwei Bie 					 gfp_t gfp)
12691ce9e605STiwei Bie {
12701ce9e605STiwei Bie 	struct vring_packed_desc *desc;
12711ce9e605STiwei Bie 	struct scatterlist *sg;
12721ce9e605STiwei Bie 	unsigned int i, n, err_idx;
12731ce9e605STiwei Bie 	u16 head, id;
12741ce9e605STiwei Bie 	dma_addr_t addr;
12751ce9e605STiwei Bie 
12761ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
12771ce9e605STiwei Bie 	desc = alloc_indirect_packed(total_sg, gfp);
1278fc6d70f4SXuan Zhuo 	if (!desc)
1279fc6d70f4SXuan Zhuo 		return -ENOMEM;
12801ce9e605STiwei Bie 
12811ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < 1)) {
12821ce9e605STiwei Bie 		pr_debug("Can't add buf len 1 - avail = 0\n");
1283df0bfe75SYueHaibing 		kfree(desc);
12841ce9e605STiwei Bie 		END_USE(vq);
12851ce9e605STiwei Bie 		return -ENOSPC;
12861ce9e605STiwei Bie 	}
12871ce9e605STiwei Bie 
12881ce9e605STiwei Bie 	i = 0;
12891ce9e605STiwei Bie 	id = vq->free_head;
12901ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
12911ce9e605STiwei Bie 
12921ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
12931ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
12940e27fa6dSXuan Zhuo 			if (vring_map_one_sg(vq, sg, n < out_sgs ?
12950e27fa6dSXuan Zhuo 					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
12961ce9e605STiwei Bie 				goto unmap_release;
12971ce9e605STiwei Bie 
12981ce9e605STiwei Bie 			desc[i].flags = cpu_to_le16(n < out_sgs ?
12991ce9e605STiwei Bie 						0 : VRING_DESC_F_WRITE);
13001ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
13011ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
13021ce9e605STiwei Bie 			i++;
13031ce9e605STiwei Bie 		}
13041ce9e605STiwei Bie 	}
13051ce9e605STiwei Bie 
13061ce9e605STiwei Bie 	/* Now that the indirect table is filled in, map it. */
13071ce9e605STiwei Bie 	addr = vring_map_single(vq, desc,
13081ce9e605STiwei Bie 			total_sg * sizeof(struct vring_packed_desc),
13091ce9e605STiwei Bie 			DMA_TO_DEVICE);
13101ce9e605STiwei Bie 	if (vring_mapping_error(vq, addr))
13111ce9e605STiwei Bie 		goto unmap_release;
13121ce9e605STiwei Bie 
13131ce9e605STiwei Bie 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
13141ce9e605STiwei Bie 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
13151ce9e605STiwei Bie 				sizeof(struct vring_packed_desc));
13161ce9e605STiwei Bie 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
13171ce9e605STiwei Bie 
13181ce9e605STiwei Bie 	if (vq->use_dma_api) {
13191ce9e605STiwei Bie 		vq->packed.desc_extra[id].addr = addr;
13201ce9e605STiwei Bie 		vq->packed.desc_extra[id].len = total_sg *
13211ce9e605STiwei Bie 				sizeof(struct vring_packed_desc);
13221ce9e605STiwei Bie 		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
13231ce9e605STiwei Bie 						  vq->packed.avail_used_flags;
13241ce9e605STiwei Bie 	}
13251ce9e605STiwei Bie 
13261ce9e605STiwei Bie 	/*
13271ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
13281ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
13291ce9e605STiwei Bie 	 * the list are made available.
13301ce9e605STiwei Bie 	 */
13311ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
13321ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
13331ce9e605STiwei Bie 						vq->packed.avail_used_flags);
13341ce9e605STiwei Bie 
13351ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
13361ce9e605STiwei Bie 	vq->vq.num_free -= 1;
13371ce9e605STiwei Bie 
13381ce9e605STiwei Bie 	/* Update free pointer */
13391ce9e605STiwei Bie 	n = head + 1;
13401ce9e605STiwei Bie 	if (n >= vq->packed.vring.num) {
13411ce9e605STiwei Bie 		n = 0;
13421ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
13431ce9e605STiwei Bie 		vq->packed.avail_used_flags ^=
13441ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_AVAIL |
13451ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_USED;
13461ce9e605STiwei Bie 	}
13471ce9e605STiwei Bie 	vq->packed.next_avail_idx = n;
1348aeef9b47SJason Wang 	vq->free_head = vq->packed.desc_extra[id].next;
13491ce9e605STiwei Bie 
13501ce9e605STiwei Bie 	/* Store token and indirect buffer state. */
13511ce9e605STiwei Bie 	vq->packed.desc_state[id].num = 1;
13521ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
13531ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = desc;
13541ce9e605STiwei Bie 	vq->packed.desc_state[id].last = id;
13551ce9e605STiwei Bie 
13561ce9e605STiwei Bie 	vq->num_added += 1;
13571ce9e605STiwei Bie 
13581ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
13591ce9e605STiwei Bie 	END_USE(vq);
13601ce9e605STiwei Bie 
13611ce9e605STiwei Bie 	return 0;
13621ce9e605STiwei Bie 
13631ce9e605STiwei Bie unmap_release:
13641ce9e605STiwei Bie 	err_idx = i;
13651ce9e605STiwei Bie 
13661ce9e605STiwei Bie 	for (i = 0; i < err_idx; i++)
13671ce9e605STiwei Bie 		vring_unmap_desc_packed(vq, &desc[i]);
13681ce9e605STiwei Bie 
13691ce9e605STiwei Bie 	kfree(desc);
13701ce9e605STiwei Bie 
13711ce9e605STiwei Bie 	END_USE(vq);
1372f7728002SHalil Pasic 	return -ENOMEM;
13731ce9e605STiwei Bie }
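
/*
 * Editor's note: the indirect path trades one extra DMA mapping for
 * ring space.  A request with total_sg == 8, for instance, consumes a
 * single ring slot and a single id here (num_free -= 1) while the
 * eight vring_packed_desc entries live in the kmalloc'ed table mapped
 * above; the direct path in virtqueue_add_packed() would instead
 * consume eight ring slots.
 */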
13741ce9e605STiwei Bie 
13751ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq,
13761ce9e605STiwei Bie 				       struct scatterlist *sgs[],
13771ce9e605STiwei Bie 				       unsigned int total_sg,
13781ce9e605STiwei Bie 				       unsigned int out_sgs,
13791ce9e605STiwei Bie 				       unsigned int in_sgs,
13801ce9e605STiwei Bie 				       void *data,
13811ce9e605STiwei Bie 				       void *ctx,
13821ce9e605STiwei Bie 				       gfp_t gfp)
13831ce9e605STiwei Bie {
13841ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
13851ce9e605STiwei Bie 	struct vring_packed_desc *desc;
13861ce9e605STiwei Bie 	struct scatterlist *sg;
13871ce9e605STiwei Bie 	unsigned int i, n, c, descs_used, err_idx;
13883f649ab7SKees Cook 	__le16 head_flags, flags;
13893f649ab7SKees Cook 	u16 head, id, prev, curr, avail_used_flags;
1390fc6d70f4SXuan Zhuo 	int err;
13911ce9e605STiwei Bie 
13921ce9e605STiwei Bie 	START_USE(vq);
13931ce9e605STiwei Bie 
13941ce9e605STiwei Bie 	BUG_ON(data == NULL);
13951ce9e605STiwei Bie 	BUG_ON(ctx && vq->indirect);
13961ce9e605STiwei Bie 
13971ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
13981ce9e605STiwei Bie 		END_USE(vq);
13991ce9e605STiwei Bie 		return -EIO;
14001ce9e605STiwei Bie 	}
14011ce9e605STiwei Bie 
14021ce9e605STiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
14031ce9e605STiwei Bie 
14041ce9e605STiwei Bie 	BUG_ON(total_sg == 0);
14051ce9e605STiwei Bie 
140635c51e09SXianting Tian 	if (virtqueue_use_indirect(vq, total_sg)) {
1407fc6d70f4SXuan Zhuo 		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1408fc6d70f4SXuan Zhuo 						    in_sgs, data, gfp);
14091861ba62SMichael S. Tsirkin 		if (err != -ENOMEM) {
14101861ba62SMichael S. Tsirkin 			END_USE(vq);
1411fc6d70f4SXuan Zhuo 			return err;
14121861ba62SMichael S. Tsirkin 		}
1413fc6d70f4SXuan Zhuo 
1414fc6d70f4SXuan Zhuo 		/* fall back on direct */
1415fc6d70f4SXuan Zhuo 	}
14161ce9e605STiwei Bie 
14171ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
14181ce9e605STiwei Bie 	avail_used_flags = vq->packed.avail_used_flags;
14191ce9e605STiwei Bie 
14201ce9e605STiwei Bie 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
14211ce9e605STiwei Bie 
14221ce9e605STiwei Bie 	desc = vq->packed.vring.desc;
14231ce9e605STiwei Bie 	i = head;
14241ce9e605STiwei Bie 	descs_used = total_sg;
14251ce9e605STiwei Bie 
14261ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < descs_used)) {
14271ce9e605STiwei Bie 		pr_debug("Can't add buf len %i - avail = %i\n",
14281ce9e605STiwei Bie 			 descs_used, vq->vq.num_free);
14291ce9e605STiwei Bie 		END_USE(vq);
14301ce9e605STiwei Bie 		return -ENOSPC;
14311ce9e605STiwei Bie 	}
14321ce9e605STiwei Bie 
14331ce9e605STiwei Bie 	id = vq->free_head;
14341ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
14351ce9e605STiwei Bie 
14361ce9e605STiwei Bie 	curr = id;
14371ce9e605STiwei Bie 	c = 0;
14381ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
14391ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
14400e27fa6dSXuan Zhuo 			dma_addr_t addr;
14410e27fa6dSXuan Zhuo 
14420e27fa6dSXuan Zhuo 			if (vring_map_one_sg(vq, sg, n < out_sgs ?
14430e27fa6dSXuan Zhuo 					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
14441ce9e605STiwei Bie 				goto unmap_release;
14451ce9e605STiwei Bie 
14461ce9e605STiwei Bie 			flags = cpu_to_le16(vq->packed.avail_used_flags |
14471ce9e605STiwei Bie 				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
14481ce9e605STiwei Bie 				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
14491ce9e605STiwei Bie 			if (i == head)
14501ce9e605STiwei Bie 				head_flags = flags;
14511ce9e605STiwei Bie 			else
14521ce9e605STiwei Bie 				desc[i].flags = flags;
14531ce9e605STiwei Bie 
14541ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
14551ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
14561ce9e605STiwei Bie 			desc[i].id = cpu_to_le16(id);
14571ce9e605STiwei Bie 
14581ce9e605STiwei Bie 			if (unlikely(vq->use_dma_api)) {
14591ce9e605STiwei Bie 				vq->packed.desc_extra[curr].addr = addr;
14601ce9e605STiwei Bie 				vq->packed.desc_extra[curr].len = sg->length;
14611ce9e605STiwei Bie 				vq->packed.desc_extra[curr].flags =
14621ce9e605STiwei Bie 					le16_to_cpu(flags);
14631ce9e605STiwei Bie 			}
14641ce9e605STiwei Bie 			prev = curr;
1465aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
14661ce9e605STiwei Bie 
14671ce9e605STiwei Bie 			if (unlikely(++i >= vq->packed.vring.num)) {
14681ce9e605STiwei Bie 				i = 0;
14691ce9e605STiwei Bie 				vq->packed.avail_used_flags ^=
14701ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_AVAIL |
14711ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_USED;
14721ce9e605STiwei Bie 			}
14731ce9e605STiwei Bie 		}
14741ce9e605STiwei Bie 	}
14751ce9e605STiwei Bie 
14761ce9e605STiwei Bie 	if (i < head)
14771ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
14781ce9e605STiwei Bie 
14791ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
14801ce9e605STiwei Bie 	vq->vq.num_free -= descs_used;
14811ce9e605STiwei Bie 
14821ce9e605STiwei Bie 	/* Update free pointer */
14831ce9e605STiwei Bie 	vq->packed.next_avail_idx = i;
14841ce9e605STiwei Bie 	vq->free_head = curr;
14851ce9e605STiwei Bie 
14861ce9e605STiwei Bie 	/* Store token. */
14871ce9e605STiwei Bie 	vq->packed.desc_state[id].num = descs_used;
14881ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
14891ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = ctx;
14901ce9e605STiwei Bie 	vq->packed.desc_state[id].last = prev;
14911ce9e605STiwei Bie 
14921ce9e605STiwei Bie 	/*
14931ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
14941ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
14951ce9e605STiwei Bie 	 * the list are made available.
14961ce9e605STiwei Bie 	 */
14971ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
14981ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = head_flags;
14991ce9e605STiwei Bie 	vq->num_added += descs_used;
15001ce9e605STiwei Bie 
15011ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
15021ce9e605STiwei Bie 	END_USE(vq);
15031ce9e605STiwei Bie 
15041ce9e605STiwei Bie 	return 0;
15051ce9e605STiwei Bie 
15061ce9e605STiwei Bie unmap_release:
15071ce9e605STiwei Bie 	err_idx = i;
15081ce9e605STiwei Bie 	i = head;
150944593865SJason Wang 	curr = vq->free_head;
15101ce9e605STiwei Bie 
15111ce9e605STiwei Bie 	vq->packed.avail_used_flags = avail_used_flags;
15121ce9e605STiwei Bie 
15131ce9e605STiwei Bie 	for (n = 0; n < total_sg; n++) {
15141ce9e605STiwei Bie 		if (i == err_idx)
15151ce9e605STiwei Bie 			break;
1516d80dc15bSXuan Zhuo 		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
151744593865SJason Wang 		curr = vq->packed.desc_extra[curr].next;
15181ce9e605STiwei Bie 		i++;
15191ce9e605STiwei Bie 		if (i >= vq->packed.vring.num)
15201ce9e605STiwei Bie 			i = 0;
15211ce9e605STiwei Bie 	}
15221ce9e605STiwei Bie 
15231ce9e605STiwei Bie 	END_USE(vq);
15241ce9e605STiwei Bie 	return -EIO;
15251ce9e605STiwei Bie }
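
/*
 * Editor's note: a short walk-through of the availability bits used
 * above.  avail_used_flags starts as 1 << VRING_PACKED_DESC_F_AVAIL
 * (avail = 1, used = 0), and both bits are toggled each time
 * next_avail_idx wraps past vring.num, so on the second pass the
 * driver writes avail = 0, used = 1.  A descriptor is available when
 * its avail bit matches the driver's wrap counter and differs from
 * its used bit, which is why head_flags is published only after the
 * virtio_wmb(): the whole chain becomes visible to the device at once.
 */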
15261ce9e605STiwei Bie 
15271ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
15281ce9e605STiwei Bie {
15291ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1530f51f9826STiwei Bie 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
15311ce9e605STiwei Bie 	bool needs_kick;
15321ce9e605STiwei Bie 	union {
15331ce9e605STiwei Bie 		struct {
15341ce9e605STiwei Bie 			__le16 off_wrap;
15351ce9e605STiwei Bie 			__le16 flags;
15361ce9e605STiwei Bie 		};
15371ce9e605STiwei Bie 		u32 u32;
15381ce9e605STiwei Bie 	} snapshot;
15391ce9e605STiwei Bie 
15401ce9e605STiwei Bie 	START_USE(vq);
15411ce9e605STiwei Bie 
15421ce9e605STiwei Bie 	/*
15431ce9e605STiwei Bie 	 * We need to expose the new flags value before checking notification
15441ce9e605STiwei Bie 	 * suppressions.
15451ce9e605STiwei Bie 	 */
15461ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
15471ce9e605STiwei Bie 
1548f51f9826STiwei Bie 	old = vq->packed.next_avail_idx - vq->num_added;
1549f51f9826STiwei Bie 	new = vq->packed.next_avail_idx;
15501ce9e605STiwei Bie 	vq->num_added = 0;
15511ce9e605STiwei Bie 
15521ce9e605STiwei Bie 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
15531ce9e605STiwei Bie 	flags = le16_to_cpu(snapshot.flags);
15541ce9e605STiwei Bie 
15551ce9e605STiwei Bie 	LAST_ADD_TIME_CHECK(vq);
15561ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
15571ce9e605STiwei Bie 
1558f51f9826STiwei Bie 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
15591ce9e605STiwei Bie 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1560f51f9826STiwei Bie 		goto out;
1561f51f9826STiwei Bie 	}
1562f51f9826STiwei Bie 
1563f51f9826STiwei Bie 	off_wrap = le16_to_cpu(snapshot.off_wrap);
1564f51f9826STiwei Bie 
1565f51f9826STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1566f51f9826STiwei Bie 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1567f51f9826STiwei Bie 	if (wrap_counter != vq->packed.avail_wrap_counter)
1568f51f9826STiwei Bie 		event_idx -= vq->packed.vring.num;
1569f51f9826STiwei Bie 
1570f51f9826STiwei Bie 	needs_kick = vring_need_event(event_idx, new, old);
1571f51f9826STiwei Bie out:
15721ce9e605STiwei Bie 	END_USE(vq);
15731ce9e605STiwei Bie 	return needs_kick;
15741ce9e605STiwei Bie }
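
/*
 * Editor's note: with VIRTIO_RING_F_EVENT_IDX, vring_need_event() is
 * the standard test from uapi/linux/virtio_ring.h:
 *
 *	(u16)(new - event_idx - 1) < (u16)(new - old)
 *
 * i.e. kick only if the event index requested by the device falls
 * within the batch added since the last kick.  Example: old = 10,
 * new = 14, event_idx = 12 gives 1 < 4, so kick; event_idx = 20
 * gives 65529 < 4, so the kick is suppressed.
 */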
15751ce9e605STiwei Bie 
15761ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq,
15771ce9e605STiwei Bie 			      unsigned int id, void **ctx)
15781ce9e605STiwei Bie {
15791ce9e605STiwei Bie 	struct vring_desc_state_packed *state = NULL;
15801ce9e605STiwei Bie 	struct vring_packed_desc *desc;
15811ce9e605STiwei Bie 	unsigned int i, curr;
15821ce9e605STiwei Bie 
15831ce9e605STiwei Bie 	state = &vq->packed.desc_state[id];
15841ce9e605STiwei Bie 
15851ce9e605STiwei Bie 	/* Clear data ptr. */
15861ce9e605STiwei Bie 	state->data = NULL;
15871ce9e605STiwei Bie 
1588aeef9b47SJason Wang 	vq->packed.desc_extra[state->last].next = vq->free_head;
15891ce9e605STiwei Bie 	vq->free_head = id;
15901ce9e605STiwei Bie 	vq->vq.num_free += state->num;
15911ce9e605STiwei Bie 
15921ce9e605STiwei Bie 	if (unlikely(vq->use_dma_api)) {
15931ce9e605STiwei Bie 		curr = id;
15941ce9e605STiwei Bie 		for (i = 0; i < state->num; i++) {
1595d80dc15bSXuan Zhuo 			vring_unmap_extra_packed(vq,
15961ce9e605STiwei Bie 						 &vq->packed.desc_extra[curr]);
1597aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
15981ce9e605STiwei Bie 		}
15991ce9e605STiwei Bie 	}
16001ce9e605STiwei Bie 
16011ce9e605STiwei Bie 	if (vq->indirect) {
16021ce9e605STiwei Bie 		u32 len;
16031ce9e605STiwei Bie 
16041ce9e605STiwei Bie 		/* Free the indirect table, if any, now that it's unmapped. */
16051ce9e605STiwei Bie 		desc = state->indir_desc;
16061ce9e605STiwei Bie 		if (!desc)
16071ce9e605STiwei Bie 			return;
16081ce9e605STiwei Bie 
16091ce9e605STiwei Bie 		if (vq->use_dma_api) {
16101ce9e605STiwei Bie 			len = vq->packed.desc_extra[id].len;
16111ce9e605STiwei Bie 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
16121ce9e605STiwei Bie 					i++)
16131ce9e605STiwei Bie 				vring_unmap_desc_packed(vq, &desc[i]);
16141ce9e605STiwei Bie 		}
16151ce9e605STiwei Bie 		kfree(desc);
16161ce9e605STiwei Bie 		state->indir_desc = NULL;
16171ce9e605STiwei Bie 	} else if (ctx) {
16181ce9e605STiwei Bie 		*ctx = state->indir_desc;
16191ce9e605STiwei Bie 	}
16201ce9e605STiwei Bie }
16211ce9e605STiwei Bie 
16221ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
16231ce9e605STiwei Bie 				       u16 idx, bool used_wrap_counter)
16241ce9e605STiwei Bie {
16251ce9e605STiwei Bie 	bool avail, used;
16261ce9e605STiwei Bie 	u16 flags;
16271ce9e605STiwei Bie 
16281ce9e605STiwei Bie 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
16291ce9e605STiwei Bie 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
16301ce9e605STiwei Bie 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
16311ce9e605STiwei Bie 
16321ce9e605STiwei Bie 	return avail == used && used == used_wrap_counter;
16331ce9e605STiwei Bie }
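
/*
 * Editor's note: concrete values for the test above.  On the first
 * pass (wrap counter 1) the driver publishes avail = 1, used = 0; the
 * device marks the descriptor used by setting used = 1, giving
 * avail == used == 1 == used_wrap_counter.  Once the used index wraps,
 * the counter flips to 0 and a used entry instead reads
 * avail == used == 0.
 */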
16341ce9e605STiwei Bie 
16351adbd6b2SFeng Liu static bool more_used_packed(const struct vring_virtqueue *vq)
16361ce9e605STiwei Bie {
1637a7722890Shuangjie.albert 	u16 last_used;
1638a7722890Shuangjie.albert 	u16 last_used_idx;
1639a7722890Shuangjie.albert 	bool used_wrap_counter;
1640a7722890Shuangjie.albert 
1641a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1642a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
1643a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1644a7722890Shuangjie.albert 	return is_used_desc_packed(vq, last_used, used_wrap_counter);
16451ce9e605STiwei Bie }
16461ce9e605STiwei Bie 
16471ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
16481ce9e605STiwei Bie 					  unsigned int *len,
16491ce9e605STiwei Bie 					  void **ctx)
16501ce9e605STiwei Bie {
16511ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1652a7722890Shuangjie.albert 	u16 last_used, id, last_used_idx;
1653a7722890Shuangjie.albert 	bool used_wrap_counter;
16541ce9e605STiwei Bie 	void *ret;
16551ce9e605STiwei Bie 
16561ce9e605STiwei Bie 	START_USE(vq);
16571ce9e605STiwei Bie 
16581ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
16591ce9e605STiwei Bie 		END_USE(vq);
16601ce9e605STiwei Bie 		return NULL;
16611ce9e605STiwei Bie 	}
16621ce9e605STiwei Bie 
16631ce9e605STiwei Bie 	if (!more_used_packed(vq)) {
16641ce9e605STiwei Bie 		pr_debug("No more buffers in queue\n");
16651ce9e605STiwei Bie 		END_USE(vq);
16661ce9e605STiwei Bie 		return NULL;
16671ce9e605STiwei Bie 	}
16681ce9e605STiwei Bie 
16691ce9e605STiwei Bie 	/* Only get used elements after they have been exposed by host. */
16701ce9e605STiwei Bie 	virtio_rmb(vq->weak_barriers);
16711ce9e605STiwei Bie 
1672a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1673a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1674a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
16751ce9e605STiwei Bie 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
16761ce9e605STiwei Bie 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
16771ce9e605STiwei Bie 
16781ce9e605STiwei Bie 	if (unlikely(id >= vq->packed.vring.num)) {
16791ce9e605STiwei Bie 		BAD_RING(vq, "id %u out of range\n", id);
16801ce9e605STiwei Bie 		return NULL;
16811ce9e605STiwei Bie 	}
16821ce9e605STiwei Bie 	if (unlikely(!vq->packed.desc_state[id].data)) {
16831ce9e605STiwei Bie 		BAD_RING(vq, "id %u is not a head!\n", id);
16841ce9e605STiwei Bie 		return NULL;
16851ce9e605STiwei Bie 	}
16861ce9e605STiwei Bie 
16871ce9e605STiwei Bie 	/* detach_buf_packed clears data, so grab it now. */
16881ce9e605STiwei Bie 	ret = vq->packed.desc_state[id].data;
16891ce9e605STiwei Bie 	detach_buf_packed(vq, id, ctx);
16901ce9e605STiwei Bie 
1691a7722890Shuangjie.albert 	last_used += vq->packed.desc_state[id].num;
1692a7722890Shuangjie.albert 	if (unlikely(last_used >= vq->packed.vring.num)) {
1693a7722890Shuangjie.albert 		last_used -= vq->packed.vring.num;
1694a7722890Shuangjie.albert 		used_wrap_counter ^= 1;
16951ce9e605STiwei Bie 	}
16961ce9e605STiwei Bie 
1697a7722890Shuangjie.albert 	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1698a7722890Shuangjie.albert 	WRITE_ONCE(vq->last_used_idx, last_used);
1699a7722890Shuangjie.albert 
1700f51f9826STiwei Bie 	/*
1701f51f9826STiwei Bie 	 * If we expect an interrupt for the next entry, tell host
1702f51f9826STiwei Bie 	 * by writing event index and flush out the write before
1703f51f9826STiwei Bie 	 * the read in the next get_buf call.
1704f51f9826STiwei Bie 	 */
1705f51f9826STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1706f51f9826STiwei Bie 		virtio_store_mb(vq->weak_barriers,
1707f51f9826STiwei Bie 				&vq->packed.vring.driver->off_wrap,
1708a7722890Shuangjie.albert 				cpu_to_le16(vq->last_used_idx));
1709f51f9826STiwei Bie 
17101ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
17111ce9e605STiwei Bie 
17121ce9e605STiwei Bie 	END_USE(vq);
17131ce9e605STiwei Bie 	return ret;
17141ce9e605STiwei Bie }
17151ce9e605STiwei Bie 
17161ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
17171ce9e605STiwei Bie {
17181ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17191ce9e605STiwei Bie 
17201ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
17211ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
17226c0b057cSAlbert Huang 
17236c0b057cSAlbert Huang 		/*
17246c0b057cSAlbert Huang 		 * If device triggered an event already it won't trigger one again:
17256c0b057cSAlbert Huang 		 * no need to disable.
17266c0b057cSAlbert Huang 		 */
17276c0b057cSAlbert Huang 		if (vq->event_triggered)
17286c0b057cSAlbert Huang 			return;
17296c0b057cSAlbert Huang 
17301ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17311ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
17321ce9e605STiwei Bie 	}
17331ce9e605STiwei Bie }
17341ce9e605STiwei Bie 
173531532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
17361ce9e605STiwei Bie {
17371ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17381ce9e605STiwei Bie 
17391ce9e605STiwei Bie 	START_USE(vq);
17401ce9e605STiwei Bie 
17411ce9e605STiwei Bie 	/*
17421ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
17431ce9e605STiwei Bie 	 * more to do.
17441ce9e605STiwei Bie 	 */
17451ce9e605STiwei Bie 
1746f51f9826STiwei Bie 	if (vq->event) {
1747f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap =
1748a7722890Shuangjie.albert 			cpu_to_le16(vq->last_used_idx);
1749f51f9826STiwei Bie 		/*
1750f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1751f51f9826STiwei Bie 		 * counter first before updating event flags.
1752f51f9826STiwei Bie 		 */
1753f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1754f51f9826STiwei Bie 	}
1755f51f9826STiwei Bie 
17561ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1757f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1758f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1759f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
17601ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17611ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
17621ce9e605STiwei Bie 	}
17631ce9e605STiwei Bie 
17641ce9e605STiwei Bie 	END_USE(vq);
1765a7722890Shuangjie.albert 	return vq->last_used_idx;
17661ce9e605STiwei Bie }
17671ce9e605STiwei Bie 
17681ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
17691ce9e605STiwei Bie {
17701ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17711ce9e605STiwei Bie 	bool wrap_counter;
17721ce9e605STiwei Bie 	u16 used_idx;
17731ce9e605STiwei Bie 
17741ce9e605STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
17751ce9e605STiwei Bie 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
17761ce9e605STiwei Bie 
17771ce9e605STiwei Bie 	return is_used_desc_packed(vq, used_idx, wrap_counter);
17781ce9e605STiwei Bie }
17791ce9e605STiwei Bie 
17801ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
17811ce9e605STiwei Bie {
17821ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1783a7722890Shuangjie.albert 	u16 used_idx, wrap_counter, last_used_idx;
1784f51f9826STiwei Bie 	u16 bufs;
17851ce9e605STiwei Bie 
17861ce9e605STiwei Bie 	START_USE(vq);
17871ce9e605STiwei Bie 
17881ce9e605STiwei Bie 	/*
17891ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
17901ce9e605STiwei Bie 	 * more to do.
17911ce9e605STiwei Bie 	 */
17921ce9e605STiwei Bie 
1793f51f9826STiwei Bie 	if (vq->event) {
1794f51f9826STiwei Bie 		/* TODO: tune this threshold */
1795f51f9826STiwei Bie 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1796a7722890Shuangjie.albert 		last_used_idx = READ_ONCE(vq->last_used_idx);
1797a7722890Shuangjie.albert 		wrap_counter = packed_used_wrap_counter(last_used_idx);
17981ce9e605STiwei Bie 
1799a7722890Shuangjie.albert 		used_idx = packed_last_used(last_used_idx) + bufs;
1800f51f9826STiwei Bie 		if (used_idx >= vq->packed.vring.num) {
1801f51f9826STiwei Bie 			used_idx -= vq->packed.vring.num;
1802f51f9826STiwei Bie 			wrap_counter ^= 1;
1803f51f9826STiwei Bie 		}
1804f51f9826STiwei Bie 
1805f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1806f51f9826STiwei Bie 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1807f51f9826STiwei Bie 
1808f51f9826STiwei Bie 		/*
1809f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1810f51f9826STiwei Bie 		 * counter first before updating event flags.
1811f51f9826STiwei Bie 		 */
1812f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1813f51f9826STiwei Bie 	}
1814f51f9826STiwei Bie 
18151ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1816f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1817f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1818f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
18191ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
18201ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
18211ce9e605STiwei Bie 	}
18221ce9e605STiwei Bie 
18231ce9e605STiwei Bie 	/*
18241ce9e605STiwei Bie 	 * We need to update event suppression structure first
18251ce9e605STiwei Bie 	 * before re-checking for more used buffers.
18261ce9e605STiwei Bie 	 */
18271ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
18281ce9e605STiwei Bie 
1829a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1830a7722890Shuangjie.albert 	wrap_counter = packed_used_wrap_counter(last_used_idx);
1831a7722890Shuangjie.albert 	used_idx = packed_last_used(last_used_idx);
1832a7722890Shuangjie.albert 	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
18331ce9e605STiwei Bie 		END_USE(vq);
18341ce9e605STiwei Bie 		return false;
18351ce9e605STiwei Bie 	}
18361ce9e605STiwei Bie 
18371ce9e605STiwei Bie 	END_USE(vq);
18381ce9e605STiwei Bie 	return true;
18391ce9e605STiwei Bie }
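
/*
 * Editor's note: the 3/4 heuristic above delays the interrupt until
 * most in-flight buffers have completed.  With vring.num = 256 and
 * num_free = 56, for example, 200 buffers are outstanding, so
 * bufs = 150 and the event index is placed 150 used entries beyond
 * last_used_idx (wrapping, and flipping the wrap counter, if that
 * crosses the ring boundary).
 */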
18401ce9e605STiwei Bie 
18411ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
18421ce9e605STiwei Bie {
18431ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
18441ce9e605STiwei Bie 	unsigned int i;
18451ce9e605STiwei Bie 	void *buf;
18461ce9e605STiwei Bie 
18471ce9e605STiwei Bie 	START_USE(vq);
18481ce9e605STiwei Bie 
18491ce9e605STiwei Bie 	for (i = 0; i < vq->packed.vring.num; i++) {
18501ce9e605STiwei Bie 		if (!vq->packed.desc_state[i].data)
18511ce9e605STiwei Bie 			continue;
18521ce9e605STiwei Bie 		/* detach_buf clears data, so grab it now. */
18531ce9e605STiwei Bie 		buf = vq->packed.desc_state[i].data;
18541ce9e605STiwei Bie 		detach_buf_packed(vq, i, NULL);
18551ce9e605STiwei Bie 		END_USE(vq);
18561ce9e605STiwei Bie 		return buf;
18571ce9e605STiwei Bie 	}
18581ce9e605STiwei Bie 	/* That should have freed everything. */
18591ce9e605STiwei Bie 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
18601ce9e605STiwei Bie 
18611ce9e605STiwei Bie 	END_USE(vq);
18621ce9e605STiwei Bie 	return NULL;
18631ce9e605STiwei Bie }
18641ce9e605STiwei Bie 
186596ef18a2SXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
18665a222421SJason Wang {
18675a222421SJason Wang 	struct vring_desc_extra *desc_extra;
18685a222421SJason Wang 	unsigned int i;
18695a222421SJason Wang 
18705a222421SJason Wang 	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
18715a222421SJason Wang 				   GFP_KERNEL);
18725a222421SJason Wang 	if (!desc_extra)
18735a222421SJason Wang 		return NULL;
18745a222421SJason Wang 
18755a222421SJason Wang 	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
18765a222421SJason Wang 
18775a222421SJason Wang 	for (i = 0; i < num - 1; i++)
18785a222421SJason Wang 		desc_extra[i].next = i + 1;
18795a222421SJason Wang 
18805a222421SJason Wang 	return desc_extra;
18815a222421SJason Wang }
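
/*
 * Editor's note: the loop above links entry i to i + 1 and the
 * memset() leaves desc_extra[num - 1].next == 0, so the initial free
 * list is 0 -> 1 -> ... -> num - 1 with free_head starting at 0 (see
 * virtqueue_vring_attach_packed() below).
 */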
18825a222421SJason Wang 
18836356f8bbSXuan Zhuo static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
18842713ea3cSJason Wang 			      struct virtio_device *vdev,
18852713ea3cSJason Wang 			      struct device *dma_dev)
18866356f8bbSXuan Zhuo {
18876356f8bbSXuan Zhuo 	if (vring_packed->vring.desc)
18886356f8bbSXuan Zhuo 		vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
18896356f8bbSXuan Zhuo 				 vring_packed->vring.desc,
18902713ea3cSJason Wang 				 vring_packed->ring_dma_addr,
18912713ea3cSJason Wang 				 dma_dev);
18926356f8bbSXuan Zhuo 
18936356f8bbSXuan Zhuo 	if (vring_packed->vring.driver)
18946356f8bbSXuan Zhuo 		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
18956356f8bbSXuan Zhuo 				 vring_packed->vring.driver,
18962713ea3cSJason Wang 				 vring_packed->driver_event_dma_addr,
18972713ea3cSJason Wang 				 dma_dev);
18986356f8bbSXuan Zhuo 
18996356f8bbSXuan Zhuo 	if (vring_packed->vring.device)
19006356f8bbSXuan Zhuo 		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
19016356f8bbSXuan Zhuo 				 vring_packed->vring.device,
19022713ea3cSJason Wang 				 vring_packed->device_event_dma_addr,
19032713ea3cSJason Wang 				 dma_dev);
19046356f8bbSXuan Zhuo 
19056356f8bbSXuan Zhuo 	kfree(vring_packed->desc_state);
19066356f8bbSXuan Zhuo 	kfree(vring_packed->desc_extra);
19076356f8bbSXuan Zhuo }
19086356f8bbSXuan Zhuo 
19096b60b9c0SXuan Zhuo static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
19106b60b9c0SXuan Zhuo 				    struct virtio_device *vdev,
19112713ea3cSJason Wang 				    u32 num, struct device *dma_dev)
19126b60b9c0SXuan Zhuo {
19136b60b9c0SXuan Zhuo 	struct vring_packed_desc *ring;
19146b60b9c0SXuan Zhuo 	struct vring_packed_desc_event *driver, *device;
19156b60b9c0SXuan Zhuo 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
19166b60b9c0SXuan Zhuo 	size_t ring_size_in_bytes, event_size_in_bytes;
19176b60b9c0SXuan Zhuo 
19186b60b9c0SXuan Zhuo 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
19196b60b9c0SXuan Zhuo 
19206b60b9c0SXuan Zhuo 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
19216b60b9c0SXuan Zhuo 				 &ring_dma_addr,
19222713ea3cSJason Wang 				 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
19232713ea3cSJason Wang 				 dma_dev);
19246b60b9c0SXuan Zhuo 	if (!ring)
19256b60b9c0SXuan Zhuo 		goto err;
19266b60b9c0SXuan Zhuo 
19276b60b9c0SXuan Zhuo 	vring_packed->vring.desc         = ring;
19286b60b9c0SXuan Zhuo 	vring_packed->ring_dma_addr      = ring_dma_addr;
19296b60b9c0SXuan Zhuo 	vring_packed->ring_size_in_bytes = ring_size_in_bytes;
19306b60b9c0SXuan Zhuo 
19316b60b9c0SXuan Zhuo 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
19326b60b9c0SXuan Zhuo 
19336b60b9c0SXuan Zhuo 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
19346b60b9c0SXuan Zhuo 				   &driver_event_dma_addr,
19352713ea3cSJason Wang 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
19362713ea3cSJason Wang 				   dma_dev);
19376b60b9c0SXuan Zhuo 	if (!driver)
19386b60b9c0SXuan Zhuo 		goto err;
19396b60b9c0SXuan Zhuo 
19406b60b9c0SXuan Zhuo 	vring_packed->vring.driver          = driver;
19416b60b9c0SXuan Zhuo 	vring_packed->event_size_in_bytes   = event_size_in_bytes;
19426b60b9c0SXuan Zhuo 	vring_packed->driver_event_dma_addr = driver_event_dma_addr;
19436b60b9c0SXuan Zhuo 
19446b60b9c0SXuan Zhuo 	device = vring_alloc_queue(vdev, event_size_in_bytes,
19456b60b9c0SXuan Zhuo 				   &device_event_dma_addr,
19462713ea3cSJason Wang 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
19472713ea3cSJason Wang 				   dma_dev);
19486b60b9c0SXuan Zhuo 	if (!device)
19496b60b9c0SXuan Zhuo 		goto err;
19506b60b9c0SXuan Zhuo 
19516b60b9c0SXuan Zhuo 	vring_packed->vring.device          = device;
19526b60b9c0SXuan Zhuo 	vring_packed->device_event_dma_addr = device_event_dma_addr;
19536b60b9c0SXuan Zhuo 
19546b60b9c0SXuan Zhuo 	vring_packed->vring.num = num;
19556b60b9c0SXuan Zhuo 
19566b60b9c0SXuan Zhuo 	return 0;
19576b60b9c0SXuan Zhuo 
19586b60b9c0SXuan Zhuo err:
19592713ea3cSJason Wang 	vring_free_packed(vring_packed, vdev, dma_dev);
19606b60b9c0SXuan Zhuo 	return -ENOMEM;
19616b60b9c0SXuan Zhuo }
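
/*
 * Editor's note: sizing for the three allocations above.  Each
 * vring_packed_desc is 16 bytes, so the ring is 16 * num bytes
 * (4096 bytes for num = 256), while the driver and device event
 * suppression areas are one vring_packed_desc_event each (two
 * __le16s, 4 bytes), allocated separately so each carries its own
 * DMA address.
 */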
19626b60b9c0SXuan Zhuo 
1963ef3167cfSXuan Zhuo static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
1964ef3167cfSXuan Zhuo {
1965ef3167cfSXuan Zhuo 	struct vring_desc_state_packed *state;
1966ef3167cfSXuan Zhuo 	struct vring_desc_extra *extra;
1967ef3167cfSXuan Zhuo 	u32 num = vring_packed->vring.num;
1968ef3167cfSXuan Zhuo 
1969ef3167cfSXuan Zhuo 	state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
1970ef3167cfSXuan Zhuo 	if (!state)
1971ef3167cfSXuan Zhuo 		goto err_desc_state;
1972ef3167cfSXuan Zhuo 
1973ef3167cfSXuan Zhuo 	memset(state, 0, num * sizeof(struct vring_desc_state_packed));
1974ef3167cfSXuan Zhuo 
1975ef3167cfSXuan Zhuo 	extra = vring_alloc_desc_extra(num);
1976ef3167cfSXuan Zhuo 	if (!extra)
1977ef3167cfSXuan Zhuo 		goto err_desc_extra;
1978ef3167cfSXuan Zhuo 
1979ef3167cfSXuan Zhuo 	vring_packed->desc_state = state;
1980ef3167cfSXuan Zhuo 	vring_packed->desc_extra = extra;
1981ef3167cfSXuan Zhuo 
1982ef3167cfSXuan Zhuo 	return 0;
1983ef3167cfSXuan Zhuo 
1984ef3167cfSXuan Zhuo err_desc_extra:
1985ef3167cfSXuan Zhuo 	kfree(state);
1986ef3167cfSXuan Zhuo err_desc_state:
1987ef3167cfSXuan Zhuo 	return -ENOMEM;
1988ef3167cfSXuan Zhuo }
1989ef3167cfSXuan Zhuo 
19901a107c87SXuan Zhuo static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
19911a107c87SXuan Zhuo 					bool callback)
19921a107c87SXuan Zhuo {
19931a107c87SXuan Zhuo 	vring_packed->next_avail_idx = 0;
19941a107c87SXuan Zhuo 	vring_packed->avail_wrap_counter = 1;
19951a107c87SXuan Zhuo 	vring_packed->event_flags_shadow = 0;
19961a107c87SXuan Zhuo 	vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
19971a107c87SXuan Zhuo 
19981a107c87SXuan Zhuo 	/* No callback?  Tell other side not to bother us. */
19991a107c87SXuan Zhuo 	if (!callback) {
20001a107c87SXuan Zhuo 		vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
20011a107c87SXuan Zhuo 		vring_packed->vring.driver->flags =
20021a107c87SXuan Zhuo 			cpu_to_le16(vring_packed->event_flags_shadow);
20031a107c87SXuan Zhuo 	}
20041a107c87SXuan Zhuo }
20051a107c87SXuan Zhuo 
200651d649f1SXuan Zhuo static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
200751d649f1SXuan Zhuo 					  struct vring_virtqueue_packed *vring_packed)
200851d649f1SXuan Zhuo {
200951d649f1SXuan Zhuo 	vq->packed = *vring_packed;
201051d649f1SXuan Zhuo 
201151d649f1SXuan Zhuo 	/* Put everything in free lists. */
201251d649f1SXuan Zhuo 	vq->free_head = 0;
201351d649f1SXuan Zhuo }
201451d649f1SXuan Zhuo 
201556775e14SXuan Zhuo static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
201656775e14SXuan Zhuo {
201756775e14SXuan Zhuo 	memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
201856775e14SXuan Zhuo 	memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
201956775e14SXuan Zhuo 
202056775e14SXuan Zhuo 	/* We need to reset desc.flags; for the reason, see is_used_desc_packed(). */
202156775e14SXuan Zhuo 	memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
202256775e14SXuan Zhuo 
202356775e14SXuan Zhuo 	virtqueue_init(vq, vq->packed.vring.num);
202456775e14SXuan Zhuo 	virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
202556775e14SXuan Zhuo }
202656775e14SXuan Zhuo 
20271ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed(
20281ce9e605STiwei Bie 	unsigned int index,
20291ce9e605STiwei Bie 	unsigned int num,
20301ce9e605STiwei Bie 	unsigned int vring_align,
20311ce9e605STiwei Bie 	struct virtio_device *vdev,
20321ce9e605STiwei Bie 	bool weak_barriers,
20331ce9e605STiwei Bie 	bool may_reduce_num,
20341ce9e605STiwei Bie 	bool context,
20351ce9e605STiwei Bie 	bool (*notify)(struct virtqueue *),
20361ce9e605STiwei Bie 	void (*callback)(struct virtqueue *),
20372713ea3cSJason Wang 	const char *name,
20382713ea3cSJason Wang 	struct device *dma_dev)
20391ce9e605STiwei Bie {
20406b60b9c0SXuan Zhuo 	struct vring_virtqueue_packed vring_packed = {};
20411ce9e605STiwei Bie 	struct vring_virtqueue *vq;
2042ef3167cfSXuan Zhuo 	int err;
20431ce9e605STiwei Bie 
20442713ea3cSJason Wang 	if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
20451ce9e605STiwei Bie 		goto err_ring;
20461ce9e605STiwei Bie 
20471ce9e605STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
20481ce9e605STiwei Bie 	if (!vq)
20491ce9e605STiwei Bie 		goto err_vq;
20501ce9e605STiwei Bie 
20511ce9e605STiwei Bie 	vq->vq.callback = callback;
20521ce9e605STiwei Bie 	vq->vq.vdev = vdev;
20531ce9e605STiwei Bie 	vq->vq.name = name;
20541ce9e605STiwei Bie 	vq->vq.index = index;
20554913e854SXuan Zhuo 	vq->vq.reset = false;
20561ce9e605STiwei Bie 	vq->we_own_ring = true;
20571ce9e605STiwei Bie 	vq->notify = notify;
20581ce9e605STiwei Bie 	vq->weak_barriers = weak_barriers;
2059c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
20608b4ec69dSJason Wang 	vq->broken = true;
2061c346dae4SJason Wang #else
2062c346dae4SJason Wang 	vq->broken = false;
2063c346dae4SJason Wang #endif
20641ce9e605STiwei Bie 	vq->packed_ring = true;
20652713ea3cSJason Wang 	vq->dma_dev = dma_dev;
20661ce9e605STiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
2067*8daafe9eSXuan Zhuo 	vq->premapped = false;
20681ce9e605STiwei Bie 
20691ce9e605STiwei Bie 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
20701ce9e605STiwei Bie 		!context;
20711ce9e605STiwei Bie 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
20721ce9e605STiwei Bie 
207345383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
207445383fb0STiwei Bie 		vq->weak_barriers = false;
207545383fb0STiwei Bie 
2076ef3167cfSXuan Zhuo 	err = vring_alloc_state_extra_packed(&vring_packed);
2077ef3167cfSXuan Zhuo 	if (err)
2078ef3167cfSXuan Zhuo 		goto err_state_extra;
20791ce9e605STiwei Bie 
20801a107c87SXuan Zhuo 	virtqueue_vring_init_packed(&vring_packed, !!callback);
20811ce9e605STiwei Bie 
20823a897128SXuan Zhuo 	virtqueue_init(vq, num);
208351d649f1SXuan Zhuo 	virtqueue_vring_attach_packed(vq, &vring_packed);
20843a897128SXuan Zhuo 
20850e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2086e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
20870e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
20881ce9e605STiwei Bie 	return &vq->vq;
20891ce9e605STiwei Bie 
2090ef3167cfSXuan Zhuo err_state_extra:
20911ce9e605STiwei Bie 	kfree(vq);
20921ce9e605STiwei Bie err_vq:
20932713ea3cSJason Wang 	vring_free_packed(&vring_packed, vdev, dma_dev);
20941ce9e605STiwei Bie err_ring:
20951ce9e605STiwei Bie 	return NULL;
20961ce9e605STiwei Bie }
20971ce9e605STiwei Bie 
2098947f9fcfSXuan Zhuo static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
2099947f9fcfSXuan Zhuo {
2100947f9fcfSXuan Zhuo 	struct vring_virtqueue_packed vring_packed = {};
2101947f9fcfSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2102947f9fcfSXuan Zhuo 	struct virtio_device *vdev = _vq->vdev;
2103947f9fcfSXuan Zhuo 	int err;
2104947f9fcfSXuan Zhuo 
21052713ea3cSJason Wang 	if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
2106947f9fcfSXuan Zhuo 		goto err_ring;
2107947f9fcfSXuan Zhuo 
2108947f9fcfSXuan Zhuo 	err = vring_alloc_state_extra_packed(&vring_packed);
2109947f9fcfSXuan Zhuo 	if (err)
2110947f9fcfSXuan Zhuo 		goto err_state_extra;
2111947f9fcfSXuan Zhuo 
2112947f9fcfSXuan Zhuo 	vring_free(&vq->vq);
2113947f9fcfSXuan Zhuo 
2114947f9fcfSXuan Zhuo 	virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
2115947f9fcfSXuan Zhuo 
2116947f9fcfSXuan Zhuo 	virtqueue_init(vq, vring_packed.vring.num);
2117947f9fcfSXuan Zhuo 	virtqueue_vring_attach_packed(vq, &vring_packed);
2118947f9fcfSXuan Zhuo 
2119947f9fcfSXuan Zhuo 	return 0;
2120947f9fcfSXuan Zhuo 
2121947f9fcfSXuan Zhuo err_state_extra:
21222713ea3cSJason Wang 	vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
2123947f9fcfSXuan Zhuo err_ring:
2124947f9fcfSXuan Zhuo 	virtqueue_reinit_packed(vq);
2125947f9fcfSXuan Zhuo 	return -ENOMEM;
2126947f9fcfSXuan Zhuo }
2127947f9fcfSXuan Zhuo 
21291ce9e605STiwei Bie /*
2130e6f633e5STiwei Bie  * Generic functions and exported symbols.
2131e6f633e5STiwei Bie  */
2132e6f633e5STiwei Bie 
2133e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq,
2134e6f633e5STiwei Bie 				struct scatterlist *sgs[],
2135e6f633e5STiwei Bie 				unsigned int total_sg,
2136e6f633e5STiwei Bie 				unsigned int out_sgs,
2137e6f633e5STiwei Bie 				unsigned int in_sgs,
2138e6f633e5STiwei Bie 				void *data,
2139e6f633e5STiwei Bie 				void *ctx,
2140e6f633e5STiwei Bie 				gfp_t gfp)
2141e6f633e5STiwei Bie {
21421ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21431ce9e605STiwei Bie 
21441ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
21451ce9e605STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp) :
21461ce9e605STiwei Bie 				 virtqueue_add_split(_vq, sgs, total_sg,
2147e6f633e5STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp);
2148e6f633e5STiwei Bie }
2149e6f633e5STiwei Bie 
2150e6f633e5STiwei Bie /**
2151e6f633e5STiwei Bie  * virtqueue_add_sgs - expose buffers to other end
2152a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2153e6f633e5STiwei Bie  * @sgs: array of terminated scatterlists.
2154a5581206SJiang Biao  * @out_sgs: the number of scatterlists readable by other side
2155a5581206SJiang Biao  * @in_sgs: the number of scatterlists which are writable (after readable ones)
2156e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2157e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2158e6f633e5STiwei Bie  *
2159e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2160e6f633e5STiwei Bie  * at the same time (except where noted).
2161e6f633e5STiwei Bie  *
2162e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
2163e6f633e5STiwei Bie  */
2164e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq,
2165e6f633e5STiwei Bie 		      struct scatterlist *sgs[],
2166e6f633e5STiwei Bie 		      unsigned int out_sgs,
2167e6f633e5STiwei Bie 		      unsigned int in_sgs,
2168e6f633e5STiwei Bie 		      void *data,
2169e6f633e5STiwei Bie 		      gfp_t gfp)
2170e6f633e5STiwei Bie {
2171e6f633e5STiwei Bie 	unsigned int i, total_sg = 0;
2172e6f633e5STiwei Bie 
2173e6f633e5STiwei Bie 	/* Count them first. */
2174e6f633e5STiwei Bie 	for (i = 0; i < out_sgs + in_sgs; i++) {
2175e6f633e5STiwei Bie 		struct scatterlist *sg;
2176e6f633e5STiwei Bie 
2177e6f633e5STiwei Bie 		for (sg = sgs[i]; sg; sg = sg_next(sg))
2178e6f633e5STiwei Bie 			total_sg++;
2179e6f633e5STiwei Bie 	}
2180e6f633e5STiwei Bie 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
2181e6f633e5STiwei Bie 			     data, NULL, gfp);
2182e6f633e5STiwei Bie }
2183e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
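
/*
 * Editor's sketch (not part of this file): minimal use of
 * virtqueue_add_sgs() by a hypothetical driver.  The helper name, the
 * buffers and the token below are assumptions for illustration only.
 */
#if 0
static int demo_queue_request(struct virtqueue *vq, void *hdr, size_t hlen,
			      void *resp, size_t rlen, void *token)
{
	struct scatterlist hdr_sg, resp_sg;
	struct scatterlist *sgs[2] = { &hdr_sg, &resp_sg };
	int err;

	sg_init_one(&hdr_sg, hdr, hlen);	/* device-readable */
	sg_init_one(&resp_sg, resp, rlen);	/* device-writable */

	/* One out sg followed by one in sg; @token comes back via get_buf. */
	err = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vq);
	return err;
}
#endif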
2184e6f633e5STiwei Bie 
2185e6f633e5STiwei Bie /**
2186e6f633e5STiwei Bie  * virtqueue_add_outbuf - expose output buffers to other end
2187e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2188e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2189e6f633e5STiwei Bie  * @num: the number of entries in @sg readable by other side
2190e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2191e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2192e6f633e5STiwei Bie  *
2193e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2194e6f633e5STiwei Bie  * at the same time (except where noted).
2195e6f633e5STiwei Bie  *
2196e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
2197e6f633e5STiwei Bie  */
2198e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq,
2199e6f633e5STiwei Bie 			 struct scatterlist *sg, unsigned int num,
2200e6f633e5STiwei Bie 			 void *data,
2201e6f633e5STiwei Bie 			 gfp_t gfp)
2202e6f633e5STiwei Bie {
2203e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
2204e6f633e5STiwei Bie }
2205e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
2206e6f633e5STiwei Bie 
2207e6f633e5STiwei Bie /**
2208e6f633e5STiwei Bie  * virtqueue_add_inbuf - expose input buffers to other end
2209e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2210e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2211e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
2212e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2213e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2214e6f633e5STiwei Bie  *
2215e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2216e6f633e5STiwei Bie  * at the same time (except where noted).
2217e6f633e5STiwei Bie  *
2218e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
2219e6f633e5STiwei Bie  */
2220e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq,
2221e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
2222e6f633e5STiwei Bie 			void *data,
2223e6f633e5STiwei Bie 			gfp_t gfp)
2224e6f633e5STiwei Bie {
2225e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
2226e6f633e5STiwei Bie }
2227e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
2228e6f633e5STiwei Bie 
2229e6f633e5STiwei Bie /**
2230e6f633e5STiwei Bie  * virtqueue_add_inbuf_ctx - expose input buffers to other end
2231e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2232e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2233e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
2234e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2235e6f633e5STiwei Bie  * @ctx: extra context for the token
2236e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2237e6f633e5STiwei Bie  *
2238e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2239e6f633e5STiwei Bie  * at the same time (except where noted).
2240e6f633e5STiwei Bie  *
2241e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
2242e6f633e5STiwei Bie  */
2243e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
2244e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
2245e6f633e5STiwei Bie 			void *data,
2246e6f633e5STiwei Bie 			void *ctx,
2247e6f633e5STiwei Bie 			gfp_t gfp)
2248e6f633e5STiwei Bie {
2249e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
2250e6f633e5STiwei Bie }
2251e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
2252e6f633e5STiwei Bie 
2253e6f633e5STiwei Bie /**
2254e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
2255a5581206SJiang Biao  * @_vq: the struct virtqueue
2256e6f633e5STiwei Bie  *
2257e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
2258e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
2259e6f633e5STiwei Bie  *		virtqueue_notify(vq);
2260e6f633e5STiwei Bie  *
2261e6f633e5STiwei Bie  * This is sometimes useful because virtqueue_kick_prepare() needs
2262e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
2263e6f633e5STiwei Bie  */
2264e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
2265e6f633e5STiwei Bie {
22661ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
22671ce9e605STiwei Bie 
22681ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
22691ce9e605STiwei Bie 				 virtqueue_kick_prepare_split(_vq);
2270e6f633e5STiwei Bie }
2271e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
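
/*
 * Editor's sketch (not part of this file): the prepare/notify split
 * lets a driver drop its lock before the (often expensive) host
 * notification.  struct demo_priv, its lock and the token are
 * hypothetical.
 */
#if 0
static int demo_xmit(struct demo_priv *priv, struct scatterlist *sg,
		     void *token)
{
	struct virtqueue *vq = priv->vq;
	unsigned long flags;
	bool kick;
	int err;

	spin_lock_irqsave(&priv->lock, flags);
	err = virtqueue_add_outbuf(vq, sg, 1, token, GFP_ATOMIC);
	kick = !err && virtqueue_kick_prepare(vq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Notify (typically an MMIO write or hypercall) outside the lock. */
	if (kick)
		virtqueue_notify(vq);
	return err;
}
#endif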
2272e6f633e5STiwei Bie 
2273e6f633e5STiwei Bie /**
2274e6f633e5STiwei Bie  * virtqueue_notify - second half of split virtqueue_kick call.
2275a5581206SJiang Biao  * @_vq: the struct virtqueue
2276e6f633e5STiwei Bie  *
2277e6f633e5STiwei Bie  * This does not need to be serialized.
2278e6f633e5STiwei Bie  *
2279e6f633e5STiwei Bie  * Returns false if host notify failed or queue is broken, otherwise true.
2280e6f633e5STiwei Bie  */
2281e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq)
2282e6f633e5STiwei Bie {
2283e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2284e6f633e5STiwei Bie 
2285e6f633e5STiwei Bie 	if (unlikely(vq->broken))
2286e6f633e5STiwei Bie 		return false;
2287e6f633e5STiwei Bie 
2288e6f633e5STiwei Bie 	/* Prod other side to tell it about changes. */
2289e6f633e5STiwei Bie 	if (!vq->notify(_vq)) {
2290e6f633e5STiwei Bie 		vq->broken = true;
2291e6f633e5STiwei Bie 		return false;
2292e6f633e5STiwei Bie 	}
2293e6f633e5STiwei Bie 	return true;
2294e6f633e5STiwei Bie }
2295e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify);
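
/*
 * Editorial sketch (not part of the original file): the prepare/notify split
 * lets a driver drop its own lock before the potentially slow host
 * notification.  The spinlock here is a hypothetical driver lock guarding
 * the virtqueue_add_* calls.
 */
#if 0	/* illustrative only, not built */
static void example_kick_outside_lock(struct virtqueue *vq, spinlock_t *lock)
{
	bool needs_kick;

	spin_lock(lock);
	/* ... virtqueue_add_* calls happen here, under the lock ... */
	needs_kick = virtqueue_kick_prepare(vq);	/* must be serialized */
	spin_unlock(lock);

	if (needs_kick)
		virtqueue_notify(vq);		/* needs no serialization */
}
#endif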
2296e6f633e5STiwei Bie 
2297e6f633e5STiwei Bie /**
2298e6f633e5STiwei Bie  * virtqueue_kick - update after add_buf
2299e6f633e5STiwei Bie  * @vq: the struct virtqueue
2300e6f633e5STiwei Bie  *
2301e6f633e5STiwei Bie  * After one or more virtqueue_add_* calls, invoke this to kick
2302e6f633e5STiwei Bie  * the other side.
2303e6f633e5STiwei Bie  *
2304e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2305e6f633e5STiwei Bie  * operations at the same time (except where noted).
2306e6f633e5STiwei Bie  *
2307e6f633e5STiwei Bie  * Returns false if kick failed, otherwise true.
2308e6f633e5STiwei Bie  */
2309e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq)
2310e6f633e5STiwei Bie {
2311e6f633e5STiwei Bie 	if (virtqueue_kick_prepare(vq))
2312e6f633e5STiwei Bie 		return virtqueue_notify(vq);
2313e6f633e5STiwei Bie 	return true;
2314e6f633e5STiwei Bie }
2315e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick);
2316e6f633e5STiwei Bie 
2317e6f633e5STiwei Bie /**
231831c11db6SYang Li  * virtqueue_get_buf_ctx - get the next used buffer
2319a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2320e6f633e5STiwei Bie  * @len: the length written into the buffer
2321a5581206SJiang Biao  * @ctx: extra context for the token
2322e6f633e5STiwei Bie  *
2323e6f633e5STiwei Bie  * If the device wrote data into the buffer, @len will be set to the
2324e6f633e5STiwei Bie  * amount written.  This means you don't need to clear the buffer
2325e6f633e5STiwei Bie  * beforehand to ensure there's no data leakage in the case of short
2326e6f633e5STiwei Bie  * writes.
2327e6f633e5STiwei Bie  *
2328e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2329e6f633e5STiwei Bie  * operations at the same time (except where noted).
2330e6f633e5STiwei Bie  *
2331e6f633e5STiwei Bie  * Returns NULL if there are no used buffers, or the "data" token
2332e6f633e5STiwei Bie  * handed to virtqueue_add_*().
2333e6f633e5STiwei Bie  */
2334e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
2335e6f633e5STiwei Bie 			    void **ctx)
2336e6f633e5STiwei Bie {
23371ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
23381ce9e605STiwei Bie 
23391ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
23401ce9e605STiwei Bie 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
2341e6f633e5STiwei Bie }
2342e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2343e6f633e5STiwei Bie 
2344e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2345e6f633e5STiwei Bie {
2346e6f633e5STiwei Bie 	return virtqueue_get_buf_ctx(_vq, len, NULL);
2347e6f633e5STiwei Bie }
2348e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf);
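
/*
 * Editorial sketch (not part of the original file): a typical completion
 * loop draining every used buffer.  process_buf() is a hypothetical
 * driver-side consumer.
 */
#if 0	/* illustrative only, not built */
static void example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	/* Each token is the @data pointer given to virtqueue_add_*(). */
	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
		process_buf(buf, len);	/* hypothetical; @len = bytes written */
}
#endif
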
2349e6f633e5STiwei Bie /**
2350e6f633e5STiwei Bie  * virtqueue_disable_cb - disable callbacks
2351a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2352e6f633e5STiwei Bie  *
2353e6f633e5STiwei Bie  * Note that this is not necessarily synchronous, hence unreliable and only
2354e6f633e5STiwei Bie  * useful as an optimization.
2355e6f633e5STiwei Bie  *
2356e6f633e5STiwei Bie  * Unlike other operations, this need not be serialized.
2357e6f633e5STiwei Bie  */
2358e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq)
2359e6f633e5STiwei Bie {
23601ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
23611ce9e605STiwei Bie 
23621ce9e605STiwei Bie 	if (vq->packed_ring)
23631ce9e605STiwei Bie 		virtqueue_disable_cb_packed(_vq);
23641ce9e605STiwei Bie 	else
2365e6f633e5STiwei Bie 		virtqueue_disable_cb_split(_vq);
2366e6f633e5STiwei Bie }
2367e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2368e6f633e5STiwei Bie 
2369e6f633e5STiwei Bie /**
2370e6f633e5STiwei Bie  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2371a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2372e6f633e5STiwei Bie  *
2373e6f633e5STiwei Bie  * This re-enables callbacks; it returns the current queue state
2374e6f633e5STiwei Bie  * in an opaque unsigned value. This value should later be tested by
2375e6f633e5STiwei Bie  * virtqueue_poll() to detect a possible race between the driver checking for
2376e6f633e5STiwei Bie  * more work, and enabling callbacks.
2377e6f633e5STiwei Bie  *
2378e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2379e6f633e5STiwei Bie  * operations at the same time (except where noted).
2380e6f633e5STiwei Bie  */
238131532340SSolomon Tan unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2382e6f633e5STiwei Bie {
23831ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
23841ce9e605STiwei Bie 
23858d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
23868d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
23878d622d21SMichael S. Tsirkin 
23881ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
23891ce9e605STiwei Bie 				 virtqueue_enable_cb_prepare_split(_vq);
2390e6f633e5STiwei Bie }
2391e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2392e6f633e5STiwei Bie 
2393e6f633e5STiwei Bie /**
2394e6f633e5STiwei Bie  * virtqueue_poll - query pending used buffers
2395a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2396e6f633e5STiwei Bie  * @last_used_idx: virtqueue state (from a call to virtqueue_enable_cb_prepare()).
2397e6f633e5STiwei Bie  *
2398e6f633e5STiwei Bie  * Returns "true" if there are pending used buffers in the queue.
2399e6f633e5STiwei Bie  *
2400e6f633e5STiwei Bie  * This does not need to be serialized.
2401e6f633e5STiwei Bie  */
240231532340SSolomon Tan bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
2403e6f633e5STiwei Bie {
2404e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2405e6f633e5STiwei Bie 
2406481a0d74SMao Wenan 	if (unlikely(vq->broken))
2407481a0d74SMao Wenan 		return false;
2408481a0d74SMao Wenan 
2409e6f633e5STiwei Bie 	virtio_mb(vq->weak_barriers);
24101ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
24111ce9e605STiwei Bie 				 virtqueue_poll_split(_vq, last_used_idx);
2412e6f633e5STiwei Bie }
2413e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll);
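
/*
 * Editorial sketch (not part of the original file): using the opaque state
 * from virtqueue_enable_cb_prepare() with virtqueue_poll().  This is what
 * virtqueue_enable_cb() below does internally; keeping the two steps apart
 * lets the poll run later, or from a context that must not be serialized.
 */
#if 0	/* illustrative only, not built */
static bool example_reenable_and_recheck(struct virtqueue *vq)
{
	unsigned int opaque = virtqueue_enable_cb_prepare(vq);

	/* True if a used buffer arrived while callbacks were still off. */
	return virtqueue_poll(vq, opaque);
}
#endif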
2414e6f633e5STiwei Bie 
2415e6f633e5STiwei Bie /**
2416e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
2417a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2418e6f633e5STiwei Bie  *
2419e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
2420e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
2421e6f633e5STiwei Bie  * checking for more work, and enabling callbacks.
2422e6f633e5STiwei Bie  *
2423e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2424e6f633e5STiwei Bie  * operations at the same time (except where noted).
2425e6f633e5STiwei Bie  */
2426e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
2427e6f633e5STiwei Bie {
242831532340SSolomon Tan 	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2429e6f633e5STiwei Bie 
2430e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
2431e6f633e5STiwei Bie }
2432e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
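
/*
 * Editorial sketch (not part of the original file): the canonical
 * disable/drain/re-enable loop built from the helpers above, closing the
 * race between emptying the queue and re-enabling callbacks.  process_buf()
 * is a hypothetical consumer.
 */
#if 0	/* illustrative only, not built */
static void example_poll_loop(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	virtqueue_disable_cb(vq);
	for (;;) {
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			process_buf(buf, len);	/* hypothetical */
		/* Re-enable callbacks, then re-check for late arrivals. */
		if (virtqueue_enable_cb(vq))
			break;
		virtqueue_disable_cb(vq);
	}
}
#endif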
2433e6f633e5STiwei Bie 
2434e6f633e5STiwei Bie /**
2435e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2436a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2437e6f633e5STiwei Bie  *
2438e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2439e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2440e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2441e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work,
2442e6f633e5STiwei Bie  * and enabling callbacks.
2443e6f633e5STiwei Bie  *
2444e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2445e6f633e5STiwei Bie  * operations at the same time (except where noted).
2446e6f633e5STiwei Bie  */
2447e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2448e6f633e5STiwei Bie {
24491ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24501ce9e605STiwei Bie 
24518d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
24528d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
24538d622d21SMichael S. Tsirkin 
24541ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
24551ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2456e6f633e5STiwei Bie }
2457e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2458e6f633e5STiwei Bie 
2459138fd251STiwei Bie /**
2460138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2461a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2462138fd251STiwei Bie  *
2463138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2464a62eecb3SXuan Zhuo  * This is not valid on an active queue; it is useful during device
2465a62eecb3SXuan Zhuo  * shutdown or after a queue reset.
2466138fd251STiwei Bie  */
2467138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2468138fd251STiwei Bie {
24691ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24701ce9e605STiwei Bie 
24711ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
24721ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2473138fd251STiwei Bie }
24747c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
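
/*
 * Editorial sketch (not part of the original file): reclaiming buffers that
 * were added but never used, typically from a driver's remove/freeze path
 * once the device has been reset.  free_buf() is hypothetical.
 */
#if 0	/* illustrative only, not built */
static void example_free_unused(struct virtqueue *vq)
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		free_buf(buf);	/* hypothetical driver-specific cleanup */
}
#endif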
2475c021eac4SShirley Ma 
2476138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2477138fd251STiwei Bie {
24781ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2479138fd251STiwei Bie }
2480138fd251STiwei Bie 
24815c669c4aSRicardo Cañuelo /**
24825c669c4aSRicardo Cañuelo  * vring_interrupt - notify a virtqueue on an interrupt
24835c669c4aSRicardo Cañuelo  * @irq: the IRQ number (ignored)
24845c669c4aSRicardo Cañuelo  * @_vq: the struct virtqueue to notify
24855c669c4aSRicardo Cañuelo  *
24865c669c4aSRicardo Cañuelo  * Calls the callback function of @_vq to process the virtqueue
24875c669c4aSRicardo Cañuelo  * notification.
24885c669c4aSRicardo Cañuelo  */
24890a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
24900a8a69ddSRusty Russell {
24910a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
24920a8a69ddSRusty Russell 
24930a8a69ddSRusty Russell 	if (!more_used(vq)) {
24940a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
24950a8a69ddSRusty Russell 		return IRQ_NONE;
24960a8a69ddSRusty Russell 	}
24970a8a69ddSRusty Russell 
24988b4ec69dSJason Wang 	if (unlikely(vq->broken)) {
2499c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
25008b4ec69dSJason Wang 		dev_warn_once(&vq->vq.vdev->dev,
25018b4ec69dSJason Wang 			      "virtio vring IRQ raised before DRIVER_OK");
25028b4ec69dSJason Wang 		return IRQ_NONE;
2503c346dae4SJason Wang #else
2504c346dae4SJason Wang 		return IRQ_HANDLED;
2505c346dae4SJason Wang #endif
25068b4ec69dSJason Wang 	}
25070a8a69ddSRusty Russell 
25088d622d21SMichael S. Tsirkin 	/* Just a hint for performance: so it's ok that this can be racy! */
25098d622d21SMichael S. Tsirkin 	if (vq->event)
25108d622d21SMichael S. Tsirkin 		vq->event_triggered = true;
25118d622d21SMichael S. Tsirkin 
25120a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
251318445c4dSRusty Russell 	if (vq->vq.callback)
251418445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
25150a8a69ddSRusty Russell 
25160a8a69ddSRusty Russell 	return IRQ_HANDLED;
25170a8a69ddSRusty Russell }
2518c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
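
/*
 * Editorial sketch (not part of the original file): a transport wiring
 * vring_interrupt() up as a per-queue interrupt handler, roughly in the
 * style of virtio-pci's per-vq MSI-X vectors.  The irq number and name are
 * assumptions, and <linux/interrupt.h> is assumed for request_irq().
 */
#if 0	/* illustrative only, not built */
static int example_request_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	/* vring_interrupt() ignores @irq and takes the vq as its cookie. */
	return request_irq(irq, vring_interrupt, 0, "example-vq", vq);
}
#endif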
25190a8a69ddSRusty Russell 
25201ce9e605STiwei Bie /* Only available for split ring */
252107d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
2522cd4c812aSXuan Zhuo 					       struct vring_virtqueue_split *vring_split,
25230a8a69ddSRusty Russell 					       struct virtio_device *vdev,
25247b21e34fSRusty Russell 					       bool weak_barriers,
2525f94682ddSMichael S. Tsirkin 					       bool context,
252646f9c2b9SHeinz Graalfs 					       bool (*notify)(struct virtqueue *),
25279499f5e7SRusty Russell 					       void (*callback)(struct virtqueue *),
25282713ea3cSJason Wang 					       const char *name,
25292713ea3cSJason Wang 					       struct device *dma_dev)
25300a8a69ddSRusty Russell {
25312a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
2532a2b36c8dSXuan Zhuo 	int err;
25330a8a69ddSRusty Russell 
25341ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
25351ce9e605STiwei Bie 		return NULL;
25361ce9e605STiwei Bie 
2537cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
25380a8a69ddSRusty Russell 	if (!vq)
25390a8a69ddSRusty Russell 		return NULL;
25400a8a69ddSRusty Russell 
25411ce9e605STiwei Bie 	vq->packed_ring = false;
25420a8a69ddSRusty Russell 	vq->vq.callback = callback;
25430a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
25449499f5e7SRusty Russell 	vq->vq.name = name;
254506ca287dSRusty Russell 	vq->vq.index = index;
25464913e854SXuan Zhuo 	vq->vq.reset = false;
25472a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
25480a8a69ddSRusty Russell 	vq->notify = notify;
25497b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
2550c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
25518b4ec69dSJason Wang 	vq->broken = true;
2552c346dae4SJason Wang #else
2553c346dae4SJason Wang 	vq->broken = false;
2554c346dae4SJason Wang #endif
25552713ea3cSJason Wang 	vq->dma_dev = dma_dev;
2556fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
2557*8daafe9eSXuan Zhuo 	vq->premapped = false;
25580a8a69ddSRusty Russell 
25595a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
25605a08b04fSMichael S. Tsirkin 		!context;
2561a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
25629fa29b9dSMark McLoughlin 
256345383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
256445383fb0STiwei Bie 		vq->weak_barriers = false;
256545383fb0STiwei Bie 
2566a2b36c8dSXuan Zhuo 	err = vring_alloc_state_extra_split(vring_split);
2567a2b36c8dSXuan Zhuo 	if (err) {
2568a2b36c8dSXuan Zhuo 		kfree(vq);
2569a2b36c8dSXuan Zhuo 		return NULL;
2570a2b36c8dSXuan Zhuo 	}
257172b5e895SJason Wang 
2572198fa7beSXuan Zhuo 	virtqueue_vring_init_split(vring_split, vq);
2573198fa7beSXuan Zhuo 
2574cd4c812aSXuan Zhuo 	virtqueue_init(vq, vring_split->vring.num);
2575e1d6a423SXuan Zhuo 	virtqueue_vring_attach_split(vq, vring_split);
25763a897128SXuan Zhuo 
25770e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2578e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
25790e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
25800a8a69ddSRusty Russell 	return &vq->vq;
25810a8a69ddSRusty Russell }
25822a2d1382SAndy Lutomirski 
25832a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
25842a2d1382SAndy Lutomirski 	unsigned int index,
25852a2d1382SAndy Lutomirski 	unsigned int num,
25862a2d1382SAndy Lutomirski 	unsigned int vring_align,
25872a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
25882a2d1382SAndy Lutomirski 	bool weak_barriers,
25892a2d1382SAndy Lutomirski 	bool may_reduce_num,
2590f94682ddSMichael S. Tsirkin 	bool context,
25912a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
25922a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
25932a2d1382SAndy Lutomirski 	const char *name)
25942a2d1382SAndy Lutomirski {
25961ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
25971ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
25981ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
25992713ea3cSJason Wang 				context, notify, callback, name, vdev->dev.parent);
26001ce9e605STiwei Bie 
2601d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2602d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
26032713ea3cSJason Wang 			context, notify, callback, name, vdev->dev.parent);
26042a2d1382SAndy Lutomirski }
26052a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
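
/*
 * Editorial sketch (not part of the original file): how a transport might
 * create a ring with vring_create_virtqueue().  The queue size, alignment
 * and hook names are assumptions.
 */
#if 0	/* illustrative only, not built */
static struct virtqueue *example_setup_vq(struct virtio_device *vdev,
					  unsigned int index,
					  bool (*notify)(struct virtqueue *),
					  void (*callback)(struct virtqueue *))
{
	/* 256 entries, page-aligned, may shrink on ENOMEM, no per-buffer ctx. */
	return vring_create_virtqueue(index, 256, 4096, vdev,
				      true /* weak_barriers */,
				      true /* may_reduce_num */,
				      false /* context */,
				      notify, callback, "example-vq");
}
#endif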
26062a2d1382SAndy Lutomirski 
26072713ea3cSJason Wang struct virtqueue *vring_create_virtqueue_dma(
26082713ea3cSJason Wang 	unsigned int index,
26092713ea3cSJason Wang 	unsigned int num,
26102713ea3cSJason Wang 	unsigned int vring_align,
26112713ea3cSJason Wang 	struct virtio_device *vdev,
26122713ea3cSJason Wang 	bool weak_barriers,
26132713ea3cSJason Wang 	bool may_reduce_num,
26142713ea3cSJason Wang 	bool context,
26152713ea3cSJason Wang 	bool (*notify)(struct virtqueue *),
26162713ea3cSJason Wang 	void (*callback)(struct virtqueue *),
26172713ea3cSJason Wang 	const char *name,
26182713ea3cSJason Wang 	struct device *dma_dev)
26192713ea3cSJason Wang {
26212713ea3cSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
26222713ea3cSJason Wang 		return vring_create_virtqueue_packed(index, num, vring_align,
26232713ea3cSJason Wang 				vdev, weak_barriers, may_reduce_num,
26242713ea3cSJason Wang 				context, notify, callback, name, dma_dev);
26252713ea3cSJason Wang 
26262713ea3cSJason Wang 	return vring_create_virtqueue_split(index, num, vring_align,
26272713ea3cSJason Wang 			vdev, weak_barriers, may_reduce_num,
26282713ea3cSJason Wang 			context, notify, callback, name, dma_dev);
26292713ea3cSJason Wang }
26302713ea3cSJason Wang EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
26312713ea3cSJason Wang 
2632c790e8e1SXuan Zhuo /**
2633c790e8e1SXuan Zhuo  * virtqueue_resize - resize the vring of vq
2634c790e8e1SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
2635c790e8e1SXuan Zhuo  * @num: new number of ring entries
2636c790e8e1SXuan Zhuo  * @recycle: callback to recycle buffers that are no longer used
2637c790e8e1SXuan Zhuo  *
2638c790e8e1SXuan Zhuo  * When a new vring really has to be created, the current vq is first put
2639c790e8e1SXuan Zhuo  * into the reset state; the passed callback is then invoked to recycle each
2640c790e8e1SXuan Zhuo  * buffer that is no longer in use. The old vring is released only after the
2641c790e8e1SXuan Zhuo  * new vring has been created successfully.
2642c790e8e1SXuan Zhuo  *
2643c790e8e1SXuan Zhuo  * Caller must ensure we don't call this with other virtqueue operations
2644c790e8e1SXuan Zhuo  * at the same time (except where noted).
2645c790e8e1SXuan Zhuo  *
2646c790e8e1SXuan Zhuo  * Returns zero or a negative error.
2647c790e8e1SXuan Zhuo  * 0: success.
2648c790e8e1SXuan Zhuo  * -ENOMEM: failed to allocate a new ring; the vq falls back to the original
2649c790e8e1SXuan Zhuo  *  ring size and can still be used normally.
2650c790e8e1SXuan Zhuo  * -EBUSY: failed to re-enable the vq after the reset; the vq may not work properly.
2651c790e8e1SXuan Zhuo  * -ENOENT: the transport or device does not support resizing.
2652c790e8e1SXuan Zhuo  * -E2BIG/-EINVAL: @num is invalid.
2653c790e8e1SXuan Zhuo  * -EPERM: the vq does not own its ring, so resizing is not permitted.
2654c790e8e1SXuan Zhuo  *
2655c790e8e1SXuan Zhuo  */
2656c790e8e1SXuan Zhuo int virtqueue_resize(struct virtqueue *_vq, u32 num,
2657c790e8e1SXuan Zhuo 		     void (*recycle)(struct virtqueue *vq, void *buf))
2658c790e8e1SXuan Zhuo {
2659c790e8e1SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2660c790e8e1SXuan Zhuo 	struct virtio_device *vdev = vq->vq.vdev;
2661c790e8e1SXuan Zhuo 	void *buf;
2662c790e8e1SXuan Zhuo 	int err;
2663c790e8e1SXuan Zhuo 
2664c790e8e1SXuan Zhuo 	if (!vq->we_own_ring)
2665c790e8e1SXuan Zhuo 		return -EPERM;
2666c790e8e1SXuan Zhuo 
2667c790e8e1SXuan Zhuo 	if (num > vq->vq.num_max)
2668c790e8e1SXuan Zhuo 		return -E2BIG;
2669c790e8e1SXuan Zhuo 
2670c790e8e1SXuan Zhuo 	if (!num)
2671c790e8e1SXuan Zhuo 		return -EINVAL;
2672c790e8e1SXuan Zhuo 
2673c790e8e1SXuan Zhuo 	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
2674c790e8e1SXuan Zhuo 		return 0;
2675c790e8e1SXuan Zhuo 
2676c790e8e1SXuan Zhuo 	if (!vdev->config->disable_vq_and_reset)
2677c790e8e1SXuan Zhuo 		return -ENOENT;
2678c790e8e1SXuan Zhuo 
2679c790e8e1SXuan Zhuo 	if (!vdev->config->enable_vq_after_reset)
2680c790e8e1SXuan Zhuo 		return -ENOENT;
2681c790e8e1SXuan Zhuo 
2682c790e8e1SXuan Zhuo 	err = vdev->config->disable_vq_and_reset(_vq);
2683c790e8e1SXuan Zhuo 	if (err)
2684c790e8e1SXuan Zhuo 		return err;
2685c790e8e1SXuan Zhuo 
2686c790e8e1SXuan Zhuo 	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
2687c790e8e1SXuan Zhuo 		recycle(_vq, buf);
2688c790e8e1SXuan Zhuo 
2689c790e8e1SXuan Zhuo 	if (vq->packed_ring)
2690c790e8e1SXuan Zhuo 		err = virtqueue_resize_packed(_vq, num);
2691c790e8e1SXuan Zhuo 	else
2692c790e8e1SXuan Zhuo 		err = virtqueue_resize_split(_vq, num);
2693c790e8e1SXuan Zhuo 
2694c790e8e1SXuan Zhuo 	if (vdev->config->enable_vq_after_reset(_vq))
2695c790e8e1SXuan Zhuo 		return -EBUSY;
2696c790e8e1SXuan Zhuo 
2697c790e8e1SXuan Zhuo 	return err;
2698c790e8e1SXuan Zhuo }
2699c790e8e1SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_resize);
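
/*
 * Editorial sketch (not part of the original file): resizing a ring.  The
 * recycle callback hands every still-queued buffer back to the driver;
 * drop_buf() is hypothetical.
 */
#if 0	/* illustrative only, not built */
static void example_recycle(struct virtqueue *vq, void *buf)
{
	drop_buf(buf);	/* hypothetical: free, or stash for re-adding */
}

static int example_resize(struct virtqueue *vq, u32 new_num)
{
	/* On -ENOMEM the original ring stays usable; see the kernel-doc above. */
	return virtqueue_resize(vq, new_num, example_recycle);
}
#endif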
2700c790e8e1SXuan Zhuo 
2701*8daafe9eSXuan Zhuo /**
2702*8daafe9eSXuan Zhuo  * virtqueue_set_dma_premapped - set the vring premapped mode
2703*8daafe9eSXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
2704*8daafe9eSXuan Zhuo  *
2705*8daafe9eSXuan Zhuo  * Enable the premapped mode of the vq.
2706*8daafe9eSXuan Zhuo  *
2707*8daafe9eSXuan Zhuo  * In premapped mode the vring does no DMA mapping internally, so the driver
2708*8daafe9eSXuan Zhuo  * must map buffers in advance and pass the DMA address via the scatterlist's
2709*8daafe9eSXuan Zhuo  * dma_address field. When the driver gets a used buffer back from the
2710*8daafe9eSXuan Zhuo  * vring, it must unmap the DMA address itself.
2711*8daafe9eSXuan Zhuo  *
2712*8daafe9eSXuan Zhuo  * This function must be called immediately after creating the vq, or after vq
2713*8daafe9eSXuan Zhuo  * reset, and before adding any buffers to it.
2714*8daafe9eSXuan Zhuo  *
2715*8daafe9eSXuan Zhuo  * Caller must ensure we don't call this with other virtqueue operations
2716*8daafe9eSXuan Zhuo  * at the same time (except where noted).
2717*8daafe9eSXuan Zhuo  *
2718*8daafe9eSXuan Zhuo  * Returns zero or a negative error.
2719*8daafe9eSXuan Zhuo  * 0: success.
2720*8daafe9eSXuan Zhuo  * -EINVAL: the vq is not empty, or the vring does not use the DMA API, so premapped mode cannot be enabled.
2721*8daafe9eSXuan Zhuo  */
2722*8daafe9eSXuan Zhuo int virtqueue_set_dma_premapped(struct virtqueue *_vq)
2723*8daafe9eSXuan Zhuo {
2724*8daafe9eSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2725*8daafe9eSXuan Zhuo 	u32 num;
2726*8daafe9eSXuan Zhuo 
2727*8daafe9eSXuan Zhuo 	START_USE(vq);
2728*8daafe9eSXuan Zhuo 
2729*8daafe9eSXuan Zhuo 	num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
2730*8daafe9eSXuan Zhuo 
2731*8daafe9eSXuan Zhuo 	if (num != vq->vq.num_free) {
2732*8daafe9eSXuan Zhuo 		END_USE(vq);
2733*8daafe9eSXuan Zhuo 		return -EINVAL;
2734*8daafe9eSXuan Zhuo 	}
2735*8daafe9eSXuan Zhuo 
2736*8daafe9eSXuan Zhuo 	if (!vq->use_dma_api) {
2737*8daafe9eSXuan Zhuo 		END_USE(vq);
2738*8daafe9eSXuan Zhuo 		return -EINVAL;
2739*8daafe9eSXuan Zhuo 	}
2740*8daafe9eSXuan Zhuo 
2741*8daafe9eSXuan Zhuo 	vq->premapped = true;
2742*8daafe9eSXuan Zhuo 
2743*8daafe9eSXuan Zhuo 	END_USE(vq);
2744*8daafe9eSXuan Zhuo 
2745*8daafe9eSXuan Zhuo 	return 0;
2746*8daafe9eSXuan Zhuo }
2747*8daafe9eSXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
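
/*
 * Editorial sketch (not part of the original file): adding a buffer to a vq
 * that was earlier switched to premapped mode via
 * virtqueue_set_dma_premapped().  The driver maps the buffer itself and the
 * ring consumes sg->dma_address directly; @dev (the DMA device) and the
 * helper name are assumptions.
 */
#if 0	/* illustrative only, not built */
static int example_add_premapped(struct virtqueue *vq, struct device *dev,
				 void *buf, size_t len)
{
	struct scatterlist sg;
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	sg_init_one(&sg, buf, len);
	sg.dma_address = addr;	/* premapped: used as-is, no internal mapping */

	/* The driver must dma_unmap_single() after getting @buf back. */
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}
#endif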
2748*8daafe9eSXuan Zhuo 
27491ce9e605STiwei Bie /* Only available for split ring */
27502a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
27512a2d1382SAndy Lutomirski 				      unsigned int num,
27522a2d1382SAndy Lutomirski 				      unsigned int vring_align,
27532a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
27542a2d1382SAndy Lutomirski 				      bool weak_barriers,
2755f94682ddSMichael S. Tsirkin 				      bool context,
27562a2d1382SAndy Lutomirski 				      void *pages,
27572a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
27582a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
27592a2d1382SAndy Lutomirski 				      const char *name)
27602a2d1382SAndy Lutomirski {
2761cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
27621ce9e605STiwei Bie 
27631ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
27641ce9e605STiwei Bie 		return NULL;
27651ce9e605STiwei Bie 
2766cd4c812aSXuan Zhuo 	vring_init(&vring_split.vring, num, pages, vring_align);
2767cd4c812aSXuan Zhuo 	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
27682713ea3cSJason Wang 				     context, notify, callback, name,
27692713ea3cSJason Wang 				     vdev->dev.parent);
27702a2d1382SAndy Lutomirski }
2771c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
27720a8a69ddSRusty Russell 
27733ea19e32SXuan Zhuo static void vring_free(struct virtqueue *_vq)
27740a8a69ddSRusty Russell {
27752a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
27762a2d1382SAndy Lutomirski 
27772a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
27781ce9e605STiwei Bie 		if (vq->packed_ring) {
27791ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
27801ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
27811ce9e605STiwei Bie 					 vq->packed.vring.desc,
27822713ea3cSJason Wang 					 vq->packed.ring_dma_addr,
27832713ea3cSJason Wang 					 vring_dma_dev(vq));
27841ce9e605STiwei Bie 
27851ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
27861ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
27871ce9e605STiwei Bie 					 vq->packed.vring.driver,
27882713ea3cSJason Wang 					 vq->packed.driver_event_dma_addr,
27892713ea3cSJason Wang 					 vring_dma_dev(vq));
27901ce9e605STiwei Bie 
27911ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
27921ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
27931ce9e605STiwei Bie 					 vq->packed.vring.device,
27942713ea3cSJason Wang 					 vq->packed.device_event_dma_addr,
27952713ea3cSJason Wang 					 vring_dma_dev(vq));
27961ce9e605STiwei Bie 
27971ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
27981ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
27991ce9e605STiwei Bie 		} else {
2800d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2801d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2802d79dca75STiwei Bie 					 vq->split.vring.desc,
28032713ea3cSJason Wang 					 vq->split.queue_dma_addr,
28042713ea3cSJason Wang 					 vring_dma_dev(vq));
2805f13f09a1SSuman Anna 		}
2806f13f09a1SSuman Anna 	}
280772b5e895SJason Wang 	if (!vq->packed_ring) {
2808cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
280972b5e895SJason Wang 		kfree(vq->split.desc_extra);
281072b5e895SJason Wang 	}
28113ea19e32SXuan Zhuo }
28123ea19e32SXuan Zhuo 
28133ea19e32SXuan Zhuo void vring_del_virtqueue(struct virtqueue *_vq)
28143ea19e32SXuan Zhuo {
28153ea19e32SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
28163ea19e32SXuan Zhuo 
28173ea19e32SXuan Zhuo 	spin_lock(&vq->vq.vdev->vqs_list_lock);
28183ea19e32SXuan Zhuo 	list_del(&_vq->list);
28193ea19e32SXuan Zhuo 	spin_unlock(&vq->vq.vdev->vqs_list_lock);
28203ea19e32SXuan Zhuo 
28213ea19e32SXuan Zhuo 	vring_free(_vq);
28223ea19e32SXuan Zhuo 
28232a2d1382SAndy Lutomirski 	kfree(vq);
28240a8a69ddSRusty Russell }
2825c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
28260a8a69ddSRusty Russell 
2827af8ececdSViktor Prutyanov u32 vring_notification_data(struct virtqueue *_vq)
2828af8ececdSViktor Prutyanov {
2829af8ececdSViktor Prutyanov 	struct vring_virtqueue *vq = to_vvq(_vq);
2830af8ececdSViktor Prutyanov 	u16 next;
2831af8ececdSViktor Prutyanov 
2832af8ececdSViktor Prutyanov 	if (vq->packed_ring)
2833af8ececdSViktor Prutyanov 		next = (vq->packed.next_avail_idx &
2834af8ececdSViktor Prutyanov 				~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
2835af8ececdSViktor Prutyanov 			vq->packed.avail_wrap_counter <<
2836af8ececdSViktor Prutyanov 				VRING_PACKED_EVENT_F_WRAP_CTR;
2837af8ececdSViktor Prutyanov 	else
2838af8ececdSViktor Prutyanov 		next = vq->split.avail_idx_shadow;
2839af8ececdSViktor Prutyanov 
2840af8ececdSViktor Prutyanov 	return next << 16 | _vq->index;
2841af8ececdSViktor Prutyanov }
2842af8ececdSViktor Prutyanov EXPORT_SYMBOL_GPL(vring_notification_data);
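
/*
 * Editorial sketch (not part of the original file): a transport's notify
 * hook writing the VIRTIO_F_NOTIFICATION_DATA payload, loosely modelled on
 * virtio-pci.  Using vq->priv as the notify address is an assumption, and
 * <linux/io.h> is assumed for iowrite32().
 */
#if 0	/* illustrative only, not built */
static bool example_notify_with_data(struct virtqueue *vq)
{
	/* Next-avail index (plus wrap counter) above the vq index. */
	iowrite32(vring_notification_data(vq), (void __iomem *)vq->priv);
	return true;
}
#endif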
2843af8ececdSViktor Prutyanov 
2844e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2845e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2846e34f8725SRusty Russell {
2847e34f8725SRusty Russell 	unsigned int i;
2848e34f8725SRusty Russell 
2849e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2850e34f8725SRusty Russell 		switch (i) {
28519fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
28529fa29b9dSMark McLoughlin 			break;
2853a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2854a5c262c5SMichael S. Tsirkin 			break;
2855747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2856747ae34aSMichael S. Tsirkin 			break;
2857321bd212SMichael S. Tsirkin 		case VIRTIO_F_ACCESS_PLATFORM:
28581a937693SMichael S. Tsirkin 			break;
2859f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2860f959a128STiwei Bie 			break;
286145383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
286245383fb0STiwei Bie 			break;
2863af8ececdSViktor Prutyanov 		case VIRTIO_F_NOTIFICATION_DATA:
2864af8ececdSViktor Prutyanov 			break;
2865e34f8725SRusty Russell 		default:
2866e34f8725SRusty Russell 			/* We don't understand this bit. */
2867e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2868e34f8725SRusty Russell 		}
2869e34f8725SRusty Russell 	}
2870e34f8725SRusty Russell }
2871e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
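
/*
 * Editorial sketch (not part of the original file): transports call
 * vring_transport_features() from their ->finalize_features() hook so that
 * only the ring-related transport feature bits listed above survive.
 */
#if 0	/* illustrative only, not built */
static int example_finalize_features(struct virtio_device *vdev)
{
	/* Strip transport feature bits this file does not understand. */
	vring_transport_features(vdev);
	return 0;
}
#endif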
2872e34f8725SRusty Russell 
28735dfc1762SRusty Russell /**
28745dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
2875a5581206SJiang Biao  * @_vq: the struct virtqueue containing the vring of interest.
28765dfc1762SRusty Russell  *
28775dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
28785dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
28795dfc1762SRusty Russell  */
28804b6ec919SFeng Liu unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
28818f9f4668SRick Jones {
28834b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
28848f9f4668SRick Jones 
28851ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
28868f9f4668SRick Jones }
28878f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
28888f9f4668SRick Jones 
288932510631SXuan Zhuo /*
289032510631SXuan Zhuo  * This function should only be called by the core, not directly by the driver.
289132510631SXuan Zhuo  */
289232510631SXuan Zhuo void __virtqueue_break(struct virtqueue *_vq)
289332510631SXuan Zhuo {
289432510631SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
289532510631SXuan Zhuo 
289632510631SXuan Zhuo 	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
289732510631SXuan Zhuo 	WRITE_ONCE(vq->broken, true);
289832510631SXuan Zhuo }
289932510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_break);
290032510631SXuan Zhuo 
290132510631SXuan Zhuo /*
290232510631SXuan Zhuo  * This function should only be called by the core, not directly by the driver.
290332510631SXuan Zhuo  */
290432510631SXuan Zhuo void __virtqueue_unbreak(struct virtqueue *_vq)
290532510631SXuan Zhuo {
290632510631SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
290732510631SXuan Zhuo 
290832510631SXuan Zhuo 	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
290932510631SXuan Zhuo 	WRITE_ONCE(vq->broken, false);
291032510631SXuan Zhuo }
291132510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
291232510631SXuan Zhuo 
29134b6ec919SFeng Liu bool virtqueue_is_broken(const struct virtqueue *_vq)
2914b3b32c94SHeinz Graalfs {
29154b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
2916b3b32c94SHeinz Graalfs 
291760f07798SParav Pandit 	return READ_ONCE(vq->broken);
2918b3b32c94SHeinz Graalfs }
2919b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2920b3b32c94SHeinz Graalfs 
2921e2dcdfe9SRusty Russell /*
2922e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
2923e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
2924e2dcdfe9SRusty Russell  */
2925e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
2926e2dcdfe9SRusty Russell {
2927e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
2928e2dcdfe9SRusty Russell 
29290e566c8fSParav Pandit 	spin_lock(&dev->vqs_list_lock);
2930e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
2931e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
293260f07798SParav Pandit 
293360f07798SParav Pandit 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
293460f07798SParav Pandit 		WRITE_ONCE(vq->broken, true);
2935e2dcdfe9SRusty Russell 	}
29360e566c8fSParav Pandit 	spin_unlock(&dev->vqs_list_lock);
2937e2dcdfe9SRusty Russell }
2938e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
2939e2dcdfe9SRusty Russell 
2940be83f04dSJason Wang /*
2941be83f04dSJason Wang  * This should allow the device to be used by the driver. You may
2942be83f04dSJason Wang  * need to grab appropriate locks to flush the write to
2943be83f04dSJason Wang  * vq->broken. This should only be used in some specific case e.g
2944be83f04dSJason Wang  * vq->broken. This should only be used in specific cases, e.g.
2945be83f04dSJason Wang  * probing and restoring. This function should only be called by the
2946be83f04dSJason Wang  */
2947be83f04dSJason Wang void __virtio_unbreak_device(struct virtio_device *dev)
2948be83f04dSJason Wang {
2949be83f04dSJason Wang 	struct virtqueue *_vq;
2950be83f04dSJason Wang 
2951be83f04dSJason Wang 	spin_lock(&dev->vqs_list_lock);
2952be83f04dSJason Wang 	list_for_each_entry(_vq, &dev->vqs, list) {
2953be83f04dSJason Wang 		struct vring_virtqueue *vq = to_vvq(_vq);
2954be83f04dSJason Wang 
2955be83f04dSJason Wang 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2956be83f04dSJason Wang 		WRITE_ONCE(vq->broken, false);
2957be83f04dSJason Wang 	}
2958be83f04dSJason Wang 	spin_unlock(&dev->vqs_list_lock);
2959be83f04dSJason Wang }
2960be83f04dSJason Wang EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
2961be83f04dSJason Wang 
29624b6ec919SFeng Liu dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
296389062652SCornelia Huck {
29644b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
296589062652SCornelia Huck 
29662a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
296789062652SCornelia Huck 
29681ce9e605STiwei Bie 	if (vq->packed_ring)
29691ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
29701ce9e605STiwei Bie 
2971d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
29722a2d1382SAndy Lutomirski }
29732a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
29742a2d1382SAndy Lutomirski 
29754b6ec919SFeng Liu dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
297689062652SCornelia Huck {
29774b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
297889062652SCornelia Huck 
29792a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
29802a2d1382SAndy Lutomirski 
29811ce9e605STiwei Bie 	if (vq->packed_ring)
29821ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
29831ce9e605STiwei Bie 
2984d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2985e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
298689062652SCornelia Huck }
29872a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
29882a2d1382SAndy Lutomirski 
29894b6ec919SFeng Liu dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
29902a2d1382SAndy Lutomirski {
29914b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
29922a2d1382SAndy Lutomirski 
29932a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
29942a2d1382SAndy Lutomirski 
29951ce9e605STiwei Bie 	if (vq->packed_ring)
29961ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
29971ce9e605STiwei Bie 
2998d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2999e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
30002a2d1382SAndy Lutomirski }
30012a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
30022a2d1382SAndy Lutomirski 
30031ce9e605STiwei Bie /* Only available for split ring */
30044b6ec919SFeng Liu const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
30052a2d1382SAndy Lutomirski {
3006e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
30072a2d1382SAndy Lutomirski }
30082a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
300989062652SCornelia Huck 
3010c6fd4701SRusty Russell MODULE_LICENSE("GPL");
3011