xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision 07d9629d49584b6f79faa6158cd7aef7e6919703)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

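/*
 * Illustrative sketch (not part of this file): with DEBUG defined,
 * START_USE()/END_USE() turn unintended reentry into a panic, so a
 * hypothetical driver that called back into the same virtqueue from its
 * own callback would be caught immediately:
 *
 *	START_USE(vq);		// records __LINE__ in vq->in_use
 *	...			// a reentrant START_USE(vq) here panics
 *	END_USE(vq);		// clears vq->in_use
 */
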
struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */
	struct vring vring;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 * guest byte order.
	 */
	u16 avail_idx_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_split *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t queue_dma_addr;
	size_t queue_size_in_bytes;
};

struct vring_virtqueue_packed {
	/* Actual memory layout for this queue. */
	struct {
		unsigned int num;
		struct vring_packed_desc *desc;
		struct vring_packed_desc_event *driver;
		struct vring_packed_desc_event *device;
	} vring;

	/* Driver ring wrap counter. */
	bool avail_wrap_counter;

	/* Avail used flags. */
	u16 avail_used_flags;

	/* Index of the next avail descriptor. */
	u16 next_avail_idx;

	/*
	 * Last written value to driver->flags in
	 * guest byte order.
	 */
	u16 event_flags_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_packed *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t ring_dma_addr;
	dma_addr_t driver_event_dma_addr;
	dma_addr_t device_event_dma_addr;
	size_t ring_size_in_bytes;
	size_t event_size_in_bytes;
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen.
	 * For the split ring, it just contains the last used index.
	 * For the packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR contain the last used index;
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR contain the used wrap counter.
	 */
	u16 last_used_idx;

	/* Hint for event idx: already triggered, no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct vring_virtqueue_split split;

		/* Available for packed ring */
		struct vring_virtqueue_packed packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring vring,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name);

/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
					  unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

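/*
 * Illustrative sketch (not part of this file): to_vvq() recovers the
 * containing vring_virtqueue from the embedded struct virtqueue handle
 * that drivers hold, e.g.:
 *
 *	static bool example_is_broken(struct virtqueue *_vq)
 *	{
 *		struct vring_virtqueue *vq = to_vvq(_vq);
 *
 *		return vq->broken;
 *	}
 */
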
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

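/*
 * Illustrative paraphrase (not part of this file, assuming the quirk is
 * the absence of the VIRTIO_F_ACCESS_PLATFORM feature bit):
 *
 *	use DMA API = device honors platform DMA restrictions
 *		      (no legacy bypass quirk negotiated)
 *		   || running as a Xen guest
 */
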
size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

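/*
 * Illustrative sketch (not part of this file): a driver would typically
 * use this to cap its segment size, virtio-blk-style:
 *
 *	blk_queue_max_segment_size(q, virtio_max_dma_size(vdev));
 */
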
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			      dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

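/*
 * Illustrative sketch (not part of this file): allocation and free must
 * take the same path, so callers keep the dma_addr_t and size around:
 *
 *	dma_addr_t dma;
 *	size_t size = vring_size(num, vring_align);
 *	void *q = vring_alloc_queue(vdev, size, &dma,
 *				    GFP_KERNEL | __GFP_ZERO);
 *
 *	if (q)
 *		vring_free_queue(vdev, size, q, dma);
 */
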
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
	vq->vq.num_free = num;

	if (vq->packed_ring)
		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	else
		vq->last_used_idx = 0;

	vq->event_triggered = false;
	vq->num_added = 0;

#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif
}

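/*
 * Illustrative sketch (not part of this file): the map/check pattern used
 * throughout this file, here for one outgoing sg entry:
 *
 *	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
 *
 *	if (vring_mapping_error(vq, addr))
 *		goto unmap_release;	// undo any earlier mappings
 */
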

/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
		       virtio32_to_cpu(vq->vq.vdev, desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	if (!vq->use_dma_api)
		goto out;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

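/*
 * Illustrative sketch (not part of this file): alloc_indirect_split()
 * pre-chains desc[i].next = i + 1, so filling an indirect table is just a
 * walk with virtqueue_add_desc_split(), e.g. for two sg entries:
 *
 *	i = virtqueue_add_desc_split(_vq, desc, 0, addr0, len0,
 *				     VRING_DESC_F_NEXT, true);
 *	i = virtqueue_add_desc_split(_vq, desc, i, addr1, len1,
 *				     VRING_DESC_F_NEXT | VRING_DESC_F_WRITE,
 *				     true);
 */
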
static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses streaming DMA mappings.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->use_dma_api)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

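/*
 * Illustrative sketch (not part of this file): drivers reach
 * virtqueue_add_split() through the generic virtqueue_add_*() wrappers,
 * e.g. one readable request header plus one writable response buffer:
 *
 *	struct scatterlist hdr, resp, *sgs[] = { &hdr, &resp };
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&resp, buf, buf_len);
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
 */
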
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

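/*
 * Illustrative sketch (not part of this file): the kick sequence as a
 * caller sees it through the generic API:
 *
 *	if (virtqueue_kick_prepare(vq))	// host actually wants a notification
 *		virtqueue_notify(vq);	// vq->notify(), e.g. an MMIO write
 */
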
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split_indirect(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

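/*
 * Illustrative sketch (not part of this file): a typical completion loop
 * in a driver's callback, draining used buffers via the generic wrapper:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);	// hypothetical helper
 */
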
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

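/*
 * Illustrative sketch (not part of this file): the prepare/poll pair
 * closes the race between re-enabling interrupts and a buffer arriving:
 *
 *	unsigned int opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (unlikely(virtqueue_poll(vq, opaque))) {
 *		virtqueue_disable_cb(vq); // work slipped in; keep polling
 *		goto process_more;
 *	}
 */
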
static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

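/*
 * Illustrative arithmetic (not part of this file): with the 3/4 threshold
 * above, a queue with 16 buffers in flight asks for an interrupt only
 * after 12 more complete:
 *
 *	bufs = (avail_idx_shadow - last_used_idx) * 3 / 4;  // 16 * 3/4 = 12
 *	used_event = last_used_idx + bufs;
 */
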
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}


/*
 * Packed ring specific functions - *_packed().
 */
static inline bool packed_used_wrap_counter(u16 last_used_idx)
{
	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static inline u16 packed_last_used(u16 last_used_idx)
{
	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

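/*
 * Illustrative sketch (not part of this file, assuming
 * VRING_PACKED_EVENT_F_WRAP_CTR == 15): last_used_idx packs the used wrap
 * counter into the top bit and the index into the bits below it:
 *
 *	u16 v = (1 << VRING_PACKED_EVENT_F_WRAP_CTR) | 5;
 *
 *	packed_used_wrap_counter(v);	// true
 *	packed_last_used(v);		// 5
 */
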
1018d80dc15bSXuan Zhuo static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
1019d80dc15bSXuan Zhuo 				     struct vring_desc_extra *extra)
10201ce9e605STiwei Bie {
10211ce9e605STiwei Bie 	u16 flags;
10221ce9e605STiwei Bie 
10231ce9e605STiwei Bie 	if (!vq->use_dma_api)
10241ce9e605STiwei Bie 		return;
10251ce9e605STiwei Bie 
1026d80dc15bSXuan Zhuo 	flags = extra->flags;
10271ce9e605STiwei Bie 
10281ce9e605STiwei Bie 	if (flags & VRING_DESC_F_INDIRECT) {
10291ce9e605STiwei Bie 		dma_unmap_single(vring_dma_dev(vq),
1030d80dc15bSXuan Zhuo 				 extra->addr, extra->len,
10311ce9e605STiwei Bie 				 (flags & VRING_DESC_F_WRITE) ?
10321ce9e605STiwei Bie 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
10331ce9e605STiwei Bie 	} else {
10341ce9e605STiwei Bie 		dma_unmap_page(vring_dma_dev(vq),
1035d80dc15bSXuan Zhuo 			       extra->addr, extra->len,
10361ce9e605STiwei Bie 			       (flags & VRING_DESC_F_WRITE) ?
10371ce9e605STiwei Bie 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
10381ce9e605STiwei Bie 	}
10391ce9e605STiwei Bie }
10401ce9e605STiwei Bie 
10411ce9e605STiwei Bie static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
10421ce9e605STiwei Bie 				   struct vring_packed_desc *desc)
10431ce9e605STiwei Bie {
10441ce9e605STiwei Bie 	u16 flags;
10451ce9e605STiwei Bie 
10461ce9e605STiwei Bie 	if (!vq->use_dma_api)
10471ce9e605STiwei Bie 		return;
10481ce9e605STiwei Bie 
10491ce9e605STiwei Bie 	flags = le16_to_cpu(desc->flags);
10501ce9e605STiwei Bie 
10511ce9e605STiwei Bie 	dma_unmap_page(vring_dma_dev(vq),
10521ce9e605STiwei Bie 		       le64_to_cpu(desc->addr),
10531ce9e605STiwei Bie 		       le32_to_cpu(desc->len),
10541ce9e605STiwei Bie 		       (flags & VRING_DESC_F_WRITE) ?
10551ce9e605STiwei Bie 		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
10561ce9e605STiwei Bie }
10571ce9e605STiwei Bie 
10581ce9e605STiwei Bie static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
10591ce9e605STiwei Bie 						       gfp_t gfp)
10601ce9e605STiwei Bie {
10611ce9e605STiwei Bie 	struct vring_packed_desc *desc;
10621ce9e605STiwei Bie 
10631ce9e605STiwei Bie 	/*
10641ce9e605STiwei Bie 	 * We require lowmem mappings for the descriptors because
10651ce9e605STiwei Bie 	 * otherwise virt_to_phys will give us bogus addresses in the
10661ce9e605STiwei Bie 	 * virtqueue.
10671ce9e605STiwei Bie 	 */
10681ce9e605STiwei Bie 	gfp &= ~__GFP_HIGHMEM;
10691ce9e605STiwei Bie 
10701ce9e605STiwei Bie 	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
10711ce9e605STiwei Bie 
10721ce9e605STiwei Bie 	return desc;
10731ce9e605STiwei Bie }
10741ce9e605STiwei Bie 
10751ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
10761ce9e605STiwei Bie 					 struct scatterlist *sgs[],
10771ce9e605STiwei Bie 					 unsigned int total_sg,
10781ce9e605STiwei Bie 					 unsigned int out_sgs,
10791ce9e605STiwei Bie 					 unsigned int in_sgs,
10801ce9e605STiwei Bie 					 void *data,
10811ce9e605STiwei Bie 					 gfp_t gfp)
10821ce9e605STiwei Bie {
10831ce9e605STiwei Bie 	struct vring_packed_desc *desc;
10841ce9e605STiwei Bie 	struct scatterlist *sg;
10851ce9e605STiwei Bie 	unsigned int i, n, err_idx;
10861ce9e605STiwei Bie 	u16 head, id;
10871ce9e605STiwei Bie 	dma_addr_t addr;
10881ce9e605STiwei Bie 
10891ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
10901ce9e605STiwei Bie 	desc = alloc_indirect_packed(total_sg, gfp);
1091fc6d70f4SXuan Zhuo 	if (!desc)
1092fc6d70f4SXuan Zhuo 		return -ENOMEM;
10931ce9e605STiwei Bie 
10941ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < 1)) {
10951ce9e605STiwei Bie 		pr_debug("Can't add buf len 1 - avail = 0\n");
1096df0bfe75SYueHaibing 		kfree(desc);
10971ce9e605STiwei Bie 		END_USE(vq);
10981ce9e605STiwei Bie 		return -ENOSPC;
10991ce9e605STiwei Bie 	}
11001ce9e605STiwei Bie 
11011ce9e605STiwei Bie 	i = 0;
11021ce9e605STiwei Bie 	id = vq->free_head;
11031ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
11041ce9e605STiwei Bie 
11051ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
11061ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
11071ce9e605STiwei Bie 			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
11081ce9e605STiwei Bie 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
11091ce9e605STiwei Bie 			if (vring_mapping_error(vq, addr))
11101ce9e605STiwei Bie 				goto unmap_release;
11111ce9e605STiwei Bie 
11121ce9e605STiwei Bie 			desc[i].flags = cpu_to_le16(n < out_sgs ?
11131ce9e605STiwei Bie 						0 : VRING_DESC_F_WRITE);
11141ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
11151ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
11161ce9e605STiwei Bie 			i++;
11171ce9e605STiwei Bie 		}
11181ce9e605STiwei Bie 	}
11191ce9e605STiwei Bie 
11201ce9e605STiwei Bie 	/* Now that the indirect table is filled in, map it. */
11211ce9e605STiwei Bie 	addr = vring_map_single(vq, desc,
11221ce9e605STiwei Bie 			total_sg * sizeof(struct vring_packed_desc),
11231ce9e605STiwei Bie 			DMA_TO_DEVICE);
11241ce9e605STiwei Bie 	if (vring_mapping_error(vq, addr))
11251ce9e605STiwei Bie 		goto unmap_release;
11261ce9e605STiwei Bie 
11271ce9e605STiwei Bie 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
11281ce9e605STiwei Bie 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
11291ce9e605STiwei Bie 				sizeof(struct vring_packed_desc));
11301ce9e605STiwei Bie 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
11311ce9e605STiwei Bie 
11321ce9e605STiwei Bie 	if (vq->use_dma_api) {
11331ce9e605STiwei Bie 		vq->packed.desc_extra[id].addr = addr;
11341ce9e605STiwei Bie 		vq->packed.desc_extra[id].len = total_sg *
11351ce9e605STiwei Bie 				sizeof(struct vring_packed_desc);
11361ce9e605STiwei Bie 		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
11371ce9e605STiwei Bie 						  vq->packed.avail_used_flags;
11381ce9e605STiwei Bie 	}
11391ce9e605STiwei Bie 
11401ce9e605STiwei Bie 	/*
11411ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
11421ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
11431ce9e605STiwei Bie 	 * the list are made available.
11441ce9e605STiwei Bie 	 */
11451ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
11461ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
11471ce9e605STiwei Bie 						vq->packed.avail_used_flags);
11481ce9e605STiwei Bie 
11491ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
11501ce9e605STiwei Bie 	vq->vq.num_free -= 1;
11511ce9e605STiwei Bie 
11521ce9e605STiwei Bie 	/* Update free pointer */
11531ce9e605STiwei Bie 	n = head + 1;
11541ce9e605STiwei Bie 	if (n >= vq->packed.vring.num) {
11551ce9e605STiwei Bie 		n = 0;
11561ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
11571ce9e605STiwei Bie 		vq->packed.avail_used_flags ^=
11581ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_AVAIL |
11591ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_USED;
11601ce9e605STiwei Bie 	}
11611ce9e605STiwei Bie 	vq->packed.next_avail_idx = n;
1162aeef9b47SJason Wang 	vq->free_head = vq->packed.desc_extra[id].next;
11631ce9e605STiwei Bie 
11641ce9e605STiwei Bie 	/* Store token and indirect buffer state. */
11651ce9e605STiwei Bie 	vq->packed.desc_state[id].num = 1;
11661ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
11671ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = desc;
11681ce9e605STiwei Bie 	vq->packed.desc_state[id].last = id;
11691ce9e605STiwei Bie 
11701ce9e605STiwei Bie 	vq->num_added += 1;
11711ce9e605STiwei Bie 
11721ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
11731ce9e605STiwei Bie 	END_USE(vq);
11741ce9e605STiwei Bie 
11751ce9e605STiwei Bie 	return 0;
11761ce9e605STiwei Bie 
11771ce9e605STiwei Bie unmap_release:
11781ce9e605STiwei Bie 	err_idx = i;
11791ce9e605STiwei Bie 
11801ce9e605STiwei Bie 	for (i = 0; i < err_idx; i++)
11811ce9e605STiwei Bie 		vring_unmap_desc_packed(vq, &desc[i]);
11821ce9e605STiwei Bie 
11831ce9e605STiwei Bie 	kfree(desc);
11841ce9e605STiwei Bie 
11851ce9e605STiwei Bie 	END_USE(vq);
1186f7728002SHalil Pasic 	return -ENOMEM;
11871ce9e605STiwei Bie }
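
/*
 * Illustrative sketch (not part of this file): for total_sg = 3, the call
 * above consumes a single ring slot whose descriptor is flagged
 * VRING_DESC_F_INDIRECT and points at the kmalloc'ed side table:
 *
 *	desc[head]: addr -> table, len = 3 * sizeof(struct vring_packed_desc)
 *	table[0..2]: one plain descriptor per scatterlist element, carrying
 *		     only 0 or VRING_DESC_F_WRITE in flags
 *
 * This is why num_free is decremented by 1 rather than by total_sg.
 */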
11881ce9e605STiwei Bie 
11891ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq,
11901ce9e605STiwei Bie 				       struct scatterlist *sgs[],
11911ce9e605STiwei Bie 				       unsigned int total_sg,
11921ce9e605STiwei Bie 				       unsigned int out_sgs,
11931ce9e605STiwei Bie 				       unsigned int in_sgs,
11941ce9e605STiwei Bie 				       void *data,
11951ce9e605STiwei Bie 				       void *ctx,
11961ce9e605STiwei Bie 				       gfp_t gfp)
11971ce9e605STiwei Bie {
11981ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
11991ce9e605STiwei Bie 	struct vring_packed_desc *desc;
12001ce9e605STiwei Bie 	struct scatterlist *sg;
12011ce9e605STiwei Bie 	unsigned int i, n, c, descs_used, err_idx;
12023f649ab7SKees Cook 	__le16 head_flags, flags;
12033f649ab7SKees Cook 	u16 head, id, prev, curr, avail_used_flags;
1204fc6d70f4SXuan Zhuo 	int err;
12051ce9e605STiwei Bie 
12061ce9e605STiwei Bie 	START_USE(vq);
12071ce9e605STiwei Bie 
12081ce9e605STiwei Bie 	BUG_ON(data == NULL);
12091ce9e605STiwei Bie 	BUG_ON(ctx && vq->indirect);
12101ce9e605STiwei Bie 
12111ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
12121ce9e605STiwei Bie 		END_USE(vq);
12131ce9e605STiwei Bie 		return -EIO;
12141ce9e605STiwei Bie 	}
12151ce9e605STiwei Bie 
12161ce9e605STiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
12171ce9e605STiwei Bie 
12181ce9e605STiwei Bie 	BUG_ON(total_sg == 0);
12191ce9e605STiwei Bie 
122035c51e09SXianting Tian 	if (virtqueue_use_indirect(vq, total_sg)) {
1221fc6d70f4SXuan Zhuo 		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1222fc6d70f4SXuan Zhuo 						    in_sgs, data, gfp);
12231861ba62SMichael S. Tsirkin 		if (err != -ENOMEM) {
12241861ba62SMichael S. Tsirkin 			END_USE(vq);
1225fc6d70f4SXuan Zhuo 			return err;
12261861ba62SMichael S. Tsirkin 		}
1227fc6d70f4SXuan Zhuo 
1228fc6d70f4SXuan Zhuo 		/* fall back on direct */
1229fc6d70f4SXuan Zhuo 	}
12301ce9e605STiwei Bie 
12311ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
12321ce9e605STiwei Bie 	avail_used_flags = vq->packed.avail_used_flags;
12331ce9e605STiwei Bie 
12341ce9e605STiwei Bie 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
12351ce9e605STiwei Bie 
12361ce9e605STiwei Bie 	desc = vq->packed.vring.desc;
12371ce9e605STiwei Bie 	i = head;
12381ce9e605STiwei Bie 	descs_used = total_sg;
12391ce9e605STiwei Bie 
12401ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < descs_used)) {
12411ce9e605STiwei Bie 		pr_debug("Can't add buf len %i - avail = %i\n",
12421ce9e605STiwei Bie 			 descs_used, vq->vq.num_free);
12431ce9e605STiwei Bie 		END_USE(vq);
12441ce9e605STiwei Bie 		return -ENOSPC;
12451ce9e605STiwei Bie 	}
12461ce9e605STiwei Bie 
12471ce9e605STiwei Bie 	id = vq->free_head;
12481ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
12491ce9e605STiwei Bie 
12501ce9e605STiwei Bie 	curr = id;
12511ce9e605STiwei Bie 	c = 0;
12521ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
12531ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
12541ce9e605STiwei Bie 			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
12551ce9e605STiwei Bie 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
12561ce9e605STiwei Bie 			if (vring_mapping_error(vq, addr))
12571ce9e605STiwei Bie 				goto unmap_release;
12581ce9e605STiwei Bie 
12591ce9e605STiwei Bie 			flags = cpu_to_le16(vq->packed.avail_used_flags |
12601ce9e605STiwei Bie 				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
12611ce9e605STiwei Bie 				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
12621ce9e605STiwei Bie 			if (i == head)
12631ce9e605STiwei Bie 				head_flags = flags;
12641ce9e605STiwei Bie 			else
12651ce9e605STiwei Bie 				desc[i].flags = flags;
12661ce9e605STiwei Bie 
12671ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
12681ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
12691ce9e605STiwei Bie 			desc[i].id = cpu_to_le16(id);
12701ce9e605STiwei Bie 
12711ce9e605STiwei Bie 			if (unlikely(vq->use_dma_api)) {
12721ce9e605STiwei Bie 				vq->packed.desc_extra[curr].addr = addr;
12731ce9e605STiwei Bie 				vq->packed.desc_extra[curr].len = sg->length;
12741ce9e605STiwei Bie 				vq->packed.desc_extra[curr].flags =
12751ce9e605STiwei Bie 					le16_to_cpu(flags);
12761ce9e605STiwei Bie 			}
12771ce9e605STiwei Bie 			prev = curr;
1278aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
12791ce9e605STiwei Bie 
12801ce9e605STiwei Bie 			if (unlikely(++i >= vq->packed.vring.num)) {
12811ce9e605STiwei Bie 				i = 0;
12821ce9e605STiwei Bie 				vq->packed.avail_used_flags ^=
12831ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_AVAIL |
12841ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_USED;
12851ce9e605STiwei Bie 			}
12861ce9e605STiwei Bie 		}
12871ce9e605STiwei Bie 	}
12881ce9e605STiwei Bie 
12891ce9e605STiwei Bie 	if (i < head)
12901ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
12911ce9e605STiwei Bie 
12921ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
12931ce9e605STiwei Bie 	vq->vq.num_free -= descs_used;
12941ce9e605STiwei Bie 
12951ce9e605STiwei Bie 	/* Update free pointer */
12961ce9e605STiwei Bie 	vq->packed.next_avail_idx = i;
12971ce9e605STiwei Bie 	vq->free_head = curr;
12981ce9e605STiwei Bie 
12991ce9e605STiwei Bie 	/* Store token. */
13001ce9e605STiwei Bie 	vq->packed.desc_state[id].num = descs_used;
13011ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
13021ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = ctx;
13031ce9e605STiwei Bie 	vq->packed.desc_state[id].last = prev;
13041ce9e605STiwei Bie 
13051ce9e605STiwei Bie 	/*
13061ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
13071ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
13081ce9e605STiwei Bie 	 * the list are made available.
13091ce9e605STiwei Bie 	 */
13101ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
13111ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = head_flags;
13121ce9e605STiwei Bie 	vq->num_added += descs_used;
13131ce9e605STiwei Bie 
13141ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
13151ce9e605STiwei Bie 	END_USE(vq);
13161ce9e605STiwei Bie 
13171ce9e605STiwei Bie 	return 0;
13181ce9e605STiwei Bie 
13191ce9e605STiwei Bie unmap_release:
13201ce9e605STiwei Bie 	err_idx = i;
13211ce9e605STiwei Bie 	i = head;
132244593865SJason Wang 	curr = vq->free_head;
13231ce9e605STiwei Bie 
13241ce9e605STiwei Bie 	vq->packed.avail_used_flags = avail_used_flags;
13251ce9e605STiwei Bie 
13261ce9e605STiwei Bie 	for (n = 0; n < total_sg; n++) {
13271ce9e605STiwei Bie 		if (i == err_idx)
13281ce9e605STiwei Bie 			break;
1329d80dc15bSXuan Zhuo 		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
133044593865SJason Wang 		curr = vq->packed.desc_extra[curr].next;
13311ce9e605STiwei Bie 		i++;
13321ce9e605STiwei Bie 		if (i >= vq->packed.vring.num)
13331ce9e605STiwei Bie 			i = 0;
13341ce9e605STiwei Bie 	}
13351ce9e605STiwei Bie 
13361ce9e605STiwei Bie 	END_USE(vq);
13371ce9e605STiwei Bie 	return -EIO;
13381ce9e605STiwei Bie }
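
/*
 * Worked example (illustrative, not from this file): with vring.num = 4,
 * next_avail_idx = 3 and avail_wrap_counter = 1, adding a two-element
 * buffer writes desc[3] (its flags held back in head_flags) and then
 * desc[0] with the inverted AVAIL/USED bits, since crossing the ring end
 * toggled avail_used_flags.  The loop leaves i = 1 < head = 3, so the
 * check above also flips avail_wrap_counter for the next submission.
 */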
13391ce9e605STiwei Bie 
13401ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
13411ce9e605STiwei Bie {
13421ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1343f51f9826STiwei Bie 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
13441ce9e605STiwei Bie 	bool needs_kick;
13451ce9e605STiwei Bie 	union {
13461ce9e605STiwei Bie 		struct {
13471ce9e605STiwei Bie 			__le16 off_wrap;
13481ce9e605STiwei Bie 			__le16 flags;
13491ce9e605STiwei Bie 		};
13501ce9e605STiwei Bie 		u32 u32;
13511ce9e605STiwei Bie 	} snapshot;
13521ce9e605STiwei Bie 
13531ce9e605STiwei Bie 	START_USE(vq);
13541ce9e605STiwei Bie 
13551ce9e605STiwei Bie 	/*
13561ce9e605STiwei Bie 	 * We need to expose the new flags value before checking notification
13571ce9e605STiwei Bie 	 * suppressions.
13581ce9e605STiwei Bie 	 */
13591ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
13601ce9e605STiwei Bie 
1361f51f9826STiwei Bie 	old = vq->packed.next_avail_idx - vq->num_added;
1362f51f9826STiwei Bie 	new = vq->packed.next_avail_idx;
13631ce9e605STiwei Bie 	vq->num_added = 0;
13641ce9e605STiwei Bie 
13651ce9e605STiwei Bie 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
13661ce9e605STiwei Bie 	flags = le16_to_cpu(snapshot.flags);
13671ce9e605STiwei Bie 
13681ce9e605STiwei Bie 	LAST_ADD_TIME_CHECK(vq);
13691ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
13701ce9e605STiwei Bie 
1371f51f9826STiwei Bie 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
13721ce9e605STiwei Bie 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1373f51f9826STiwei Bie 		goto out;
1374f51f9826STiwei Bie 	}
1375f51f9826STiwei Bie 
1376f51f9826STiwei Bie 	off_wrap = le16_to_cpu(snapshot.off_wrap);
1377f51f9826STiwei Bie 
1378f51f9826STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1379f51f9826STiwei Bie 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1380f51f9826STiwei Bie 	if (wrap_counter != vq->packed.avail_wrap_counter)
1381f51f9826STiwei Bie 		event_idx -= vq->packed.vring.num;
1382f51f9826STiwei Bie 
1383f51f9826STiwei Bie 	needs_kick = vring_need_event(event_idx, new, old);
1384f51f9826STiwei Bie out:
13851ce9e605STiwei Bie 	END_USE(vq);
13861ce9e605STiwei Bie 	return needs_kick;
13871ce9e605STiwei Bie }
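
/*
 * Worked example (illustrative): with VRING_PACKED_EVENT_F_WRAP_CTR = 15
 * and vring.num = 256, off_wrap = 0x8005 decodes to wrap_counter = 1 and
 * event_idx = 5.  If that counter differs from the driver's
 * avail_wrap_counter, event_idx -= 256 re-expresses the device's event
 * index as 256 entries behind in u16 arithmetic, keeping the unsigned
 * window comparison in vring_need_event() correct across the wrap.
 */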
13881ce9e605STiwei Bie 
13891ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq,
13901ce9e605STiwei Bie 			      unsigned int id, void **ctx)
13911ce9e605STiwei Bie {
13921ce9e605STiwei Bie 	struct vring_desc_state_packed *state = NULL;
13931ce9e605STiwei Bie 	struct vring_packed_desc *desc;
13941ce9e605STiwei Bie 	unsigned int i, curr;
13951ce9e605STiwei Bie 
13961ce9e605STiwei Bie 	state = &vq->packed.desc_state[id];
13971ce9e605STiwei Bie 
13981ce9e605STiwei Bie 	/* Clear data ptr. */
13991ce9e605STiwei Bie 	state->data = NULL;
14001ce9e605STiwei Bie 
1401aeef9b47SJason Wang 	vq->packed.desc_extra[state->last].next = vq->free_head;
14021ce9e605STiwei Bie 	vq->free_head = id;
14031ce9e605STiwei Bie 	vq->vq.num_free += state->num;
14041ce9e605STiwei Bie 
14051ce9e605STiwei Bie 	if (unlikely(vq->use_dma_api)) {
14061ce9e605STiwei Bie 		curr = id;
14071ce9e605STiwei Bie 		for (i = 0; i < state->num; i++) {
1408d80dc15bSXuan Zhuo 			vring_unmap_extra_packed(vq,
14091ce9e605STiwei Bie 						 &vq->packed.desc_extra[curr]);
1410aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
14111ce9e605STiwei Bie 		}
14121ce9e605STiwei Bie 	}
14131ce9e605STiwei Bie 
14141ce9e605STiwei Bie 	if (vq->indirect) {
14151ce9e605STiwei Bie 		u32 len;
14161ce9e605STiwei Bie 
14171ce9e605STiwei Bie 		/* Free the indirect table, if any, now that it's unmapped. */
14181ce9e605STiwei Bie 		desc = state->indir_desc;
14191ce9e605STiwei Bie 		if (!desc)
14201ce9e605STiwei Bie 			return;
14211ce9e605STiwei Bie 
14221ce9e605STiwei Bie 		if (vq->use_dma_api) {
14231ce9e605STiwei Bie 			len = vq->packed.desc_extra[id].len;
14241ce9e605STiwei Bie 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
14251ce9e605STiwei Bie 					i++)
14261ce9e605STiwei Bie 				vring_unmap_desc_packed(vq, &desc[i]);
14271ce9e605STiwei Bie 		}
14281ce9e605STiwei Bie 		kfree(desc);
14291ce9e605STiwei Bie 		state->indir_desc = NULL;
14301ce9e605STiwei Bie 	} else if (ctx) {
14311ce9e605STiwei Bie 		*ctx = state->indir_desc;
14321ce9e605STiwei Bie 	}
14331ce9e605STiwei Bie }
14341ce9e605STiwei Bie 
14351ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
14361ce9e605STiwei Bie 				       u16 idx, bool used_wrap_counter)
14371ce9e605STiwei Bie {
14381ce9e605STiwei Bie 	bool avail, used;
14391ce9e605STiwei Bie 	u16 flags;
14401ce9e605STiwei Bie 
14411ce9e605STiwei Bie 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
14421ce9e605STiwei Bie 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
14431ce9e605STiwei Bie 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
14441ce9e605STiwei Bie 
14451ce9e605STiwei Bie 	return avail == used && used == used_wrap_counter;
14461ce9e605STiwei Bie }
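
/*
 * Truth sketch (illustrative): with used_wrap_counter = 1 a descriptor
 * counts as used only when both bits are set (AVAIL == USED == 1); once
 * the device wraps, used_wrap_counter = 0 and "both bits clear" becomes
 * the used state.  AVAIL != USED always means available but not yet used.
 */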
14471ce9e605STiwei Bie 
14481ce9e605STiwei Bie static inline bool more_used_packed(const struct vring_virtqueue *vq)
14491ce9e605STiwei Bie {
1450a7722890Shuangjie.albert 	u16 last_used;
1451a7722890Shuangjie.albert 	u16 last_used_idx;
1452a7722890Shuangjie.albert 	bool used_wrap_counter;
1453a7722890Shuangjie.albert 
1454a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1455a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
1456a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1457a7722890Shuangjie.albert 	return is_used_desc_packed(vq, last_used, used_wrap_counter);
14581ce9e605STiwei Bie }
14591ce9e605STiwei Bie 
14601ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
14611ce9e605STiwei Bie 					  unsigned int *len,
14621ce9e605STiwei Bie 					  void **ctx)
14631ce9e605STiwei Bie {
14641ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1465a7722890Shuangjie.albert 	u16 last_used, id, last_used_idx;
1466a7722890Shuangjie.albert 	bool used_wrap_counter;
14671ce9e605STiwei Bie 	void *ret;
14681ce9e605STiwei Bie 
14691ce9e605STiwei Bie 	START_USE(vq);
14701ce9e605STiwei Bie 
14711ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
14721ce9e605STiwei Bie 		END_USE(vq);
14731ce9e605STiwei Bie 		return NULL;
14741ce9e605STiwei Bie 	}
14751ce9e605STiwei Bie 
14761ce9e605STiwei Bie 	if (!more_used_packed(vq)) {
14771ce9e605STiwei Bie 		pr_debug("No more buffers in queue\n");
14781ce9e605STiwei Bie 		END_USE(vq);
14791ce9e605STiwei Bie 		return NULL;
14801ce9e605STiwei Bie 	}
14811ce9e605STiwei Bie 
14821ce9e605STiwei Bie 	/* Only get used elements after they have been exposed by host. */
14831ce9e605STiwei Bie 	virtio_rmb(vq->weak_barriers);
14841ce9e605STiwei Bie 
1485a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1486a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1487a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
14881ce9e605STiwei Bie 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
14891ce9e605STiwei Bie 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
14901ce9e605STiwei Bie 
14911ce9e605STiwei Bie 	if (unlikely(id >= vq->packed.vring.num)) {
14921ce9e605STiwei Bie 		BAD_RING(vq, "id %u out of range\n", id);
14931ce9e605STiwei Bie 		return NULL;
14941ce9e605STiwei Bie 	}
14951ce9e605STiwei Bie 	if (unlikely(!vq->packed.desc_state[id].data)) {
14961ce9e605STiwei Bie 		BAD_RING(vq, "id %u is not a head!\n", id);
14971ce9e605STiwei Bie 		return NULL;
14981ce9e605STiwei Bie 	}
14991ce9e605STiwei Bie 
15001ce9e605STiwei Bie 	/* detach_buf_packed clears data, so grab it now. */
15011ce9e605STiwei Bie 	ret = vq->packed.desc_state[id].data;
15021ce9e605STiwei Bie 	detach_buf_packed(vq, id, ctx);
15031ce9e605STiwei Bie 
1504a7722890Shuangjie.albert 	last_used += vq->packed.desc_state[id].num;
1505a7722890Shuangjie.albert 	if (unlikely(last_used >= vq->packed.vring.num)) {
1506a7722890Shuangjie.albert 		last_used -= vq->packed.vring.num;
1507a7722890Shuangjie.albert 		used_wrap_counter ^= 1;
15081ce9e605STiwei Bie 	}
15091ce9e605STiwei Bie 
1510a7722890Shuangjie.albert 	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1511a7722890Shuangjie.albert 	WRITE_ONCE(vq->last_used_idx, last_used);
1512a7722890Shuangjie.albert 
1513f51f9826STiwei Bie 	/*
1514f51f9826STiwei Bie 	 * If we expect an interrupt for the next entry, tell host
1515f51f9826STiwei Bie 	 * by writing event index and flush out the write before
1516f51f9826STiwei Bie 	 * the read in the next get_buf call.
1517f51f9826STiwei Bie 	 */
1518f51f9826STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1519f51f9826STiwei Bie 		virtio_store_mb(vq->weak_barriers,
1520f51f9826STiwei Bie 				&vq->packed.vring.driver->off_wrap,
1521a7722890Shuangjie.albert 				cpu_to_le16(vq->last_used_idx));
1522f51f9826STiwei Bie 
15231ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
15241ce9e605STiwei Bie 
15251ce9e605STiwei Bie 	END_USE(vq);
15261ce9e605STiwei Bie 	return ret;
15271ce9e605STiwei Bie }
15281ce9e605STiwei Bie 
15291ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
15301ce9e605STiwei Bie {
15311ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15321ce9e605STiwei Bie 
15331ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
15341ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
15351ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
15361ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
15371ce9e605STiwei Bie 	}
15381ce9e605STiwei Bie }
15391ce9e605STiwei Bie 
154031532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
15411ce9e605STiwei Bie {
15421ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15431ce9e605STiwei Bie 
15441ce9e605STiwei Bie 	START_USE(vq);
15451ce9e605STiwei Bie 
15461ce9e605STiwei Bie 	/*
15471ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
15481ce9e605STiwei Bie 	 * more to do.
15491ce9e605STiwei Bie 	 */
15501ce9e605STiwei Bie 
1551f51f9826STiwei Bie 	if (vq->event) {
1552f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap =
1553a7722890Shuangjie.albert 			cpu_to_le16(vq->last_used_idx);
1554f51f9826STiwei Bie 		/*
1555f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1556f51f9826STiwei Bie 		 * counter first before updating event flags.
1557f51f9826STiwei Bie 		 */
1558f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1559f51f9826STiwei Bie 	}
1560f51f9826STiwei Bie 
15611ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1562f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1563f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1564f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
15651ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
15661ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
15671ce9e605STiwei Bie 	}
15681ce9e605STiwei Bie 
15691ce9e605STiwei Bie 	END_USE(vq);
1570a7722890Shuangjie.albert 	return vq->last_used_idx;
15711ce9e605STiwei Bie }
15721ce9e605STiwei Bie 
15731ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
15741ce9e605STiwei Bie {
15751ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
15761ce9e605STiwei Bie 	bool wrap_counter;
15771ce9e605STiwei Bie 	u16 used_idx;
15781ce9e605STiwei Bie 
15791ce9e605STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
15801ce9e605STiwei Bie 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
15811ce9e605STiwei Bie 
15821ce9e605STiwei Bie 	return is_used_desc_packed(vq, used_idx, wrap_counter);
15831ce9e605STiwei Bie }
15841ce9e605STiwei Bie 
15851ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
15861ce9e605STiwei Bie {
15871ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1588a7722890Shuangjie.albert 	u16 used_idx, wrap_counter, last_used_idx;
1589f51f9826STiwei Bie 	u16 bufs;
15901ce9e605STiwei Bie 
15911ce9e605STiwei Bie 	START_USE(vq);
15921ce9e605STiwei Bie 
15931ce9e605STiwei Bie 	/*
15941ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
15951ce9e605STiwei Bie 	 * more to do.
15961ce9e605STiwei Bie 	 */
15971ce9e605STiwei Bie 
1598f51f9826STiwei Bie 	if (vq->event) {
1599f51f9826STiwei Bie 		/* TODO: tune this threshold */
1600f51f9826STiwei Bie 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1601a7722890Shuangjie.albert 		last_used_idx = READ_ONCE(vq->last_used_idx);
1602a7722890Shuangjie.albert 		wrap_counter = packed_used_wrap_counter(last_used_idx);
16031ce9e605STiwei Bie 
1604a7722890Shuangjie.albert 		used_idx = packed_last_used(last_used_idx) + bufs;
1605f51f9826STiwei Bie 		if (used_idx >= vq->packed.vring.num) {
1606f51f9826STiwei Bie 			used_idx -= vq->packed.vring.num;
1607f51f9826STiwei Bie 			wrap_counter ^= 1;
1608f51f9826STiwei Bie 		}
1609f51f9826STiwei Bie 
1610f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1611f51f9826STiwei Bie 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1612f51f9826STiwei Bie 
1613f51f9826STiwei Bie 		/*
1614f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1615f51f9826STiwei Bie 		 * counter first before updating event flags.
1616f51f9826STiwei Bie 		 */
1617f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1618f51f9826STiwei Bie 	}
1619f51f9826STiwei Bie 
16201ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1621f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1622f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1623f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
16241ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
16251ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
16261ce9e605STiwei Bie 	}
16271ce9e605STiwei Bie 
16281ce9e605STiwei Bie 	/*
16291ce9e605STiwei Bie 	 * We need to update event suppression structure first
16301ce9e605STiwei Bie 	 * before re-checking for more used buffers.
16311ce9e605STiwei Bie 	 */
16321ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
16331ce9e605STiwei Bie 
1634a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1635a7722890Shuangjie.albert 	wrap_counter = packed_used_wrap_counter(last_used_idx);
1636a7722890Shuangjie.albert 	used_idx = packed_last_used(last_used_idx);
1637a7722890Shuangjie.albert 	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
16381ce9e605STiwei Bie 		END_USE(vq);
16391ce9e605STiwei Bie 		return false;
16401ce9e605STiwei Bie 	}
16411ce9e605STiwei Bie 
16421ce9e605STiwei Bie 	END_USE(vq);
16431ce9e605STiwei Bie 	return true;
16441ce9e605STiwei Bie }
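
/*
 * Worked example (illustrative): with vring.num = 256 and 128 buffers
 * still in flight (num_free = 128), bufs = 96, so the event index above
 * is placed 96 entries past the last seen used entry and the device skips
 * roughly three quarters of the pending completions before interrupting.
 */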
16451ce9e605STiwei Bie 
16461ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
16471ce9e605STiwei Bie {
16481ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
16491ce9e605STiwei Bie 	unsigned int i;
16501ce9e605STiwei Bie 	void *buf;
16511ce9e605STiwei Bie 
16521ce9e605STiwei Bie 	START_USE(vq);
16531ce9e605STiwei Bie 
16541ce9e605STiwei Bie 	for (i = 0; i < vq->packed.vring.num; i++) {
16551ce9e605STiwei Bie 		if (!vq->packed.desc_state[i].data)
16561ce9e605STiwei Bie 			continue;
16571ce9e605STiwei Bie 		/* detach_buf clears data, so grab it now. */
16581ce9e605STiwei Bie 		buf = vq->packed.desc_state[i].data;
16591ce9e605STiwei Bie 		detach_buf_packed(vq, i, NULL);
16601ce9e605STiwei Bie 		END_USE(vq);
16611ce9e605STiwei Bie 		return buf;
16621ce9e605STiwei Bie 	}
16631ce9e605STiwei Bie 	/* That should have freed everything. */
16641ce9e605STiwei Bie 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
16651ce9e605STiwei Bie 
16661ce9e605STiwei Bie 	END_USE(vq);
16671ce9e605STiwei Bie 	return NULL;
16681ce9e605STiwei Bie }
16691ce9e605STiwei Bie 
167096ef18a2SXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
16715a222421SJason Wang {
16725a222421SJason Wang 	struct vring_desc_extra *desc_extra;
16735a222421SJason Wang 	unsigned int i;
16745a222421SJason Wang 
16755a222421SJason Wang 	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
16765a222421SJason Wang 				   GFP_KERNEL);
16775a222421SJason Wang 	if (!desc_extra)
16785a222421SJason Wang 		return NULL;
16795a222421SJason Wang 
16805a222421SJason Wang 	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
16815a222421SJason Wang 
16825a222421SJason Wang 	for (i = 0; i < num - 1; i++)
16835a222421SJason Wang 		desc_extra[i].next = i + 1;
16845a222421SJason Wang 
16855a222421SJason Wang 	return desc_extra;
16865a222421SJason Wang }
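
/*
 * Illustrative sketch (not part of this file): for num = 4 the loop above
 * links the free chain 0 -> 1 -> 2 -> 3, with desc_extra[3].next left 0
 * by the memset.  virtqueue_add_packed() walks this chain through
 * desc_extra[curr].next, and detach_buf_packed() pushes freed ids back
 * onto the head of the chain.
 */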
16875a222421SJason Wang 
16881ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed(
16891ce9e605STiwei Bie 	unsigned int index,
16901ce9e605STiwei Bie 	unsigned int num,
16911ce9e605STiwei Bie 	unsigned int vring_align,
16921ce9e605STiwei Bie 	struct virtio_device *vdev,
16931ce9e605STiwei Bie 	bool weak_barriers,
16941ce9e605STiwei Bie 	bool may_reduce_num,
16951ce9e605STiwei Bie 	bool context,
16961ce9e605STiwei Bie 	bool (*notify)(struct virtqueue *),
16971ce9e605STiwei Bie 	void (*callback)(struct virtqueue *),
16981ce9e605STiwei Bie 	const char *name)
16991ce9e605STiwei Bie {
17001ce9e605STiwei Bie 	struct vring_virtqueue *vq;
17011ce9e605STiwei Bie 	struct vring_packed_desc *ring;
17021ce9e605STiwei Bie 	struct vring_packed_desc_event *driver, *device;
17031ce9e605STiwei Bie 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
17041ce9e605STiwei Bie 	size_t ring_size_in_bytes, event_size_in_bytes;
17051ce9e605STiwei Bie 
17061ce9e605STiwei Bie 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
17071ce9e605STiwei Bie 
17081ce9e605STiwei Bie 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
17091ce9e605STiwei Bie 				 &ring_dma_addr,
17101ce9e605STiwei Bie 				 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
17111ce9e605STiwei Bie 	if (!ring)
17121ce9e605STiwei Bie 		goto err_ring;
17131ce9e605STiwei Bie 
17141ce9e605STiwei Bie 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
17151ce9e605STiwei Bie 
17161ce9e605STiwei Bie 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
17171ce9e605STiwei Bie 				   &driver_event_dma_addr,
17181ce9e605STiwei Bie 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
17191ce9e605STiwei Bie 	if (!driver)
17201ce9e605STiwei Bie 		goto err_driver;
17211ce9e605STiwei Bie 
17221ce9e605STiwei Bie 	device = vring_alloc_queue(vdev, event_size_in_bytes,
17231ce9e605STiwei Bie 				   &device_event_dma_addr,
17241ce9e605STiwei Bie 				   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
17251ce9e605STiwei Bie 	if (!device)
17261ce9e605STiwei Bie 		goto err_device;
17271ce9e605STiwei Bie 
17281ce9e605STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
17291ce9e605STiwei Bie 	if (!vq)
17301ce9e605STiwei Bie 		goto err_vq;
17311ce9e605STiwei Bie 
17321ce9e605STiwei Bie 	vq->vq.callback = callback;
17331ce9e605STiwei Bie 	vq->vq.vdev = vdev;
17341ce9e605STiwei Bie 	vq->vq.name = name;
17351ce9e605STiwei Bie 	vq->vq.index = index;
17361ce9e605STiwei Bie 	vq->we_own_ring = true;
17371ce9e605STiwei Bie 	vq->notify = notify;
17381ce9e605STiwei Bie 	vq->weak_barriers = weak_barriers;
1739c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
17408b4ec69dSJason Wang 	vq->broken = true;
1741c346dae4SJason Wang #else
1742c346dae4SJason Wang 	vq->broken = false;
1743c346dae4SJason Wang #endif
17441ce9e605STiwei Bie 	vq->packed_ring = true;
17451ce9e605STiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
17461ce9e605STiwei Bie 
17471ce9e605STiwei Bie 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
17481ce9e605STiwei Bie 		!context;
17491ce9e605STiwei Bie 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
17501ce9e605STiwei Bie 
175145383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
175245383fb0STiwei Bie 		vq->weak_barriers = false;
175345383fb0STiwei Bie 
17541ce9e605STiwei Bie 	vq->packed.ring_dma_addr = ring_dma_addr;
17551ce9e605STiwei Bie 	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
17561ce9e605STiwei Bie 	vq->packed.device_event_dma_addr = device_event_dma_addr;
17571ce9e605STiwei Bie 
17581ce9e605STiwei Bie 	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
17591ce9e605STiwei Bie 	vq->packed.event_size_in_bytes = event_size_in_bytes;
17601ce9e605STiwei Bie 
17611ce9e605STiwei Bie 	vq->packed.vring.num = num;
17621ce9e605STiwei Bie 	vq->packed.vring.desc = ring;
17631ce9e605STiwei Bie 	vq->packed.vring.driver = driver;
17641ce9e605STiwei Bie 	vq->packed.vring.device = device;
17651ce9e605STiwei Bie 
17661ce9e605STiwei Bie 	vq->packed.next_avail_idx = 0;
17671ce9e605STiwei Bie 	vq->packed.avail_wrap_counter = 1;
17681ce9e605STiwei Bie 	vq->packed.event_flags_shadow = 0;
17691ce9e605STiwei Bie 	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
17701ce9e605STiwei Bie 
17711ce9e605STiwei Bie 	vq->packed.desc_state = kmalloc_array(num,
17721ce9e605STiwei Bie 			sizeof(struct vring_desc_state_packed),
17731ce9e605STiwei Bie 			GFP_KERNEL);
17741ce9e605STiwei Bie 	if (!vq->packed.desc_state)
17751ce9e605STiwei Bie 		goto err_desc_state;
17761ce9e605STiwei Bie 
17771ce9e605STiwei Bie 	memset(vq->packed.desc_state, 0,
17781ce9e605STiwei Bie 		num * sizeof(struct vring_desc_state_packed));
17791ce9e605STiwei Bie 
17801ce9e605STiwei Bie 	/* Put everything in free lists. */
17811ce9e605STiwei Bie 	vq->free_head = 0;
17821ce9e605STiwei Bie 
178396ef18a2SXuan Zhuo 	vq->packed.desc_extra = vring_alloc_desc_extra(num);
17841ce9e605STiwei Bie 	if (!vq->packed.desc_extra)
17851ce9e605STiwei Bie 		goto err_desc_extra;
17861ce9e605STiwei Bie 
17871ce9e605STiwei Bie 	/* No callback?  Tell other side not to bother us. */
17881ce9e605STiwei Bie 	if (!callback) {
17891ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
17901ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17911ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
17921ce9e605STiwei Bie 	}
17931ce9e605STiwei Bie 
17943a897128SXuan Zhuo 	virtqueue_init(vq, num);
17953a897128SXuan Zhuo 
17960e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
1797e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
17980e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
17991ce9e605STiwei Bie 	return &vq->vq;
18001ce9e605STiwei Bie 
18011ce9e605STiwei Bie err_desc_extra:
18021ce9e605STiwei Bie 	kfree(vq->packed.desc_state);
18031ce9e605STiwei Bie err_desc_state:
18041ce9e605STiwei Bie 	kfree(vq);
18051ce9e605STiwei Bie err_vq:
1806ae93d8eaSDan Carpenter 	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
18071ce9e605STiwei Bie err_device:
1808ae93d8eaSDan Carpenter 	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
18091ce9e605STiwei Bie err_driver:
18101ce9e605STiwei Bie 	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
18111ce9e605STiwei Bie err_ring:
18121ce9e605STiwei Bie 	return NULL;
18131ce9e605STiwei Bie }
18141ce9e605STiwei Bie 
18151ce9e605STiwei Bie 
18161ce9e605STiwei Bie /*
1817e6f633e5STiwei Bie  * Generic functions and exported symbols.
1818e6f633e5STiwei Bie  */
1819e6f633e5STiwei Bie 
1820e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq,
1821e6f633e5STiwei Bie 				struct scatterlist *sgs[],
1822e6f633e5STiwei Bie 				unsigned int total_sg,
1823e6f633e5STiwei Bie 				unsigned int out_sgs,
1824e6f633e5STiwei Bie 				unsigned int in_sgs,
1825e6f633e5STiwei Bie 				void *data,
1826e6f633e5STiwei Bie 				void *ctx,
1827e6f633e5STiwei Bie 				gfp_t gfp)
1828e6f633e5STiwei Bie {
18291ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
18301ce9e605STiwei Bie 
18311ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
18321ce9e605STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp) :
18331ce9e605STiwei Bie 				 virtqueue_add_split(_vq, sgs, total_sg,
1834e6f633e5STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp);
1835e6f633e5STiwei Bie }
1836e6f633e5STiwei Bie 
1837e6f633e5STiwei Bie /**
1838e6f633e5STiwei Bie  * virtqueue_add_sgs - expose buffers to other end
1839a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
1840e6f633e5STiwei Bie  * @sgs: array of terminated scatterlists.
1841a5581206SJiang Biao  * @out_sgs: the number of scatterlists readable by other side
1842a5581206SJiang Biao  * @in_sgs: the number of scatterlists which are writable (after readable ones)
1843e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1844e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1845e6f633e5STiwei Bie  *
1846e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1847e6f633e5STiwei Bie  * at the same time (except where noted).
1848e6f633e5STiwei Bie  *
1849e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
1850e6f633e5STiwei Bie  */
1851e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq,
1852e6f633e5STiwei Bie 		      struct scatterlist *sgs[],
1853e6f633e5STiwei Bie 		      unsigned int out_sgs,
1854e6f633e5STiwei Bie 		      unsigned int in_sgs,
1855e6f633e5STiwei Bie 		      void *data,
1856e6f633e5STiwei Bie 		      gfp_t gfp)
1857e6f633e5STiwei Bie {
1858e6f633e5STiwei Bie 	unsigned int i, total_sg = 0;
1859e6f633e5STiwei Bie 
1860e6f633e5STiwei Bie 	/* Count them first. */
1861e6f633e5STiwei Bie 	for (i = 0; i < out_sgs + in_sgs; i++) {
1862e6f633e5STiwei Bie 		struct scatterlist *sg;
1863e6f633e5STiwei Bie 
1864e6f633e5STiwei Bie 		for (sg = sgs[i]; sg; sg = sg_next(sg))
1865e6f633e5STiwei Bie 			total_sg++;
1866e6f633e5STiwei Bie 	}
1867e6f633e5STiwei Bie 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1868e6f633e5STiwei Bie 			     data, NULL, gfp);
1869e6f633e5STiwei Bie }
1870e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
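
/*
 * Usage sketch (illustrative, not from this file): queuing one
 * device-readable header and one device-writable status byte; "req" and
 * its fields are hypothetical:
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 *
 * A negative return (typically -ENOSPC) means the ring is full and the
 * request should be requeued later.
 */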
1871e6f633e5STiwei Bie 
1872e6f633e5STiwei Bie /**
1873e6f633e5STiwei Bie  * virtqueue_add_outbuf - expose output buffers to other end
1874e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1875e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1876e6f633e5STiwei Bie  * @num: the number of entries in @sg readable by other side
1877e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1878e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1879e6f633e5STiwei Bie  *
1880e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1881e6f633e5STiwei Bie  * at the same time (except where noted).
1882e6f633e5STiwei Bie  *
1883e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
1884e6f633e5STiwei Bie  */
1885e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq,
1886e6f633e5STiwei Bie 			 struct scatterlist *sg, unsigned int num,
1887e6f633e5STiwei Bie 			 void *data,
1888e6f633e5STiwei Bie 			 gfp_t gfp)
1889e6f633e5STiwei Bie {
1890e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
1891e6f633e5STiwei Bie }
1892e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
1893e6f633e5STiwei Bie 
1894e6f633e5STiwei Bie /**
1895e6f633e5STiwei Bie  * virtqueue_add_inbuf - expose input buffers to other end
1896e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1897e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1898e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
1899e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1900e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1901e6f633e5STiwei Bie  *
1902e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1903e6f633e5STiwei Bie  * at the same time (except where noted).
1904e6f633e5STiwei Bie  *
1905e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
1906e6f633e5STiwei Bie  */
1907e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq,
1908e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
1909e6f633e5STiwei Bie 			void *data,
1910e6f633e5STiwei Bie 			gfp_t gfp)
1911e6f633e5STiwei Bie {
1912e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
1913e6f633e5STiwei Bie }
1914e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
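
/*
 * Usage sketch (illustrative, not from this file): refilling a receive
 * queue with fixed-size buffers until it is full; alloc_rx_buf(),
 * free_rx_buf() and RX_BUF_LEN are hypothetical:
 *
 *	struct scatterlist sg;
 *	void *buf;
 *
 *	while ((buf = alloc_rx_buf()) != NULL) {
 *		sg_init_one(&sg, buf, RX_BUF_LEN);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0) {
 *			free_rx_buf(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */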
1915e6f633e5STiwei Bie 
1916e6f633e5STiwei Bie /**
1917e6f633e5STiwei Bie  * virtqueue_add_inbuf_ctx - expose input buffers to other end
1918e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1919e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1920e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
1921e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1922e6f633e5STiwei Bie  * @ctx: extra context for the token
1923e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1924e6f633e5STiwei Bie  *
1925e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1926e6f633e5STiwei Bie  * at the same time (except where noted).
1927e6f633e5STiwei Bie  *
1928e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. -ENOSPC, -ENOMEM, -EIO).
1929e6f633e5STiwei Bie  */
1930e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
1931e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
1932e6f633e5STiwei Bie 			void *data,
1933e6f633e5STiwei Bie 			void *ctx,
1934e6f633e5STiwei Bie 			gfp_t gfp)
1935e6f633e5STiwei Bie {
1936e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
1937e6f633e5STiwei Bie }
1938e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
1939e6f633e5STiwei Bie 
1940e6f633e5STiwei Bie /**
1941e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
1942a5581206SJiang Biao  * @_vq: the struct virtqueue
1943e6f633e5STiwei Bie  *
1944e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
1945e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
1946e6f633e5STiwei Bie  *		virtqueue_notify(vq);
1947e6f633e5STiwei Bie  *
1948e6f633e5STiwei Bie  * This is sometimes useful because virtqueue_kick_prepare() needs
1949e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
1950e6f633e5STiwei Bie  */
1951e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
1952e6f633e5STiwei Bie {
19531ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19541ce9e605STiwei Bie 
19551ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
19561ce9e605STiwei Bie 				 virtqueue_kick_prepare_split(_vq);
1957e6f633e5STiwei Bie }
1958e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
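
/*
 * Sketch of the serialization split described above (illustrative;
 * "vq_lock" is a hypothetical driver lock).  The notification, which may
 * trap to the hypervisor, runs outside the lock:
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&vq_lock, flags);
 *	err = virtqueue_add_sgs(vq, sgs, out, in, req, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&vq_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);
 */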
1959e6f633e5STiwei Bie 
1960e6f633e5STiwei Bie /**
1961e6f633e5STiwei Bie  * virtqueue_notify - second half of split virtqueue_kick call.
1962a5581206SJiang Biao  * @_vq: the struct virtqueue
1963e6f633e5STiwei Bie  *
1964e6f633e5STiwei Bie  * This does not need to be serialized.
1965e6f633e5STiwei Bie  *
1966e6f633e5STiwei Bie  * Returns false if host notify failed or queue is broken, otherwise true.
1967e6f633e5STiwei Bie  */
1968e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq)
1969e6f633e5STiwei Bie {
1970e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1971e6f633e5STiwei Bie 
1972e6f633e5STiwei Bie 	if (unlikely(vq->broken))
1973e6f633e5STiwei Bie 		return false;
1974e6f633e5STiwei Bie 
1975e6f633e5STiwei Bie 	/* Prod other side to tell it about changes. */
1976e6f633e5STiwei Bie 	if (!vq->notify(_vq)) {
1977e6f633e5STiwei Bie 		vq->broken = true;
1978e6f633e5STiwei Bie 		return false;
1979e6f633e5STiwei Bie 	}
1980e6f633e5STiwei Bie 	return true;
1981e6f633e5STiwei Bie }
1982e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify);
1983e6f633e5STiwei Bie 
1984e6f633e5STiwei Bie /**
1985e6f633e5STiwei Bie  * virtqueue_kick - update after add_buf
1986e6f633e5STiwei Bie  * @vq: the struct virtqueue
1987e6f633e5STiwei Bie  *
1988e6f633e5STiwei Bie  * After one or more virtqueue_add_* calls, invoke this to kick
1989e6f633e5STiwei Bie  * the other side.
1990e6f633e5STiwei Bie  *
1991e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
1992e6f633e5STiwei Bie  * operations at the same time (except where noted).
1993e6f633e5STiwei Bie  *
1994e6f633e5STiwei Bie  * Returns false if kick failed, otherwise true.
1995e6f633e5STiwei Bie  */
1996e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq)
1997e6f633e5STiwei Bie {
1998e6f633e5STiwei Bie 	if (virtqueue_kick_prepare(vq))
1999e6f633e5STiwei Bie 		return virtqueue_notify(vq);
2000e6f633e5STiwei Bie 	return true;
2001e6f633e5STiwei Bie }
2002e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick);
2003e6f633e5STiwei Bie 
2004e6f633e5STiwei Bie /**
200531c11db6SYang Li  * virtqueue_get_buf_ctx - get the next used buffer
2006a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2007e6f633e5STiwei Bie  * @len: the length written into the buffer
2008a5581206SJiang Biao  * @ctx: extra context for the token
2009e6f633e5STiwei Bie  *
2010e6f633e5STiwei Bie  * If the device wrote data into the buffer, @len will be set to the
2011e6f633e5STiwei Bie  * amount written.  This means you don't need to clear the buffer
2012e6f633e5STiwei Bie  * beforehand to ensure there's no data leakage in the case of short
2013e6f633e5STiwei Bie  * writes.
2014e6f633e5STiwei Bie  *
2015e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2016e6f633e5STiwei Bie  * operations at the same time (except where noted).
2017e6f633e5STiwei Bie  *
2018e6f633e5STiwei Bie  * Returns NULL if there are no used buffers, or the "data" token
2019e6f633e5STiwei Bie  * handed to virtqueue_add_*().
2020e6f633e5STiwei Bie  */
2021e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
2022e6f633e5STiwei Bie 			    void **ctx)
2023e6f633e5STiwei Bie {
20241ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20251ce9e605STiwei Bie 
20261ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
20271ce9e605STiwei Bie 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
2028e6f633e5STiwei Bie }
2029e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2030e6f633e5STiwei Bie 
2031e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2032e6f633e5STiwei Bie {
2033e6f633e5STiwei Bie 	return virtqueue_get_buf_ctx(_vq, len, NULL);
2034e6f633e5STiwei Bie }
2035e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf);
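
/*
 * Usage sketch (illustrative): draining completions, e.g. from a
 * virtqueue callback; complete_request() is hypothetical:
 *
 *	unsigned int len;
 *	struct my_req *req;
 *
 *	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(req, len);
 */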
2036e6f633e5STiwei Bie /**
2037e6f633e5STiwei Bie  * virtqueue_disable_cb - disable callbacks
2038a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2039e6f633e5STiwei Bie  *
2040e6f633e5STiwei Bie  * Note that this is not necessarily synchronous, hence unreliable and only
2041e6f633e5STiwei Bie  * useful as an optimization.
2042e6f633e5STiwei Bie  *
2043e6f633e5STiwei Bie  * Unlike other operations, this need not be serialized.
2044e6f633e5STiwei Bie  */
2045e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq)
2046e6f633e5STiwei Bie {
20471ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20481ce9e605STiwei Bie 
20498d622d21SMichael S. Tsirkin 	/* If the device triggered an event already it won't trigger one again:
20508d622d21SMichael S. Tsirkin 	 * no need to disable.
20518d622d21SMichael S. Tsirkin 	 */
20528d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
20538d622d21SMichael S. Tsirkin 		return;
20548d622d21SMichael S. Tsirkin 
20551ce9e605STiwei Bie 	if (vq->packed_ring)
20561ce9e605STiwei Bie 		virtqueue_disable_cb_packed(_vq);
20571ce9e605STiwei Bie 	else
2058e6f633e5STiwei Bie 		virtqueue_disable_cb_split(_vq);
2059e6f633e5STiwei Bie }
2060e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2061e6f633e5STiwei Bie 
2062e6f633e5STiwei Bie /**
2063e6f633e5STiwei Bie  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2064a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2065e6f633e5STiwei Bie  *
2066e6f633e5STiwei Bie  * This re-enables callbacks; it returns the current queue state
2067e6f633e5STiwei Bie  * in an opaque unsigned value. This value should later be tested by
2068e6f633e5STiwei Bie  * virtqueue_poll, to detect a possible race between the driver checking for
2069e6f633e5STiwei Bie  * more work, and enabling callbacks.
2070e6f633e5STiwei Bie  *
2071e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2072e6f633e5STiwei Bie  * operations at the same time (except where noted).
2073e6f633e5STiwei Bie  */
207431532340SSolomon Tan unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2075e6f633e5STiwei Bie {
20761ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20771ce9e605STiwei Bie 
20788d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
20798d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
20808d622d21SMichael S. Tsirkin 
20811ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
20821ce9e605STiwei Bie 				 virtqueue_enable_cb_prepare_split(_vq);
2083e6f633e5STiwei Bie }
2084e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2085e6f633e5STiwei Bie 
2086e6f633e5STiwei Bie /**
2087e6f633e5STiwei Bie  * virtqueue_poll - query pending used buffers
2088a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2089e6f633e5STiwei Bie  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2090e6f633e5STiwei Bie  *
2091e6f633e5STiwei Bie  * Returns "true" if there are pending used buffers in the queue.
2092e6f633e5STiwei Bie  *
2093e6f633e5STiwei Bie  * This does not need to be serialized.
2094e6f633e5STiwei Bie  */
209531532340SSolomon Tan bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
2096e6f633e5STiwei Bie {
2097e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2098e6f633e5STiwei Bie 
2099481a0d74SMao Wenan 	if (unlikely(vq->broken))
2100481a0d74SMao Wenan 		return false;
2101481a0d74SMao Wenan 
2102e6f633e5STiwei Bie 	virtio_mb(vq->weak_barriers);
21031ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
21041ce9e605STiwei Bie 				 virtqueue_poll_split(_vq, last_used_idx);
2105e6f633e5STiwei Bie }
2106e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll);
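
/*
 * Sketch of the race check virtqueue_poll() enables (illustrative): a
 * poller re-arms callbacks and only keeps going if a buffer slipped in
 * between the last check and re-enabling:
 *
 *	unsigned int opaque;
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		goto process_more;
 *	}
 *
 * where process_more is a hypothetical label resuming the drain loop.
 */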
2107e6f633e5STiwei Bie 
2108e6f633e5STiwei Bie /**
2109e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
2110a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2111e6f633e5STiwei Bie  *
2112e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
2113e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
2114e6f633e5STiwei Bie  * checking for more work, and enabling callbacks.
2115e6f633e5STiwei Bie  *
2116e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2117e6f633e5STiwei Bie  * operations at the same time (except where noted).
2118e6f633e5STiwei Bie  */
2119e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
2120e6f633e5STiwei Bie {
212131532340SSolomon Tan 	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2122e6f633e5STiwei Bie 
2123e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
2124e6f633e5STiwei Bie }
2125e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
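
/*
 * Canonical drain loop built on the two calls above (illustrative;
 * process() is hypothetical).  The final virtqueue_enable_cb() returning
 * false closes the race with buffers that arrived while callbacks were
 * disabled:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			process(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */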
2126e6f633e5STiwei Bie 
2127e6f633e5STiwei Bie /**
2128e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2129a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2130e6f633e5STiwei Bie  *
2131e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2132e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2133e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2134e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work,
2135e6f633e5STiwei Bie  * and enabling callbacks.
2136e6f633e5STiwei Bie  *
2137e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2138e6f633e5STiwei Bie  * operations at the same time (except where noted).
2139e6f633e5STiwei Bie  */
2140e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2141e6f633e5STiwei Bie {
21421ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21431ce9e605STiwei Bie 
21448d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
21458d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
21468d622d21SMichael S. Tsirkin 
21471ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
21481ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2149e6f633e5STiwei Bie }
2150e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2151e6f633e5STiwei Bie 
2152138fd251STiwei Bie /**
2153138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2154a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2155138fd251STiwei Bie  *
2156138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2157a62eecb3SXuan Zhuo  * This is not valid on an active queue; it is useful for device
2158a62eecb3SXuan Zhuo  * shutdown or queue reset.
2159138fd251STiwei Bie  */
2160138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2161138fd251STiwei Bie {
21621ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21631ce9e605STiwei Bie 
21641ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
21651ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2166138fd251STiwei Bie }
21677c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2168c021eac4SShirley Ma 
2169138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2170138fd251STiwei Bie {
21711ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2172138fd251STiwei Bie }
2173138fd251STiwei Bie 
21740a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
21750a8a69ddSRusty Russell {
21760a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
21770a8a69ddSRusty Russell 
21780a8a69ddSRusty Russell 	if (!more_used(vq)) {
21790a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
21800a8a69ddSRusty Russell 		return IRQ_NONE;
21810a8a69ddSRusty Russell 	}
21820a8a69ddSRusty Russell 
21838b4ec69dSJason Wang 	if (unlikely(vq->broken)) {
2184c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
21858b4ec69dSJason Wang 		dev_warn_once(&vq->vq.vdev->dev,
21868b4ec69dSJason Wang 			      "virtio vring IRQ raised before DRIVER_OK");
21878b4ec69dSJason Wang 		return IRQ_NONE;
2188c346dae4SJason Wang #else
2189c346dae4SJason Wang 		return IRQ_HANDLED;
2190c346dae4SJason Wang #endif
21918b4ec69dSJason Wang 	}
21920a8a69ddSRusty Russell 
21938d622d21SMichael S. Tsirkin 	/* Just a hint for performance: so it's ok that this can be racy! */
21948d622d21SMichael S. Tsirkin 	if (vq->event)
21958d622d21SMichael S. Tsirkin 		vq->event_triggered = true;
21968d622d21SMichael S. Tsirkin 
21970a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
219818445c4dSRusty Russell 	if (vq->vq.callback)
219918445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
22000a8a69ddSRusty Russell 
22010a8a69ddSRusty Russell 	return IRQ_HANDLED;
22020a8a69ddSRusty Russell }
2203c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
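
/*
 * Transport usage sketch (illustrative): vring_interrupt() matches
 * irq_handler_t, so a transport can install it directly with the
 * virtqueue as dev_id, e.g. for a per-vq MSI-X vector:
 *
 *	err = request_irq(irq, vring_interrupt, 0,
 *			  dev_name(&vdev->dev), vq);
 */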
22040a8a69ddSRusty Russell 
22051ce9e605STiwei Bie /* Only available for split ring */
2206*07d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
22072a2d1382SAndy Lutomirski 					       struct vring vring,
22080a8a69ddSRusty Russell 					       struct virtio_device *vdev,
22097b21e34fSRusty Russell 					       bool weak_barriers,
2210f94682ddSMichael S. Tsirkin 					       bool context,
221146f9c2b9SHeinz Graalfs 					       bool (*notify)(struct virtqueue *),
22129499f5e7SRusty Russell 					       void (*callback)(struct virtqueue *),
22139499f5e7SRusty Russell 					       const char *name)
22140a8a69ddSRusty Russell {
22152a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
22160a8a69ddSRusty Russell 
22171ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
22181ce9e605STiwei Bie 		return NULL;
22191ce9e605STiwei Bie 
2220cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
22210a8a69ddSRusty Russell 	if (!vq)
22220a8a69ddSRusty Russell 		return NULL;
22230a8a69ddSRusty Russell 
22241ce9e605STiwei Bie 	vq->packed_ring = false;
22250a8a69ddSRusty Russell 	vq->vq.callback = callback;
22260a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
22279499f5e7SRusty Russell 	vq->vq.name = name;
222806ca287dSRusty Russell 	vq->vq.index = index;
22292a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
22300a8a69ddSRusty Russell 	vq->notify = notify;
22317b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
2232c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
22338b4ec69dSJason Wang 	vq->broken = true;
2234c346dae4SJason Wang #else
2235c346dae4SJason Wang 	vq->broken = false;
2236c346dae4SJason Wang #endif
2237fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
22380a8a69ddSRusty Russell 
22395a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
22405a08b04fSMichael S. Tsirkin 		!context;
2241a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
22429fa29b9dSMark McLoughlin 
224345383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
224445383fb0STiwei Bie 		vq->weak_barriers = false;
224545383fb0STiwei Bie 
2246d79dca75STiwei Bie 	vq->split.queue_dma_addr = 0;
2247d79dca75STiwei Bie 	vq->split.queue_size_in_bytes = 0;
2248d79dca75STiwei Bie 
2249e593bf97STiwei Bie 	vq->split.vring = vring;
2250e593bf97STiwei Bie 	vq->split.avail_flags_shadow = 0;
2251e593bf97STiwei Bie 	vq->split.avail_idx_shadow = 0;
2252e593bf97STiwei Bie 
22530a8a69ddSRusty Russell 	/* No callback?  Tell other side not to bother us. */
2254f277ec42SVenkatesh Srinivas 	if (!callback) {
2255e593bf97STiwei Bie 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
22560ea1e4a6SLadi Prosek 		if (!vq->event)
2257e593bf97STiwei Bie 			vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
2258e593bf97STiwei Bie 					vq->split.avail_flags_shadow);
2259f277ec42SVenkatesh Srinivas 	}
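	/*
	 * When VIRTIO_RING_F_EVENT_IDX is negotiated the device ignores
	 * avail->flags, so only the shadow copy needs updating above.
	 */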
22600a8a69ddSRusty Russell 
2261cbeedb72STiwei Bie 	vq->split.desc_state = kmalloc_array(vring.num,
2262cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split), GFP_KERNEL);
22635bc72234SJason Wang 	if (!vq->split.desc_state)
22645bc72234SJason Wang 		goto err_state;
2265cbeedb72STiwei Bie 
226696ef18a2SXuan Zhuo 	vq->split.desc_extra = vring_alloc_desc_extra(vring.num);
226772b5e895SJason Wang 	if (!vq->split.desc_extra)
226872b5e895SJason Wang 		goto err_extra;
226972b5e895SJason Wang 
22700a8a69ddSRusty Russell 	/* Put everything in free lists. */
22710a8a69ddSRusty Russell 	vq->free_head = 0;
2272cbeedb72STiwei Bie 	memset(vq->split.desc_state, 0, vring.num *
2273cbeedb72STiwei Bie 			sizeof(struct vring_desc_state_split));
22740a8a69ddSRusty Russell 
22753a897128SXuan Zhuo 	virtqueue_init(vq, vring.num);
22763a897128SXuan Zhuo 
22770e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2278e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
22790e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
22800a8a69ddSRusty Russell 	return &vq->vq;
22815bc72234SJason Wang 
228272b5e895SJason Wang err_extra:
228372b5e895SJason Wang 	kfree(vq->split.desc_state);
22845bc72234SJason Wang err_state:
22855bc72234SJason Wang 	kfree(vq);
22865bc72234SJason Wang 	return NULL;
22870a8a69ddSRusty Russell }
22882a2d1382SAndy Lutomirski 
22892a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
22902a2d1382SAndy Lutomirski 	unsigned int index,
22912a2d1382SAndy Lutomirski 	unsigned int num,
22922a2d1382SAndy Lutomirski 	unsigned int vring_align,
22932a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
22942a2d1382SAndy Lutomirski 	bool weak_barriers,
22952a2d1382SAndy Lutomirski 	bool may_reduce_num,
2296f94682ddSMichael S. Tsirkin 	bool context,
22972a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
22982a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
22992a2d1382SAndy Lutomirski 	const char *name)
23002a2d1382SAndy Lutomirski {
23021ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
23031ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
23041ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
23051ce9e605STiwei Bie 				context, notify, callback, name);
23061ce9e605STiwei Bie 
2307d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2308d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
2309d79dca75STiwei Bie 			context, notify, callback, name);
23102a2d1382SAndy Lutomirski }
23112a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
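
/*
 * Example (illustrative only; the callback names are hypothetical): a
 * transport creating a 256-entry, page-aligned queue might do:
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true, true, false,
 *				    transport_notify, driver_callback,
 *				    "requests");
 *
 * With may_reduce_num set, the ring may come back smaller than 256 if
 * contiguous memory is tight; check virtqueue_get_vring_size().
 */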
23122a2d1382SAndy Lutomirski 
23131ce9e605STiwei Bie /* Only available for split ring */
23142a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
23152a2d1382SAndy Lutomirski 				      unsigned int num,
23162a2d1382SAndy Lutomirski 				      unsigned int vring_align,
23172a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
23182a2d1382SAndy Lutomirski 				      bool weak_barriers,
2319f94682ddSMichael S. Tsirkin 				      bool context,
23202a2d1382SAndy Lutomirski 				      void *pages,
23212a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
23222a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
23232a2d1382SAndy Lutomirski 				      const char *name)
23242a2d1382SAndy Lutomirski {
23252a2d1382SAndy Lutomirski 	struct vring vring;
23261ce9e605STiwei Bie 
23271ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
23281ce9e605STiwei Bie 		return NULL;
23291ce9e605STiwei Bie 
23302a2d1382SAndy Lutomirski 	vring_init(&vring, num, pages, vring_align);
2331f94682ddSMichael S. Tsirkin 	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
23322a2d1382SAndy Lutomirski 				     notify, callback, name);
23332a2d1382SAndy Lutomirski }
2334c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
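
/*
 * Example (sketch; the allocation shown is illustrative, real
 * transports have their own constraints on ring memory):
 *
 *	void *queue = alloc_pages_exact(vring_size(num, vring_align),
 *					GFP_KERNEL | __GFP_ZERO);
 *	vq = vring_new_virtqueue(0, num, vring_align, vdev, true, false,
 *				 queue, notify, callback, "requests");
 */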
23350a8a69ddSRusty Russell 
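/*
 * Free the ring memory only if we allocated it ourselves (i.e. the
 * queue came from vring_create_virtqueue()); the per-descriptor state
 * of a split ring is ours to free in either case.
 */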
23363ea19e32SXuan Zhuo static void vring_free(struct virtqueue *_vq)
23370a8a69ddSRusty Russell {
23382a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
23392a2d1382SAndy Lutomirski 
23402a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
23411ce9e605STiwei Bie 		if (vq->packed_ring) {
23421ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
23431ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
23441ce9e605STiwei Bie 					 vq->packed.vring.desc,
23451ce9e605STiwei Bie 					 vq->packed.ring_dma_addr);
23461ce9e605STiwei Bie 
23471ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
23481ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
23491ce9e605STiwei Bie 					 vq->packed.vring.driver,
23501ce9e605STiwei Bie 					 vq->packed.driver_event_dma_addr);
23511ce9e605STiwei Bie 
23521ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
23531ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
23541ce9e605STiwei Bie 					 vq->packed.vring.device,
23551ce9e605STiwei Bie 					 vq->packed.device_event_dma_addr);
23561ce9e605STiwei Bie 
23571ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
23581ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
23591ce9e605STiwei Bie 		} else {
2360d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2361d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2362d79dca75STiwei Bie 					 vq->split.vring.desc,
2363d79dca75STiwei Bie 					 vq->split.queue_dma_addr);
2364f13f09a1SSuman Anna 		}
2365f13f09a1SSuman Anna 	}
236672b5e895SJason Wang 	if (!vq->packed_ring) {
2367cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
236872b5e895SJason Wang 		kfree(vq->split.desc_extra);
236972b5e895SJason Wang 	}
23703ea19e32SXuan Zhuo }
23713ea19e32SXuan Zhuo 
23723ea19e32SXuan Zhuo void vring_del_virtqueue(struct virtqueue *_vq)
23733ea19e32SXuan Zhuo {
23743ea19e32SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
23753ea19e32SXuan Zhuo 
23763ea19e32SXuan Zhuo 	spin_lock(&vq->vq.vdev->vqs_list_lock);
23773ea19e32SXuan Zhuo 	list_del(&_vq->list);
23783ea19e32SXuan Zhuo 	spin_unlock(&vq->vq.vdev->vqs_list_lock);
23793ea19e32SXuan Zhuo 
23803ea19e32SXuan Zhuo 	vring_free(_vq);
23813ea19e32SXuan Zhuo 
23822a2d1382SAndy Lutomirski 	kfree(vq);
23830a8a69ddSRusty Russell }
2384c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
23850a8a69ddSRusty Russell 
2386e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2387e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2388e34f8725SRusty Russell {
2389e34f8725SRusty Russell 	unsigned int i;
2390e34f8725SRusty Russell 
2391e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2392e34f8725SRusty Russell 		switch (i) {
23939fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
23949fa29b9dSMark McLoughlin 			break;
2395a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2396a5c262c5SMichael S. Tsirkin 			break;
2397747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2398747ae34aSMichael S. Tsirkin 			break;
2399321bd212SMichael S. Tsirkin 		case VIRTIO_F_ACCESS_PLATFORM:
24001a937693SMichael S. Tsirkin 			break;
2401f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2402f959a128STiwei Bie 			break;
240345383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
240445383fb0STiwei Bie 			break;
2405e34f8725SRusty Russell 		default:
2406e34f8725SRusty Russell 			/* We don't understand this bit. */
2407e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2408e34f8725SRusty Russell 		}
2409e34f8725SRusty Russell 	}
2410e34f8725SRusty Russell }
2411e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
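
/*
 * Example (sketch): a transport's ->finalize_features() usually calls
 * this before the feature set is latched:
 *
 *	static int transport_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		...
 *	}
 *
 * so that ring feature bits this implementation does not understand
 * are cleared before being acked to the device.
 */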
2412e34f8725SRusty Russell 
24135dfc1762SRusty Russell /**
24145dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
2415a5581206SJiang Biao  * @_vq: the struct virtqueue containing the vring of interest.
24165dfc1762SRusty Russell  *
24175dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
24185dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
24195dfc1762SRusty Russell  */
24208f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
24218f9f4668SRick Jones {
24238f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
24248f9f4668SRick Jones 
24251ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
24268f9f4668SRick Jones }
24278f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
24288f9f4668SRick Jones 
2429b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
2430b3b32c94SHeinz Graalfs {
2431b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
2432b3b32c94SHeinz Graalfs 
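	/*
	 * Pairs with WRITE_ONCE() in virtio_break_device() and
	 * __virtio_unbreak_device().
	 */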
243360f07798SParav Pandit 	return READ_ONCE(vq->broken);
2434b3b32c94SHeinz Graalfs }
2435b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
2436b3b32c94SHeinz Graalfs 
2437e2dcdfe9SRusty Russell /*
2438e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
2439e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush the write to vq->broken.
2440e2dcdfe9SRusty Russell  */
2441e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
2442e2dcdfe9SRusty Russell {
2443e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
2444e2dcdfe9SRusty Russell 
24450e566c8fSParav Pandit 	spin_lock(&dev->vqs_list_lock);
2446e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
2447e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
244860f07798SParav Pandit 
244960f07798SParav Pandit 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
245060f07798SParav Pandit 		WRITE_ONCE(vq->broken, true);
2451e2dcdfe9SRusty Russell 	}
24520e566c8fSParav Pandit 	spin_unlock(&dev->vqs_list_lock);
2453e2dcdfe9SRusty Russell }
2454e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
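
/*
 * Example (illustrative): a driver hitting an unrecoverable error can
 * fence off the device before tearing down:
 *
 *	virtio_break_device(vdev);
 *
 * then flush any in-flight callbacks under the driver's own locks
 * before resetting the device.
 */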
2455e2dcdfe9SRusty Russell 
2456be83f04dSJason Wang /*
2457be83f04dSJason Wang  * This should allow the device to be used by the driver. You may
2458be83f04dSJason Wang  * need to grab appropriate locks to flush the write to
2459be83f04dSJason Wang  * vq->broken. This should only be used in specific cases, e.g.
2460be83f04dSJason Wang  * probing and restoring. This function should only be called by the
2461be83f04dSJason Wang  * core, not directly by the driver.
2462be83f04dSJason Wang  */
2463be83f04dSJason Wang void __virtio_unbreak_device(struct virtio_device *dev)
2464be83f04dSJason Wang {
2465be83f04dSJason Wang 	struct virtqueue *_vq;
2466be83f04dSJason Wang 
2467be83f04dSJason Wang 	spin_lock(&dev->vqs_list_lock);
2468be83f04dSJason Wang 	list_for_each_entry(_vq, &dev->vqs, list) {
2469be83f04dSJason Wang 		struct vring_virtqueue *vq = to_vvq(_vq);
2470be83f04dSJason Wang 
2471be83f04dSJason Wang 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2472be83f04dSJason Wang 		WRITE_ONCE(vq->broken, false);
2473be83f04dSJason Wang 	}
2474be83f04dSJason Wang 	spin_unlock(&dev->vqs_list_lock);
2475be83f04dSJason Wang }
2476be83f04dSJason Wang EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
2477be83f04dSJason Wang 
24782a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
247989062652SCornelia Huck {
248089062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
248189062652SCornelia Huck 
24822a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
248389062652SCornelia Huck 
24841ce9e605STiwei Bie 	if (vq->packed_ring)
24851ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
24861ce9e605STiwei Bie 
2487d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
24882a2d1382SAndy Lutomirski }
24892a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
24902a2d1382SAndy Lutomirski 
24912a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
249289062652SCornelia Huck {
249389062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
249489062652SCornelia Huck 
24952a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
24962a2d1382SAndy Lutomirski 
24971ce9e605STiwei Bie 	if (vq->packed_ring)
24981ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
24991ce9e605STiwei Bie 
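	/*
	 * The split ring lives in one allocation: avail's bus address
	 * is just an offset from the descriptor table's.
	 */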
2500d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2501e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
250289062652SCornelia Huck }
25032a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
25042a2d1382SAndy Lutomirski 
25052a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
25062a2d1382SAndy Lutomirski {
25072a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
25082a2d1382SAndy Lutomirski 
25092a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
25102a2d1382SAndy Lutomirski 
25111ce9e605STiwei Bie 	if (vq->packed_ring)
25121ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
25131ce9e605STiwei Bie 
2514d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2515e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
25162a2d1382SAndy Lutomirski }
25172a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
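
/*
 * Example (sketch; the field names are from the modern virtio-pci
 * common config and are shown for illustration only): a transport
 * hands these bus addresses to the device, e.g.:
 *
 *	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
 *			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
 *	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
 *			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
 *	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
 *			     &cfg->queue_used_lo, &cfg->queue_used_hi);
 */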
25182a2d1382SAndy Lutomirski 
25191ce9e605STiwei Bie /* Only available for split ring */
25202a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
25212a2d1382SAndy Lutomirski {
2522e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
25232a2d1382SAndy Lutomirski }
25242a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
252589062652SCornelia Huck 
2526c6fd4701SRusty Russell MODULE_LICENSE("GPL");
2527