xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision af36b16f6c1e51975a3815eb21c21c47f3114393)
1fd534e9bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
20a8a69ddSRusty Russell /* Virtio ring implementation.
30a8a69ddSRusty Russell  *
40a8a69ddSRusty Russell  *  Copyright 2007 Rusty Russell IBM Corporation
50a8a69ddSRusty Russell  */
60a8a69ddSRusty Russell #include <linux/virtio.h>
70a8a69ddSRusty Russell #include <linux/virtio_ring.h>
8e34f8725SRusty Russell #include <linux/virtio_config.h>
90a8a69ddSRusty Russell #include <linux/device.h>
105a0e3ad6STejun Heo #include <linux/slab.h>
11b5a2c4f1SPaul Gortmaker #include <linux/module.h>
12e93300b1SRusty Russell #include <linux/hrtimer.h>
13780bc790SAndy Lutomirski #include <linux/dma-mapping.h>
14f8ce7263SMichael S. Tsirkin #include <linux/spinlock.h>
1578fe3987SAndy Lutomirski #include <xen/xen.h>
160a8a69ddSRusty Russell 
170a8a69ddSRusty Russell #ifdef DEBUG
180a8a69ddSRusty Russell /* For development, we want to crash whenever the ring is screwed. */
199499f5e7SRusty Russell #define BAD_RING(_vq, fmt, args...)				\
209499f5e7SRusty Russell 	do {							\
219499f5e7SRusty Russell 		dev_err(&(_vq)->vq.vdev->dev,			\
229499f5e7SRusty Russell 			"%s:"fmt, (_vq)->vq.name, ##args);	\
239499f5e7SRusty Russell 		BUG();						\
249499f5e7SRusty Russell 	} while (0)
25c5f841f1SRusty Russell /* Caller is supposed to guarantee no reentry. */
263a35ce7dSRoel Kluin #define START_USE(_vq)						\
27c5f841f1SRusty Russell 	do {							\
28c5f841f1SRusty Russell 		if ((_vq)->in_use)				\
299499f5e7SRusty Russell 			panic("%s:in_use = %i\n",		\
309499f5e7SRusty Russell 			      (_vq)->vq.name, (_vq)->in_use);	\
31c5f841f1SRusty Russell 		(_vq)->in_use = __LINE__;			\
32c5f841f1SRusty Russell 	} while (0)
333a35ce7dSRoel Kluin #define END_USE(_vq) \
3497a545abSRusty Russell 	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
354d6a105eSTiwei Bie #define LAST_ADD_TIME_UPDATE(_vq)				\
364d6a105eSTiwei Bie 	do {							\
374d6a105eSTiwei Bie 		ktime_t now = ktime_get();			\
384d6a105eSTiwei Bie 								\
394d6a105eSTiwei Bie 		/* No kick or get, with 0.1 seconds between?  Warn. */ \
404d6a105eSTiwei Bie 		if ((_vq)->last_add_time_valid)			\
414d6a105eSTiwei Bie 			WARN_ON(ktime_to_ms(ktime_sub(now,	\
424d6a105eSTiwei Bie 				(_vq)->last_add_time)) > 100);	\
434d6a105eSTiwei Bie 		(_vq)->last_add_time = now;			\
444d6a105eSTiwei Bie 		(_vq)->last_add_time_valid = true;		\
454d6a105eSTiwei Bie 	} while (0)
464d6a105eSTiwei Bie #define LAST_ADD_TIME_CHECK(_vq)				\
474d6a105eSTiwei Bie 	do {							\
484d6a105eSTiwei Bie 		if ((_vq)->last_add_time_valid) {		\
494d6a105eSTiwei Bie 			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
504d6a105eSTiwei Bie 				      (_vq)->last_add_time)) > 100); \
514d6a105eSTiwei Bie 		}						\
524d6a105eSTiwei Bie 	} while (0)
534d6a105eSTiwei Bie #define LAST_ADD_TIME_INVALID(_vq)				\
544d6a105eSTiwei Bie 	((_vq)->last_add_time_valid = false)
550a8a69ddSRusty Russell #else
569499f5e7SRusty Russell #define BAD_RING(_vq, fmt, args...)				\
579499f5e7SRusty Russell 	do {							\
589499f5e7SRusty Russell 		dev_err(&_vq->vq.vdev->dev,			\
599499f5e7SRusty Russell 			"%s:"fmt, (_vq)->vq.name, ##args);	\
609499f5e7SRusty Russell 		(_vq)->broken = true;				\
619499f5e7SRusty Russell 	} while (0)
620a8a69ddSRusty Russell #define START_USE(vq)
630a8a69ddSRusty Russell #define END_USE(vq)
644d6a105eSTiwei Bie #define LAST_ADD_TIME_UPDATE(vq)
654d6a105eSTiwei Bie #define LAST_ADD_TIME_CHECK(vq)
664d6a105eSTiwei Bie #define LAST_ADD_TIME_INVALID(vq)
670a8a69ddSRusty Russell #endif
680a8a69ddSRusty Russell 
69cbeedb72STiwei Bie struct vring_desc_state_split {
70780bc790SAndy Lutomirski 	void *data;			/* Data for callback. */
71780bc790SAndy Lutomirski 	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
72780bc790SAndy Lutomirski };
73780bc790SAndy Lutomirski 
741ce9e605STiwei Bie struct vring_desc_state_packed {
751ce9e605STiwei Bie 	void *data;			/* Data for callback. */
761ce9e605STiwei Bie 	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
771ce9e605STiwei Bie 	u16 num;			/* Descriptor list length. */
781ce9e605STiwei Bie 	u16 last;			/* The last desc state in a list. */
791ce9e605STiwei Bie };
801ce9e605STiwei Bie 
811f28750fSJason Wang struct vring_desc_extra {
82ef5c366fSJason Wang 	dma_addr_t addr;		/* Descriptor DMA addr. */
83ef5c366fSJason Wang 	u32 len;			/* Descriptor length. */
841ce9e605STiwei Bie 	u16 flags;			/* Descriptor flags. */
85aeef9b47SJason Wang 	u16 next;			/* The next descriptor in a list. */
861ce9e605STiwei Bie };
871ce9e605STiwei Bie 
88d76136e4SXuan Zhuo struct vring_virtqueue_split {
89d76136e4SXuan Zhuo 	/* Actual memory layout for this queue. */
90d76136e4SXuan Zhuo 	struct vring vring;
91d76136e4SXuan Zhuo 
92d76136e4SXuan Zhuo 	/* Last written value to avail->flags */
93d76136e4SXuan Zhuo 	u16 avail_flags_shadow;
94d76136e4SXuan Zhuo 
95d76136e4SXuan Zhuo 	/*
96d76136e4SXuan Zhuo 	 * Last written value to avail->idx in
97d76136e4SXuan Zhuo 	 * guest byte order.
98d76136e4SXuan Zhuo 	 */
99d76136e4SXuan Zhuo 	u16 avail_idx_shadow;
100d76136e4SXuan Zhuo 
101d76136e4SXuan Zhuo 	/* Per-descriptor state. */
102d76136e4SXuan Zhuo 	struct vring_desc_state_split *desc_state;
103d76136e4SXuan Zhuo 	struct vring_desc_extra *desc_extra;
104d76136e4SXuan Zhuo 
105d76136e4SXuan Zhuo 	/* DMA address and size information */
106d76136e4SXuan Zhuo 	dma_addr_t queue_dma_addr;
107d76136e4SXuan Zhuo 	size_t queue_size_in_bytes;
108*af36b16fSXuan Zhuo 
109*af36b16fSXuan Zhuo 	/*
110*af36b16fSXuan Zhuo 	 * The vring creation parameters are kept here so that a new
111*af36b16fSXuan Zhuo 	 * vring can be created later with the same values.
112*af36b16fSXuan Zhuo 	 */
113*af36b16fSXuan Zhuo 	u32 vring_align;
114*af36b16fSXuan Zhuo 	bool may_reduce_num;
115d76136e4SXuan Zhuo };
116d76136e4SXuan Zhuo 
117d76136e4SXuan Zhuo struct vring_virtqueue_packed {
118d76136e4SXuan Zhuo 	/* Actual memory layout for this queue. */
119d76136e4SXuan Zhuo 	struct {
120d76136e4SXuan Zhuo 		unsigned int num;
121d76136e4SXuan Zhuo 		struct vring_packed_desc *desc;
122d76136e4SXuan Zhuo 		struct vring_packed_desc_event *driver;
123d76136e4SXuan Zhuo 		struct vring_packed_desc_event *device;
124d76136e4SXuan Zhuo 	} vring;
125d76136e4SXuan Zhuo 
126d76136e4SXuan Zhuo 	/* Driver ring wrap counter. */
127d76136e4SXuan Zhuo 	bool avail_wrap_counter;
128d76136e4SXuan Zhuo 
129d76136e4SXuan Zhuo 	/* Avail used flags. */
130d76136e4SXuan Zhuo 	u16 avail_used_flags;
131d76136e4SXuan Zhuo 
132d76136e4SXuan Zhuo 	/* Index of the next avail descriptor. */
133d76136e4SXuan Zhuo 	u16 next_avail_idx;
134d76136e4SXuan Zhuo 
135d76136e4SXuan Zhuo 	/*
136d76136e4SXuan Zhuo 	 * Last written value to driver->flags in
137d76136e4SXuan Zhuo 	 * guest byte order.
138d76136e4SXuan Zhuo 	 */
139d76136e4SXuan Zhuo 	u16 event_flags_shadow;
140d76136e4SXuan Zhuo 
141d76136e4SXuan Zhuo 	/* Per-descriptor state. */
142d76136e4SXuan Zhuo 	struct vring_desc_state_packed *desc_state;
143d76136e4SXuan Zhuo 	struct vring_desc_extra *desc_extra;
144d76136e4SXuan Zhuo 
145d76136e4SXuan Zhuo 	/* DMA address and size information */
146d76136e4SXuan Zhuo 	dma_addr_t ring_dma_addr;
147d76136e4SXuan Zhuo 	dma_addr_t driver_event_dma_addr;
148d76136e4SXuan Zhuo 	dma_addr_t device_event_dma_addr;
149d76136e4SXuan Zhuo 	size_t ring_size_in_bytes;
150d76136e4SXuan Zhuo 	size_t event_size_in_bytes;
151d76136e4SXuan Zhuo };
152d76136e4SXuan Zhuo 
15343b4f721SMichael S. Tsirkin struct vring_virtqueue {
1540a8a69ddSRusty Russell 	struct virtqueue vq;
1550a8a69ddSRusty Russell 
1561ce9e605STiwei Bie 	/* Is this a packed ring? */
1571ce9e605STiwei Bie 	bool packed_ring;
1581ce9e605STiwei Bie 
159fb3fba6bSTiwei Bie 	/* Is DMA API used? */
160fb3fba6bSTiwei Bie 	bool use_dma_api;
161fb3fba6bSTiwei Bie 
1627b21e34fSRusty Russell 	/* Can we use weak barriers? */
1637b21e34fSRusty Russell 	bool weak_barriers;
1647b21e34fSRusty Russell 
1650a8a69ddSRusty Russell 	/* Other side has made a mess, don't try any more. */
1660a8a69ddSRusty Russell 	bool broken;
1670a8a69ddSRusty Russell 
1689fa29b9dSMark McLoughlin 	/* Host supports indirect buffers */
1699fa29b9dSMark McLoughlin 	bool indirect;
1709fa29b9dSMark McLoughlin 
171a5c262c5SMichael S. Tsirkin 	/* Host publishes avail event idx */
172a5c262c5SMichael S. Tsirkin 	bool event;
173a5c262c5SMichael S. Tsirkin 
1740a8a69ddSRusty Russell 	/* Head of free buffer list. */
1750a8a69ddSRusty Russell 	unsigned int free_head;
1760a8a69ddSRusty Russell 	/* Number we've added since last sync. */
1770a8a69ddSRusty Russell 	unsigned int num_added;
1780a8a69ddSRusty Russell 
179a7722890Shuangjie.albert 	/* Last used index we've seen.
180a7722890Shuangjie.albert 	 * For the split ring, it just contains the last used index.
181a7722890Shuangjie.albert 	 * For the packed ring:
182a7722890Shuangjie.albert 	 * bits below VRING_PACKED_EVENT_F_WRAP_CTR contain the last used index;
183a7722890Shuangjie.albert 	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR upwards contain the used wrap counter.
184a7722890Shuangjie.albert 	 */
1851bc4953eSAnthony Liguori 	u16 last_used_idx;
1860a8a69ddSRusty Russell 
1878d622d21SMichael S. Tsirkin 	/* Hint for event idx: already triggered no need to disable. */
1888d622d21SMichael S. Tsirkin 	bool event_triggered;
1898d622d21SMichael S. Tsirkin 
1901ce9e605STiwei Bie 	union {
1911ce9e605STiwei Bie 		/* Available for split ring */
192d76136e4SXuan Zhuo 		struct vring_virtqueue_split split;
193f277ec42SVenkatesh Srinivas 
1941ce9e605STiwei Bie 		/* Available for packed ring */
195d76136e4SXuan Zhuo 		struct vring_virtqueue_packed packed;
1961ce9e605STiwei Bie 	};
1971ce9e605STiwei Bie 
1980a8a69ddSRusty Russell 	/* How to notify other side. FIXME: commonalize hcalls! */
19946f9c2b9SHeinz Graalfs 	bool (*notify)(struct virtqueue *vq);
2000a8a69ddSRusty Russell 
2012a2d1382SAndy Lutomirski 	/* DMA, allocation, and size information */
2022a2d1382SAndy Lutomirski 	bool we_own_ring;
2032a2d1382SAndy Lutomirski 
2040a8a69ddSRusty Russell #ifdef DEBUG
2050a8a69ddSRusty Russell 	/* They're supposed to lock for us. */
2060a8a69ddSRusty Russell 	unsigned int in_use;
207e93300b1SRusty Russell 
208e93300b1SRusty Russell 	/* Figure out if their kicks are too delayed. */
209e93300b1SRusty Russell 	bool last_add_time_valid;
210e93300b1SRusty Russell 	ktime_t last_add_time;
2110a8a69ddSRusty Russell #endif
2120a8a69ddSRusty Russell };
2130a8a69ddSRusty Russell 
21407d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
215cd4c812aSXuan Zhuo 					       struct vring_virtqueue_split *vring_split,
21607d9629dSXuan Zhuo 					       struct virtio_device *vdev,
21707d9629dSXuan Zhuo 					       bool weak_barriers,
21807d9629dSXuan Zhuo 					       bool context,
21907d9629dSXuan Zhuo 					       bool (*notify)(struct virtqueue *),
22007d9629dSXuan Zhuo 					       void (*callback)(struct virtqueue *),
22107d9629dSXuan Zhuo 					       const char *name);
222a2b36c8dSXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
223e6f633e5STiwei Bie 
224e6f633e5STiwei Bie /*
225e6f633e5STiwei Bie  * Helpers.
226e6f633e5STiwei Bie  */
227e6f633e5STiwei Bie 
2280a8a69ddSRusty Russell #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
2290a8a69ddSRusty Russell 
23035c51e09SXianting Tian static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
2312f18c2d1STiwei Bie 					  unsigned int total_sg)
2322f18c2d1STiwei Bie {
2332f18c2d1STiwei Bie 	/*
2342f18c2d1STiwei Bie 	 * If the host supports indirect descriptor tables, and we have multiple
2352f18c2d1STiwei Bie 	 * buffers, then go indirect. FIXME: tune this threshold
2362f18c2d1STiwei Bie 	 */
2372f18c2d1STiwei Bie 	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
2382f18c2d1STiwei Bie }
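/*
 * Illustration (sketch, not part of the original file): with
 * VIRTIO_RING_F_INDIRECT_DESC negotiated and free slots available, a
 * three-entry scatterlist goes through an indirect table and consumes a
 * single ring descriptor, while a single-buffer request always uses the
 * ring descriptors directly.
 */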
2392f18c2d1STiwei Bie 
240d26c96c8SAndy Lutomirski /*
2411a937693SMichael S. Tsirkin  * Modern virtio devices have feature bits to specify whether they need a
2421a937693SMichael S. Tsirkin  * quirk and bypass the IOMMU. If not there, just use the DMA API.
2431a937693SMichael S. Tsirkin  *
2441a937693SMichael S. Tsirkin  * If there, the interaction between virtio and DMA API is messy.
245d26c96c8SAndy Lutomirski  *
246d26c96c8SAndy Lutomirski  * On most systems with virtio, physical addresses match bus addresses,
247d26c96c8SAndy Lutomirski  * and it doesn't particularly matter whether we use the DMA API.
248d26c96c8SAndy Lutomirski  *
249d26c96c8SAndy Lutomirski  * On some systems, including Xen and any system with a physical device
250d26c96c8SAndy Lutomirski  * that speaks virtio behind a physical IOMMU, we must use the DMA API
251d26c96c8SAndy Lutomirski  * for virtio DMA to work at all.
252d26c96c8SAndy Lutomirski  *
253d26c96c8SAndy Lutomirski  * On other systems, including SPARC and PPC64, virtio-pci devices are
254d26c96c8SAndy Lutomirski  * enumerated as though they are behind an IOMMU, but the virtio host
255d26c96c8SAndy Lutomirski  * ignores the IOMMU, so we must either pretend that the IOMMU isn't
256d26c96c8SAndy Lutomirski  * there or somehow map everything as the identity.
257d26c96c8SAndy Lutomirski  *
258d26c96c8SAndy Lutomirski  * For the time being, we preserve historic behavior and bypass the DMA
259d26c96c8SAndy Lutomirski  * API.
2601a937693SMichael S. Tsirkin  *
2611a937693SMichael S. Tsirkin  * TODO: install a per-device DMA ops structure that does the right thing
2621a937693SMichael S. Tsirkin  * taking into account all the above quirks, and use the DMA API
2631a937693SMichael S. Tsirkin  * unconditionally on data path.
264d26c96c8SAndy Lutomirski  */
265d26c96c8SAndy Lutomirski 
266d26c96c8SAndy Lutomirski static bool vring_use_dma_api(struct virtio_device *vdev)
267d26c96c8SAndy Lutomirski {
26824b6842aSMichael S. Tsirkin 	if (!virtio_has_dma_quirk(vdev))
2691a937693SMichael S. Tsirkin 		return true;
2701a937693SMichael S. Tsirkin 
2711a937693SMichael S. Tsirkin 	/* Otherwise, we are left to guess. */
27278fe3987SAndy Lutomirski 	/*
27378fe3987SAndy Lutomirski 	 * In theory, it's possible to have a buggy QEMU-supplied
27478fe3987SAndy Lutomirski 	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
27578fe3987SAndy Lutomirski 	 * such a configuration, virtio has never worked and will
27678fe3987SAndy Lutomirski 	 * not work without an even larger kludge.  Instead, enable
27778fe3987SAndy Lutomirski 	 * the DMA API if we're a Xen guest, which at least allows
27878fe3987SAndy Lutomirski 	 * all of the sensible Xen configurations to work correctly.
27978fe3987SAndy Lutomirski 	 */
28078fe3987SAndy Lutomirski 	if (xen_domain())
28178fe3987SAndy Lutomirski 		return true;
28278fe3987SAndy Lutomirski 
283d26c96c8SAndy Lutomirski 	return false;
284d26c96c8SAndy Lutomirski }
285d26c96c8SAndy Lutomirski 
286e6d6dd6cSJoerg Roedel size_t virtio_max_dma_size(struct virtio_device *vdev)
287e6d6dd6cSJoerg Roedel {
288e6d6dd6cSJoerg Roedel 	size_t max_segment_size = SIZE_MAX;
289e6d6dd6cSJoerg Roedel 
290e6d6dd6cSJoerg Roedel 	if (vring_use_dma_api(vdev))
291817fc978SWill Deacon 		max_segment_size = dma_max_mapping_size(vdev->dev.parent);
292e6d6dd6cSJoerg Roedel 
293e6d6dd6cSJoerg Roedel 	return max_segment_size;
294e6d6dd6cSJoerg Roedel }
295e6d6dd6cSJoerg Roedel EXPORT_SYMBOL_GPL(virtio_max_dma_size);
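/*
 * Hypothetical caller sketch (not code from this file): a block driver
 * could clamp its segment size with the value returned above, e.g.
 *
 *	u32 max_seg = min_t(u32, virtio_max_dma_size(vdev), U32_MAX);
 *	blk_queue_max_segment_size(q, max_seg);
 *
 * so that no single segment exceeds what the parent device can map.
 */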
296e6d6dd6cSJoerg Roedel 
297d79dca75STiwei Bie static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
298d79dca75STiwei Bie 			      dma_addr_t *dma_handle, gfp_t flag)
299d79dca75STiwei Bie {
300d79dca75STiwei Bie 	if (vring_use_dma_api(vdev)) {
301d79dca75STiwei Bie 		return dma_alloc_coherent(vdev->dev.parent, size,
302d79dca75STiwei Bie 					  dma_handle, flag);
303d79dca75STiwei Bie 	} else {
304d79dca75STiwei Bie 		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
305d79dca75STiwei Bie 
306d79dca75STiwei Bie 		if (queue) {
307d79dca75STiwei Bie 			phys_addr_t phys_addr = virt_to_phys(queue);
308d79dca75STiwei Bie 			*dma_handle = (dma_addr_t)phys_addr;
309d79dca75STiwei Bie 
310d79dca75STiwei Bie 			/*
311d79dca75STiwei Bie 			 * Sanity check: make sure we didn't truncate
312d79dca75STiwei Bie 			 * the address.  The only arches I can find that
313d79dca75STiwei Bie 			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
314d79dca75STiwei Bie 			 * are certain non-highmem MIPS and x86
315d79dca75STiwei Bie 			 * configurations, but these configurations
316d79dca75STiwei Bie 			 * should never allocate physical pages above 32
317d79dca75STiwei Bie 			 * bits, so this is fine.  Just in case, throw a
318d79dca75STiwei Bie 			 * warning and abort if we end up with an
319d79dca75STiwei Bie 			 * unrepresentable address.
320d79dca75STiwei Bie 			 */
321d79dca75STiwei Bie 			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
322d79dca75STiwei Bie 				free_pages_exact(queue, PAGE_ALIGN(size));
323d79dca75STiwei Bie 				return NULL;
324d79dca75STiwei Bie 			}
325d79dca75STiwei Bie 		}
326d79dca75STiwei Bie 		return queue;
327d79dca75STiwei Bie 	}
328d79dca75STiwei Bie }
329d79dca75STiwei Bie 
330d79dca75STiwei Bie static void vring_free_queue(struct virtio_device *vdev, size_t size,
331d79dca75STiwei Bie 			     void *queue, dma_addr_t dma_handle)
332d79dca75STiwei Bie {
333d79dca75STiwei Bie 	if (vring_use_dma_api(vdev))
334d79dca75STiwei Bie 		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
335d79dca75STiwei Bie 	else
336d79dca75STiwei Bie 		free_pages_exact(queue, PAGE_ALIGN(size));
337d79dca75STiwei Bie }
338d79dca75STiwei Bie 
339780bc790SAndy Lutomirski /*
340780bc790SAndy Lutomirski  * The DMA ops on various arches are rather gnarly right now, and
341780bc790SAndy Lutomirski  * making all of the arch DMA ops work on the vring device itself
342780bc790SAndy Lutomirski  * is a mess.  For now, we use the parent device for DMA ops.
343780bc790SAndy Lutomirski  */
34475bfa81bSMichael S. Tsirkin static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
345780bc790SAndy Lutomirski {
346780bc790SAndy Lutomirski 	return vq->vq.vdev->dev.parent;
347780bc790SAndy Lutomirski }
348780bc790SAndy Lutomirski 
349780bc790SAndy Lutomirski /* Map one sg entry. */
350780bc790SAndy Lutomirski static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
351780bc790SAndy Lutomirski 				   struct scatterlist *sg,
352780bc790SAndy Lutomirski 				   enum dma_data_direction direction)
353780bc790SAndy Lutomirski {
354fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
355780bc790SAndy Lutomirski 		return (dma_addr_t)sg_phys(sg);
356780bc790SAndy Lutomirski 
357780bc790SAndy Lutomirski 	/*
358780bc790SAndy Lutomirski 	 * We can't use dma_map_sg, because we don't use scatterlists in
359780bc790SAndy Lutomirski 	 * the way it expects (we don't guarantee that the scatterlist
360780bc790SAndy Lutomirski 	 * will exist for the lifetime of the mapping).
361780bc790SAndy Lutomirski 	 */
362780bc790SAndy Lutomirski 	return dma_map_page(vring_dma_dev(vq),
363780bc790SAndy Lutomirski 			    sg_page(sg), sg->offset, sg->length,
364780bc790SAndy Lutomirski 			    direction);
365780bc790SAndy Lutomirski }
366780bc790SAndy Lutomirski 
367780bc790SAndy Lutomirski static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
368780bc790SAndy Lutomirski 				   void *cpu_addr, size_t size,
369780bc790SAndy Lutomirski 				   enum dma_data_direction direction)
370780bc790SAndy Lutomirski {
371fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
372780bc790SAndy Lutomirski 		return (dma_addr_t)virt_to_phys(cpu_addr);
373780bc790SAndy Lutomirski 
374780bc790SAndy Lutomirski 	return dma_map_single(vring_dma_dev(vq),
375780bc790SAndy Lutomirski 			      cpu_addr, size, direction);
376780bc790SAndy Lutomirski }
377780bc790SAndy Lutomirski 
378e6f633e5STiwei Bie static int vring_mapping_error(const struct vring_virtqueue *vq,
379e6f633e5STiwei Bie 			       dma_addr_t addr)
380e6f633e5STiwei Bie {
381fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
382e6f633e5STiwei Bie 		return 0;
383e6f633e5STiwei Bie 
384e6f633e5STiwei Bie 	return dma_mapping_error(vring_dma_dev(vq), addr);
385e6f633e5STiwei Bie }
386e6f633e5STiwei Bie 
3873a897128SXuan Zhuo static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
3883a897128SXuan Zhuo {
3893a897128SXuan Zhuo 	vq->vq.num_free = num;
3903a897128SXuan Zhuo 
3913a897128SXuan Zhuo 	if (vq->packed_ring)
3923a897128SXuan Zhuo 		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
3933a897128SXuan Zhuo 	else
3943a897128SXuan Zhuo 		vq->last_used_idx = 0;
3953a897128SXuan Zhuo 
3963a897128SXuan Zhuo 	vq->event_triggered = false;
3973a897128SXuan Zhuo 	vq->num_added = 0;
3983a897128SXuan Zhuo 
3993a897128SXuan Zhuo #ifdef DEBUG
4003a897128SXuan Zhuo 	vq->in_use = false;
4013a897128SXuan Zhuo 	vq->last_add_time_valid = false;
4023a897128SXuan Zhuo #endif
4033a897128SXuan Zhuo }
4043a897128SXuan Zhuo 
405e6f633e5STiwei Bie 
406e6f633e5STiwei Bie /*
407e6f633e5STiwei Bie  * Split ring specific functions - *_split().
408e6f633e5STiwei Bie  */
409e6f633e5STiwei Bie 
41072b5e895SJason Wang static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
411780bc790SAndy Lutomirski 					   struct vring_desc *desc)
412780bc790SAndy Lutomirski {
413780bc790SAndy Lutomirski 	u16 flags;
414780bc790SAndy Lutomirski 
415fb3fba6bSTiwei Bie 	if (!vq->use_dma_api)
416780bc790SAndy Lutomirski 		return;
417780bc790SAndy Lutomirski 
418780bc790SAndy Lutomirski 	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
419780bc790SAndy Lutomirski 
420780bc790SAndy Lutomirski 	dma_unmap_page(vring_dma_dev(vq),
421780bc790SAndy Lutomirski 		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
422780bc790SAndy Lutomirski 		       virtio32_to_cpu(vq->vq.vdev, desc->len),
423780bc790SAndy Lutomirski 		       (flags & VRING_DESC_F_WRITE) ?
424780bc790SAndy Lutomirski 		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
425780bc790SAndy Lutomirski }
426780bc790SAndy Lutomirski 
42772b5e895SJason Wang static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
42872b5e895SJason Wang 					  unsigned int i)
42972b5e895SJason Wang {
43072b5e895SJason Wang 	struct vring_desc_extra *extra = vq->split.desc_extra;
43172b5e895SJason Wang 	u16 flags;
43272b5e895SJason Wang 
43372b5e895SJason Wang 	if (!vq->use_dma_api)
43472b5e895SJason Wang 		goto out;
43572b5e895SJason Wang 
43672b5e895SJason Wang 	flags = extra[i].flags;
43772b5e895SJason Wang 
43872b5e895SJason Wang 	if (flags & VRING_DESC_F_INDIRECT) {
43972b5e895SJason Wang 		dma_unmap_single(vring_dma_dev(vq),
44072b5e895SJason Wang 				 extra[i].addr,
44172b5e895SJason Wang 				 extra[i].len,
44272b5e895SJason Wang 				 (flags & VRING_DESC_F_WRITE) ?
44372b5e895SJason Wang 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
44472b5e895SJason Wang 	} else {
44572b5e895SJason Wang 		dma_unmap_page(vring_dma_dev(vq),
44672b5e895SJason Wang 			       extra[i].addr,
44772b5e895SJason Wang 			       extra[i].len,
44872b5e895SJason Wang 			       (flags & VRING_DESC_F_WRITE) ?
44972b5e895SJason Wang 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
45072b5e895SJason Wang 	}
45172b5e895SJason Wang 
45272b5e895SJason Wang out:
45372b5e895SJason Wang 	return extra[i].next;
45472b5e895SJason Wang }
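/*
 * The helper above returns the index of the following descriptor so a
 * whole chain can be unmapped in one walk. Minimal sketch, with
 * nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); the real
 * user is detach_buf_split() below, which also rebuilds the free list:
 *
 *	i = head;
 *	while (vq->split.vring.desc[i].flags & nextflag)
 *		i = vring_unmap_one_split(vq, i);
 *	vring_unmap_one_split(vq, i);
 */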
45572b5e895SJason Wang 
456138fd251STiwei Bie static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
457138fd251STiwei Bie 					       unsigned int total_sg,
458138fd251STiwei Bie 					       gfp_t gfp)
4599fa29b9dSMark McLoughlin {
4609fa29b9dSMark McLoughlin 	struct vring_desc *desc;
461b25bd251SRusty Russell 	unsigned int i;
4629fa29b9dSMark McLoughlin 
463b92b1b89SWill Deacon 	/*
464b92b1b89SWill Deacon 	 * We require lowmem mappings for the descriptors because
465b92b1b89SWill Deacon 	 * otherwise virt_to_phys will give us bogus addresses in the
466b92b1b89SWill Deacon 	 * virtqueue.
467b92b1b89SWill Deacon 	 */
46882107539SMichal Hocko 	gfp &= ~__GFP_HIGHMEM;
469b92b1b89SWill Deacon 
4706da2ec56SKees Cook 	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
4719fa29b9dSMark McLoughlin 	if (!desc)
472b25bd251SRusty Russell 		return NULL;
4739fa29b9dSMark McLoughlin 
474b25bd251SRusty Russell 	for (i = 0; i < total_sg; i++)
47500e6f3d9SMichael S. Tsirkin 		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
476b25bd251SRusty Russell 	return desc;
4779fa29b9dSMark McLoughlin }
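/*
 * For example, alloc_indirect_split(_vq, 3, gfp) returns a table whose
 * entries are pre-chained: desc[0].next = 1, desc[1].next = 2,
 * desc[2].next = 3. The terminating descriptor's VRING_DESC_F_NEXT flag
 * is cleared later by virtqueue_add_split(), so the dangling final
 * .next value is never followed.
 */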
4789fa29b9dSMark McLoughlin 
479fe4c3862SJason Wang static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
480fe4c3862SJason Wang 						    struct vring_desc *desc,
481fe4c3862SJason Wang 						    unsigned int i,
482fe4c3862SJason Wang 						    dma_addr_t addr,
483fe4c3862SJason Wang 						    unsigned int len,
48472b5e895SJason Wang 						    u16 flags,
48572b5e895SJason Wang 						    bool indirect)
486fe4c3862SJason Wang {
48772b5e895SJason Wang 	struct vring_virtqueue *vring = to_vvq(vq);
48872b5e895SJason Wang 	struct vring_desc_extra *extra = vring->split.desc_extra;
48972b5e895SJason Wang 	u16 next;
49072b5e895SJason Wang 
491fe4c3862SJason Wang 	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
492fe4c3862SJason Wang 	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
493fe4c3862SJason Wang 	desc[i].len = cpu_to_virtio32(vq->vdev, len);
494fe4c3862SJason Wang 
49572b5e895SJason Wang 	if (!indirect) {
49672b5e895SJason Wang 		next = extra[i].next;
49772b5e895SJason Wang 		desc[i].next = cpu_to_virtio16(vq->vdev, next);
49872b5e895SJason Wang 
49972b5e895SJason Wang 		extra[i].addr = addr;
50072b5e895SJason Wang 		extra[i].len = len;
50172b5e895SJason Wang 		extra[i].flags = flags;
50272b5e895SJason Wang 	} else
50372b5e895SJason Wang 		next = virtio16_to_cpu(vq->vdev, desc[i].next);
50472b5e895SJason Wang 
50572b5e895SJason Wang 	return next;
506fe4c3862SJason Wang }
507fe4c3862SJason Wang 
508138fd251STiwei Bie static inline int virtqueue_add_split(struct virtqueue *_vq,
50913816c76SRusty Russell 				      struct scatterlist *sgs[],
510eeebf9b1SRusty Russell 				      unsigned int total_sg,
51113816c76SRusty Russell 				      unsigned int out_sgs,
51213816c76SRusty Russell 				      unsigned int in_sgs,
513bbd603efSMichael S. Tsirkin 				      void *data,
5145a08b04fSMichael S. Tsirkin 				      void *ctx,
515bbd603efSMichael S. Tsirkin 				      gfp_t gfp)
5160a8a69ddSRusty Russell {
5170a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
51813816c76SRusty Russell 	struct scatterlist *sg;
519b25bd251SRusty Russell 	struct vring_desc *desc;
5203f649ab7SKees Cook 	unsigned int i, n, avail, descs_used, prev, err_idx;
5211fe9b6feSMichael S. Tsirkin 	int head;
522b25bd251SRusty Russell 	bool indirect;
5230a8a69ddSRusty Russell 
5249fa29b9dSMark McLoughlin 	START_USE(vq);
5259fa29b9dSMark McLoughlin 
5260a8a69ddSRusty Russell 	BUG_ON(data == NULL);
5275a08b04fSMichael S. Tsirkin 	BUG_ON(ctx && vq->indirect);
5289fa29b9dSMark McLoughlin 
52970670444SRusty Russell 	if (unlikely(vq->broken)) {
53070670444SRusty Russell 		END_USE(vq);
53170670444SRusty Russell 		return -EIO;
53270670444SRusty Russell 	}
53370670444SRusty Russell 
5344d6a105eSTiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
535e93300b1SRusty Russell 
53613816c76SRusty Russell 	BUG_ON(total_sg == 0);
5370a8a69ddSRusty Russell 
538b25bd251SRusty Russell 	head = vq->free_head;
539b25bd251SRusty Russell 
54035c51e09SXianting Tian 	if (virtqueue_use_indirect(vq, total_sg))
541138fd251STiwei Bie 		desc = alloc_indirect_split(_vq, total_sg, gfp);
54244ed8089SRichard W.M. Jones 	else {
543b25bd251SRusty Russell 		desc = NULL;
544e593bf97STiwei Bie 		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
54544ed8089SRichard W.M. Jones 	}
546b25bd251SRusty Russell 
547b25bd251SRusty Russell 	if (desc) {
548b25bd251SRusty Russell 		/* Use a single buffer which doesn't continue */
549780bc790SAndy Lutomirski 		indirect = true;
550b25bd251SRusty Russell 		/* Set up rest to use this indirect table. */
551b25bd251SRusty Russell 		i = 0;
552b25bd251SRusty Russell 		descs_used = 1;
553b25bd251SRusty Russell 	} else {
554780bc790SAndy Lutomirski 		indirect = false;
555e593bf97STiwei Bie 		desc = vq->split.vring.desc;
556b25bd251SRusty Russell 		i = head;
557b25bd251SRusty Russell 		descs_used = total_sg;
558b25bd251SRusty Russell 	}
559b25bd251SRusty Russell 
560b4b4ff73SXianting Tian 	if (unlikely(vq->vq.num_free < descs_used)) {
5610a8a69ddSRusty Russell 		pr_debug("Can't add buf len %i - avail = %i\n",
562b25bd251SRusty Russell 			 descs_used, vq->vq.num_free);
56344653eaeSRusty Russell 		/* FIXME: for historical reasons, we force a notify here if
56444653eaeSRusty Russell 		 * there are outgoing parts to the buffer.  Presumably the
56544653eaeSRusty Russell 		 * host should service the ring ASAP. */
56613816c76SRusty Russell 		if (out_sgs)
567426e3e0aSRusty Russell 			vq->notify(&vq->vq);
56858625edfSWei Yongjun 		if (indirect)
56958625edfSWei Yongjun 			kfree(desc);
5700a8a69ddSRusty Russell 		END_USE(vq);
5710a8a69ddSRusty Russell 		return -ENOSPC;
5720a8a69ddSRusty Russell 	}
5730a8a69ddSRusty Russell 
57413816c76SRusty Russell 	for (n = 0; n < out_sgs; n++) {
575eeebf9b1SRusty Russell 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
576780bc790SAndy Lutomirski 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
577780bc790SAndy Lutomirski 			if (vring_mapping_error(vq, addr))
578780bc790SAndy Lutomirski 				goto unmap_release;
579780bc790SAndy Lutomirski 
5800a8a69ddSRusty Russell 			prev = i;
58172b5e895SJason Wang 			/* Note that we trust the indirect descriptor
58272b5e895SJason Wang 			 * table since it uses streaming DMA mapping.
58372b5e895SJason Wang 			 */
584fe4c3862SJason Wang 			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
58572b5e895SJason Wang 						     VRING_DESC_F_NEXT,
58672b5e895SJason Wang 						     indirect);
5870a8a69ddSRusty Russell 		}
58813816c76SRusty Russell 	}
58913816c76SRusty Russell 	for (; n < (out_sgs + in_sgs); n++) {
590eeebf9b1SRusty Russell 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
591780bc790SAndy Lutomirski 			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
592780bc790SAndy Lutomirski 			if (vring_mapping_error(vq, addr))
593780bc790SAndy Lutomirski 				goto unmap_release;
594780bc790SAndy Lutomirski 
5950a8a69ddSRusty Russell 			prev = i;
59672b5e895SJason Wang 			/* Note that we trust the indirect descriptor
59772b5e895SJason Wang 			 * table since it uses streaming DMA mapping.
59872b5e895SJason Wang 			 */
599fe4c3862SJason Wang 			i = virtqueue_add_desc_split(_vq, desc, i, addr,
600fe4c3862SJason Wang 						     sg->length,
601fe4c3862SJason Wang 						     VRING_DESC_F_NEXT |
60272b5e895SJason Wang 						     VRING_DESC_F_WRITE,
60372b5e895SJason Wang 						     indirect);
60413816c76SRusty Russell 		}
6050a8a69ddSRusty Russell 	}
6060a8a69ddSRusty Russell 	/* Last one doesn't continue. */
60700e6f3d9SMichael S. Tsirkin 	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
60872b5e895SJason Wang 	if (!indirect && vq->use_dma_api)
609890d3356SVincent Whitchurch 		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
61072b5e895SJason Wang 			~VRING_DESC_F_NEXT;
6110a8a69ddSRusty Russell 
612780bc790SAndy Lutomirski 	if (indirect) {
613780bc790SAndy Lutomirski 		/* Now that the indirect table is filled in, map it. */
614780bc790SAndy Lutomirski 		dma_addr_t addr = vring_map_single(
615780bc790SAndy Lutomirski 			vq, desc, total_sg * sizeof(struct vring_desc),
616780bc790SAndy Lutomirski 			DMA_TO_DEVICE);
617780bc790SAndy Lutomirski 		if (vring_mapping_error(vq, addr))
618780bc790SAndy Lutomirski 			goto unmap_release;
619780bc790SAndy Lutomirski 
620fe4c3862SJason Wang 		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
621fe4c3862SJason Wang 					 head, addr,
622fe4c3862SJason Wang 					 total_sg * sizeof(struct vring_desc),
62372b5e895SJason Wang 					 VRING_DESC_F_INDIRECT,
62472b5e895SJason Wang 					 false);
625780bc790SAndy Lutomirski 	}
626780bc790SAndy Lutomirski 
627780bc790SAndy Lutomirski 	/* We're using some buffers from the free list. */
628780bc790SAndy Lutomirski 	vq->vq.num_free -= descs_used;
629780bc790SAndy Lutomirski 
6300a8a69ddSRusty Russell 	/* Update free pointer */
631b25bd251SRusty Russell 	if (indirect)
63272b5e895SJason Wang 		vq->free_head = vq->split.desc_extra[head].next;
633b25bd251SRusty Russell 	else
6340a8a69ddSRusty Russell 		vq->free_head = i;
6350a8a69ddSRusty Russell 
636780bc790SAndy Lutomirski 	/* Store token and indirect buffer state. */
637cbeedb72STiwei Bie 	vq->split.desc_state[head].data = data;
638780bc790SAndy Lutomirski 	if (indirect)
639cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = desc;
64087646a34SJason Wang 	else
641cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = ctx;
6420a8a69ddSRusty Russell 
6430a8a69ddSRusty Russell 	/* Put entry in available array (but don't update avail->idx until they
6443b720b8cSRusty Russell 	 * do sync). */
645e593bf97STiwei Bie 	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
646e593bf97STiwei Bie 	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
6470a8a69ddSRusty Russell 
648ee7cd898SRusty Russell 	/* Descriptors and available array need to be set before we expose the
649ee7cd898SRusty Russell 	 * new available array entries. */
650a9a0fef7SRusty Russell 	virtio_wmb(vq->weak_barriers);
651e593bf97STiwei Bie 	vq->split.avail_idx_shadow++;
652e593bf97STiwei Bie 	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
653e593bf97STiwei Bie 						vq->split.avail_idx_shadow);
654ee7cd898SRusty Russell 	vq->num_added++;
655ee7cd898SRusty Russell 
6565e05bf58STetsuo Handa 	pr_debug("Added buffer head %i to %p\n", head, vq);
6575e05bf58STetsuo Handa 	END_USE(vq);
6585e05bf58STetsuo Handa 
659ee7cd898SRusty Russell 	/* This is very unlikely, but theoretically possible.  Kick
660ee7cd898SRusty Russell 	 * just in case. */
661ee7cd898SRusty Russell 	if (unlikely(vq->num_added == (1 << 16) - 1))
662ee7cd898SRusty Russell 		virtqueue_kick(_vq);
663ee7cd898SRusty Russell 
66498e8c6bcSRusty Russell 	return 0;
665780bc790SAndy Lutomirski 
666780bc790SAndy Lutomirski unmap_release:
667780bc790SAndy Lutomirski 	err_idx = i;
668cf8f1696SMatthias Lange 
669cf8f1696SMatthias Lange 	if (indirect)
670cf8f1696SMatthias Lange 		i = 0;
671cf8f1696SMatthias Lange 	else
672780bc790SAndy Lutomirski 		i = head;
673780bc790SAndy Lutomirski 
674780bc790SAndy Lutomirski 	for (n = 0; n < total_sg; n++) {
675780bc790SAndy Lutomirski 		if (i == err_idx)
676780bc790SAndy Lutomirski 			break;
67772b5e895SJason Wang 		if (indirect) {
67872b5e895SJason Wang 			vring_unmap_one_split_indirect(vq, &desc[i]);
679cf8f1696SMatthias Lange 			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
68072b5e895SJason Wang 		} else
68172b5e895SJason Wang 			i = vring_unmap_one_split(vq, i);
682780bc790SAndy Lutomirski 	}
683780bc790SAndy Lutomirski 
684780bc790SAndy Lutomirski 	if (indirect)
685780bc790SAndy Lutomirski 		kfree(desc);
686780bc790SAndy Lutomirski 
6873cc36f6eSMichael S. Tsirkin 	END_USE(vq);
688f7728002SHalil Pasic 	return -ENOMEM;
6890a8a69ddSRusty Russell }
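/*
 * Worked example (sketch): adding one two-entry out sg list and one
 * single-entry in sg list without indirect consumes three chained ring
 * descriptors (the first two flagged VRING_DESC_F_NEXT, the last
 * flagged only VRING_DESC_F_WRITE), then publishes the chain head in
 * avail->ring[] and bumps the avail index shadow.
 */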
69013816c76SRusty Russell 
691138fd251STiwei Bie static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
6920a8a69ddSRusty Russell {
6930a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
694a5c262c5SMichael S. Tsirkin 	u16 new, old;
69541f0377fSRusty Russell 	bool needs_kick;
69641f0377fSRusty Russell 
6970a8a69ddSRusty Russell 	START_USE(vq);
698a72caae2SJason Wang 	/* We need to expose available array entries before checking avail
699a72caae2SJason Wang 	 * event. */
700a9a0fef7SRusty Russell 	virtio_mb(vq->weak_barriers);
7010a8a69ddSRusty Russell 
702e593bf97STiwei Bie 	old = vq->split.avail_idx_shadow - vq->num_added;
703e593bf97STiwei Bie 	new = vq->split.avail_idx_shadow;
7040a8a69ddSRusty Russell 	vq->num_added = 0;
7050a8a69ddSRusty Russell 
7064d6a105eSTiwei Bie 	LAST_ADD_TIME_CHECK(vq);
7074d6a105eSTiwei Bie 	LAST_ADD_TIME_INVALID(vq);
708e93300b1SRusty Russell 
70941f0377fSRusty Russell 	if (vq->event) {
710e593bf97STiwei Bie 		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
711e593bf97STiwei Bie 					vring_avail_event(&vq->split.vring)),
71241f0377fSRusty Russell 					      new, old);
71341f0377fSRusty Russell 	} else {
714e593bf97STiwei Bie 		needs_kick = !(vq->split.vring.used->flags &
715e593bf97STiwei Bie 					cpu_to_virtio16(_vq->vdev,
716e593bf97STiwei Bie 						VRING_USED_F_NO_NOTIFY));
71741f0377fSRusty Russell 	}
7180a8a69ddSRusty Russell 	END_USE(vq);
71941f0377fSRusty Russell 	return needs_kick;
72041f0377fSRusty Russell }
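/*
 * For reference, vring_need_event() (include/uapi/linux/virtio_ring.h)
 * is defined as:
 *
 *	(u16)(new_idx - event_idx - 1) < (u16)(new_idx - old)
 *
 * i.e. kick only if the device's advertised avail event index falls in
 * the window (old, new] of entries added since the last kick.
 */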
721138fd251STiwei Bie 
722138fd251STiwei Bie static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
7235a08b04fSMichael S. Tsirkin 			     void **ctx)
7240a8a69ddSRusty Russell {
725780bc790SAndy Lutomirski 	unsigned int i, j;
726c60923cbSGonglei 	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
7270a8a69ddSRusty Russell 
7280a8a69ddSRusty Russell 	/* Clear data ptr. */
729cbeedb72STiwei Bie 	vq->split.desc_state[head].data = NULL;
7300a8a69ddSRusty Russell 
731780bc790SAndy Lutomirski 	/* Put back on free list: unmap first-level descriptors and find end */
7320a8a69ddSRusty Russell 	i = head;
7339fa29b9dSMark McLoughlin 
734e593bf97STiwei Bie 	while (vq->split.vring.desc[i].flags & nextflag) {
73572b5e895SJason Wang 		vring_unmap_one_split(vq, i);
73672b5e895SJason Wang 		i = vq->split.desc_extra[i].next;
73706ca287dSRusty Russell 		vq->vq.num_free++;
7380a8a69ddSRusty Russell 	}
7390a8a69ddSRusty Russell 
74072b5e895SJason Wang 	vring_unmap_one_split(vq, i);
74172b5e895SJason Wang 	vq->split.desc_extra[i].next = vq->free_head;
7420a8a69ddSRusty Russell 	vq->free_head = head;
743780bc790SAndy Lutomirski 
7440a8a69ddSRusty Russell 	/* Plus final descriptor */
74506ca287dSRusty Russell 	vq->vq.num_free++;
746780bc790SAndy Lutomirski 
7475a08b04fSMichael S. Tsirkin 	if (vq->indirect) {
748cbeedb72STiwei Bie 		struct vring_desc *indir_desc =
749cbeedb72STiwei Bie 				vq->split.desc_state[head].indir_desc;
7505a08b04fSMichael S. Tsirkin 		u32 len;
7515a08b04fSMichael S. Tsirkin 
7525a08b04fSMichael S. Tsirkin 		/* Free the indirect table, if any, now that it's unmapped. */
7535a08b04fSMichael S. Tsirkin 		if (!indir_desc)
7545a08b04fSMichael S. Tsirkin 			return;
7555a08b04fSMichael S. Tsirkin 
75672b5e895SJason Wang 		len = vq->split.desc_extra[head].len;
757780bc790SAndy Lutomirski 
75872b5e895SJason Wang 		BUG_ON(!(vq->split.desc_extra[head].flags &
75972b5e895SJason Wang 				VRING_DESC_F_INDIRECT));
760780bc790SAndy Lutomirski 		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
761780bc790SAndy Lutomirski 
762780bc790SAndy Lutomirski 		for (j = 0; j < len / sizeof(struct vring_desc); j++)
76372b5e895SJason Wang 			vring_unmap_one_split_indirect(vq, &indir_desc[j]);
764780bc790SAndy Lutomirski 
7655a08b04fSMichael S. Tsirkin 		kfree(indir_desc);
766cbeedb72STiwei Bie 		vq->split.desc_state[head].indir_desc = NULL;
7675a08b04fSMichael S. Tsirkin 	} else if (ctx) {
768cbeedb72STiwei Bie 		*ctx = vq->split.desc_state[head].indir_desc;
769780bc790SAndy Lutomirski 	}
7700a8a69ddSRusty Russell }
7710a8a69ddSRusty Russell 
772138fd251STiwei Bie static inline bool more_used_split(const struct vring_virtqueue *vq)
7730a8a69ddSRusty Russell {
774e593bf97STiwei Bie 	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
775e593bf97STiwei Bie 			vq->split.vring.used->idx);
7760a8a69ddSRusty Russell }
7770a8a69ddSRusty Russell 
778138fd251STiwei Bie static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
779138fd251STiwei Bie 					 unsigned int *len,
7805a08b04fSMichael S. Tsirkin 					 void **ctx)
7810a8a69ddSRusty Russell {
7820a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
7830a8a69ddSRusty Russell 	void *ret;
7840a8a69ddSRusty Russell 	unsigned int i;
7853b720b8cSRusty Russell 	u16 last_used;
7860a8a69ddSRusty Russell 
7870a8a69ddSRusty Russell 	START_USE(vq);
7880a8a69ddSRusty Russell 
7895ef82752SRusty Russell 	if (unlikely(vq->broken)) {
7905ef82752SRusty Russell 		END_USE(vq);
7915ef82752SRusty Russell 		return NULL;
7925ef82752SRusty Russell 	}
7935ef82752SRusty Russell 
794138fd251STiwei Bie 	if (!more_used_split(vq)) {
7950a8a69ddSRusty Russell 		pr_debug("No more buffers in queue\n");
7960a8a69ddSRusty Russell 		END_USE(vq);
7970a8a69ddSRusty Russell 		return NULL;
7980a8a69ddSRusty Russell 	}
7990a8a69ddSRusty Russell 
8002d61ba95SMichael S. Tsirkin 	/* Only get used array entries after they have been exposed by host. */
801a9a0fef7SRusty Russell 	virtio_rmb(vq->weak_barriers);
8022d61ba95SMichael S. Tsirkin 
803e593bf97STiwei Bie 	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
804e593bf97STiwei Bie 	i = virtio32_to_cpu(_vq->vdev,
805e593bf97STiwei Bie 			vq->split.vring.used->ring[last_used].id);
806e593bf97STiwei Bie 	*len = virtio32_to_cpu(_vq->vdev,
807e593bf97STiwei Bie 			vq->split.vring.used->ring[last_used].len);
8080a8a69ddSRusty Russell 
809e593bf97STiwei Bie 	if (unlikely(i >= vq->split.vring.num)) {
8100a8a69ddSRusty Russell 		BAD_RING(vq, "id %u out of range\n", i);
8110a8a69ddSRusty Russell 		return NULL;
8120a8a69ddSRusty Russell 	}
813cbeedb72STiwei Bie 	if (unlikely(!vq->split.desc_state[i].data)) {
8140a8a69ddSRusty Russell 		BAD_RING(vq, "id %u is not a head!\n", i);
8150a8a69ddSRusty Russell 		return NULL;
8160a8a69ddSRusty Russell 	}
8170a8a69ddSRusty Russell 
818138fd251STiwei Bie 	/* detach_buf_split clears data, so grab it now. */
819cbeedb72STiwei Bie 	ret = vq->split.desc_state[i].data;
820138fd251STiwei Bie 	detach_buf_split(vq, i, ctx);
8210a8a69ddSRusty Russell 	vq->last_used_idx++;
822a5c262c5SMichael S. Tsirkin 	/* If we expect an interrupt for the next entry, tell host
823a5c262c5SMichael S. Tsirkin 	 * by writing event index and flush out the write before
824a5c262c5SMichael S. Tsirkin 	 * the read in the next get_buf call. */
825e593bf97STiwei Bie 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
826788e5b3aSMichael S. Tsirkin 		virtio_store_mb(vq->weak_barriers,
827e593bf97STiwei Bie 				&vring_used_event(&vq->split.vring),
828788e5b3aSMichael S. Tsirkin 				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
829a5c262c5SMichael S. Tsirkin 
8304d6a105eSTiwei Bie 	LAST_ADD_TIME_INVALID(vq);
831e93300b1SRusty Russell 
8320a8a69ddSRusty Russell 	END_USE(vq);
8330a8a69ddSRusty Russell 	return ret;
8340a8a69ddSRusty Russell }
835138fd251STiwei Bie 
836138fd251STiwei Bie static void virtqueue_disable_cb_split(struct virtqueue *_vq)
837138fd251STiwei Bie {
838138fd251STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
839138fd251STiwei Bie 
840e593bf97STiwei Bie 	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
841e593bf97STiwei Bie 		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
8428d622d21SMichael S. Tsirkin 		if (vq->event)
8438d622d21SMichael S. Tsirkin 			/* TODO: this is a hack. Figure out a cleaner value to write. */
8448d622d21SMichael S. Tsirkin 			vring_used_event(&vq->split.vring) = 0x0;
8458d622d21SMichael S. Tsirkin 		else
846e593bf97STiwei Bie 			vq->split.vring.avail->flags =
847e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
848e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
849138fd251STiwei Bie 	}
850138fd251STiwei Bie }
851138fd251STiwei Bie 
85231532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
853cc229884SMichael S. Tsirkin {
854cc229884SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
855cc229884SMichael S. Tsirkin 	u16 last_used_idx;
856cc229884SMichael S. Tsirkin 
857cc229884SMichael S. Tsirkin 	START_USE(vq);
858cc229884SMichael S. Tsirkin 
859cc229884SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
860cc229884SMichael S. Tsirkin 	 * more to do. */
861cc229884SMichael S. Tsirkin 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
862cc229884SMichael S. Tsirkin 	 * either clear the flags bit or point the event index at the next
863cc229884SMichael S. Tsirkin 	 * entry. Always do both to keep code simple. */
864e593bf97STiwei Bie 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
865e593bf97STiwei Bie 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
8660ea1e4a6SLadi Prosek 		if (!vq->event)
867e593bf97STiwei Bie 			vq->split.vring.avail->flags =
868e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
869e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
870f277ec42SVenkatesh Srinivas 	}
871e593bf97STiwei Bie 	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
872e593bf97STiwei Bie 			last_used_idx = vq->last_used_idx);
873cc229884SMichael S. Tsirkin 	END_USE(vq);
874cc229884SMichael S. Tsirkin 	return last_used_idx;
875cc229884SMichael S. Tsirkin }
876138fd251STiwei Bie 
87731532340SSolomon Tan static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
878138fd251STiwei Bie {
879138fd251STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
880138fd251STiwei Bie 
881138fd251STiwei Bie 	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
882e593bf97STiwei Bie 			vq->split.vring.used->idx);
883138fd251STiwei Bie }
884138fd251STiwei Bie 
885138fd251STiwei Bie static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
8867ab358c2SMichael S. Tsirkin {
8877ab358c2SMichael S. Tsirkin 	struct vring_virtqueue *vq = to_vvq(_vq);
8887ab358c2SMichael S. Tsirkin 	u16 bufs;
8897ab358c2SMichael S. Tsirkin 
8907ab358c2SMichael S. Tsirkin 	START_USE(vq);
8917ab358c2SMichael S. Tsirkin 
8927ab358c2SMichael S. Tsirkin 	/* We optimistically turn back on interrupts, then check if there was
8937ab358c2SMichael S. Tsirkin 	 * more to do. */
8947ab358c2SMichael S. Tsirkin 	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
8957ab358c2SMichael S. Tsirkin 	 * either clear the flags bit or point the event index at the next
8960ea1e4a6SLadi Prosek 	 * entry. Always update the event index to keep code simple. */
897e593bf97STiwei Bie 	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
898e593bf97STiwei Bie 		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
8990ea1e4a6SLadi Prosek 		if (!vq->event)
900e593bf97STiwei Bie 			vq->split.vring.avail->flags =
901e593bf97STiwei Bie 				cpu_to_virtio16(_vq->vdev,
902e593bf97STiwei Bie 						vq->split.avail_flags_shadow);
903f277ec42SVenkatesh Srinivas 	}
9047ab358c2SMichael S. Tsirkin 	/* TODO: tune this threshold */
905e593bf97STiwei Bie 	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
906788e5b3aSMichael S. Tsirkin 
907788e5b3aSMichael S. Tsirkin 	virtio_store_mb(vq->weak_barriers,
908e593bf97STiwei Bie 			&vring_used_event(&vq->split.vring),
909788e5b3aSMichael S. Tsirkin 			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
910788e5b3aSMichael S. Tsirkin 
911e593bf97STiwei Bie 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
912e593bf97STiwei Bie 					- vq->last_used_idx) > bufs)) {
9137ab358c2SMichael S. Tsirkin 		END_USE(vq);
9147ab358c2SMichael S. Tsirkin 		return false;
9157ab358c2SMichael S. Tsirkin 	}
9167ab358c2SMichael S. Tsirkin 
9177ab358c2SMichael S. Tsirkin 	END_USE(vq);
9187ab358c2SMichael S. Tsirkin 	return true;
9197ab358c2SMichael S. Tsirkin }
9207ab358c2SMichael S. Tsirkin 
921138fd251STiwei Bie static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
922c021eac4SShirley Ma {
923c021eac4SShirley Ma 	struct vring_virtqueue *vq = to_vvq(_vq);
924c021eac4SShirley Ma 	unsigned int i;
925c021eac4SShirley Ma 	void *buf;
926c021eac4SShirley Ma 
927c021eac4SShirley Ma 	START_USE(vq);
928c021eac4SShirley Ma 
929e593bf97STiwei Bie 	for (i = 0; i < vq->split.vring.num; i++) {
930cbeedb72STiwei Bie 		if (!vq->split.desc_state[i].data)
931c021eac4SShirley Ma 			continue;
932138fd251STiwei Bie 		/* detach_buf_split clears data, so grab it now. */
933cbeedb72STiwei Bie 		buf = vq->split.desc_state[i].data;
934138fd251STiwei Bie 		detach_buf_split(vq, i, NULL);
935e593bf97STiwei Bie 		vq->split.avail_idx_shadow--;
936e593bf97STiwei Bie 		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
937e593bf97STiwei Bie 				vq->split.avail_idx_shadow);
938c021eac4SShirley Ma 		END_USE(vq);
939c021eac4SShirley Ma 		return buf;
940c021eac4SShirley Ma 	}
941c021eac4SShirley Ma 	/* That should have freed everything. */
942e593bf97STiwei Bie 	BUG_ON(vq->vq.num_free != vq->split.vring.num);
943c021eac4SShirley Ma 
944c021eac4SShirley Ma 	END_USE(vq);
945c021eac4SShirley Ma 	return NULL;
946c021eac4SShirley Ma }
947138fd251STiwei Bie 
948198fa7beSXuan Zhuo static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
949198fa7beSXuan Zhuo 				       struct vring_virtqueue *vq)
950198fa7beSXuan Zhuo {
951198fa7beSXuan Zhuo 	struct virtio_device *vdev;
952198fa7beSXuan Zhuo 
953198fa7beSXuan Zhuo 	vdev = vq->vq.vdev;
954198fa7beSXuan Zhuo 
955198fa7beSXuan Zhuo 	vring_split->avail_flags_shadow = 0;
956198fa7beSXuan Zhuo 	vring_split->avail_idx_shadow = 0;
957198fa7beSXuan Zhuo 
958198fa7beSXuan Zhuo 	/* No callback?  Tell other side not to bother us. */
959198fa7beSXuan Zhuo 	if (!vq->vq.callback) {
960198fa7beSXuan Zhuo 		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
961198fa7beSXuan Zhuo 		if (!vq->event)
962198fa7beSXuan Zhuo 			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
963198fa7beSXuan Zhuo 					vring_split->avail_flags_shadow);
964198fa7beSXuan Zhuo 	}
965198fa7beSXuan Zhuo }
966198fa7beSXuan Zhuo 
967e5175b41SXuan Zhuo static void virtqueue_reinit_split(struct vring_virtqueue *vq)
968e5175b41SXuan Zhuo {
969e5175b41SXuan Zhuo 	int num;
970e5175b41SXuan Zhuo 
971e5175b41SXuan Zhuo 	num = vq->split.vring.num;
972e5175b41SXuan Zhuo 
973e5175b41SXuan Zhuo 	vq->split.vring.avail->flags = 0;
974e5175b41SXuan Zhuo 	vq->split.vring.avail->idx = 0;
975e5175b41SXuan Zhuo 
976e5175b41SXuan Zhuo 	/* reset avail event */
977e5175b41SXuan Zhuo 	vq->split.vring.avail->ring[num] = 0;
978e5175b41SXuan Zhuo 
979e5175b41SXuan Zhuo 	vq->split.vring.used->flags = 0;
980e5175b41SXuan Zhuo 	vq->split.vring.used->idx = 0;
981e5175b41SXuan Zhuo 
982e5175b41SXuan Zhuo 	/* reset used event */
983e5175b41SXuan Zhuo 	*(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;
984e5175b41SXuan Zhuo 
985e5175b41SXuan Zhuo 	virtqueue_init(vq, num);
986e5175b41SXuan Zhuo 
987e5175b41SXuan Zhuo 	virtqueue_vring_init_split(&vq->split, vq);
988e5175b41SXuan Zhuo }
989e5175b41SXuan Zhuo 
990e1d6a423SXuan Zhuo static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
991e1d6a423SXuan Zhuo 					 struct vring_virtqueue_split *vring_split)
992e1d6a423SXuan Zhuo {
993e1d6a423SXuan Zhuo 	vq->split = *vring_split;
994e1d6a423SXuan Zhuo 
995e1d6a423SXuan Zhuo 	/* Put everything in free lists. */
996e1d6a423SXuan Zhuo 	vq->free_head = 0;
997e1d6a423SXuan Zhuo }
998e1d6a423SXuan Zhuo 
999a2b36c8dSXuan Zhuo static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
1000a2b36c8dSXuan Zhuo {
1001a2b36c8dSXuan Zhuo 	struct vring_desc_state_split *state;
1002a2b36c8dSXuan Zhuo 	struct vring_desc_extra *extra;
1003a2b36c8dSXuan Zhuo 	u32 num = vring_split->vring.num;
1004a2b36c8dSXuan Zhuo 
1005a2b36c8dSXuan Zhuo 	state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
1006a2b36c8dSXuan Zhuo 	if (!state)
1007a2b36c8dSXuan Zhuo 		goto err_state;
1008a2b36c8dSXuan Zhuo 
1009a2b36c8dSXuan Zhuo 	extra = vring_alloc_desc_extra(num);
1010a2b36c8dSXuan Zhuo 	if (!extra)
1011a2b36c8dSXuan Zhuo 		goto err_extra;
1012a2b36c8dSXuan Zhuo 
1013a2b36c8dSXuan Zhuo 	memset(state, 0, num * sizeof(struct vring_desc_state_split));
1014a2b36c8dSXuan Zhuo 
1015a2b36c8dSXuan Zhuo 	vring_split->desc_state = state;
1016a2b36c8dSXuan Zhuo 	vring_split->desc_extra = extra;
1017a2b36c8dSXuan Zhuo 	return 0;
1018a2b36c8dSXuan Zhuo 
1019a2b36c8dSXuan Zhuo err_extra:
1020a2b36c8dSXuan Zhuo 	kfree(state);
1021a2b36c8dSXuan Zhuo err_state:
1022a2b36c8dSXuan Zhuo 	return -ENOMEM;
1023a2b36c8dSXuan Zhuo }
1024a2b36c8dSXuan Zhuo 
102589f05d94SXuan Zhuo static void vring_free_split(struct vring_virtqueue_split *vring_split,
102689f05d94SXuan Zhuo 			     struct virtio_device *vdev)
102789f05d94SXuan Zhuo {
102889f05d94SXuan Zhuo 	vring_free_queue(vdev, vring_split->queue_size_in_bytes,
102989f05d94SXuan Zhuo 			 vring_split->vring.desc,
103089f05d94SXuan Zhuo 			 vring_split->queue_dma_addr);
103189f05d94SXuan Zhuo 
103289f05d94SXuan Zhuo 	kfree(vring_split->desc_state);
103389f05d94SXuan Zhuo 	kfree(vring_split->desc_extra);
103489f05d94SXuan Zhuo }
103589f05d94SXuan Zhuo 
1036c2d87fe6SXuan Zhuo static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
1037c2d87fe6SXuan Zhuo 				   struct virtio_device *vdev,
1038c2d87fe6SXuan Zhuo 				   u32 num,
1039c2d87fe6SXuan Zhuo 				   unsigned int vring_align,
1040c2d87fe6SXuan Zhuo 				   bool may_reduce_num)
1041c2d87fe6SXuan Zhuo {
1042c2d87fe6SXuan Zhuo 	void *queue = NULL;
1043c2d87fe6SXuan Zhuo 	dma_addr_t dma_addr;
1044c2d87fe6SXuan Zhuo 
1045c2d87fe6SXuan Zhuo 	/* num must be a power of 2. */
1046c2d87fe6SXuan Zhuo 	if (num & (num - 1)) {
1047c2d87fe6SXuan Zhuo 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1048c2d87fe6SXuan Zhuo 		return -EINVAL;
1049c2d87fe6SXuan Zhuo 	}
1050c2d87fe6SXuan Zhuo 
1051c2d87fe6SXuan Zhuo 	/* TODO: allocate each queue chunk individually */
1052c2d87fe6SXuan Zhuo 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1053c2d87fe6SXuan Zhuo 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1054c2d87fe6SXuan Zhuo 					  &dma_addr,
1055c2d87fe6SXuan Zhuo 					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
1056c2d87fe6SXuan Zhuo 		if (queue)
1057c2d87fe6SXuan Zhuo 			break;
1058c2d87fe6SXuan Zhuo 		if (!may_reduce_num)
1059c2d87fe6SXuan Zhuo 			return -ENOMEM;
1060c2d87fe6SXuan Zhuo 	}
1061c2d87fe6SXuan Zhuo 
1062c2d87fe6SXuan Zhuo 	if (!num)
1063c2d87fe6SXuan Zhuo 		return -ENOMEM;
1064c2d87fe6SXuan Zhuo 
1065c2d87fe6SXuan Zhuo 	if (!queue) {
1066c2d87fe6SXuan Zhuo 		/* Try to get a single page. You are my only hope! */
1067c2d87fe6SXuan Zhuo 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1068c2d87fe6SXuan Zhuo 					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
1069c2d87fe6SXuan Zhuo 	}
1070c2d87fe6SXuan Zhuo 	if (!queue)
1071c2d87fe6SXuan Zhuo 		return -ENOMEM;
1072c2d87fe6SXuan Zhuo 
1073c2d87fe6SXuan Zhuo 	vring_init(&vring_split->vring, num, queue, vring_align);
1074c2d87fe6SXuan Zhuo 
1075c2d87fe6SXuan Zhuo 	vring_split->queue_dma_addr = dma_addr;
1076c2d87fe6SXuan Zhuo 	vring_split->queue_size_in_bytes = vring_size(num, vring_align);
1077c2d87fe6SXuan Zhuo 
1078*af36b16fSXuan Zhuo 	vring_split->vring_align = vring_align;
1079*af36b16fSXuan Zhuo 	vring_split->may_reduce_num = may_reduce_num;
1080*af36b16fSXuan Zhuo 
1081c2d87fe6SXuan Zhuo 	return 0;
1082c2d87fe6SXuan Zhuo }
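/*
 * Example of the shrink loop above (assuming 4 KiB pages): a request
 * for num = 1024 split-ring entries needs a contiguous allocation of
 * vring_size(1024, vring_align) bytes; if that fails and
 * may_reduce_num is set, the loop retries with 512, 256, ... until the
 * allocation succeeds or the ring would fit in a single page anyway.
 */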
1083c2d87fe6SXuan Zhuo 
1084d79dca75STiwei Bie static struct virtqueue *vring_create_virtqueue_split(
1085d79dca75STiwei Bie 	unsigned int index,
1086d79dca75STiwei Bie 	unsigned int num,
1087d79dca75STiwei Bie 	unsigned int vring_align,
1088d79dca75STiwei Bie 	struct virtio_device *vdev,
1089d79dca75STiwei Bie 	bool weak_barriers,
1090d79dca75STiwei Bie 	bool may_reduce_num,
1091d79dca75STiwei Bie 	bool context,
1092d79dca75STiwei Bie 	bool (*notify)(struct virtqueue *),
1093d79dca75STiwei Bie 	void (*callback)(struct virtqueue *),
1094d79dca75STiwei Bie 	const char *name)
1095d79dca75STiwei Bie {
1096cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
1097d79dca75STiwei Bie 	struct virtqueue *vq;
1098c2d87fe6SXuan Zhuo 	int err;
1099d79dca75STiwei Bie 
1100c2d87fe6SXuan Zhuo 	err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
1101c2d87fe6SXuan Zhuo 				      may_reduce_num);
1102c2d87fe6SXuan Zhuo 	if (err)
1103d79dca75STiwei Bie 		return NULL;
1104d79dca75STiwei Bie 
1105cd4c812aSXuan Zhuo 	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
1106cd4c812aSXuan Zhuo 				   context, notify, callback, name);
1107d79dca75STiwei Bie 	if (!vq) {
1108c2d87fe6SXuan Zhuo 		vring_free_split(&vring_split, vdev);
1109d79dca75STiwei Bie 		return NULL;
1110d79dca75STiwei Bie 	}
1111d79dca75STiwei Bie 
1112d79dca75STiwei Bie 	to_vvq(vq)->we_own_ring = true;
1113d79dca75STiwei Bie 
1114d79dca75STiwei Bie 	return vq;
1115d79dca75STiwei Bie }
1116d79dca75STiwei Bie 
1117e6f633e5STiwei Bie 
1118e6f633e5STiwei Bie /*
11191ce9e605STiwei Bie  * Packed ring specific functions - *_packed().
11201ce9e605STiwei Bie  */
1121a7722890Shuangjie.albert static inline bool packed_used_wrap_counter(u16 last_used_idx)
1122a7722890Shuangjie.albert {
1123a7722890Shuangjie.albert 	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1124a7722890Shuangjie.albert }
1125a7722890Shuangjie.albert 
1126a7722890Shuangjie.albert static inline u16 packed_last_used(u16 last_used_idx)
1127a7722890Shuangjie.albert {
1128a7722890Shuangjie.albert 	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1129a7722890Shuangjie.albert }
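
/*
 * Added note: for packed rings, vq->last_used_idx multiplexes two fields
 * in one 16-bit value: bits 0..14 hold the next used ring index, and bit
 * VRING_PACKED_EVENT_F_WRAP_CTR (bit 15) holds the used wrap counter.
 * For example, 0x8005 means index 5 with the wrap counter set.
 */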
11301ce9e605STiwei Bie 
1131d80dc15bSXuan Zhuo static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
1132d80dc15bSXuan Zhuo 				     struct vring_desc_extra *extra)
11331ce9e605STiwei Bie {
11341ce9e605STiwei Bie 	u16 flags;
11351ce9e605STiwei Bie 
11361ce9e605STiwei Bie 	if (!vq->use_dma_api)
11371ce9e605STiwei Bie 		return;
11381ce9e605STiwei Bie 
1139d80dc15bSXuan Zhuo 	flags = extra->flags;
11401ce9e605STiwei Bie 
11411ce9e605STiwei Bie 	if (flags & VRING_DESC_F_INDIRECT) {
11421ce9e605STiwei Bie 		dma_unmap_single(vring_dma_dev(vq),
1143d80dc15bSXuan Zhuo 				 extra->addr, extra->len,
11441ce9e605STiwei Bie 				 (flags & VRING_DESC_F_WRITE) ?
11451ce9e605STiwei Bie 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
11461ce9e605STiwei Bie 	} else {
11471ce9e605STiwei Bie 		dma_unmap_page(vring_dma_dev(vq),
1148d80dc15bSXuan Zhuo 			       extra->addr, extra->len,
11491ce9e605STiwei Bie 			       (flags & VRING_DESC_F_WRITE) ?
11501ce9e605STiwei Bie 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
11511ce9e605STiwei Bie 	}
11521ce9e605STiwei Bie }
11531ce9e605STiwei Bie 
11541ce9e605STiwei Bie static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
11551ce9e605STiwei Bie 				   struct vring_packed_desc *desc)
11561ce9e605STiwei Bie {
11571ce9e605STiwei Bie 	u16 flags;
11581ce9e605STiwei Bie 
11591ce9e605STiwei Bie 	if (!vq->use_dma_api)
11601ce9e605STiwei Bie 		return;
11611ce9e605STiwei Bie 
11621ce9e605STiwei Bie 	flags = le16_to_cpu(desc->flags);
11631ce9e605STiwei Bie 
11641ce9e605STiwei Bie 	dma_unmap_page(vring_dma_dev(vq),
11651ce9e605STiwei Bie 		       le64_to_cpu(desc->addr),
11661ce9e605STiwei Bie 		       le32_to_cpu(desc->len),
11671ce9e605STiwei Bie 		       (flags & VRING_DESC_F_WRITE) ?
11681ce9e605STiwei Bie 		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
11691ce9e605STiwei Bie }
11701ce9e605STiwei Bie 
11711ce9e605STiwei Bie static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
11721ce9e605STiwei Bie 						       gfp_t gfp)
11731ce9e605STiwei Bie {
11741ce9e605STiwei Bie 	struct vring_packed_desc *desc;
11751ce9e605STiwei Bie 
11761ce9e605STiwei Bie 	/*
11771ce9e605STiwei Bie 	 * We require lowmem mappings for the descriptors because
11781ce9e605STiwei Bie 	 * otherwise virt_to_phys will give us bogus addresses in the
11791ce9e605STiwei Bie 	 * virtqueue.
11801ce9e605STiwei Bie 	 */
11811ce9e605STiwei Bie 	gfp &= ~__GFP_HIGHMEM;
11821ce9e605STiwei Bie 
11831ce9e605STiwei Bie 	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
11841ce9e605STiwei Bie 
11851ce9e605STiwei Bie 	return desc;
11861ce9e605STiwei Bie }
11871ce9e605STiwei Bie 
11881ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
11891ce9e605STiwei Bie 					 struct scatterlist *sgs[],
11901ce9e605STiwei Bie 					 unsigned int total_sg,
11911ce9e605STiwei Bie 					 unsigned int out_sgs,
11921ce9e605STiwei Bie 					 unsigned int in_sgs,
11931ce9e605STiwei Bie 					 void *data,
11941ce9e605STiwei Bie 					 gfp_t gfp)
11951ce9e605STiwei Bie {
11961ce9e605STiwei Bie 	struct vring_packed_desc *desc;
11971ce9e605STiwei Bie 	struct scatterlist *sg;
11981ce9e605STiwei Bie 	unsigned int i, n, err_idx;
11991ce9e605STiwei Bie 	u16 head, id;
12001ce9e605STiwei Bie 	dma_addr_t addr;
12011ce9e605STiwei Bie 
12021ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
12031ce9e605STiwei Bie 	desc = alloc_indirect_packed(total_sg, gfp);
1204fc6d70f4SXuan Zhuo 	if (!desc)
1205fc6d70f4SXuan Zhuo 		return -ENOMEM;
12061ce9e605STiwei Bie 
12071ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < 1)) {
12081ce9e605STiwei Bie 		pr_debug("Can't add buf len 1 - avail = 0\n");
1209df0bfe75SYueHaibing 		kfree(desc);
12101ce9e605STiwei Bie 		END_USE(vq);
12111ce9e605STiwei Bie 		return -ENOSPC;
12121ce9e605STiwei Bie 	}
12131ce9e605STiwei Bie 
12141ce9e605STiwei Bie 	i = 0;
12151ce9e605STiwei Bie 	id = vq->free_head;
12161ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
12171ce9e605STiwei Bie 
12181ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
12191ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
12201ce9e605STiwei Bie 			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
12211ce9e605STiwei Bie 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
12221ce9e605STiwei Bie 			if (vring_mapping_error(vq, addr))
12231ce9e605STiwei Bie 				goto unmap_release;
12241ce9e605STiwei Bie 
12251ce9e605STiwei Bie 			desc[i].flags = cpu_to_le16(n < out_sgs ?
12261ce9e605STiwei Bie 						0 : VRING_DESC_F_WRITE);
12271ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
12281ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
12291ce9e605STiwei Bie 			i++;
12301ce9e605STiwei Bie 		}
12311ce9e605STiwei Bie 	}
12321ce9e605STiwei Bie 
12331ce9e605STiwei Bie 	/* Now that the indirect table is filled in, map it. */
12341ce9e605STiwei Bie 	addr = vring_map_single(vq, desc,
12351ce9e605STiwei Bie 			total_sg * sizeof(struct vring_packed_desc),
12361ce9e605STiwei Bie 			DMA_TO_DEVICE);
12371ce9e605STiwei Bie 	if (vring_mapping_error(vq, addr))
12381ce9e605STiwei Bie 		goto unmap_release;
12391ce9e605STiwei Bie 
12401ce9e605STiwei Bie 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
12411ce9e605STiwei Bie 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
12421ce9e605STiwei Bie 				sizeof(struct vring_packed_desc));
12431ce9e605STiwei Bie 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
12441ce9e605STiwei Bie 
12451ce9e605STiwei Bie 	if (vq->use_dma_api) {
12461ce9e605STiwei Bie 		vq->packed.desc_extra[id].addr = addr;
12471ce9e605STiwei Bie 		vq->packed.desc_extra[id].len = total_sg *
12481ce9e605STiwei Bie 				sizeof(struct vring_packed_desc);
12491ce9e605STiwei Bie 		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
12501ce9e605STiwei Bie 						  vq->packed.avail_used_flags;
12511ce9e605STiwei Bie 	}
12521ce9e605STiwei Bie 
12531ce9e605STiwei Bie 	/*
12541ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
12551ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
12561ce9e605STiwei Bie 	 * the list are made available.
12571ce9e605STiwei Bie 	 */
12581ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
12591ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
12601ce9e605STiwei Bie 						vq->packed.avail_used_flags);
12611ce9e605STiwei Bie 
12621ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
12631ce9e605STiwei Bie 	vq->vq.num_free -= 1;
12641ce9e605STiwei Bie 
12651ce9e605STiwei Bie 	/* Update free pointer */
12661ce9e605STiwei Bie 	n = head + 1;
12671ce9e605STiwei Bie 	if (n >= vq->packed.vring.num) {
12681ce9e605STiwei Bie 		n = 0;
12691ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
12701ce9e605STiwei Bie 		vq->packed.avail_used_flags ^=
12711ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_AVAIL |
12721ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_USED;
12731ce9e605STiwei Bie 	}
12741ce9e605STiwei Bie 	vq->packed.next_avail_idx = n;
1275aeef9b47SJason Wang 	vq->free_head = vq->packed.desc_extra[id].next;
12761ce9e605STiwei Bie 
12771ce9e605STiwei Bie 	/* Store token and indirect buffer state. */
12781ce9e605STiwei Bie 	vq->packed.desc_state[id].num = 1;
12791ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
12801ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = desc;
12811ce9e605STiwei Bie 	vq->packed.desc_state[id].last = id;
12821ce9e605STiwei Bie 
12831ce9e605STiwei Bie 	vq->num_added += 1;
12841ce9e605STiwei Bie 
12851ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
12861ce9e605STiwei Bie 	END_USE(vq);
12871ce9e605STiwei Bie 
12881ce9e605STiwei Bie 	return 0;
12891ce9e605STiwei Bie 
12901ce9e605STiwei Bie unmap_release:
12911ce9e605STiwei Bie 	err_idx = i;
12921ce9e605STiwei Bie 
12931ce9e605STiwei Bie 	for (i = 0; i < err_idx; i++)
12941ce9e605STiwei Bie 		vring_unmap_desc_packed(vq, &desc[i]);
12951ce9e605STiwei Bie 
12961ce9e605STiwei Bie 	kfree(desc);
12971ce9e605STiwei Bie 
12981ce9e605STiwei Bie 	END_USE(vq);
1299f7728002SHalil Pasic 	return -ENOMEM;
13001ce9e605STiwei Bie }
13011ce9e605STiwei Bie 
13021ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq,
13031ce9e605STiwei Bie 				       struct scatterlist *sgs[],
13041ce9e605STiwei Bie 				       unsigned int total_sg,
13051ce9e605STiwei Bie 				       unsigned int out_sgs,
13061ce9e605STiwei Bie 				       unsigned int in_sgs,
13071ce9e605STiwei Bie 				       void *data,
13081ce9e605STiwei Bie 				       void *ctx,
13091ce9e605STiwei Bie 				       gfp_t gfp)
13101ce9e605STiwei Bie {
13111ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
13121ce9e605STiwei Bie 	struct vring_packed_desc *desc;
13131ce9e605STiwei Bie 	struct scatterlist *sg;
13141ce9e605STiwei Bie 	unsigned int i, n, c, descs_used, err_idx;
13153f649ab7SKees Cook 	__le16 head_flags, flags;
13163f649ab7SKees Cook 	u16 head, id, prev, curr, avail_used_flags;
1317fc6d70f4SXuan Zhuo 	int err;
13181ce9e605STiwei Bie 
13191ce9e605STiwei Bie 	START_USE(vq);
13201ce9e605STiwei Bie 
13211ce9e605STiwei Bie 	BUG_ON(data == NULL);
13221ce9e605STiwei Bie 	BUG_ON(ctx && vq->indirect);
13231ce9e605STiwei Bie 
13241ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
13251ce9e605STiwei Bie 		END_USE(vq);
13261ce9e605STiwei Bie 		return -EIO;
13271ce9e605STiwei Bie 	}
13281ce9e605STiwei Bie 
13291ce9e605STiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
13301ce9e605STiwei Bie 
13311ce9e605STiwei Bie 	BUG_ON(total_sg == 0);
13321ce9e605STiwei Bie 
133335c51e09SXianting Tian 	if (virtqueue_use_indirect(vq, total_sg)) {
1334fc6d70f4SXuan Zhuo 		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1335fc6d70f4SXuan Zhuo 						    in_sgs, data, gfp);
13361861ba62SMichael S. Tsirkin 		if (err != -ENOMEM) {
13371861ba62SMichael S. Tsirkin 			END_USE(vq);
1338fc6d70f4SXuan Zhuo 			return err;
13391861ba62SMichael S. Tsirkin 		}
1340fc6d70f4SXuan Zhuo 
1341fc6d70f4SXuan Zhuo 		/* fall back on direct */
1342fc6d70f4SXuan Zhuo 	}
13431ce9e605STiwei Bie 
13441ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
13451ce9e605STiwei Bie 	avail_used_flags = vq->packed.avail_used_flags;
13461ce9e605STiwei Bie 
13471ce9e605STiwei Bie 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
13481ce9e605STiwei Bie 
13491ce9e605STiwei Bie 	desc = vq->packed.vring.desc;
13501ce9e605STiwei Bie 	i = head;
13511ce9e605STiwei Bie 	descs_used = total_sg;
13521ce9e605STiwei Bie 
13531ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < descs_used)) {
13541ce9e605STiwei Bie 		pr_debug("Can't add buf len %i - avail = %i\n",
13551ce9e605STiwei Bie 			 descs_used, vq->vq.num_free);
13561ce9e605STiwei Bie 		END_USE(vq);
13571ce9e605STiwei Bie 		return -ENOSPC;
13581ce9e605STiwei Bie 	}
13591ce9e605STiwei Bie 
13601ce9e605STiwei Bie 	id = vq->free_head;
13611ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
13621ce9e605STiwei Bie 
13631ce9e605STiwei Bie 	curr = id;
13641ce9e605STiwei Bie 	c = 0;
13651ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
13661ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
13671ce9e605STiwei Bie 			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
13681ce9e605STiwei Bie 					DMA_TO_DEVICE : DMA_FROM_DEVICE);
13691ce9e605STiwei Bie 			if (vring_mapping_error(vq, addr))
13701ce9e605STiwei Bie 				goto unmap_release;
13711ce9e605STiwei Bie 
13721ce9e605STiwei Bie 			flags = cpu_to_le16(vq->packed.avail_used_flags |
13731ce9e605STiwei Bie 				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
13741ce9e605STiwei Bie 				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
13751ce9e605STiwei Bie 			if (i == head)
13761ce9e605STiwei Bie 				head_flags = flags;
13771ce9e605STiwei Bie 			else
13781ce9e605STiwei Bie 				desc[i].flags = flags;
13791ce9e605STiwei Bie 
13801ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
13811ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
13821ce9e605STiwei Bie 			desc[i].id = cpu_to_le16(id);
13831ce9e605STiwei Bie 
13841ce9e605STiwei Bie 			if (unlikely(vq->use_dma_api)) {
13851ce9e605STiwei Bie 				vq->packed.desc_extra[curr].addr = addr;
13861ce9e605STiwei Bie 				vq->packed.desc_extra[curr].len = sg->length;
13871ce9e605STiwei Bie 				vq->packed.desc_extra[curr].flags =
13881ce9e605STiwei Bie 					le16_to_cpu(flags);
13891ce9e605STiwei Bie 			}
13901ce9e605STiwei Bie 			prev = curr;
1391aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
13921ce9e605STiwei Bie 
13931ce9e605STiwei Bie 			if (unlikely(++i >= vq->packed.vring.num)) {
13941ce9e605STiwei Bie 				i = 0;
13951ce9e605STiwei Bie 				vq->packed.avail_used_flags ^=
13961ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_AVAIL |
13971ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_USED;
13981ce9e605STiwei Bie 			}
13991ce9e605STiwei Bie 		}
14001ce9e605STiwei Bie 	}
14011ce9e605STiwei Bie 
14021ce9e605STiwei Bie 	if (i < head)
14031ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
14041ce9e605STiwei Bie 
14051ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
14061ce9e605STiwei Bie 	vq->vq.num_free -= descs_used;
14071ce9e605STiwei Bie 
14081ce9e605STiwei Bie 	/* Update free pointer */
14091ce9e605STiwei Bie 	vq->packed.next_avail_idx = i;
14101ce9e605STiwei Bie 	vq->free_head = curr;
14111ce9e605STiwei Bie 
14121ce9e605STiwei Bie 	/* Store token. */
14131ce9e605STiwei Bie 	vq->packed.desc_state[id].num = descs_used;
14141ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
14151ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = ctx;
14161ce9e605STiwei Bie 	vq->packed.desc_state[id].last = prev;
14171ce9e605STiwei Bie 
14181ce9e605STiwei Bie 	/*
14191ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
14201ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
14211ce9e605STiwei Bie 	 * the list are made available.
14221ce9e605STiwei Bie 	 */
14231ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
14241ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = head_flags;
14251ce9e605STiwei Bie 	vq->num_added += descs_used;
14261ce9e605STiwei Bie 
14271ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
14281ce9e605STiwei Bie 	END_USE(vq);
14291ce9e605STiwei Bie 
14301ce9e605STiwei Bie 	return 0;
14311ce9e605STiwei Bie 
14321ce9e605STiwei Bie unmap_release:
14331ce9e605STiwei Bie 	err_idx = i;
14341ce9e605STiwei Bie 	i = head;
143544593865SJason Wang 	curr = vq->free_head;
14361ce9e605STiwei Bie 
14371ce9e605STiwei Bie 	vq->packed.avail_used_flags = avail_used_flags;
14381ce9e605STiwei Bie 
14391ce9e605STiwei Bie 	for (n = 0; n < total_sg; n++) {
14401ce9e605STiwei Bie 		if (i == err_idx)
14411ce9e605STiwei Bie 			break;
1442d80dc15bSXuan Zhuo 		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
144344593865SJason Wang 		curr = vq->packed.desc_extra[curr].next;
14441ce9e605STiwei Bie 		i++;
14451ce9e605STiwei Bie 		if (i >= vq->packed.vring.num)
14461ce9e605STiwei Bie 			i = 0;
14471ce9e605STiwei Bie 	}
14481ce9e605STiwei Bie 
14491ce9e605STiwei Bie 	END_USE(vq);
14501ce9e605STiwei Bie 	return -EIO;
14511ce9e605STiwei Bie }
14521ce9e605STiwei Bie 
14531ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
14541ce9e605STiwei Bie {
14551ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1456f51f9826STiwei Bie 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
14571ce9e605STiwei Bie 	bool needs_kick;
14581ce9e605STiwei Bie 	union {
14591ce9e605STiwei Bie 		struct {
14601ce9e605STiwei Bie 			__le16 off_wrap;
14611ce9e605STiwei Bie 			__le16 flags;
14621ce9e605STiwei Bie 		};
14631ce9e605STiwei Bie 		u32 u32;
14641ce9e605STiwei Bie 	} snapshot;
14651ce9e605STiwei Bie 
14661ce9e605STiwei Bie 	START_USE(vq);
14671ce9e605STiwei Bie 
14681ce9e605STiwei Bie 	/*
14691ce9e605STiwei Bie 	 * We need to expose the new flags value before checking notification
14701ce9e605STiwei Bie 	 * suppressions.
14711ce9e605STiwei Bie 	 */
14721ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
14731ce9e605STiwei Bie 
1474f51f9826STiwei Bie 	old = vq->packed.next_avail_idx - vq->num_added;
1475f51f9826STiwei Bie 	new = vq->packed.next_avail_idx;
14761ce9e605STiwei Bie 	vq->num_added = 0;
14771ce9e605STiwei Bie 
14781ce9e605STiwei Bie 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
14791ce9e605STiwei Bie 	flags = le16_to_cpu(snapshot.flags);
14801ce9e605STiwei Bie 
14811ce9e605STiwei Bie 	LAST_ADD_TIME_CHECK(vq);
14821ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
14831ce9e605STiwei Bie 
1484f51f9826STiwei Bie 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
14851ce9e605STiwei Bie 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1486f51f9826STiwei Bie 		goto out;
1487f51f9826STiwei Bie 	}
1488f51f9826STiwei Bie 
1489f51f9826STiwei Bie 	off_wrap = le16_to_cpu(snapshot.off_wrap);
1490f51f9826STiwei Bie 
1491f51f9826STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1492f51f9826STiwei Bie 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
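	/*
	 * Added note: if the device's event wrap counter differs from our
	 * current avail wrap counter, the event index refers to the
	 * previous lap of the ring, so shift it down by the ring size
	 * before handing it to vring_need_event().
	 */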
1493f51f9826STiwei Bie 	if (wrap_counter != vq->packed.avail_wrap_counter)
1494f51f9826STiwei Bie 		event_idx -= vq->packed.vring.num;
1495f51f9826STiwei Bie 
1496f51f9826STiwei Bie 	needs_kick = vring_need_event(event_idx, new, old);
1497f51f9826STiwei Bie out:
14981ce9e605STiwei Bie 	END_USE(vq);
14991ce9e605STiwei Bie 	return needs_kick;
15001ce9e605STiwei Bie }
15011ce9e605STiwei Bie 
15021ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq,
15031ce9e605STiwei Bie 			      unsigned int id, void **ctx)
15041ce9e605STiwei Bie {
15051ce9e605STiwei Bie 	struct vring_desc_state_packed *state = NULL;
15061ce9e605STiwei Bie 	struct vring_packed_desc *desc;
15071ce9e605STiwei Bie 	unsigned int i, curr;
15081ce9e605STiwei Bie 
15091ce9e605STiwei Bie 	state = &vq->packed.desc_state[id];
15101ce9e605STiwei Bie 
15111ce9e605STiwei Bie 	/* Clear data ptr. */
15121ce9e605STiwei Bie 	state->data = NULL;
15131ce9e605STiwei Bie 
1514aeef9b47SJason Wang 	vq->packed.desc_extra[state->last].next = vq->free_head;
15151ce9e605STiwei Bie 	vq->free_head = id;
15161ce9e605STiwei Bie 	vq->vq.num_free += state->num;
15171ce9e605STiwei Bie 
15181ce9e605STiwei Bie 	if (unlikely(vq->use_dma_api)) {
15191ce9e605STiwei Bie 		curr = id;
15201ce9e605STiwei Bie 		for (i = 0; i < state->num; i++) {
1521d80dc15bSXuan Zhuo 			vring_unmap_extra_packed(vq,
15221ce9e605STiwei Bie 						 &vq->packed.desc_extra[curr]);
1523aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
15241ce9e605STiwei Bie 		}
15251ce9e605STiwei Bie 	}
15261ce9e605STiwei Bie 
15271ce9e605STiwei Bie 	if (vq->indirect) {
15281ce9e605STiwei Bie 		u32 len;
15291ce9e605STiwei Bie 
15301ce9e605STiwei Bie 		/* Free the indirect table, if any, now that it's unmapped. */
15311ce9e605STiwei Bie 		desc = state->indir_desc;
15321ce9e605STiwei Bie 		if (!desc)
15331ce9e605STiwei Bie 			return;
15341ce9e605STiwei Bie 
15351ce9e605STiwei Bie 		if (vq->use_dma_api) {
15361ce9e605STiwei Bie 			len = vq->packed.desc_extra[id].len;
15371ce9e605STiwei Bie 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
15381ce9e605STiwei Bie 					i++)
15391ce9e605STiwei Bie 				vring_unmap_desc_packed(vq, &desc[i]);
15401ce9e605STiwei Bie 		}
15411ce9e605STiwei Bie 		kfree(desc);
15421ce9e605STiwei Bie 		state->indir_desc = NULL;
15431ce9e605STiwei Bie 	} else if (ctx) {
15441ce9e605STiwei Bie 		*ctx = state->indir_desc;
15451ce9e605STiwei Bie 	}
15461ce9e605STiwei Bie }
15471ce9e605STiwei Bie 
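/*
 * Added note: a packed descriptor is "used" when its AVAIL and USED flag
 * bits are equal and both match the wrap counter we expect.  The device
 * marks a descriptor used by writing USED equal to AVAIL; both bits flip
 * polarity on every lap of the ring, which is what the wrap counter
 * tracks.
 */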
15481ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
15491ce9e605STiwei Bie 				       u16 idx, bool used_wrap_counter)
15501ce9e605STiwei Bie {
15511ce9e605STiwei Bie 	bool avail, used;
15521ce9e605STiwei Bie 	u16 flags;
15531ce9e605STiwei Bie 
15541ce9e605STiwei Bie 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
15551ce9e605STiwei Bie 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
15561ce9e605STiwei Bie 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
15571ce9e605STiwei Bie 
15581ce9e605STiwei Bie 	return avail == used && used == used_wrap_counter;
15591ce9e605STiwei Bie }
15601ce9e605STiwei Bie 
15611ce9e605STiwei Bie static inline bool more_used_packed(const struct vring_virtqueue *vq)
15621ce9e605STiwei Bie {
1563a7722890Shuangjie.albert 	u16 last_used;
1564a7722890Shuangjie.albert 	u16 last_used_idx;
1565a7722890Shuangjie.albert 	bool used_wrap_counter;
1566a7722890Shuangjie.albert 
1567a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1568a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
1569a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1570a7722890Shuangjie.albert 	return is_used_desc_packed(vq, last_used, used_wrap_counter);
15711ce9e605STiwei Bie }
15721ce9e605STiwei Bie 
15731ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
15741ce9e605STiwei Bie 					  unsigned int *len,
15751ce9e605STiwei Bie 					  void **ctx)
15761ce9e605STiwei Bie {
15771ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1578a7722890Shuangjie.albert 	u16 last_used, id, last_used_idx;
1579a7722890Shuangjie.albert 	bool used_wrap_counter;
15801ce9e605STiwei Bie 	void *ret;
15811ce9e605STiwei Bie 
15821ce9e605STiwei Bie 	START_USE(vq);
15831ce9e605STiwei Bie 
15841ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
15851ce9e605STiwei Bie 		END_USE(vq);
15861ce9e605STiwei Bie 		return NULL;
15871ce9e605STiwei Bie 	}
15881ce9e605STiwei Bie 
15891ce9e605STiwei Bie 	if (!more_used_packed(vq)) {
15901ce9e605STiwei Bie 		pr_debug("No more buffers in queue\n");
15911ce9e605STiwei Bie 		END_USE(vq);
15921ce9e605STiwei Bie 		return NULL;
15931ce9e605STiwei Bie 	}
15941ce9e605STiwei Bie 
15951ce9e605STiwei Bie 	/* Only get used elements after they have been exposed by host. */
15961ce9e605STiwei Bie 	virtio_rmb(vq->weak_barriers);
15971ce9e605STiwei Bie 
1598a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1599a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1600a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
16011ce9e605STiwei Bie 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
16021ce9e605STiwei Bie 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
16031ce9e605STiwei Bie 
16041ce9e605STiwei Bie 	if (unlikely(id >= vq->packed.vring.num)) {
16051ce9e605STiwei Bie 		BAD_RING(vq, "id %u out of range\n", id);
16061ce9e605STiwei Bie 		return NULL;
16071ce9e605STiwei Bie 	}
16081ce9e605STiwei Bie 	if (unlikely(!vq->packed.desc_state[id].data)) {
16091ce9e605STiwei Bie 		BAD_RING(vq, "id %u is not a head!\n", id);
16101ce9e605STiwei Bie 		return NULL;
16111ce9e605STiwei Bie 	}
16121ce9e605STiwei Bie 
16131ce9e605STiwei Bie 	/* detach_buf_packed clears data, so grab it now. */
16141ce9e605STiwei Bie 	ret = vq->packed.desc_state[id].data;
16151ce9e605STiwei Bie 	detach_buf_packed(vq, id, ctx);
16161ce9e605STiwei Bie 
1617a7722890Shuangjie.albert 	last_used += vq->packed.desc_state[id].num;
1618a7722890Shuangjie.albert 	if (unlikely(last_used >= vq->packed.vring.num)) {
1619a7722890Shuangjie.albert 		last_used -= vq->packed.vring.num;
1620a7722890Shuangjie.albert 		used_wrap_counter ^= 1;
16211ce9e605STiwei Bie 	}
16221ce9e605STiwei Bie 
1623a7722890Shuangjie.albert 	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1624a7722890Shuangjie.albert 	WRITE_ONCE(vq->last_used_idx, last_used);
1625a7722890Shuangjie.albert 
1626f51f9826STiwei Bie 	/*
1627f51f9826STiwei Bie 	 * If we expect an interrupt for the next entry, tell host
1628f51f9826STiwei Bie 	 * by writing event index and flush out the write before
1629f51f9826STiwei Bie 	 * the read in the next get_buf call.
1630f51f9826STiwei Bie 	 */
1631f51f9826STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1632f51f9826STiwei Bie 		virtio_store_mb(vq->weak_barriers,
1633f51f9826STiwei Bie 				&vq->packed.vring.driver->off_wrap,
1634a7722890Shuangjie.albert 				cpu_to_le16(vq->last_used_idx));
1635f51f9826STiwei Bie 
16361ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
16371ce9e605STiwei Bie 
16381ce9e605STiwei Bie 	END_USE(vq);
16391ce9e605STiwei Bie 	return ret;
16401ce9e605STiwei Bie }
16411ce9e605STiwei Bie 
16421ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
16431ce9e605STiwei Bie {
16441ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
16451ce9e605STiwei Bie 
16461ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
16471ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
16481ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
16491ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
16501ce9e605STiwei Bie 	}
16511ce9e605STiwei Bie }
16521ce9e605STiwei Bie 
165331532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
16541ce9e605STiwei Bie {
16551ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
16561ce9e605STiwei Bie 
16571ce9e605STiwei Bie 	START_USE(vq);
16581ce9e605STiwei Bie 
16591ce9e605STiwei Bie 	/*
16601ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
16611ce9e605STiwei Bie 	 * more to do.
16621ce9e605STiwei Bie 	 */
16631ce9e605STiwei Bie 
1664f51f9826STiwei Bie 	if (vq->event) {
1665f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap =
1666a7722890Shuangjie.albert 			cpu_to_le16(vq->last_used_idx);
1667f51f9826STiwei Bie 		/*
1668f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1669f51f9826STiwei Bie 		 * counter first before updating event flags.
1670f51f9826STiwei Bie 		 */
1671f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1672f51f9826STiwei Bie 	}
1673f51f9826STiwei Bie 
16741ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1675f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1676f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1677f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
16781ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
16791ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
16801ce9e605STiwei Bie 	}
16811ce9e605STiwei Bie 
16821ce9e605STiwei Bie 	END_USE(vq);
1683a7722890Shuangjie.albert 	return vq->last_used_idx;
16841ce9e605STiwei Bie }
16851ce9e605STiwei Bie 
16861ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
16871ce9e605STiwei Bie {
16881ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
16891ce9e605STiwei Bie 	bool wrap_counter;
16901ce9e605STiwei Bie 	u16 used_idx;
16911ce9e605STiwei Bie 
16921ce9e605STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
16931ce9e605STiwei Bie 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
16941ce9e605STiwei Bie 
16951ce9e605STiwei Bie 	return is_used_desc_packed(vq, used_idx, wrap_counter);
16961ce9e605STiwei Bie }
16971ce9e605STiwei Bie 
16981ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
16991ce9e605STiwei Bie {
17001ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1701a7722890Shuangjie.albert 	u16 used_idx, wrap_counter, last_used_idx;
1702f51f9826STiwei Bie 	u16 bufs;
17031ce9e605STiwei Bie 
17041ce9e605STiwei Bie 	START_USE(vq);
17051ce9e605STiwei Bie 
17061ce9e605STiwei Bie 	/*
17071ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
17081ce9e605STiwei Bie 	 * more to do.
17091ce9e605STiwei Bie 	 */
17101ce9e605STiwei Bie 
1711f51f9826STiwei Bie 	if (vq->event) {
1712f51f9826STiwei Bie 		/* TODO: tune this threshold */
1713f51f9826STiwei Bie 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
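		/*
		 * Added note: e.g. with 128 buffers in flight this asks the
		 * device for an event only after 96 of them have been used,
		 * trading interrupt rate for completion latency.
		 */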
1714a7722890Shuangjie.albert 		last_used_idx = READ_ONCE(vq->last_used_idx);
1715a7722890Shuangjie.albert 		wrap_counter = packed_used_wrap_counter(last_used_idx);
17161ce9e605STiwei Bie 
1717a7722890Shuangjie.albert 		used_idx = packed_last_used(last_used_idx) + bufs;
1718f51f9826STiwei Bie 		if (used_idx >= vq->packed.vring.num) {
1719f51f9826STiwei Bie 			used_idx -= vq->packed.vring.num;
1720f51f9826STiwei Bie 			wrap_counter ^= 1;
1721f51f9826STiwei Bie 		}
1722f51f9826STiwei Bie 
1723f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1724f51f9826STiwei Bie 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1725f51f9826STiwei Bie 
1726f51f9826STiwei Bie 		/*
1727f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1728f51f9826STiwei Bie 		 * counter first before updating event flags.
1729f51f9826STiwei Bie 		 */
1730f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1731f51f9826STiwei Bie 	}
1732f51f9826STiwei Bie 
17331ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1734f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1735f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1736f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
17371ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17381ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
17391ce9e605STiwei Bie 	}
17401ce9e605STiwei Bie 
17411ce9e605STiwei Bie 	/*
17421ce9e605STiwei Bie 	 * We need to update event suppression structure first
17431ce9e605STiwei Bie 	 * before re-checking for more used buffers.
17441ce9e605STiwei Bie 	 */
17451ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
17461ce9e605STiwei Bie 
1747a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1748a7722890Shuangjie.albert 	wrap_counter = packed_used_wrap_counter(last_used_idx);
1749a7722890Shuangjie.albert 	used_idx = packed_last_used(last_used_idx);
1750a7722890Shuangjie.albert 	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
17511ce9e605STiwei Bie 		END_USE(vq);
17521ce9e605STiwei Bie 		return false;
17531ce9e605STiwei Bie 	}
17541ce9e605STiwei Bie 
17551ce9e605STiwei Bie 	END_USE(vq);
17561ce9e605STiwei Bie 	return true;
17571ce9e605STiwei Bie }
17581ce9e605STiwei Bie 
17591ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
17601ce9e605STiwei Bie {
17611ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17621ce9e605STiwei Bie 	unsigned int i;
17631ce9e605STiwei Bie 	void *buf;
17641ce9e605STiwei Bie 
17651ce9e605STiwei Bie 	START_USE(vq);
17661ce9e605STiwei Bie 
17671ce9e605STiwei Bie 	for (i = 0; i < vq->packed.vring.num; i++) {
17681ce9e605STiwei Bie 		if (!vq->packed.desc_state[i].data)
17691ce9e605STiwei Bie 			continue;
17701ce9e605STiwei Bie 		/* detach_buf clears data, so grab it now. */
17711ce9e605STiwei Bie 		buf = vq->packed.desc_state[i].data;
17721ce9e605STiwei Bie 		detach_buf_packed(vq, i, NULL);
17731ce9e605STiwei Bie 		END_USE(vq);
17741ce9e605STiwei Bie 		return buf;
17751ce9e605STiwei Bie 	}
17761ce9e605STiwei Bie 	/* That should have freed everything. */
17771ce9e605STiwei Bie 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
17781ce9e605STiwei Bie 
17791ce9e605STiwei Bie 	END_USE(vq);
17801ce9e605STiwei Bie 	return NULL;
17811ce9e605STiwei Bie }
17821ce9e605STiwei Bie 
178396ef18a2SXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
17845a222421SJason Wang {
17855a222421SJason Wang 	struct vring_desc_extra *desc_extra;
17865a222421SJason Wang 	unsigned int i;
17875a222421SJason Wang 
17885a222421SJason Wang 	desc_extra = kcalloc(num, sizeof(struct vring_desc_extra),
17895a222421SJason Wang 			     GFP_KERNEL);
17905a222421SJason Wang 	if (!desc_extra)
17915a222421SJason Wang 		return NULL;
17945a222421SJason Wang 
17955a222421SJason Wang 	for (i = 0; i < num - 1; i++)
17965a222421SJason Wang 		desc_extra[i].next = i + 1;
17975a222421SJason Wang 
17985a222421SJason Wang 	return desc_extra;
17995a222421SJason Wang }
18005a222421SJason Wang 
18011ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed(
18021ce9e605STiwei Bie 	unsigned int index,
18031ce9e605STiwei Bie 	unsigned int num,
18041ce9e605STiwei Bie 	unsigned int vring_align,
18051ce9e605STiwei Bie 	struct virtio_device *vdev,
18061ce9e605STiwei Bie 	bool weak_barriers,
18071ce9e605STiwei Bie 	bool may_reduce_num,
18081ce9e605STiwei Bie 	bool context,
18091ce9e605STiwei Bie 	bool (*notify)(struct virtqueue *),
18101ce9e605STiwei Bie 	void (*callback)(struct virtqueue *),
18111ce9e605STiwei Bie 	const char *name)
18121ce9e605STiwei Bie {
18131ce9e605STiwei Bie 	struct vring_virtqueue *vq;
18141ce9e605STiwei Bie 	struct vring_packed_desc *ring;
18151ce9e605STiwei Bie 	struct vring_packed_desc_event *driver, *device;
18161ce9e605STiwei Bie 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
18171ce9e605STiwei Bie 	size_t ring_size_in_bytes, event_size_in_bytes;
18181ce9e605STiwei Bie 
18191ce9e605STiwei Bie 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
18201ce9e605STiwei Bie 
18211ce9e605STiwei Bie 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
18221ce9e605STiwei Bie 				 &ring_dma_addr,
18231ce9e605STiwei Bie 				 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
18241ce9e605STiwei Bie 	if (!ring)
18251ce9e605STiwei Bie 		goto err_ring;
18261ce9e605STiwei Bie 
18271ce9e605STiwei Bie 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
18281ce9e605STiwei Bie 
18291ce9e605STiwei Bie 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
18301ce9e605STiwei Bie 				   &driver_event_dma_addr,
18311ce9e605STiwei Bie 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
18321ce9e605STiwei Bie 	if (!driver)
18331ce9e605STiwei Bie 		goto err_driver;
18341ce9e605STiwei Bie 
18351ce9e605STiwei Bie 	device = vring_alloc_queue(vdev, event_size_in_bytes,
18361ce9e605STiwei Bie 				   &device_event_dma_addr,
18371ce9e605STiwei Bie 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
18381ce9e605STiwei Bie 	if (!device)
18391ce9e605STiwei Bie 		goto err_device;
18401ce9e605STiwei Bie 
18411ce9e605STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
18421ce9e605STiwei Bie 	if (!vq)
18431ce9e605STiwei Bie 		goto err_vq;
18441ce9e605STiwei Bie 
18451ce9e605STiwei Bie 	vq->vq.callback = callback;
18461ce9e605STiwei Bie 	vq->vq.vdev = vdev;
18471ce9e605STiwei Bie 	vq->vq.name = name;
18481ce9e605STiwei Bie 	vq->vq.index = index;
18491ce9e605STiwei Bie 	vq->we_own_ring = true;
18501ce9e605STiwei Bie 	vq->notify = notify;
18511ce9e605STiwei Bie 	vq->weak_barriers = weak_barriers;
1852c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
18538b4ec69dSJason Wang 	vq->broken = true;
1854c346dae4SJason Wang #else
1855c346dae4SJason Wang 	vq->broken = false;
1856c346dae4SJason Wang #endif
18571ce9e605STiwei Bie 	vq->packed_ring = true;
18581ce9e605STiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
18591ce9e605STiwei Bie 
18601ce9e605STiwei Bie 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
18611ce9e605STiwei Bie 		!context;
18621ce9e605STiwei Bie 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
18631ce9e605STiwei Bie 
186445383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
186545383fb0STiwei Bie 		vq->weak_barriers = false;
186645383fb0STiwei Bie 
18671ce9e605STiwei Bie 	vq->packed.ring_dma_addr = ring_dma_addr;
18681ce9e605STiwei Bie 	vq->packed.driver_event_dma_addr = driver_event_dma_addr;
18691ce9e605STiwei Bie 	vq->packed.device_event_dma_addr = device_event_dma_addr;
18701ce9e605STiwei Bie 
18711ce9e605STiwei Bie 	vq->packed.ring_size_in_bytes = ring_size_in_bytes;
18721ce9e605STiwei Bie 	vq->packed.event_size_in_bytes = event_size_in_bytes;
18731ce9e605STiwei Bie 
18741ce9e605STiwei Bie 	vq->packed.vring.num = num;
18751ce9e605STiwei Bie 	vq->packed.vring.desc = ring;
18761ce9e605STiwei Bie 	vq->packed.vring.driver = driver;
18771ce9e605STiwei Bie 	vq->packed.vring.device = device;
18781ce9e605STiwei Bie 
18791ce9e605STiwei Bie 	vq->packed.next_avail_idx = 0;
18801ce9e605STiwei Bie 	vq->packed.avail_wrap_counter = 1;
18811ce9e605STiwei Bie 	vq->packed.event_flags_shadow = 0;
18821ce9e605STiwei Bie 	vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
18831ce9e605STiwei Bie 
18841ce9e605STiwei Bie 	vq->packed.desc_state = kcalloc(num,
18851ce9e605STiwei Bie 			sizeof(struct vring_desc_state_packed),
18861ce9e605STiwei Bie 			GFP_KERNEL);
18871ce9e605STiwei Bie 	if (!vq->packed.desc_state)
18881ce9e605STiwei Bie 		goto err_desc_state;
18921ce9e605STiwei Bie 
18931ce9e605STiwei Bie 	/* Put everything in free lists. */
18941ce9e605STiwei Bie 	vq->free_head = 0;
18951ce9e605STiwei Bie 
189696ef18a2SXuan Zhuo 	vq->packed.desc_extra = vring_alloc_desc_extra(num);
18971ce9e605STiwei Bie 	if (!vq->packed.desc_extra)
18981ce9e605STiwei Bie 		goto err_desc_extra;
18991ce9e605STiwei Bie 
19001ce9e605STiwei Bie 	/* No callback?  Tell other side not to bother us. */
19011ce9e605STiwei Bie 	if (!callback) {
19021ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
19031ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
19041ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
19051ce9e605STiwei Bie 	}
19061ce9e605STiwei Bie 
19073a897128SXuan Zhuo 	virtqueue_init(vq, num);
19083a897128SXuan Zhuo 
19090e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
1910e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
19110e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
19121ce9e605STiwei Bie 	return &vq->vq;
19131ce9e605STiwei Bie 
19141ce9e605STiwei Bie err_desc_extra:
19151ce9e605STiwei Bie 	kfree(vq->packed.desc_state);
19161ce9e605STiwei Bie err_desc_state:
19171ce9e605STiwei Bie 	kfree(vq);
19181ce9e605STiwei Bie err_vq:
1919ae93d8eaSDan Carpenter 	vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
19201ce9e605STiwei Bie err_device:
1921ae93d8eaSDan Carpenter 	vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
19221ce9e605STiwei Bie err_driver:
19231ce9e605STiwei Bie 	vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
19241ce9e605STiwei Bie err_ring:
19251ce9e605STiwei Bie 	return NULL;
19261ce9e605STiwei Bie }
19271ce9e605STiwei Bie 
19291ce9e605STiwei Bie /*
1930e6f633e5STiwei Bie  * Generic functions and exported symbols.
1931e6f633e5STiwei Bie  */
1932e6f633e5STiwei Bie 
1933e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq,
1934e6f633e5STiwei Bie 				struct scatterlist *sgs[],
1935e6f633e5STiwei Bie 				unsigned int total_sg,
1936e6f633e5STiwei Bie 				unsigned int out_sgs,
1937e6f633e5STiwei Bie 				unsigned int in_sgs,
1938e6f633e5STiwei Bie 				void *data,
1939e6f633e5STiwei Bie 				void *ctx,
1940e6f633e5STiwei Bie 				gfp_t gfp)
1941e6f633e5STiwei Bie {
19421ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
19431ce9e605STiwei Bie 
19441ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
19451ce9e605STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp) :
19461ce9e605STiwei Bie 				 virtqueue_add_split(_vq, sgs, total_sg,
1947e6f633e5STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp);
1948e6f633e5STiwei Bie }
1949e6f633e5STiwei Bie 
1950e6f633e5STiwei Bie /**
1951e6f633e5STiwei Bie  * virtqueue_add_sgs - expose buffers to other end
1952a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
1953e6f633e5STiwei Bie  * @sgs: array of terminated scatterlists.
1954a5581206SJiang Biao  * @out_sgs: the number of scatterlists readable by other side
1955a5581206SJiang Biao  * @in_sgs: the number of scatterlists which are writable (after readable ones)
1956e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1957e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1958e6f633e5STiwei Bie  *
1959e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1960e6f633e5STiwei Bie  * at the same time (except where noted).
1961e6f633e5STiwei Bie  *
1962e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1963e6f633e5STiwei Bie  */
1964e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq,
1965e6f633e5STiwei Bie 		      struct scatterlist *sgs[],
1966e6f633e5STiwei Bie 		      unsigned int out_sgs,
1967e6f633e5STiwei Bie 		      unsigned int in_sgs,
1968e6f633e5STiwei Bie 		      void *data,
1969e6f633e5STiwei Bie 		      gfp_t gfp)
1970e6f633e5STiwei Bie {
1971e6f633e5STiwei Bie 	unsigned int i, total_sg = 0;
1972e6f633e5STiwei Bie 
1973e6f633e5STiwei Bie 	/* Count them first. */
1974e6f633e5STiwei Bie 	for (i = 0; i < out_sgs + in_sgs; i++) {
1975e6f633e5STiwei Bie 		struct scatterlist *sg;
1976e6f633e5STiwei Bie 
1977e6f633e5STiwei Bie 		for (sg = sgs[i]; sg; sg = sg_next(sg))
1978e6f633e5STiwei Bie 			total_sg++;
1979e6f633e5STiwei Bie 	}
1980e6f633e5STiwei Bie 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
1981e6f633e5STiwei Bie 			     data, NULL, gfp);
1982e6f633e5STiwei Bie }
1983e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
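
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a driver queueing one device-readable request and one device-writable
 * response with virtqueue_add_sgs().  The req/resp objects and the
 * GFP_ATOMIC context are hypothetical.
 *
 *	struct scatterlist req_sg, resp_sg, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&req_sg, req, sizeof(*req));
 *	sg_init_one(&resp_sg, resp, sizeof(*resp));
 *	sgs[0] = &req_sg;
 *	sgs[1] = &resp_sg;
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (err)
 *		... ring full (-ENOSPC) or mapping failure ...
 */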
1984e6f633e5STiwei Bie 
1985e6f633e5STiwei Bie /**
1986e6f633e5STiwei Bie  * virtqueue_add_outbuf - expose output buffers to other end
1987e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
1988e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
1989e6f633e5STiwei Bie  * @num: the number of entries in @sg readable by other side
1990e6f633e5STiwei Bie  * @data: the token identifying the buffer.
1991e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
1992e6f633e5STiwei Bie  *
1993e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
1994e6f633e5STiwei Bie  * at the same time (except where noted).
1995e6f633e5STiwei Bie  *
1996e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
1997e6f633e5STiwei Bie  */
1998e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq,
1999e6f633e5STiwei Bie 			 struct scatterlist *sg, unsigned int num,
2000e6f633e5STiwei Bie 			 void *data,
2001e6f633e5STiwei Bie 			 gfp_t gfp)
2002e6f633e5STiwei Bie {
2003e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
2004e6f633e5STiwei Bie }
2005e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
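
/*
 * Illustrative sketch (added; skb and the stop-queue handling are
 * hypothetical): queueing a single driver-to-device buffer, e.g. a
 * packet being transmitted:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, skb->data, skb->len);
 *	if (virtqueue_add_outbuf(vq, &sg, 1, skb, GFP_ATOMIC) < 0)
 *		... ring full: stop the queue and retry later ...
 */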
2006e6f633e5STiwei Bie 
2007e6f633e5STiwei Bie /**
2008e6f633e5STiwei Bie  * virtqueue_add_inbuf - expose input buffers to other end
2009e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2010e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2011e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
2012e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2013e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2014e6f633e5STiwei Bie  *
2015e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2016e6f633e5STiwei Bie  * at the same time (except where noted).
2017e6f633e5STiwei Bie  *
2018e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
2019e6f633e5STiwei Bie  */
2020e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq,
2021e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
2022e6f633e5STiwei Bie 			void *data,
2023e6f633e5STiwei Bie 			gfp_t gfp)
2024e6f633e5STiwei Bie {
2025e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
2026e6f633e5STiwei Bie }
2027e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
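
/*
 * Illustrative sketch (added; buf/buf_len are hypothetical): posting an
 * empty buffer for the device to fill, e.g. a receive buffer.  The token
 * passed as @data is what virtqueue_get_buf() later returns:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 */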
2028e6f633e5STiwei Bie 
2029e6f633e5STiwei Bie /**
2030e6f633e5STiwei Bie  * virtqueue_add_inbuf_ctx - expose input buffers to other end
2031e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2032e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2033e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
2034e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2035e6f633e5STiwei Bie  * @ctx: extra context for the token
2036e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2037e6f633e5STiwei Bie  *
2038e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2039e6f633e5STiwei Bie  * at the same time (except where noted).
2040e6f633e5STiwei Bie  *
2041e6f633e5STiwei Bie  * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
2042e6f633e5STiwei Bie  */
2043e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
2044e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
2045e6f633e5STiwei Bie 			void *data,
2046e6f633e5STiwei Bie 			void *ctx,
2047e6f633e5STiwei Bie 			gfp_t gfp)
2048e6f633e5STiwei Bie {
2049e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
2050e6f633e5STiwei Bie }
2051e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
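
/*
 * Added note: the @ctx cookie stored here is handed back through the ctx
 * argument of virtqueue_get_buf_ctx() when the buffer completes.  It is
 * only supported on queues created with per-buffer context enabled,
 * which in turn disables indirect descriptors for that queue (see the
 * BUG_ON(ctx && vq->indirect) checks in the add paths).
 */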
2052e6f633e5STiwei Bie 
2053e6f633e5STiwei Bie /**
2054e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
2055a5581206SJiang Biao  * @_vq: the struct virtqueue
2056e6f633e5STiwei Bie  *
2057e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
2058e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
2059e6f633e5STiwei Bie  *		virtqueue_notify(vq);
2060e6f633e5STiwei Bie  *
2061e6f633e5STiwei Bie  * This is sometimes useful because virtqueue_kick_prepare() needs
2062e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
2063e6f633e5STiwei Bie  */
2064e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
2065e6f633e5STiwei Bie {
20661ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
20671ce9e605STiwei Bie 
20681ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
20691ce9e605STiwei Bie 				 virtqueue_kick_prepare_split(_vq);
2070e6f633e5STiwei Bie }
2071e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
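
/*
 * Illustrative sketch (added; priv->vq_lock is hypothetical): the
 * prepare step runs under the driver's virtqueue lock, while the
 * potentially slow notify (e.g. an MMIO write causing a VM exit) runs
 * outside it:
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->vq_lock, flags);
 *	virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->vq_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */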
2072e6f633e5STiwei Bie 
2073e6f633e5STiwei Bie /**
2074e6f633e5STiwei Bie  * virtqueue_notify - second half of split virtqueue_kick call.
2075a5581206SJiang Biao  * @_vq: the struct virtqueue
2076e6f633e5STiwei Bie  *
2077e6f633e5STiwei Bie  * This does not need to be serialized.
2078e6f633e5STiwei Bie  *
2079e6f633e5STiwei Bie  * Returns false if host notify failed or queue is broken, otherwise true.
2080e6f633e5STiwei Bie  */
2081e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq)
2082e6f633e5STiwei Bie {
2083e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2084e6f633e5STiwei Bie 
2085e6f633e5STiwei Bie 	if (unlikely(vq->broken))
2086e6f633e5STiwei Bie 		return false;
2087e6f633e5STiwei Bie 
2088e6f633e5STiwei Bie 	/* Prod other side to tell it about changes. */
2089e6f633e5STiwei Bie 	if (!vq->notify(_vq)) {
2090e6f633e5STiwei Bie 		vq->broken = true;
2091e6f633e5STiwei Bie 		return false;
2092e6f633e5STiwei Bie 	}
2093e6f633e5STiwei Bie 	return true;
2094e6f633e5STiwei Bie }
2095e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify);
2096e6f633e5STiwei Bie 
2097e6f633e5STiwei Bie /**
2098e6f633e5STiwei Bie  * virtqueue_kick - update after add_buf
2099e6f633e5STiwei Bie  * @vq: the struct virtqueue
2100e6f633e5STiwei Bie  *
2101e6f633e5STiwei Bie  * After one or more virtqueue_add_* calls, invoke this to kick
2102e6f633e5STiwei Bie  * the other side.
2103e6f633e5STiwei Bie  *
2104e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2105e6f633e5STiwei Bie  * operations at the same time (except where noted).
2106e6f633e5STiwei Bie  *
2107e6f633e5STiwei Bie  * Returns false if kick failed, otherwise true.
2108e6f633e5STiwei Bie  */
2109e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq)
2110e6f633e5STiwei Bie {
2111e6f633e5STiwei Bie 	if (virtqueue_kick_prepare(vq))
2112e6f633e5STiwei Bie 		return virtqueue_notify(vq);
2113e6f633e5STiwei Bie 	return true;
2114e6f633e5STiwei Bie }
2115e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick);
2116e6f633e5STiwei Bie 
2117e6f633e5STiwei Bie /**
211831c11db6SYang Li  * virtqueue_get_buf_ctx - get the next used buffer
2119a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2120e6f633e5STiwei Bie  * @len: the length written into the buffer
2121a5581206SJiang Biao  * @ctx: extra context for the token
2122e6f633e5STiwei Bie  *
2123e6f633e5STiwei Bie  * If the device wrote data into the buffer, @len will be set to the
2124e6f633e5STiwei Bie  * amount written.  This means you don't need to clear the buffer
2125e6f633e5STiwei Bie  * beforehand to ensure there's no data leakage in the case of short
2126e6f633e5STiwei Bie  * writes.
2127e6f633e5STiwei Bie  *
2128e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2129e6f633e5STiwei Bie  * operations at the same time (except where noted).
2130e6f633e5STiwei Bie  *
2131e6f633e5STiwei Bie  * Returns NULL if there are no used buffers, or the "data" token
2132e6f633e5STiwei Bie  * handed to virtqueue_add_*().
2133e6f633e5STiwei Bie  */
2134e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
2135e6f633e5STiwei Bie 			    void **ctx)
2136e6f633e5STiwei Bie {
21371ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21381ce9e605STiwei Bie 
21391ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
21401ce9e605STiwei Bie 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
2141e6f633e5STiwei Bie }
2142e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2143e6f633e5STiwei Bie 
2144e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2145e6f633e5STiwei Bie {
2146e6f633e5STiwei Bie 	return virtqueue_get_buf_ctx(_vq, len, NULL);
2147e6f633e5STiwei Bie }
2148e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf);
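
/*
 * Illustrative sketch (added; handle_completion() is hypothetical):
 * draining all completed buffers, typically from the virtqueue
 * callback:
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		handle_completion(buf, len);
 */
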
2149e6f633e5STiwei Bie /**
2150e6f633e5STiwei Bie  * virtqueue_disable_cb - disable callbacks
2151a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2152e6f633e5STiwei Bie  *
2153e6f633e5STiwei Bie  * Note that this is not necessarily synchronous, hence unreliable and only
2154e6f633e5STiwei Bie  * useful as an optimization.
2155e6f633e5STiwei Bie  *
2156e6f633e5STiwei Bie  * Unlike other operations, this need not be serialized.
2157e6f633e5STiwei Bie  */
2158e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq)
2159e6f633e5STiwei Bie {
21601ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21611ce9e605STiwei Bie 
21628d622d21SMichael S. Tsirkin 	/* If device triggered an event already it won't trigger one again:
21638d622d21SMichael S. Tsirkin 	 * no need to disable.
21648d622d21SMichael S. Tsirkin 	 */
21658d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
21668d622d21SMichael S. Tsirkin 		return;
21678d622d21SMichael S. Tsirkin 
21681ce9e605STiwei Bie 	if (vq->packed_ring)
21691ce9e605STiwei Bie 		virtqueue_disable_cb_packed(_vq);
21701ce9e605STiwei Bie 	else
2171e6f633e5STiwei Bie 		virtqueue_disable_cb_split(_vq);
2172e6f633e5STiwei Bie }
2173e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2174e6f633e5STiwei Bie 
2175e6f633e5STiwei Bie /**
2176e6f633e5STiwei Bie  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2177a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2178e6f633e5STiwei Bie  *
2179e6f633e5STiwei Bie  * This re-enables callbacks; it returns the current queue state
2180e6f633e5STiwei Bie  * in an opaque unsigned value. This value should later be tested by
2181e6f633e5STiwei Bie  * virtqueue_poll(), to detect a possible race between the driver
2182e6f633e5STiwei Bie  * checking for more work and enabling callbacks.
2183e6f633e5STiwei Bie  *
2184e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2185e6f633e5STiwei Bie  * operations at the same time (except where noted).
2186e6f633e5STiwei Bie  */
218731532340SSolomon Tan unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2188e6f633e5STiwei Bie {
21891ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
21901ce9e605STiwei Bie 
21918d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
21928d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
21938d622d21SMichael S. Tsirkin 
21941ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
21951ce9e605STiwei Bie 				 virtqueue_enable_cb_prepare_split(_vq);
2196e6f633e5STiwei Bie }
2197e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2198e6f633e5STiwei Bie 
2199e6f633e5STiwei Bie /**
2200e6f633e5STiwei Bie  * virtqueue_poll - query pending used buffers
2201a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2202e6f633e5STiwei Bie  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2203e6f633e5STiwei Bie  *
2204e6f633e5STiwei Bie  * Returns "true" if there are pending used buffers in the queue.
2205e6f633e5STiwei Bie  *
2206e6f633e5STiwei Bie  * This does not need to be serialized.
2207e6f633e5STiwei Bie  */
220831532340SSolomon Tan bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
2209e6f633e5STiwei Bie {
2210e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2211e6f633e5STiwei Bie 
2212481a0d74SMao Wenan 	if (unlikely(vq->broken))
2213481a0d74SMao Wenan 		return false;
2214481a0d74SMao Wenan 
2215e6f633e5STiwei Bie 	virtio_mb(vq->weak_barriers);
22161ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
22171ce9e605STiwei Bie 				 virtqueue_poll_split(_vq, last_used_idx);
2218e6f633e5STiwei Bie }
2219e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll);
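
/*
 * Illustrative sketch (added): NAPI-style processing pairs
 * virtqueue_enable_cb_prepare() with virtqueue_poll(); the opaque value
 * snapshots the queue state at enable time so a buffer used in between
 * is not missed:
 *
 *	unsigned int opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		... more work arrived: reschedule processing ...
 *	}
 */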
2220e6f633e5STiwei Bie 
2221e6f633e5STiwei Bie /**
2222e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
2223a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2224e6f633e5STiwei Bie  *
2225e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
2226e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
2227e6f633e5STiwei Bie  * checking for more work and enabling callbacks.
2228e6f633e5STiwei Bie  *
2229e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2230e6f633e5STiwei Bie  * operations at the same time (except where noted).
2231e6f633e5STiwei Bie  */
2232e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
2233e6f633e5STiwei Bie {
223431532340SSolomon Tan 	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2235e6f633e5STiwei Bie 
2236e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
2237e6f633e5STiwei Bie }
2238e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
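
/*
 * Illustrative sketch (added; buf, len and handle_completion() are
 * hypothetical): the classic race-free completion loop.  If a buffer is
 * used between the last get_buf and re-enabling callbacks,
 * virtqueue_enable_cb() returns false and the loop runs again instead
 * of losing the event:
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			handle_completion(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */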
2239e6f633e5STiwei Bie 
2240e6f633e5STiwei Bie /**
2241e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2242a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2243e6f633e5STiwei Bie  *
2244e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2245e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2246e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2247e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work
2248e6f633e5STiwei Bie  * and enabling callbacks.
2249e6f633e5STiwei Bie  *
2250e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2251e6f633e5STiwei Bie  * operations at the same time (except where noted).
2252e6f633e5STiwei Bie  */
2253e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2254e6f633e5STiwei Bie {
22551ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
22561ce9e605STiwei Bie 
22578d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
22588d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
22598d622d21SMichael S. Tsirkin 
22601ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
22611ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2262e6f633e5STiwei Bie }
2263e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
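
/*
 * Editorial sketch: a transmit completion path might prefer the delayed
 * variant so the device only interrupts after a batch of buffers has been
 * used; when it returns "false", completions are reaped immediately
 * instead (hypothetical names throughout).
 *
 *	if (!virtqueue_enable_cb_delayed(my_vq)) {
 *		virtqueue_disable_cb(my_vq);
 *		free_old_tx_buffers(my_vq);
 *	}
 */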
2264e6f633e5STiwei Bie 
2265138fd251STiwei Bie /**
2266138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2267a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2268138fd251STiwei Bie  *
2269138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2270a62eecb3SXuan Zhuo  * This is not valid on an active queue; it is useful at device
2271a62eecb3SXuan Zhuo  * shutdown or when resetting the queue.
2272138fd251STiwei Bie  */
2273138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2274138fd251STiwei Bie {
22751ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
22761ce9e605STiwei Bie 
22771ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
22781ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2279138fd251STiwei Bie }
22807c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
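
/*
 * Editorial sketch: typical use at device teardown, after the queue has
 * been stopped, to reclaim buffers the device never consumed
 * (my_free_buf() is a hypothetical helper):
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		my_free_buf(buf);
 */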
2281c021eac4SShirley Ma 
2282138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2283138fd251STiwei Bie {
22841ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2285138fd251STiwei Bie }
2286138fd251STiwei Bie 
22870a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
22880a8a69ddSRusty Russell {
22890a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
22900a8a69ddSRusty Russell 
22910a8a69ddSRusty Russell 	if (!more_used(vq)) {
22920a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
22930a8a69ddSRusty Russell 		return IRQ_NONE;
22940a8a69ddSRusty Russell 	}
22950a8a69ddSRusty Russell 
22968b4ec69dSJason Wang 	if (unlikely(vq->broken)) {
2297c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
22988b4ec69dSJason Wang 		dev_warn_once(&vq->vq.vdev->dev,
22998b4ec69dSJason Wang 			      "virtio vring IRQ raised before DRIVER_OK");
23008b4ec69dSJason Wang 		return IRQ_NONE;
2301c346dae4SJason Wang #else
2302c346dae4SJason Wang 		return IRQ_HANDLED;
2303c346dae4SJason Wang #endif
23048b4ec69dSJason Wang 	}
23050a8a69ddSRusty Russell 
23068d622d21SMichael S. Tsirkin 	/* Just a performance hint, so it's OK that this can be racy! */
23078d622d21SMichael S. Tsirkin 	if (vq->event)
23088d622d21SMichael S. Tsirkin 		vq->event_triggered = true;
23098d622d21SMichael S. Tsirkin 
23100a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
231118445c4dSRusty Russell 	if (vq->vq.callback)
231218445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
23130a8a69ddSRusty Russell 
23140a8a69ddSRusty Russell 	return IRQ_HANDLED;
23150a8a69ddSRusty Russell }
2316c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
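
/*
 * Editorial note: vring_interrupt() is intended to be (or be called from)
 * the transport's per-virtqueue IRQ handler. A rough MSI-X style
 * registration sketch, with hypothetical vector bookkeeping:
 *
 *	err = request_irq(pci_irq_vector(pci_dev, msix_vec),
 *			  vring_interrupt, 0, "my-virtio-vq", vq);
 *	if (err)
 *		goto error;
 */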
23170a8a69ddSRusty Russell 
23181ce9e605STiwei Bie /* Only available for split ring */
231907d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
2320cd4c812aSXuan Zhuo 					       struct vring_virtqueue_split *vring_split,
23210a8a69ddSRusty Russell 					       struct virtio_device *vdev,
23227b21e34fSRusty Russell 					       bool weak_barriers,
2323f94682ddSMichael S. Tsirkin 					       bool context,
232446f9c2b9SHeinz Graalfs 					       bool (*notify)(struct virtqueue *),
23259499f5e7SRusty Russell 					       void (*callback)(struct virtqueue *),
23269499f5e7SRusty Russell 					       const char *name)
23270a8a69ddSRusty Russell {
23282a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
2329a2b36c8dSXuan Zhuo 	int err;
23300a8a69ddSRusty Russell 
23311ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
23321ce9e605STiwei Bie 		return NULL;
23331ce9e605STiwei Bie 
2334cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
23350a8a69ddSRusty Russell 	if (!vq)
23360a8a69ddSRusty Russell 		return NULL;
23370a8a69ddSRusty Russell 
23381ce9e605STiwei Bie 	vq->packed_ring = false;
23390a8a69ddSRusty Russell 	vq->vq.callback = callback;
23400a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
23419499f5e7SRusty Russell 	vq->vq.name = name;
234206ca287dSRusty Russell 	vq->vq.index = index;
23432a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
23440a8a69ddSRusty Russell 	vq->notify = notify;
23457b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
2346c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
23478b4ec69dSJason Wang 	vq->broken = true;
2348c346dae4SJason Wang #else
2349c346dae4SJason Wang 	vq->broken = false;
2350c346dae4SJason Wang #endif
2351fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
23520a8a69ddSRusty Russell 
23535a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
23545a08b04fSMichael S. Tsirkin 		!context;
2355a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
23569fa29b9dSMark McLoughlin 
235745383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
235845383fb0STiwei Bie 		vq->weak_barriers = false;
235945383fb0STiwei Bie 
2360a2b36c8dSXuan Zhuo 	err = vring_alloc_state_extra_split(vring_split);
2361a2b36c8dSXuan Zhuo 	if (err) {
2362a2b36c8dSXuan Zhuo 		kfree(vq);
2363a2b36c8dSXuan Zhuo 		return NULL;
2364a2b36c8dSXuan Zhuo 	}
236572b5e895SJason Wang 
2366198fa7beSXuan Zhuo 	virtqueue_vring_init_split(vring_split, vq);
2367198fa7beSXuan Zhuo 
2368cd4c812aSXuan Zhuo 	virtqueue_init(vq, vring_split->vring.num);
2369e1d6a423SXuan Zhuo 	virtqueue_vring_attach_split(vq, vring_split);
23703a897128SXuan Zhuo 
23710e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2372e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
23730e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
23740a8a69ddSRusty Russell 	return &vq->vq;
23750a8a69ddSRusty Russell }
23762a2d1382SAndy Lutomirski 
23772a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
23782a2d1382SAndy Lutomirski 	unsigned int index,
23792a2d1382SAndy Lutomirski 	unsigned int num,
23802a2d1382SAndy Lutomirski 	unsigned int vring_align,
23812a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
23822a2d1382SAndy Lutomirski 	bool weak_barriers,
23832a2d1382SAndy Lutomirski 	bool may_reduce_num,
2384f94682ddSMichael S. Tsirkin 	bool context,
23852a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
23862a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
23872a2d1382SAndy Lutomirski 	const char *name)
23882a2d1382SAndy Lutomirski {
23901ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
23911ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
23921ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
23931ce9e605STiwei Bie 				context, notify, callback, name);
23941ce9e605STiwei Bie 
2395d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2396d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
2397d79dca75STiwei Bie 			context, notify, callback, name);
23982a2d1382SAndy Lutomirski }
23992a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
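
/*
 * Editorial sketch: a transport that wants this layer to allocate the
 * ring memory (as e.g. virtio-mmio and modern virtio-pci do) might call
 * roughly the following, with all values hypothetical:
 *
 *	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev,
 *				    true, true, ctx,
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return ERR_PTR(-ENOMEM);
 */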
24002a2d1382SAndy Lutomirski 
24011ce9e605STiwei Bie /* Only available for split ring */
24022a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
24032a2d1382SAndy Lutomirski 				      unsigned int num,
24042a2d1382SAndy Lutomirski 				      unsigned int vring_align,
24052a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
24062a2d1382SAndy Lutomirski 				      bool weak_barriers,
2407f94682ddSMichael S. Tsirkin 				      bool context,
24082a2d1382SAndy Lutomirski 				      void *pages,
24092a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
24102a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
24112a2d1382SAndy Lutomirski 				      const char *name)
24122a2d1382SAndy Lutomirski {
2413cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
24141ce9e605STiwei Bie 
24151ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
24161ce9e605STiwei Bie 		return NULL;
24171ce9e605STiwei Bie 
2418cd4c812aSXuan Zhuo 	vring_init(&vring_split.vring, num, pages, vring_align);
2419cd4c812aSXuan Zhuo 	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
2420cd4c812aSXuan Zhuo 				     context, notify, callback, name);
24212a2d1382SAndy Lutomirski }
2422c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
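
/*
 * Editorial sketch: with this legacy entry point the caller owns the ring
 * memory and must size and align it itself, e.g. (hypothetical setup):
 *
 *	void *queue = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *					GFP_KERNEL | __GFP_ZERO);
 *
 *	vq = vring_new_virtqueue(index, num, PAGE_SIZE, vdev, true, ctx,
 *				 queue, my_notify, my_callback, "rx");
 */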
24230a8a69ddSRusty Russell 
24243ea19e32SXuan Zhuo static void vring_free(struct virtqueue *_vq)
24250a8a69ddSRusty Russell {
24262a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
24272a2d1382SAndy Lutomirski 
24282a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
24291ce9e605STiwei Bie 		if (vq->packed_ring) {
24301ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
24311ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
24321ce9e605STiwei Bie 					 vq->packed.vring.desc,
24331ce9e605STiwei Bie 					 vq->packed.ring_dma_addr);
24341ce9e605STiwei Bie 
24351ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
24361ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
24371ce9e605STiwei Bie 					 vq->packed.vring.driver,
24381ce9e605STiwei Bie 					 vq->packed.driver_event_dma_addr);
24391ce9e605STiwei Bie 
24401ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
24411ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
24421ce9e605STiwei Bie 					 vq->packed.vring.device,
24431ce9e605STiwei Bie 					 vq->packed.device_event_dma_addr);
24441ce9e605STiwei Bie 
24451ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
24461ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
24471ce9e605STiwei Bie 		} else {
2448d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2449d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2450d79dca75STiwei Bie 					 vq->split.vring.desc,
2451d79dca75STiwei Bie 					 vq->split.queue_dma_addr);
2452f13f09a1SSuman Anna 		}
2453f13f09a1SSuman Anna 	}
245472b5e895SJason Wang 	if (!vq->packed_ring) {
2455cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
245672b5e895SJason Wang 		kfree(vq->split.desc_extra);
245772b5e895SJason Wang 	}
24583ea19e32SXuan Zhuo }
24593ea19e32SXuan Zhuo 
24603ea19e32SXuan Zhuo void vring_del_virtqueue(struct virtqueue *_vq)
24613ea19e32SXuan Zhuo {
24623ea19e32SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
24633ea19e32SXuan Zhuo 
24643ea19e32SXuan Zhuo 	spin_lock(&vq->vq.vdev->vqs_list_lock);
24653ea19e32SXuan Zhuo 	list_del(&_vq->list);
24663ea19e32SXuan Zhuo 	spin_unlock(&vq->vq.vdev->vqs_list_lock);
24673ea19e32SXuan Zhuo 
24683ea19e32SXuan Zhuo 	vring_free(_vq);
24693ea19e32SXuan Zhuo 
24702a2d1382SAndy Lutomirski 	kfree(vq);
24710a8a69ddSRusty Russell }
2472c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
24730a8a69ddSRusty Russell 
2474e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2475e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2476e34f8725SRusty Russell {
2477e34f8725SRusty Russell 	unsigned int i;
2478e34f8725SRusty Russell 
2479e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2480e34f8725SRusty Russell 		switch (i) {
24819fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
24829fa29b9dSMark McLoughlin 			break;
2483a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2484a5c262c5SMichael S. Tsirkin 			break;
2485747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2486747ae34aSMichael S. Tsirkin 			break;
2487321bd212SMichael S. Tsirkin 		case VIRTIO_F_ACCESS_PLATFORM:
24881a937693SMichael S. Tsirkin 			break;
2489f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2490f959a128STiwei Bie 			break;
249145383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
249245383fb0STiwei Bie 			break;
2493e34f8725SRusty Russell 		default:
2494e34f8725SRusty Russell 			/* We don't understand this bit. */
2495e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2496e34f8725SRusty Russell 		}
2497e34f8725SRusty Russell 	}
2498e34f8725SRusty Russell }
2499e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
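
/*
 * Editorial sketch: a transport's .finalize_features hook typically calls
 * this first so that ring-transport bits it does not implement itself are
 * cleared before the feature set is committed (virtio-pci does the
 * equivalent):
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */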
2500e34f8725SRusty Russell 
25015dfc1762SRusty Russell /**
25025dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
2503a5581206SJiang Biao  * @_vq: the struct virtqueue containing the vring of interest.
25045dfc1762SRusty Russell  *
25055dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
25065dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
25075dfc1762SRusty Russell  */
25088f9f4668SRick Jones unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
25098f9f4668SRick Jones {
25118f9f4668SRick Jones 	struct vring_virtqueue *vq = to_vvq(_vq);
25128f9f4668SRick Jones 
25131ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
25148f9f4668SRick Jones }
25158f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
25168f9f4668SRick Jones 
2517b3b32c94SHeinz Graalfs bool virtqueue_is_broken(struct virtqueue *_vq)
2518b3b32c94SHeinz Graalfs {
2519b3b32c94SHeinz Graalfs 	struct vring_virtqueue *vq = to_vvq(_vq);
2520b3b32c94SHeinz Graalfs 
252160f07798SParav Pandit 	return READ_ONCE(vq->broken);
2522b3b32c94SHeinz Graalfs }
2523b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
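
/*
 * Editorial sketch: drivers that spin waiting for a completion should
 * also check for breakage so they cannot hang on a dead device, e.g.:
 *
 *	while (!virtqueue_get_buf(vq, &len) && !virtqueue_is_broken(vq))
 *		cpu_relax();
 */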
2524b3b32c94SHeinz Graalfs 
2525e2dcdfe9SRusty Russell /*
2526e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
2527e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush the write to vq->broken.
2528e2dcdfe9SRusty Russell  */
2529e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
2530e2dcdfe9SRusty Russell {
2531e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
2532e2dcdfe9SRusty Russell 
25330e566c8fSParav Pandit 	spin_lock(&dev->vqs_list_lock);
2534e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
2535e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
253660f07798SParav Pandit 
253760f07798SParav Pandit 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
253860f07798SParav Pandit 		WRITE_ONCE(vq->broken, true);
2539e2dcdfe9SRusty Russell 	}
25400e566c8fSParav Pandit 	spin_unlock(&dev->vqs_list_lock);
2541e2dcdfe9SRusty Russell }
2542e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
2543e2dcdfe9SRusty Russell 
2544be83f04dSJason Wang /*
2545be83f04dSJason Wang  * This should allow the device to be used by the driver. You may
2546be83f04dSJason Wang  * need to grab appropriate locks to flush the write to
2547be83f04dSJason Wang  * vq->broken. This should only be used in specific cases, e.g.
2548be83f04dSJason Wang  * probing and restoring. This function should only be called by the
2549be83f04dSJason Wang  * core, not directly by the driver.
2550be83f04dSJason Wang  */
2551be83f04dSJason Wang void __virtio_unbreak_device(struct virtio_device *dev)
2552be83f04dSJason Wang {
2553be83f04dSJason Wang 	struct virtqueue *_vq;
2554be83f04dSJason Wang 
2555be83f04dSJason Wang 	spin_lock(&dev->vqs_list_lock);
2556be83f04dSJason Wang 	list_for_each_entry(_vq, &dev->vqs, list) {
2557be83f04dSJason Wang 		struct vring_virtqueue *vq = to_vvq(_vq);
2558be83f04dSJason Wang 
2559be83f04dSJason Wang 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
2560be83f04dSJason Wang 		WRITE_ONCE(vq->broken, false);
2561be83f04dSJason Wang 	}
2562be83f04dSJason Wang 	spin_unlock(&dev->vqs_list_lock);
2563be83f04dSJason Wang }
2564be83f04dSJason Wang EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
2565be83f04dSJason Wang 
25662a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
256789062652SCornelia Huck {
256889062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
256989062652SCornelia Huck 
25702a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
257189062652SCornelia Huck 
25721ce9e605STiwei Bie 	if (vq->packed_ring)
25731ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
25741ce9e605STiwei Bie 
2575d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
25762a2d1382SAndy Lutomirski }
25772a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
25782a2d1382SAndy Lutomirski 
25792a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
258089062652SCornelia Huck {
258189062652SCornelia Huck 	struct vring_virtqueue *vq = to_vvq(_vq);
258289062652SCornelia Huck 
25832a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
25842a2d1382SAndy Lutomirski 
25851ce9e605STiwei Bie 	if (vq->packed_ring)
25861ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
25871ce9e605STiwei Bie 
2588d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2589e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
259089062652SCornelia Huck }
25912a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
25922a2d1382SAndy Lutomirski 
25932a2d1382SAndy Lutomirski dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
25942a2d1382SAndy Lutomirski {
25952a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
25962a2d1382SAndy Lutomirski 
25972a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
25982a2d1382SAndy Lutomirski 
25991ce9e605STiwei Bie 	if (vq->packed_ring)
26001ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
26011ce9e605STiwei Bie 
2602d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
2603e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
26042a2d1382SAndy Lutomirski }
26052a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
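
/*
 * Editorial sketch: transports feed these three DMA addresses to the
 * device when setting up a queue; for a hypothetical 64-bit MMIO layout
 * (register offsets invented for illustration):
 *
 *	writeq(virtqueue_get_desc_addr(vq),  base + MY_QUEUE_DESC);
 *	writeq(virtqueue_get_avail_addr(vq), base + MY_QUEUE_DRIVER);
 *	writeq(virtqueue_get_used_addr(vq),  base + MY_QUEUE_DEVICE);
 */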
26062a2d1382SAndy Lutomirski 
26071ce9e605STiwei Bie /* Only available for split ring */
26082a2d1382SAndy Lutomirski const struct vring *virtqueue_get_vring(struct virtqueue *vq)
26092a2d1382SAndy Lutomirski {
2610e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
26112a2d1382SAndy Lutomirski }
26122a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
261389062652SCornelia Huck 
2614c6fd4701SRusty Russell MODULE_LICENSE("GPL");
2615