xref: /openbmc/linux/drivers/virtio/virtio_ring.c (revision ca2478a7d974f38d29d27acb42a952c7f168916e)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				      (_vq)->last_add_time)) > 100); \
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};

struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */
	struct vring vring;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 * guest byte order.
	 */
	u16 avail_idx_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_split *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t queue_dma_addr;
	size_t queue_size_in_bytes;

	/*
	 * The parameters used when creating this vring, kept so that a
	 * new vring can be created with the same constraints.
	 */
	u32 vring_align;
	bool may_reduce_num;
};

struct vring_virtqueue_packed {
	/* Actual memory layout for this queue. */
	struct {
		unsigned int num;
		struct vring_packed_desc *desc;
		struct vring_packed_desc_event *driver;
		struct vring_packed_desc_event *device;
	} vring;

	/* Driver ring wrap counter. */
	bool avail_wrap_counter;

	/* Avail used flags. */
	u16 avail_used_flags;

	/* Index of the next avail descriptor. */
	u16 next_avail_idx;

	/*
	 * Last written value to driver->flags in
	 * guest byte order.
	 */
	u16 event_flags_shadow;

	/* Per-descriptor state. */
	struct vring_desc_state_packed *desc_state;
	struct vring_desc_extra *desc_extra;

	/* DMA address and size information */
	dma_addr_t ring_dma_addr;
	dma_addr_t driver_event_dma_addr;
	dma_addr_t device_event_dma_addr;
	size_t ring_size_in_bytes;
	size_t event_size_in_bytes;
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* DMA mapping is done by the driver (premapped buffers). */
	bool premapped;

	/*
	 * Whether to unmap descriptors on detach: true only when
	 * premapped is false and use_dma_api is true.
	 */
	bool do_unmap;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen.
	 * For the split ring, it contains just the last used index.
	 * For the packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR contain the last used index;
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR contain the used wrap counter.
	 */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	union {
		/* Available for split ring */
		struct vring_virtqueue_split split;

		/* Available for packed ring */
		struct vring_virtqueue_packed packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

	/* Device used for doing DMA */
	struct device *dma_dev;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};

static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring_virtqueue_split *vring_split,
					       struct virtio_device *vdev,
					       bool weak_barriers,
					       bool context,
					       bool (*notify)(struct virtqueue *),
					       void (*callback)(struct virtqueue *),
					       const char *name,
					       struct device *dma_dev);
static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num);
static void vring_free(struct virtqueue *_vq);

/*
 * Helpers.
 */

#define to_vvq(_vq) container_of_const(_vq, struct vring_virtqueue, vq)

static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
				   unsigned int total_sg)
{
	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

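/*
 * Example (illustrative, not part of the original file): a driver can
 * use virtio_max_dma_size() to cap the size of each scatter-gather
 * segment it submits, so a single segment never exceeds what the DMA
 * layer can map at once.  The 128 KiB cap and the helper name are
 * hypothetical; SZ_128K comes from <linux/sizes.h>.
 */
static size_t __maybe_unused example_max_seg_size(struct virtio_device *vdev)
{
	return min_t(size_t, SZ_128K, virtio_max_dma_size(vdev));
}
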
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag,
			       struct device *dma_dev)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(dma_dev, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle,
			     struct device *dma_dev)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(dma_dev, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->dma_dev;
}

/* Map one sg entry. */
static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
			    enum dma_data_direction direction, dma_addr_t *addr)
{
	if (vq->premapped) {
		*addr = sg_dma_address(sg);
		return 0;
	}

	if (!vq->use_dma_api) {
		/*
		 * If DMA is not used, KMSAN doesn't know that the scatterlist
		 * is initialized by the hardware. Explicitly check/unpoison it
		 * depending on the direction.
		 */
		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
		*addr = (dma_addr_t)sg_phys(sg);
		return 0;
	}

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	*addr = dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);

	if (dma_mapping_error(vring_dma_dev(vq), *addr))
		return -ENOMEM;

	return 0;
}

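/*
 * Note (illustrative, not part of the original file): in premapped mode
 * the driver performs the DMA mapping itself (e.g. with dma_map_page())
 * and stores the result via sg_dma_address() before submitting the sg
 * to virtqueue_add_*; vring_map_one_sg() above then just reads that
 * pre-mapped address back instead of mapping the page a second time.
 */
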
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
{
	vq->vq.num_free = num;

	if (vq->packed_ring)
		vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR);
	else
		vq->last_used_idx = 0;

	vq->event_triggered = false;
	vq->num_added = 0;

#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
					   const struct vring_desc *desc)
{
	u16 flags;

	if (!vq->do_unmap)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	dma_unmap_page(vring_dma_dev(vq),
		       virtio64_to_cpu(vq->vq.vdev, desc->addr),
		       virtio32_to_cpu(vq->vq.vdev, desc->len),
		       (flags & VRING_DESC_F_WRITE) ?
		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
					  unsigned int i)
{
	struct vring_desc_extra *extra = vq->split.desc_extra;
	u16 flags;

	flags = extra[i].flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		if (!vq->use_dma_api)
			goto out;

		dma_unmap_single(vring_dma_dev(vq),
				 extra[i].addr,
				 extra[i].len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		if (!vq->do_unmap)
			goto out;

		dma_unmap_page(vring_dma_dev(vq),
			       extra[i].addr,
			       extra[i].len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}

out:
	return extra[i].next;
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

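/*
 * Example (illustrative, not part of the original file): for
 * total_sg = 3 the loop above pre-chains the table as
 * desc[0].next = 1, desc[1].next = 2, desc[2].next = 3.  The final
 * next is never followed, because virtqueue_add_split() clears
 * VRING_DESC_F_NEXT in the flags of the last descriptor it fills in.
 */
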
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
						    struct vring_desc *desc,
						    unsigned int i,
						    dma_addr_t addr,
						    unsigned int len,
						    u16 flags,
						    bool indirect)
{
	struct vring_virtqueue *vring = to_vvq(vq);
	struct vring_desc_extra *extra = vring->split.desc_extra;
	u16 next;

	desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
	desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
	desc[i].len = cpu_to_virtio32(vq->vdev, len);

	if (!indirect) {
		next = extra[i].next;
		desc[i].next = cpu_to_virtio16(vq->vdev, next);

		extra[i].addr = addr;
		extra[i].len = len;
		extra[i].flags = flags;
	} else
		next = virtio16_to_cpu(vq->vdev, desc[i].next);

	return next;
}

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr;

			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr, sg->length,
						     VRING_DESC_F_NEXT,
						     indirect);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr;

			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
				goto unmap_release;

			prev = i;
			/* Note that we trust the indirect descriptor
			 * table since it uses stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, i, addr,
						     sg->length,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     indirect);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vq->do_unmap)
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr)) {
			if (vq->premapped)
				goto free_indirect;

			goto unmap_release;
		}

		virtqueue_add_desc_split(_vq, vq->split.vring.desc,
					 head, addr,
					 total_sg * sizeof(struct vring_desc),
					 VRING_DESC_F_INDIRECT,
					 false);
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = vq->split.desc_extra[head].next;
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		if (indirect) {
			vring_unmap_one_split_indirect(vq, &desc[i]);
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		} else
			i = vring_unmap_one_split(vq, i);
	}

free_indirect:
	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}

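/*
 * Example (illustrative, not part of the original file): a typical
 * driver submission path that ends up in virtqueue_add_split() on a
 * split ring, via the public virtqueue_add_sgs() API.  The token and
 * the two-buffer layout (header + payload) are hypothetical.
 */
static int __maybe_unused example_submit(struct virtqueue *vq,
					 void *hdr, unsigned int hdr_len,
					 void *buf, unsigned int buf_len,
					 void *token)
{
	struct scatterlist hdr_sg, buf_sg, *sgs[2];
	int err;

	sg_init_one(&hdr_sg, hdr, hdr_len);
	sg_init_one(&buf_sg, buf, buf_len);
	sgs[0] = &hdr_sg;
	sgs[1] = &buf_sg;

	/* Two out sgs, no in sgs; token is returned by virtqueue_get_buf(). */
	err = virtqueue_add_sgs(vq, sgs, 2, 0, token, GFP_ATOMIC);
	if (err)
		return err;

	virtqueue_kick(vq);
	return 0;
}
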
static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}

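/*
 * Example (illustrative, not part of the original file):
 * virtqueue_kick() is the usual wrapper around the prepare/notify pair;
 * keeping them separate lets a driver batch several additions under its
 * own lock and do the (possibly expensive) notification outside it:
 *
 *	spin_lock(&priv->lock);
 *	... several virtqueue_add_*() calls ...
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&priv->lock);
 *	if (kick)
 *		virtqueue_notify(vq);
 */
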
static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, i);
		i = vq->split.desc_extra[i].next;
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, i);
	vq->split.desc_extra[i].next = vq->free_head;
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = vq->split.desc_extra[head].len;

		BUG_ON(!(vq->split.desc_extra[head].flags &
				VRING_DESC_F_INDIRECT));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		if (vq->do_unmap) {
			for (j = 0; j < len / sizeof(struct vring_desc); j++)
				vring_unmap_one_split_indirect(vq, &indir_desc[j]);
		}

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}

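/*
 * Example (illustrative, not part of the original file): a typical
 * completion path draining used buffers; virtqueue_get_buf() lands in
 * virtqueue_get_buf_ctx_split() on a split ring.  "recycle" is a
 * hypothetical per-driver callback.
 */
static void __maybe_unused example_drain(struct virtqueue *vq,
					 void (*recycle)(void *token, unsigned int len))
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		recycle(token, len);
}
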
static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)
			return;

		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned int virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned int last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

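/*
 * Example (illustrative, not part of the original file): the race-free
 * re-enable pattern built on the prepare/poll split above, equivalent
 * to what virtqueue_enable_cb() does: publish the event index or clear
 * the no-interrupt flag, then re-check for buffers that arrived before
 * that write became visible to the device.
 */
static bool __maybe_unused example_enable_cb(struct virtqueue *vq)
{
	unsigned int opaque = virtqueue_enable_cb_prepare(vq);

	/* True if no more buffers are pending; false means keep polling. */
	return !virtqueue_poll(vq, opaque);
}
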
static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static void virtqueue_vring_init_split(struct vring_virtqueue_split *vring_split,
				       struct vring_virtqueue *vq)
{
	struct virtio_device *vdev;

	vdev = vq->vq.vdev;

	vring_split->avail_flags_shadow = 0;
	vring_split->avail_idx_shadow = 0;

	/* No callback?  Tell other side not to bother us. */
	if (!vq->vq.callback) {
		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
					vring_split->avail_flags_shadow);
	}
}

static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{
	int num;

	num = vq->split.vring.num;

	vq->split.vring.avail->flags = 0;
	vq->split.vring.avail->idx = 0;

	/* reset avail event */
	vq->split.vring.avail->ring[num] = 0;

	vq->split.vring.used->flags = 0;
	vq->split.vring.used->idx = 0;

	/* reset used event */
	*(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0;

	virtqueue_init(vq, num);

	virtqueue_vring_init_split(&vq->split, vq);
}

static void virtqueue_vring_attach_split(struct vring_virtqueue *vq,
					 struct vring_virtqueue_split *vring_split)
{
	vq->split = *vring_split;

	/* Put everything in free lists. */
	vq->free_head = 0;
}

static int vring_alloc_state_extra_split(struct vring_virtqueue_split *vring_split)
{
	struct vring_desc_state_split *state;
	struct vring_desc_extra *extra;
	u32 num = vring_split->vring.num;

	state = kmalloc_array(num, sizeof(struct vring_desc_state_split), GFP_KERNEL);
	if (!state)
		goto err_state;

	extra = vring_alloc_desc_extra(num);
	if (!extra)
		goto err_extra;

	memset(state, 0, num * sizeof(struct vring_desc_state_split));

	vring_split->desc_state = state;
	vring_split->desc_extra = extra;
	return 0;

err_extra:
	kfree(state);
err_state:
	return -ENOMEM;
}

static void vring_free_split(struct vring_virtqueue_split *vring_split,
			     struct virtio_device *vdev, struct device *dma_dev)
{
	vring_free_queue(vdev, vring_split->queue_size_in_bytes,
			 vring_split->vring.desc,
			 vring_split->queue_dma_addr,
			 dma_dev);

	kfree(vring_split->desc_state);
	kfree(vring_split->desc_extra);
}

1090c2d87fe6SXuan Zhuo static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
1091c2d87fe6SXuan Zhuo 				   struct virtio_device *vdev,
1092c2d87fe6SXuan Zhuo 				   u32 num,
1093c2d87fe6SXuan Zhuo 				   unsigned int vring_align,
10942713ea3cSJason Wang 				   bool may_reduce_num,
10952713ea3cSJason Wang 				   struct device *dma_dev)
1096c2d87fe6SXuan Zhuo {
1097c2d87fe6SXuan Zhuo 	void *queue = NULL;
1098c2d87fe6SXuan Zhuo 	dma_addr_t dma_addr;
1099c2d87fe6SXuan Zhuo 
1100c2d87fe6SXuan Zhuo 	/* The split ring size must be a power of 2; reject anything else. */
1101b9d978a8SShaoqin Huang 	if (!is_power_of_2(num)) {
1102c2d87fe6SXuan Zhuo 		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
1103c2d87fe6SXuan Zhuo 		return -EINVAL;
1104c2d87fe6SXuan Zhuo 	}
1105c2d87fe6SXuan Zhuo 
1106c2d87fe6SXuan Zhuo 	/* TODO: allocate each queue chunk individually */
1107c2d87fe6SXuan Zhuo 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
1108c2d87fe6SXuan Zhuo 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
1109c2d87fe6SXuan Zhuo 					  &dma_addr,
11102713ea3cSJason Wang 					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
11112713ea3cSJason Wang 					  dma_dev);
1112c2d87fe6SXuan Zhuo 		if (queue)
1113c2d87fe6SXuan Zhuo 			break;
1114c2d87fe6SXuan Zhuo 		if (!may_reduce_num)
1115c2d87fe6SXuan Zhuo 			return -ENOMEM;
1116c2d87fe6SXuan Zhuo 	}
1117c2d87fe6SXuan Zhuo 
1118c2d87fe6SXuan Zhuo 	if (!num)
1119c2d87fe6SXuan Zhuo 		return -ENOMEM;
1120c2d87fe6SXuan Zhuo 
1121c2d87fe6SXuan Zhuo 	if (!queue) {
1122c2d87fe6SXuan Zhuo 		/* Try to get a single page. You are my only hope! */
1123c2d87fe6SXuan Zhuo 		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
11242713ea3cSJason Wang 					  &dma_addr, GFP_KERNEL | __GFP_ZERO,
11252713ea3cSJason Wang 					  dma_dev);
1126c2d87fe6SXuan Zhuo 	}
1127c2d87fe6SXuan Zhuo 	if (!queue)
1128c2d87fe6SXuan Zhuo 		return -ENOMEM;
1129c2d87fe6SXuan Zhuo 
1130c2d87fe6SXuan Zhuo 	vring_init(&vring_split->vring, num, queue, vring_align);
1131c2d87fe6SXuan Zhuo 
1132c2d87fe6SXuan Zhuo 	vring_split->queue_dma_addr = dma_addr;
1133c2d87fe6SXuan Zhuo 	vring_split->queue_size_in_bytes = vring_size(num, vring_align);
1134c2d87fe6SXuan Zhuo 
1135af36b16fSXuan Zhuo 	vring_split->vring_align = vring_align;
1136af36b16fSXuan Zhuo 	vring_split->may_reduce_num = may_reduce_num;
1137af36b16fSXuan Zhuo 
1138c2d87fe6SXuan Zhuo 	return 0;
1139c2d87fe6SXuan Zhuo }
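
/*
 * Note on the sizing loop above: with may_reduce_num set, num is halved
 * on each failed allocation while the ring still spans more than a page
 * (e.g. a request for 1024 entries may come back as 256 under memory
 * pressure); the caller observes the final size via vring.num.
 */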
1140c2d87fe6SXuan Zhuo 
1141d79dca75STiwei Bie static struct virtqueue *vring_create_virtqueue_split(
1142d79dca75STiwei Bie 	unsigned int index,
1143d79dca75STiwei Bie 	unsigned int num,
1144d79dca75STiwei Bie 	unsigned int vring_align,
1145d79dca75STiwei Bie 	struct virtio_device *vdev,
1146d79dca75STiwei Bie 	bool weak_barriers,
1147d79dca75STiwei Bie 	bool may_reduce_num,
1148d79dca75STiwei Bie 	bool context,
1149d79dca75STiwei Bie 	bool (*notify)(struct virtqueue *),
1150d79dca75STiwei Bie 	void (*callback)(struct virtqueue *),
11512713ea3cSJason Wang 	const char *name,
11522713ea3cSJason Wang 	struct device *dma_dev)
1153d79dca75STiwei Bie {
1154cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
1155d79dca75STiwei Bie 	struct virtqueue *vq;
1156c2d87fe6SXuan Zhuo 	int err;
1157d79dca75STiwei Bie 
1158c2d87fe6SXuan Zhuo 	err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
11592713ea3cSJason Wang 				      may_reduce_num, dma_dev);
1160c2d87fe6SXuan Zhuo 	if (err)
1161d79dca75STiwei Bie 		return NULL;
1162d79dca75STiwei Bie 
1163cd4c812aSXuan Zhuo 	vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
11642713ea3cSJason Wang 				   context, notify, callback, name, dma_dev);
1165d79dca75STiwei Bie 	if (!vq) {
11662713ea3cSJason Wang 		vring_free_split(&vring_split, vdev, dma_dev);
1167d79dca75STiwei Bie 		return NULL;
1168d79dca75STiwei Bie 	}
1169d79dca75STiwei Bie 
1170d79dca75STiwei Bie 	to_vvq(vq)->we_own_ring = true;
1171d79dca75STiwei Bie 
1172d79dca75STiwei Bie 	return vq;
1173d79dca75STiwei Bie }
1174d79dca75STiwei Bie 
11756fea20e5SXuan Zhuo static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
11766fea20e5SXuan Zhuo {
11776fea20e5SXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
11786fea20e5SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
11796fea20e5SXuan Zhuo 	struct virtio_device *vdev = _vq->vdev;
11806fea20e5SXuan Zhuo 	int err;
11816fea20e5SXuan Zhuo 
11826fea20e5SXuan Zhuo 	err = vring_alloc_queue_split(&vring_split, vdev, num,
11836fea20e5SXuan Zhuo 				      vq->split.vring_align,
11842713ea3cSJason Wang 				      vq->split.may_reduce_num,
11852713ea3cSJason Wang 				      vring_dma_dev(vq));
11866fea20e5SXuan Zhuo 	if (err)
11876fea20e5SXuan Zhuo 		goto err;
11886fea20e5SXuan Zhuo 
11896fea20e5SXuan Zhuo 	err = vring_alloc_state_extra_split(&vring_split);
11906fea20e5SXuan Zhuo 	if (err)
11916fea20e5SXuan Zhuo 		goto err_state_extra;
11926fea20e5SXuan Zhuo 
11936fea20e5SXuan Zhuo 	vring_free(&vq->vq);
11946fea20e5SXuan Zhuo 
11956fea20e5SXuan Zhuo 	virtqueue_vring_init_split(&vring_split, vq);
11966fea20e5SXuan Zhuo 
11976fea20e5SXuan Zhuo 	virtqueue_init(vq, vring_split.vring.num);
11986fea20e5SXuan Zhuo 	virtqueue_vring_attach_split(vq, &vring_split);
11996fea20e5SXuan Zhuo 
12006fea20e5SXuan Zhuo 	return 0;
12016fea20e5SXuan Zhuo 
12026fea20e5SXuan Zhuo err_state_extra:
12032713ea3cSJason Wang 	vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
12046fea20e5SXuan Zhuo err:
12056fea20e5SXuan Zhuo 	virtqueue_reinit_split(vq);
12066fea20e5SXuan Zhuo 	return -ENOMEM;
12076fea20e5SXuan Zhuo }
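
/*
 * On either failure path above, the newly allocated ring is released
 * (or was never attached) and the existing ring is re-initialized in
 * place, so the virtqueue remains usable at its old size and the caller
 * only ever sees -ENOMEM.
 */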
12086fea20e5SXuan Zhuo 
1209e6f633e5STiwei Bie 
1210e6f633e5STiwei Bie /*
12111ce9e605STiwei Bie  * Packed ring specific functions - *_packed().
12121ce9e605STiwei Bie  */
12131adbd6b2SFeng Liu static bool packed_used_wrap_counter(u16 last_used_idx)
1214a7722890Shuangjie.albert {
1215a7722890Shuangjie.albert 	return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1216a7722890Shuangjie.albert }
1217a7722890Shuangjie.albert 
12181adbd6b2SFeng Liu static u16 packed_last_used(u16 last_used_idx)
1219a7722890Shuangjie.albert {
1220a7722890Shuangjie.albert 	return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
1221a7722890Shuangjie.albert }
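
/*
 * For packed rings, ->last_used_idx packs the used wrap counter into bit
 * VRING_PACKED_EVENT_F_WRAP_CTR (bit 15) with the ring index in the low
 * bits: e.g. a raw value of 0x8003 decodes to index 3 with the wrap
 * counter set, while 0x0003 is the same index with it clear.
 */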
12221ce9e605STiwei Bie 
1223d80dc15bSXuan Zhuo static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
12244b6ec919SFeng Liu 				     const struct vring_desc_extra *extra)
12251ce9e605STiwei Bie {
12261ce9e605STiwei Bie 	u16 flags;
12271ce9e605STiwei Bie 
1228d80dc15bSXuan Zhuo 	flags = extra->flags;
12291ce9e605STiwei Bie 
12301ce9e605STiwei Bie 	if (flags & VRING_DESC_F_INDIRECT) {
1231b319940fSXuan Zhuo 		if (!vq->use_dma_api)
1232b319940fSXuan Zhuo 			return;
1233b319940fSXuan Zhuo 
12341ce9e605STiwei Bie 		dma_unmap_single(vring_dma_dev(vq),
1235d80dc15bSXuan Zhuo 				 extra->addr, extra->len,
12361ce9e605STiwei Bie 				 (flags & VRING_DESC_F_WRITE) ?
12371ce9e605STiwei Bie 				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
12381ce9e605STiwei Bie 	} else {
1239b319940fSXuan Zhuo 		if (!vq->do_unmap)
1240b319940fSXuan Zhuo 			return;
1241b319940fSXuan Zhuo 
12421ce9e605STiwei Bie 		dma_unmap_page(vring_dma_dev(vq),
1243d80dc15bSXuan Zhuo 			       extra->addr, extra->len,
12441ce9e605STiwei Bie 			       (flags & VRING_DESC_F_WRITE) ?
12451ce9e605STiwei Bie 			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
12461ce9e605STiwei Bie 	}
12471ce9e605STiwei Bie }
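
/*
 * Note the asymmetry above: indirect tables are always mapped by this
 * core (hence gated on use_dma_api alone), whereas data buffers may have
 * been premapped by the driver, in which case do_unmap is false and
 * their unmap is skipped.
 */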
12481ce9e605STiwei Bie 
12491ce9e605STiwei Bie static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
12504b6ec919SFeng Liu 				    const struct vring_packed_desc *desc)
12511ce9e605STiwei Bie {
12521ce9e605STiwei Bie 	u16 flags;
12531ce9e605STiwei Bie 
1254b319940fSXuan Zhuo 	if (!vq->do_unmap)
12551ce9e605STiwei Bie 		return;
12561ce9e605STiwei Bie 
12571ce9e605STiwei Bie 	flags = le16_to_cpu(desc->flags);
12581ce9e605STiwei Bie 
12591ce9e605STiwei Bie 	dma_unmap_page(vring_dma_dev(vq),
12601ce9e605STiwei Bie 		       le64_to_cpu(desc->addr),
12611ce9e605STiwei Bie 		       le32_to_cpu(desc->len),
12621ce9e605STiwei Bie 		       (flags & VRING_DESC_F_WRITE) ?
12631ce9e605STiwei Bie 		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
12641ce9e605STiwei Bie }
12651ce9e605STiwei Bie 
12661ce9e605STiwei Bie static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
12671ce9e605STiwei Bie 						       gfp_t gfp)
12681ce9e605STiwei Bie {
12691ce9e605STiwei Bie 	struct vring_packed_desc *desc;
12701ce9e605STiwei Bie 
12711ce9e605STiwei Bie 	/*
12721ce9e605STiwei Bie 	 * We require lowmem mappings for the descriptors because
12731ce9e605STiwei Bie 	 * otherwise virt_to_phys will give us bogus addresses in the
12741ce9e605STiwei Bie 	 * virtqueue.
12751ce9e605STiwei Bie 	 */
12761ce9e605STiwei Bie 	gfp &= ~__GFP_HIGHMEM;
12771ce9e605STiwei Bie 
12781ce9e605STiwei Bie 	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
12791ce9e605STiwei Bie 
12801ce9e605STiwei Bie 	return desc;
12811ce9e605STiwei Bie }
12821ce9e605STiwei Bie 
12831ce9e605STiwei Bie static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
12841ce9e605STiwei Bie 					 struct scatterlist *sgs[],
12851ce9e605STiwei Bie 					 unsigned int total_sg,
12861ce9e605STiwei Bie 					 unsigned int out_sgs,
12871ce9e605STiwei Bie 					 unsigned int in_sgs,
12881ce9e605STiwei Bie 					 void *data,
12891ce9e605STiwei Bie 					 gfp_t gfp)
12901ce9e605STiwei Bie {
12911ce9e605STiwei Bie 	struct vring_packed_desc *desc;
12921ce9e605STiwei Bie 	struct scatterlist *sg;
12931ce9e605STiwei Bie 	unsigned int i, n, err_idx;
12941ce9e605STiwei Bie 	u16 head, id;
12951ce9e605STiwei Bie 	dma_addr_t addr;
12961ce9e605STiwei Bie 
12971ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
12981ce9e605STiwei Bie 	desc = alloc_indirect_packed(total_sg, gfp);
1299fc6d70f4SXuan Zhuo 	if (!desc)
1300fc6d70f4SXuan Zhuo 		return -ENOMEM;
13011ce9e605STiwei Bie 
13021ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < 1)) {
13031ce9e605STiwei Bie 		pr_debug("Can't add buf len 1 - avail = 0\n");
1304df0bfe75SYueHaibing 		kfree(desc);
13051ce9e605STiwei Bie 		END_USE(vq);
13061ce9e605STiwei Bie 		return -ENOSPC;
13071ce9e605STiwei Bie 	}
13081ce9e605STiwei Bie 
13091ce9e605STiwei Bie 	i = 0;
13101ce9e605STiwei Bie 	id = vq->free_head;
13111ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
13121ce9e605STiwei Bie 
13131ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
13141ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
13150e27fa6dSXuan Zhuo 			if (vring_map_one_sg(vq, sg, n < out_sgs ?
13160e27fa6dSXuan Zhuo 					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
13171ce9e605STiwei Bie 				goto unmap_release;
13181ce9e605STiwei Bie 
13191ce9e605STiwei Bie 			desc[i].flags = cpu_to_le16(n < out_sgs ?
13201ce9e605STiwei Bie 						0 : VRING_DESC_F_WRITE);
13211ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
13221ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
13231ce9e605STiwei Bie 			i++;
13241ce9e605STiwei Bie 		}
13251ce9e605STiwei Bie 	}
13261ce9e605STiwei Bie 
13271ce9e605STiwei Bie 	/* Now that the indirect table is filled in, map it. */
13281ce9e605STiwei Bie 	addr = vring_map_single(vq, desc,
13291ce9e605STiwei Bie 			total_sg * sizeof(struct vring_packed_desc),
13301ce9e605STiwei Bie 			DMA_TO_DEVICE);
1331d7344a2fSXuan Zhuo 	if (vring_mapping_error(vq, addr)) {
1332d7344a2fSXuan Zhuo 		if (vq->premapped)
1333d7344a2fSXuan Zhuo 			goto free_desc;
1334d7344a2fSXuan Zhuo 
13351ce9e605STiwei Bie 		goto unmap_release;
1336d7344a2fSXuan Zhuo 	}
13371ce9e605STiwei Bie 
13381ce9e605STiwei Bie 	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
13391ce9e605STiwei Bie 	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
13401ce9e605STiwei Bie 				sizeof(struct vring_packed_desc));
13411ce9e605STiwei Bie 	vq->packed.vring.desc[head].id = cpu_to_le16(id);
13421ce9e605STiwei Bie 
1343e142169aSXuan Zhuo 	if (vq->use_dma_api) {
13441ce9e605STiwei Bie 		vq->packed.desc_extra[id].addr = addr;
13451ce9e605STiwei Bie 		vq->packed.desc_extra[id].len = total_sg *
13461ce9e605STiwei Bie 				sizeof(struct vring_packed_desc);
13471ce9e605STiwei Bie 		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
13481ce9e605STiwei Bie 						  vq->packed.avail_used_flags;
13491ce9e605STiwei Bie 	}
13501ce9e605STiwei Bie 
13511ce9e605STiwei Bie 	/*
13521ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
13531ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
13541ce9e605STiwei Bie 	 * the list are made available.
13551ce9e605STiwei Bie 	 */
13561ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
13571ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
13581ce9e605STiwei Bie 						vq->packed.avail_used_flags);
13591ce9e605STiwei Bie 
13601ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
13611ce9e605STiwei Bie 	vq->vq.num_free -= 1;
13621ce9e605STiwei Bie 
13631ce9e605STiwei Bie 	/* Update free pointer */
13641ce9e605STiwei Bie 	n = head + 1;
13651ce9e605STiwei Bie 	if (n >= vq->packed.vring.num) {
13661ce9e605STiwei Bie 		n = 0;
13671ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
13681ce9e605STiwei Bie 		vq->packed.avail_used_flags ^=
13691ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_AVAIL |
13701ce9e605STiwei Bie 				1 << VRING_PACKED_DESC_F_USED;
13711ce9e605STiwei Bie 	}
13721ce9e605STiwei Bie 	vq->packed.next_avail_idx = n;
1373aeef9b47SJason Wang 	vq->free_head = vq->packed.desc_extra[id].next;
13741ce9e605STiwei Bie 
13751ce9e605STiwei Bie 	/* Store token and indirect buffer state. */
13761ce9e605STiwei Bie 	vq->packed.desc_state[id].num = 1;
13771ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
13781ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = desc;
13791ce9e605STiwei Bie 	vq->packed.desc_state[id].last = id;
13801ce9e605STiwei Bie 
13811ce9e605STiwei Bie 	vq->num_added += 1;
13821ce9e605STiwei Bie 
13831ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
13841ce9e605STiwei Bie 	END_USE(vq);
13851ce9e605STiwei Bie 
13861ce9e605STiwei Bie 	return 0;
13871ce9e605STiwei Bie 
13881ce9e605STiwei Bie unmap_release:
13891ce9e605STiwei Bie 	err_idx = i;
13901ce9e605STiwei Bie 
13911ce9e605STiwei Bie 	for (i = 0; i < err_idx; i++)
13921ce9e605STiwei Bie 		vring_unmap_desc_packed(vq, &desc[i]);
13931ce9e605STiwei Bie 
1394d7344a2fSXuan Zhuo free_desc:
13951ce9e605STiwei Bie 	kfree(desc);
13961ce9e605STiwei Bie 
13971ce9e605STiwei Bie 	END_USE(vq);
1398f7728002SHalil Pasic 	return -ENOMEM;
13991ce9e605STiwei Bie }
14001ce9e605STiwei Bie 
14011ce9e605STiwei Bie static inline int virtqueue_add_packed(struct virtqueue *_vq,
14021ce9e605STiwei Bie 				       struct scatterlist *sgs[],
14031ce9e605STiwei Bie 				       unsigned int total_sg,
14041ce9e605STiwei Bie 				       unsigned int out_sgs,
14051ce9e605STiwei Bie 				       unsigned int in_sgs,
14061ce9e605STiwei Bie 				       void *data,
14071ce9e605STiwei Bie 				       void *ctx,
14081ce9e605STiwei Bie 				       gfp_t gfp)
14091ce9e605STiwei Bie {
14101ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
14111ce9e605STiwei Bie 	struct vring_packed_desc *desc;
14121ce9e605STiwei Bie 	struct scatterlist *sg;
14131ce9e605STiwei Bie 	unsigned int i, n, c, descs_used, err_idx;
14143f649ab7SKees Cook 	__le16 head_flags, flags;
14153f649ab7SKees Cook 	u16 head, id, prev, curr, avail_used_flags;
1416fc6d70f4SXuan Zhuo 	int err;
14171ce9e605STiwei Bie 
14181ce9e605STiwei Bie 	START_USE(vq);
14191ce9e605STiwei Bie 
14201ce9e605STiwei Bie 	BUG_ON(data == NULL);
14211ce9e605STiwei Bie 	BUG_ON(ctx && vq->indirect);
14221ce9e605STiwei Bie 
14231ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
14241ce9e605STiwei Bie 		END_USE(vq);
14251ce9e605STiwei Bie 		return -EIO;
14261ce9e605STiwei Bie 	}
14271ce9e605STiwei Bie 
14281ce9e605STiwei Bie 	LAST_ADD_TIME_UPDATE(vq);
14291ce9e605STiwei Bie 
14301ce9e605STiwei Bie 	BUG_ON(total_sg == 0);
14311ce9e605STiwei Bie 
143235c51e09SXianting Tian 	if (virtqueue_use_indirect(vq, total_sg)) {
1433fc6d70f4SXuan Zhuo 		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
1434fc6d70f4SXuan Zhuo 						    in_sgs, data, gfp);
14351861ba62SMichael S. Tsirkin 		if (err != -ENOMEM) {
14361861ba62SMichael S. Tsirkin 			END_USE(vq);
1437fc6d70f4SXuan Zhuo 			return err;
14381861ba62SMichael S. Tsirkin 		}
1439fc6d70f4SXuan Zhuo 
1440fc6d70f4SXuan Zhuo 		/* fall back on direct */
1441fc6d70f4SXuan Zhuo 	}
14421ce9e605STiwei Bie 
14431ce9e605STiwei Bie 	head = vq->packed.next_avail_idx;
14441ce9e605STiwei Bie 	avail_used_flags = vq->packed.avail_used_flags;
14451ce9e605STiwei Bie 
14461ce9e605STiwei Bie 	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
14471ce9e605STiwei Bie 
14481ce9e605STiwei Bie 	desc = vq->packed.vring.desc;
14491ce9e605STiwei Bie 	i = head;
14501ce9e605STiwei Bie 	descs_used = total_sg;
14511ce9e605STiwei Bie 
14521ce9e605STiwei Bie 	if (unlikely(vq->vq.num_free < descs_used)) {
14531ce9e605STiwei Bie 		pr_debug("Can't add buf len %i - avail = %i\n",
14541ce9e605STiwei Bie 			 descs_used, vq->vq.num_free);
14551ce9e605STiwei Bie 		END_USE(vq);
14561ce9e605STiwei Bie 		return -ENOSPC;
14571ce9e605STiwei Bie 	}
14581ce9e605STiwei Bie 
14591ce9e605STiwei Bie 	id = vq->free_head;
14601ce9e605STiwei Bie 	BUG_ON(id == vq->packed.vring.num);
14611ce9e605STiwei Bie 
14621ce9e605STiwei Bie 	curr = id;
14631ce9e605STiwei Bie 	c = 0;
14641ce9e605STiwei Bie 	for (n = 0; n < out_sgs + in_sgs; n++) {
14651ce9e605STiwei Bie 		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
14660e27fa6dSXuan Zhuo 			dma_addr_t addr;
14670e27fa6dSXuan Zhuo 
14680e27fa6dSXuan Zhuo 			if (vring_map_one_sg(vq, sg, n < out_sgs ?
14690e27fa6dSXuan Zhuo 					     DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
14701ce9e605STiwei Bie 				goto unmap_release;
14711ce9e605STiwei Bie 
14721ce9e605STiwei Bie 			flags = cpu_to_le16(vq->packed.avail_used_flags |
14731ce9e605STiwei Bie 				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
14741ce9e605STiwei Bie 				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
14751ce9e605STiwei Bie 			if (i == head)
14761ce9e605STiwei Bie 				head_flags = flags;
14771ce9e605STiwei Bie 			else
14781ce9e605STiwei Bie 				desc[i].flags = flags;
14791ce9e605STiwei Bie 
14801ce9e605STiwei Bie 			desc[i].addr = cpu_to_le64(addr);
14811ce9e605STiwei Bie 			desc[i].len = cpu_to_le32(sg->length);
14821ce9e605STiwei Bie 			desc[i].id = cpu_to_le16(id);
14831ce9e605STiwei Bie 
1484e142169aSXuan Zhuo 			if (unlikely(vq->use_dma_api)) {
14851ce9e605STiwei Bie 				vq->packed.desc_extra[curr].addr = addr;
14861ce9e605STiwei Bie 				vq->packed.desc_extra[curr].len = sg->length;
14871ce9e605STiwei Bie 				vq->packed.desc_extra[curr].flags =
14881ce9e605STiwei Bie 					le16_to_cpu(flags);
14891ce9e605STiwei Bie 			}
14901ce9e605STiwei Bie 			prev = curr;
1491aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
14921ce9e605STiwei Bie 
14931ce9e605STiwei Bie 			if ((unlikely(++i >= vq->packed.vring.num))) {
14941ce9e605STiwei Bie 				i = 0;
14951ce9e605STiwei Bie 				vq->packed.avail_used_flags ^=
14961ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_AVAIL |
14971ce9e605STiwei Bie 					1 << VRING_PACKED_DESC_F_USED;
14981ce9e605STiwei Bie 			}
14991ce9e605STiwei Bie 		}
15001ce9e605STiwei Bie 	}
15011ce9e605STiwei Bie 
15021acfe2c1SYuan Yao 	if (i <= head)
15031ce9e605STiwei Bie 		vq->packed.avail_wrap_counter ^= 1;
15041ce9e605STiwei Bie 
15051ce9e605STiwei Bie 	/* We're using some buffers from the free list. */
15061ce9e605STiwei Bie 	vq->vq.num_free -= descs_used;
15071ce9e605STiwei Bie 
15081ce9e605STiwei Bie 	/* Update free pointer */
15091ce9e605STiwei Bie 	vq->packed.next_avail_idx = i;
15101ce9e605STiwei Bie 	vq->free_head = curr;
15111ce9e605STiwei Bie 
15121ce9e605STiwei Bie 	/* Store token. */
15131ce9e605STiwei Bie 	vq->packed.desc_state[id].num = descs_used;
15141ce9e605STiwei Bie 	vq->packed.desc_state[id].data = data;
15151ce9e605STiwei Bie 	vq->packed.desc_state[id].indir_desc = ctx;
15161ce9e605STiwei Bie 	vq->packed.desc_state[id].last = prev;
15171ce9e605STiwei Bie 
15181ce9e605STiwei Bie 	/*
15191ce9e605STiwei Bie 	 * A driver MUST NOT make the first descriptor in the list
15201ce9e605STiwei Bie 	 * available before all subsequent descriptors comprising
15211ce9e605STiwei Bie 	 * the list are made available.
15221ce9e605STiwei Bie 	 */
15231ce9e605STiwei Bie 	virtio_wmb(vq->weak_barriers);
15241ce9e605STiwei Bie 	vq->packed.vring.desc[head].flags = head_flags;
15251ce9e605STiwei Bie 	vq->num_added += descs_used;
15261ce9e605STiwei Bie 
15271ce9e605STiwei Bie 	pr_debug("Added buffer head %i to %p\n", head, vq);
15281ce9e605STiwei Bie 	END_USE(vq);
15291ce9e605STiwei Bie 
15301ce9e605STiwei Bie 	return 0;
15311ce9e605STiwei Bie 
15321ce9e605STiwei Bie unmap_release:
15331ce9e605STiwei Bie 	err_idx = i;
15341ce9e605STiwei Bie 	i = head;
153544593865SJason Wang 	curr = vq->free_head;
15361ce9e605STiwei Bie 
15371ce9e605STiwei Bie 	vq->packed.avail_used_flags = avail_used_flags;
15381ce9e605STiwei Bie 
15391ce9e605STiwei Bie 	for (n = 0; n < total_sg; n++) {
15401ce9e605STiwei Bie 		if (i == err_idx)
15411ce9e605STiwei Bie 			break;
1542d80dc15bSXuan Zhuo 		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
154344593865SJason Wang 		curr = vq->packed.desc_extra[curr].next;
15441ce9e605STiwei Bie 		i++;
15451ce9e605STiwei Bie 		if (i >= vq->packed.vring.num)
15461ce9e605STiwei Bie 			i = 0;
15471ce9e605STiwei Bie 	}
15481ce9e605STiwei Bie 
15491ce9e605STiwei Bie 	END_USE(vq);
15501ce9e605STiwei Bie 	return -EIO;
15511ce9e605STiwei Bie }
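
/*
 * The unmap_release path above walks the partially built chain through
 * desc_extra[].next rather than the ring itself: the descriptors were
 * never exposed to the device (the head flags are written last), and
 * the DMA addresses needed for unmapping live in desc_extra.
 */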
15521ce9e605STiwei Bie 
15531ce9e605STiwei Bie static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
15541ce9e605STiwei Bie {
15551ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1556f51f9826STiwei Bie 	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
15571ce9e605STiwei Bie 	bool needs_kick;
15581ce9e605STiwei Bie 	union {
15591ce9e605STiwei Bie 		struct {
15601ce9e605STiwei Bie 			__le16 off_wrap;
15611ce9e605STiwei Bie 			__le16 flags;
15621ce9e605STiwei Bie 		};
15631ce9e605STiwei Bie 		u32 u32;
15641ce9e605STiwei Bie 	} snapshot;
15651ce9e605STiwei Bie 
15661ce9e605STiwei Bie 	START_USE(vq);
15671ce9e605STiwei Bie 
15681ce9e605STiwei Bie 	/*
15691ce9e605STiwei Bie 	 * We need to expose the new flags value before checking notification
15701ce9e605STiwei Bie 	 * suppressions.
15711ce9e605STiwei Bie 	 */
15721ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
15731ce9e605STiwei Bie 
1574f51f9826STiwei Bie 	old = vq->packed.next_avail_idx - vq->num_added;
1575f51f9826STiwei Bie 	new = vq->packed.next_avail_idx;
15761ce9e605STiwei Bie 	vq->num_added = 0;
15771ce9e605STiwei Bie 
15781ce9e605STiwei Bie 	snapshot.u32 = *(u32 *)vq->packed.vring.device;
15791ce9e605STiwei Bie 	flags = le16_to_cpu(snapshot.flags);
15801ce9e605STiwei Bie 
15811ce9e605STiwei Bie 	LAST_ADD_TIME_CHECK(vq);
15821ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
15831ce9e605STiwei Bie 
1584f51f9826STiwei Bie 	if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
15851ce9e605STiwei Bie 		needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1586f51f9826STiwei Bie 		goto out;
1587f51f9826STiwei Bie 	}
1588f51f9826STiwei Bie 
1589f51f9826STiwei Bie 	off_wrap = le16_to_cpu(snapshot.off_wrap);
1590f51f9826STiwei Bie 
1591f51f9826STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1592f51f9826STiwei Bie 	event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1593f51f9826STiwei Bie 	if (wrap_counter != vq->packed.avail_wrap_counter)
1594f51f9826STiwei Bie 		event_idx -= vq->packed.vring.num;
1595f51f9826STiwei Bie 
1596f51f9826STiwei Bie 	needs_kick = vring_need_event(event_idx, new, old);
1597f51f9826STiwei Bie out:
15981ce9e605STiwei Bie 	END_USE(vq);
15991ce9e605STiwei Bie 	return needs_kick;
16001ce9e605STiwei Bie }
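
/*
 * vring_need_event() above implements the spec's event-index check: a
 * kick is needed iff event_idx falls within the batch made available
 * since the last kick, i.e. (u16)(new - event_idx - 1) < (u16)(new - old).
 */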
16011ce9e605STiwei Bie 
16021ce9e605STiwei Bie static void detach_buf_packed(struct vring_virtqueue *vq,
16031ce9e605STiwei Bie 			      unsigned int id, void **ctx)
16041ce9e605STiwei Bie {
16051ce9e605STiwei Bie 	struct vring_desc_state_packed *state = NULL;
16061ce9e605STiwei Bie 	struct vring_packed_desc *desc;
16071ce9e605STiwei Bie 	unsigned int i, curr;
16081ce9e605STiwei Bie 
16091ce9e605STiwei Bie 	state = &vq->packed.desc_state[id];
16101ce9e605STiwei Bie 
16111ce9e605STiwei Bie 	/* Clear data ptr. */
16121ce9e605STiwei Bie 	state->data = NULL;
16131ce9e605STiwei Bie 
1614aeef9b47SJason Wang 	vq->packed.desc_extra[state->last].next = vq->free_head;
16151ce9e605STiwei Bie 	vq->free_head = id;
16161ce9e605STiwei Bie 	vq->vq.num_free += state->num;
16171ce9e605STiwei Bie 
1618e142169aSXuan Zhuo 	if (unlikely(vq->use_dma_api)) {
16191ce9e605STiwei Bie 		curr = id;
16201ce9e605STiwei Bie 		for (i = 0; i < state->num; i++) {
1621d80dc15bSXuan Zhuo 			vring_unmap_extra_packed(vq,
16221ce9e605STiwei Bie 						 &vq->packed.desc_extra[curr]);
1623aeef9b47SJason Wang 			curr = vq->packed.desc_extra[curr].next;
16241ce9e605STiwei Bie 		}
16251ce9e605STiwei Bie 	}
16261ce9e605STiwei Bie 
16271ce9e605STiwei Bie 	if (vq->indirect) {
16281ce9e605STiwei Bie 		u32 len;
16291ce9e605STiwei Bie 
16301ce9e605STiwei Bie 		/* Free the indirect table, if any, now that it's unmapped. */
16311ce9e605STiwei Bie 		desc = state->indir_desc;
16321ce9e605STiwei Bie 		if (!desc)
16331ce9e605STiwei Bie 			return;
16341ce9e605STiwei Bie 
1635b319940fSXuan Zhuo 		if (vq->do_unmap) {
16361ce9e605STiwei Bie 			len = vq->packed.desc_extra[id].len;
16371ce9e605STiwei Bie 			for (i = 0; i < len / sizeof(struct vring_packed_desc);
16381ce9e605STiwei Bie 					i++)
16391ce9e605STiwei Bie 				vring_unmap_desc_packed(vq, &desc[i]);
16401ce9e605STiwei Bie 		}
16411ce9e605STiwei Bie 		kfree(desc);
16421ce9e605STiwei Bie 		state->indir_desc = NULL;
16431ce9e605STiwei Bie 	} else if (ctx) {
16441ce9e605STiwei Bie 		*ctx = state->indir_desc;
16451ce9e605STiwei Bie 	}
16461ce9e605STiwei Bie }
16471ce9e605STiwei Bie 
16481ce9e605STiwei Bie static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
16491ce9e605STiwei Bie 				       u16 idx, bool used_wrap_counter)
16501ce9e605STiwei Bie {
16511ce9e605STiwei Bie 	bool avail, used;
16521ce9e605STiwei Bie 	u16 flags;
16531ce9e605STiwei Bie 
16541ce9e605STiwei Bie 	flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
16551ce9e605STiwei Bie 	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
16561ce9e605STiwei Bie 	used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
16571ce9e605STiwei Bie 
16581ce9e605STiwei Bie 	return avail == used && used == used_wrap_counter;
16591ce9e605STiwei Bie }
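
/*
 * So a packed descriptor is "used" when its AVAIL and USED flag bits are
 * equal and both match the driver's used wrap counter: e.g. with the
 * counter at 1 (the initial value) a used entry has both bits set, and
 * after the ring wraps once a used entry has both bits clear.
 */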
16601ce9e605STiwei Bie 
16611adbd6b2SFeng Liu static bool more_used_packed(const struct vring_virtqueue *vq)
16621ce9e605STiwei Bie {
1663a7722890Shuangjie.albert 	u16 last_used;
1664a7722890Shuangjie.albert 	u16 last_used_idx;
1665a7722890Shuangjie.albert 	bool used_wrap_counter;
1666a7722890Shuangjie.albert 
1667a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1668a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
1669a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1670a7722890Shuangjie.albert 	return is_used_desc_packed(vq, last_used, used_wrap_counter);
16711ce9e605STiwei Bie }
16721ce9e605STiwei Bie 
16731ce9e605STiwei Bie static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
16741ce9e605STiwei Bie 					  unsigned int *len,
16751ce9e605STiwei Bie 					  void **ctx)
16761ce9e605STiwei Bie {
16771ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1678a7722890Shuangjie.albert 	u16 last_used, id, last_used_idx;
1679a7722890Shuangjie.albert 	bool used_wrap_counter;
16801ce9e605STiwei Bie 	void *ret;
16811ce9e605STiwei Bie 
16821ce9e605STiwei Bie 	START_USE(vq);
16831ce9e605STiwei Bie 
16841ce9e605STiwei Bie 	if (unlikely(vq->broken)) {
16851ce9e605STiwei Bie 		END_USE(vq);
16861ce9e605STiwei Bie 		return NULL;
16871ce9e605STiwei Bie 	}
16881ce9e605STiwei Bie 
16891ce9e605STiwei Bie 	if (!more_used_packed(vq)) {
16901ce9e605STiwei Bie 		pr_debug("No more buffers in queue\n");
16911ce9e605STiwei Bie 		END_USE(vq);
16921ce9e605STiwei Bie 		return NULL;
16931ce9e605STiwei Bie 	}
16941ce9e605STiwei Bie 
16951ce9e605STiwei Bie 	/* Only get used elements after they have been exposed by host. */
16961ce9e605STiwei Bie 	virtio_rmb(vq->weak_barriers);
16971ce9e605STiwei Bie 
1698a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1699a7722890Shuangjie.albert 	used_wrap_counter = packed_used_wrap_counter(last_used_idx);
1700a7722890Shuangjie.albert 	last_used = packed_last_used(last_used_idx);
17011ce9e605STiwei Bie 	id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
17021ce9e605STiwei Bie 	*len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
17031ce9e605STiwei Bie 
17041ce9e605STiwei Bie 	if (unlikely(id >= vq->packed.vring.num)) {
17051ce9e605STiwei Bie 		BAD_RING(vq, "id %u out of range\n", id);
17061ce9e605STiwei Bie 		return NULL;
17071ce9e605STiwei Bie 	}
17081ce9e605STiwei Bie 	if (unlikely(!vq->packed.desc_state[id].data)) {
17091ce9e605STiwei Bie 		BAD_RING(vq, "id %u is not a head!\n", id);
17101ce9e605STiwei Bie 		return NULL;
17111ce9e605STiwei Bie 	}
17121ce9e605STiwei Bie 
17131ce9e605STiwei Bie 	/* detach_buf_packed clears data, so grab it now. */
17141ce9e605STiwei Bie 	ret = vq->packed.desc_state[id].data;
17151ce9e605STiwei Bie 	detach_buf_packed(vq, id, ctx);
17161ce9e605STiwei Bie 
1717a7722890Shuangjie.albert 	last_used += vq->packed.desc_state[id].num;
1718a7722890Shuangjie.albert 	if (unlikely(last_used >= vq->packed.vring.num)) {
1719a7722890Shuangjie.albert 		last_used -= vq->packed.vring.num;
1720a7722890Shuangjie.albert 		used_wrap_counter ^= 1;
17211ce9e605STiwei Bie 	}
17221ce9e605STiwei Bie 
1723a7722890Shuangjie.albert 	last_used = (last_used | (used_wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1724a7722890Shuangjie.albert 	WRITE_ONCE(vq->last_used_idx, last_used);
1725a7722890Shuangjie.albert 
1726f51f9826STiwei Bie 	/*
1727f51f9826STiwei Bie 	 * If we expect an interrupt for the next entry, tell host
1728f51f9826STiwei Bie 	 * by writing event index and flush out the write before
1729f51f9826STiwei Bie 	 * the read in the next get_buf call.
1730f51f9826STiwei Bie 	 */
1731f51f9826STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
1732f51f9826STiwei Bie 		virtio_store_mb(vq->weak_barriers,
1733f51f9826STiwei Bie 				&vq->packed.vring.driver->off_wrap,
1734a7722890Shuangjie.albert 				cpu_to_le16(vq->last_used_idx));
1735f51f9826STiwei Bie 
17361ce9e605STiwei Bie 	LAST_ADD_TIME_INVALID(vq);
17371ce9e605STiwei Bie 
17381ce9e605STiwei Bie 	END_USE(vq);
17391ce9e605STiwei Bie 	return ret;
17401ce9e605STiwei Bie }
17411ce9e605STiwei Bie 
17421ce9e605STiwei Bie static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
17431ce9e605STiwei Bie {
17441ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17451ce9e605STiwei Bie 
17461ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
17471ce9e605STiwei Bie 		vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
17486c0b057cSAlbert Huang 
17496c0b057cSAlbert Huang 		/*
17506c0b057cSAlbert Huang 		 * If the device has already triggered an event it won't trigger
17516c0b057cSAlbert Huang 		 * one again: no need to disable.
17526c0b057cSAlbert Huang 		 */
17536c0b057cSAlbert Huang 		if (vq->event_triggered)
17546c0b057cSAlbert Huang 			return;
17556c0b057cSAlbert Huang 
17561ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17571ce9e605STiwei Bie 			cpu_to_le16(vq->packed.event_flags_shadow);
17581ce9e605STiwei Bie 	}
17591ce9e605STiwei Bie }
17601ce9e605STiwei Bie 
176131532340SSolomon Tan static unsigned int virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
17621ce9e605STiwei Bie {
17631ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17641ce9e605STiwei Bie 
17651ce9e605STiwei Bie 	START_USE(vq);
17661ce9e605STiwei Bie 
17671ce9e605STiwei Bie 	/*
17681ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
17691ce9e605STiwei Bie 	 * more to do.
17701ce9e605STiwei Bie 	 */
17711ce9e605STiwei Bie 
1772f51f9826STiwei Bie 	if (vq->event) {
1773f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap =
1774a7722890Shuangjie.albert 			cpu_to_le16(vq->last_used_idx);
1775f51f9826STiwei Bie 		/*
1776f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1777f51f9826STiwei Bie 		 * counter first before updating event flags.
1778f51f9826STiwei Bie 		 */
1779f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1780f51f9826STiwei Bie 	}
1781f51f9826STiwei Bie 
17821ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1783f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1784f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1785f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
17861ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
17871ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
17881ce9e605STiwei Bie 	}
17891ce9e605STiwei Bie 
17901ce9e605STiwei Bie 	END_USE(vq);
1791a7722890Shuangjie.albert 	return vq->last_used_idx;
17921ce9e605STiwei Bie }
17931ce9e605STiwei Bie 
17941ce9e605STiwei Bie static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
17951ce9e605STiwei Bie {
17961ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
17971ce9e605STiwei Bie 	bool wrap_counter;
17981ce9e605STiwei Bie 	u16 used_idx;
17991ce9e605STiwei Bie 
18001ce9e605STiwei Bie 	wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
18011ce9e605STiwei Bie 	used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
18021ce9e605STiwei Bie 
18031ce9e605STiwei Bie 	return is_used_desc_packed(vq, used_idx, wrap_counter);
18041ce9e605STiwei Bie }
18051ce9e605STiwei Bie 
18061ce9e605STiwei Bie static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
18071ce9e605STiwei Bie {
18081ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
1809a7722890Shuangjie.albert 	u16 used_idx, wrap_counter, last_used_idx;
1810f51f9826STiwei Bie 	u16 bufs;
18111ce9e605STiwei Bie 
18121ce9e605STiwei Bie 	START_USE(vq);
18131ce9e605STiwei Bie 
18141ce9e605STiwei Bie 	/*
18151ce9e605STiwei Bie 	 * We optimistically turn back on interrupts, then check if there was
18161ce9e605STiwei Bie 	 * more to do.
18171ce9e605STiwei Bie 	 */
18181ce9e605STiwei Bie 
1819f51f9826STiwei Bie 	if (vq->event) {
1820f51f9826STiwei Bie 		/* TODO: tune this threshold */
1821f51f9826STiwei Bie 		bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
1822a7722890Shuangjie.albert 		last_used_idx = READ_ONCE(vq->last_used_idx);
1823a7722890Shuangjie.albert 		wrap_counter = packed_used_wrap_counter(last_used_idx);
18241ce9e605STiwei Bie 
1825a7722890Shuangjie.albert 		used_idx = packed_last_used(last_used_idx) + bufs;
1826f51f9826STiwei Bie 		if (used_idx >= vq->packed.vring.num) {
1827f51f9826STiwei Bie 			used_idx -= vq->packed.vring.num;
1828f51f9826STiwei Bie 			wrap_counter ^= 1;
1829f51f9826STiwei Bie 		}
1830f51f9826STiwei Bie 
1831f51f9826STiwei Bie 		vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
1832f51f9826STiwei Bie 			(wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
1833f51f9826STiwei Bie 
1834f51f9826STiwei Bie 		/*
1835f51f9826STiwei Bie 		 * We need to update event offset and event wrap
1836f51f9826STiwei Bie 		 * counter first before updating event flags.
1837f51f9826STiwei Bie 		 */
1838f51f9826STiwei Bie 		virtio_wmb(vq->weak_barriers);
1839f51f9826STiwei Bie 	}
1840f51f9826STiwei Bie 
18411ce9e605STiwei Bie 	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
1842f51f9826STiwei Bie 		vq->packed.event_flags_shadow = vq->event ?
1843f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_DESC :
1844f51f9826STiwei Bie 				VRING_PACKED_EVENT_FLAG_ENABLE;
18451ce9e605STiwei Bie 		vq->packed.vring.driver->flags =
18461ce9e605STiwei Bie 				cpu_to_le16(vq->packed.event_flags_shadow);
18471ce9e605STiwei Bie 	}
18481ce9e605STiwei Bie 
18491ce9e605STiwei Bie 	/*
18501ce9e605STiwei Bie 	 * We need to update event suppression structure first
18511ce9e605STiwei Bie 	 * before re-checking for more used buffers.
18521ce9e605STiwei Bie 	 */
18531ce9e605STiwei Bie 	virtio_mb(vq->weak_barriers);
18541ce9e605STiwei Bie 
1855a7722890Shuangjie.albert 	last_used_idx = READ_ONCE(vq->last_used_idx);
1856a7722890Shuangjie.albert 	wrap_counter = packed_used_wrap_counter(last_used_idx);
1857a7722890Shuangjie.albert 	used_idx = packed_last_used(last_used_idx);
1858a7722890Shuangjie.albert 	if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
18591ce9e605STiwei Bie 		END_USE(vq);
18601ce9e605STiwei Bie 		return false;
18611ce9e605STiwei Bie 	}
18621ce9e605STiwei Bie 
18631ce9e605STiwei Bie 	END_USE(vq);
18641ce9e605STiwei Bie 	return true;
18651ce9e605STiwei Bie }
18661ce9e605STiwei Bie 
18671ce9e605STiwei Bie static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
18681ce9e605STiwei Bie {
18691ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
18701ce9e605STiwei Bie 	unsigned int i;
18711ce9e605STiwei Bie 	void *buf;
18721ce9e605STiwei Bie 
18731ce9e605STiwei Bie 	START_USE(vq);
18741ce9e605STiwei Bie 
18751ce9e605STiwei Bie 	for (i = 0; i < vq->packed.vring.num; i++) {
18761ce9e605STiwei Bie 		if (!vq->packed.desc_state[i].data)
18771ce9e605STiwei Bie 			continue;
18781ce9e605STiwei Bie 		/* detach_buf clears data, so grab it now. */
18791ce9e605STiwei Bie 		buf = vq->packed.desc_state[i].data;
18801ce9e605STiwei Bie 		detach_buf_packed(vq, i, NULL);
18811ce9e605STiwei Bie 		END_USE(vq);
18821ce9e605STiwei Bie 		return buf;
18831ce9e605STiwei Bie 	}
18841ce9e605STiwei Bie 	/* That should have freed everything. */
18851ce9e605STiwei Bie 	BUG_ON(vq->vq.num_free != vq->packed.vring.num);
18861ce9e605STiwei Bie 
18871ce9e605STiwei Bie 	END_USE(vq);
18881ce9e605STiwei Bie 	return NULL;
18891ce9e605STiwei Bie }
18901ce9e605STiwei Bie 
189196ef18a2SXuan Zhuo static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
18925a222421SJason Wang {
18935a222421SJason Wang 	struct vring_desc_extra *desc_extra;
18945a222421SJason Wang 	unsigned int i;
18955a222421SJason Wang 
18965a222421SJason Wang 	desc_extra = kmalloc_array(num, sizeof(struct vring_desc_extra),
18975a222421SJason Wang 				   GFP_KERNEL);
18985a222421SJason Wang 	if (!desc_extra)
18995a222421SJason Wang 		return NULL;
19005a222421SJason Wang 
19015a222421SJason Wang 	memset(desc_extra, 0, num * sizeof(struct vring_desc_extra));
19025a222421SJason Wang 
19035a222421SJason Wang 	for (i = 0; i < num - 1; i++)
19045a222421SJason Wang 		desc_extra[i].next = i + 1;
19055a222421SJason Wang 
19065a222421SJason Wang 	return desc_extra;
19075a222421SJason Wang }
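
/*
 * The next fields above pre-link every descriptor into one free list,
 * 0 -> 1 -> ... -> num - 1, which is why the attach helpers start
 * vq->free_head at 0; detach_buf_packed() later pushes freed chains
 * back onto its head.
 */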
19085a222421SJason Wang 
19096356f8bbSXuan Zhuo static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
19102713ea3cSJason Wang 			      struct virtio_device *vdev,
19112713ea3cSJason Wang 			      struct device *dma_dev)
19126356f8bbSXuan Zhuo {
19136356f8bbSXuan Zhuo 	if (vring_packed->vring.desc)
19146356f8bbSXuan Zhuo 		vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
19156356f8bbSXuan Zhuo 				 vring_packed->vring.desc,
19162713ea3cSJason Wang 				 vring_packed->ring_dma_addr,
19172713ea3cSJason Wang 				 dma_dev);
19186356f8bbSXuan Zhuo 
19196356f8bbSXuan Zhuo 	if (vring_packed->vring.driver)
19206356f8bbSXuan Zhuo 		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
19216356f8bbSXuan Zhuo 				 vring_packed->vring.driver,
19222713ea3cSJason Wang 				 vring_packed->driver_event_dma_addr,
19232713ea3cSJason Wang 				 dma_dev);
19246356f8bbSXuan Zhuo 
19256356f8bbSXuan Zhuo 	if (vring_packed->vring.device)
19266356f8bbSXuan Zhuo 		vring_free_queue(vdev, vring_packed->event_size_in_bytes,
19276356f8bbSXuan Zhuo 				 vring_packed->vring.device,
19282713ea3cSJason Wang 				 vring_packed->device_event_dma_addr,
19292713ea3cSJason Wang 				 dma_dev);
19306356f8bbSXuan Zhuo 
19316356f8bbSXuan Zhuo 	kfree(vring_packed->desc_state);
19326356f8bbSXuan Zhuo 	kfree(vring_packed->desc_extra);
19336356f8bbSXuan Zhuo }
19346356f8bbSXuan Zhuo 
19356b60b9c0SXuan Zhuo static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
19366b60b9c0SXuan Zhuo 				    struct virtio_device *vdev,
19372713ea3cSJason Wang 				    u32 num, struct device *dma_dev)
19386b60b9c0SXuan Zhuo {
19396b60b9c0SXuan Zhuo 	struct vring_packed_desc *ring;
19406b60b9c0SXuan Zhuo 	struct vring_packed_desc_event *driver, *device;
19416b60b9c0SXuan Zhuo 	dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
19426b60b9c0SXuan Zhuo 	size_t ring_size_in_bytes, event_size_in_bytes;
19436b60b9c0SXuan Zhuo 
19446b60b9c0SXuan Zhuo 	ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
19456b60b9c0SXuan Zhuo 
19466b60b9c0SXuan Zhuo 	ring = vring_alloc_queue(vdev, ring_size_in_bytes,
19476b60b9c0SXuan Zhuo 				 &ring_dma_addr,
19482713ea3cSJason Wang 				 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
19492713ea3cSJason Wang 				 dma_dev);
19506b60b9c0SXuan Zhuo 	if (!ring)
19516b60b9c0SXuan Zhuo 		goto err;
19526b60b9c0SXuan Zhuo 
19536b60b9c0SXuan Zhuo 	vring_packed->vring.desc         = ring;
19546b60b9c0SXuan Zhuo 	vring_packed->ring_dma_addr      = ring_dma_addr;
19556b60b9c0SXuan Zhuo 	vring_packed->ring_size_in_bytes = ring_size_in_bytes;
19566b60b9c0SXuan Zhuo 
19576b60b9c0SXuan Zhuo 	event_size_in_bytes = sizeof(struct vring_packed_desc_event);
19586b60b9c0SXuan Zhuo 
19596b60b9c0SXuan Zhuo 	driver = vring_alloc_queue(vdev, event_size_in_bytes,
19606b60b9c0SXuan Zhuo 				   &driver_event_dma_addr,
19612713ea3cSJason Wang 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
19622713ea3cSJason Wang 				   dma_dev);
19636b60b9c0SXuan Zhuo 	if (!driver)
19646b60b9c0SXuan Zhuo 		goto err;
19656b60b9c0SXuan Zhuo 
19666b60b9c0SXuan Zhuo 	vring_packed->vring.driver          = driver;
19676b60b9c0SXuan Zhuo 	vring_packed->event_size_in_bytes   = event_size_in_bytes;
19686b60b9c0SXuan Zhuo 	vring_packed->driver_event_dma_addr = driver_event_dma_addr;
19696b60b9c0SXuan Zhuo 
19706b60b9c0SXuan Zhuo 	device = vring_alloc_queue(vdev, event_size_in_bytes,
19716b60b9c0SXuan Zhuo 				   &device_event_dma_addr,
19722713ea3cSJason Wang 				   GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
19732713ea3cSJason Wang 				   dma_dev);
19746b60b9c0SXuan Zhuo 	if (!device)
19756b60b9c0SXuan Zhuo 		goto err;
19766b60b9c0SXuan Zhuo 
19776b60b9c0SXuan Zhuo 	vring_packed->vring.device          = device;
19786b60b9c0SXuan Zhuo 	vring_packed->device_event_dma_addr = device_event_dma_addr;
19796b60b9c0SXuan Zhuo 
19806b60b9c0SXuan Zhuo 	vring_packed->vring.num = num;
19816b60b9c0SXuan Zhuo 
19826b60b9c0SXuan Zhuo 	return 0;
19836b60b9c0SXuan Zhuo 
19846b60b9c0SXuan Zhuo err:
19852713ea3cSJason Wang 	vring_free_packed(vring_packed, vdev, dma_dev);
19866b60b9c0SXuan Zhuo 	return -ENOMEM;
19876b60b9c0SXuan Zhuo }
19886b60b9c0SXuan Zhuo 
1989ef3167cfSXuan Zhuo static int vring_alloc_state_extra_packed(struct vring_virtqueue_packed *vring_packed)
1990ef3167cfSXuan Zhuo {
1991ef3167cfSXuan Zhuo 	struct vring_desc_state_packed *state;
1992ef3167cfSXuan Zhuo 	struct vring_desc_extra *extra;
1993ef3167cfSXuan Zhuo 	u32 num = vring_packed->vring.num;
1994ef3167cfSXuan Zhuo 
1995ef3167cfSXuan Zhuo 	state = kmalloc_array(num, sizeof(struct vring_desc_state_packed), GFP_KERNEL);
1996ef3167cfSXuan Zhuo 	if (!state)
1997ef3167cfSXuan Zhuo 		goto err_desc_state;
1998ef3167cfSXuan Zhuo 
1999ef3167cfSXuan Zhuo 	memset(state, 0, num * sizeof(struct vring_desc_state_packed));
2000ef3167cfSXuan Zhuo 
2001ef3167cfSXuan Zhuo 	extra = vring_alloc_desc_extra(num);
2002ef3167cfSXuan Zhuo 	if (!extra)
2003ef3167cfSXuan Zhuo 		goto err_desc_extra;
2004ef3167cfSXuan Zhuo 
2005ef3167cfSXuan Zhuo 	vring_packed->desc_state = state;
2006ef3167cfSXuan Zhuo 	vring_packed->desc_extra = extra;
2007ef3167cfSXuan Zhuo 
2008ef3167cfSXuan Zhuo 	return 0;
2009ef3167cfSXuan Zhuo 
2010ef3167cfSXuan Zhuo err_desc_extra:
2011ef3167cfSXuan Zhuo 	kfree(state);
2012ef3167cfSXuan Zhuo err_desc_state:
2013ef3167cfSXuan Zhuo 	return -ENOMEM;
2014ef3167cfSXuan Zhuo }
2015ef3167cfSXuan Zhuo 
20161a107c87SXuan Zhuo static void virtqueue_vring_init_packed(struct vring_virtqueue_packed *vring_packed,
20171a107c87SXuan Zhuo 					bool callback)
20181a107c87SXuan Zhuo {
20191a107c87SXuan Zhuo 	vring_packed->next_avail_idx = 0;
20201a107c87SXuan Zhuo 	vring_packed->avail_wrap_counter = 1;
20211a107c87SXuan Zhuo 	vring_packed->event_flags_shadow = 0;
20221a107c87SXuan Zhuo 	vring_packed->avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
20231a107c87SXuan Zhuo 
20241a107c87SXuan Zhuo 	/* No callback?  Tell other side not to bother us. */
20251a107c87SXuan Zhuo 	if (!callback) {
20261a107c87SXuan Zhuo 		vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
20271a107c87SXuan Zhuo 		vring_packed->vring.driver->flags =
20281a107c87SXuan Zhuo 			cpu_to_le16(vring_packed->event_flags_shadow);
20291a107c87SXuan Zhuo 	}
20301a107c87SXuan Zhuo }
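
/*
 * The initial avail_used_flags value (AVAIL set, USED clear) matches
 * avail_wrap_counter = 1: the driver marks a descriptor available by
 * setting its AVAIL bit to the wrap counter and USED to its inverse.
 */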
20311a107c87SXuan Zhuo 
203251d649f1SXuan Zhuo static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq,
203351d649f1SXuan Zhuo 					  struct vring_virtqueue_packed *vring_packed)
203451d649f1SXuan Zhuo {
203551d649f1SXuan Zhuo 	vq->packed = *vring_packed;
203651d649f1SXuan Zhuo 
203751d649f1SXuan Zhuo 	/* Put everything in free lists. */
203851d649f1SXuan Zhuo 	vq->free_head = 0;
203951d649f1SXuan Zhuo }
204051d649f1SXuan Zhuo 
204156775e14SXuan Zhuo static void virtqueue_reinit_packed(struct vring_virtqueue *vq)
204256775e14SXuan Zhuo {
204356775e14SXuan Zhuo 	memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes);
204456775e14SXuan Zhuo 	memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes);
204556775e14SXuan Zhuo 
204656775e14SXuan Zhuo 	/* We need to reset desc.flags; for details, see is_used_desc_packed(). */
204756775e14SXuan Zhuo 	memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes);
204856775e14SXuan Zhuo 
204956775e14SXuan Zhuo 	virtqueue_init(vq, vq->packed.vring.num);
205056775e14SXuan Zhuo 	virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback);
205156775e14SXuan Zhuo }
205256775e14SXuan Zhuo 
20531ce9e605STiwei Bie static struct virtqueue *vring_create_virtqueue_packed(
20541ce9e605STiwei Bie 	unsigned int index,
20551ce9e605STiwei Bie 	unsigned int num,
20561ce9e605STiwei Bie 	unsigned int vring_align,
20571ce9e605STiwei Bie 	struct virtio_device *vdev,
20581ce9e605STiwei Bie 	bool weak_barriers,
20591ce9e605STiwei Bie 	bool may_reduce_num,
20601ce9e605STiwei Bie 	bool context,
20611ce9e605STiwei Bie 	bool (*notify)(struct virtqueue *),
20621ce9e605STiwei Bie 	void (*callback)(struct virtqueue *),
20632713ea3cSJason Wang 	const char *name,
20642713ea3cSJason Wang 	struct device *dma_dev)
20651ce9e605STiwei Bie {
20666b60b9c0SXuan Zhuo 	struct vring_virtqueue_packed vring_packed = {};
20671ce9e605STiwei Bie 	struct vring_virtqueue *vq;
2068ef3167cfSXuan Zhuo 	int err;
20691ce9e605STiwei Bie 
20702713ea3cSJason Wang 	if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
20711ce9e605STiwei Bie 		goto err_ring;
20721ce9e605STiwei Bie 
20731ce9e605STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
20741ce9e605STiwei Bie 	if (!vq)
20751ce9e605STiwei Bie 		goto err_vq;
20761ce9e605STiwei Bie 
20771ce9e605STiwei Bie 	vq->vq.callback = callback;
20781ce9e605STiwei Bie 	vq->vq.vdev = vdev;
20791ce9e605STiwei Bie 	vq->vq.name = name;
20801ce9e605STiwei Bie 	vq->vq.index = index;
20814913e854SXuan Zhuo 	vq->vq.reset = false;
20821ce9e605STiwei Bie 	vq->we_own_ring = true;
20831ce9e605STiwei Bie 	vq->notify = notify;
20841ce9e605STiwei Bie 	vq->weak_barriers = weak_barriers;
2085c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
20868b4ec69dSJason Wang 	vq->broken = true;
2087c346dae4SJason Wang #else
2088c346dae4SJason Wang 	vq->broken = false;
2089c346dae4SJason Wang #endif
20901ce9e605STiwei Bie 	vq->packed_ring = true;
20912713ea3cSJason Wang 	vq->dma_dev = dma_dev;
20921ce9e605STiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
20938daafe9eSXuan Zhuo 	vq->premapped = false;
2094b319940fSXuan Zhuo 	vq->do_unmap = vq->use_dma_api;
20951ce9e605STiwei Bie 
20961ce9e605STiwei Bie 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
20971ce9e605STiwei Bie 		!context;
20981ce9e605STiwei Bie 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
20991ce9e605STiwei Bie 
210045383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
210145383fb0STiwei Bie 		vq->weak_barriers = false;
210245383fb0STiwei Bie 
2103ef3167cfSXuan Zhuo 	err = vring_alloc_state_extra_packed(&vring_packed);
2104ef3167cfSXuan Zhuo 	if (err)
2105ef3167cfSXuan Zhuo 		goto err_state_extra;
21061ce9e605STiwei Bie 
21071a107c87SXuan Zhuo 	virtqueue_vring_init_packed(&vring_packed, !!callback);
21081ce9e605STiwei Bie 
21093a897128SXuan Zhuo 	virtqueue_init(vq, num);
211051d649f1SXuan Zhuo 	virtqueue_vring_attach_packed(vq, &vring_packed);
21113a897128SXuan Zhuo 
21120e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2113e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
21140e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
21151ce9e605STiwei Bie 	return &vq->vq;
21161ce9e605STiwei Bie 
2117ef3167cfSXuan Zhuo err_state_extra:
21181ce9e605STiwei Bie 	kfree(vq);
21191ce9e605STiwei Bie err_vq:
21202713ea3cSJason Wang 	vring_free_packed(&vring_packed, vdev, dma_dev);
21211ce9e605STiwei Bie err_ring:
21221ce9e605STiwei Bie 	return NULL;
21231ce9e605STiwei Bie }
21241ce9e605STiwei Bie 
2125947f9fcfSXuan Zhuo static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
2126947f9fcfSXuan Zhuo {
2127947f9fcfSXuan Zhuo 	struct vring_virtqueue_packed vring_packed = {};
2128947f9fcfSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2129947f9fcfSXuan Zhuo 	struct virtio_device *vdev = _vq->vdev;
2130947f9fcfSXuan Zhuo 	int err;
2131947f9fcfSXuan Zhuo 
21322713ea3cSJason Wang 	if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
2133947f9fcfSXuan Zhuo 		goto err_ring;
2134947f9fcfSXuan Zhuo 
2135947f9fcfSXuan Zhuo 	err = vring_alloc_state_extra_packed(&vring_packed);
2136947f9fcfSXuan Zhuo 	if (err)
2137947f9fcfSXuan Zhuo 		goto err_state_extra;
2138947f9fcfSXuan Zhuo 
2139947f9fcfSXuan Zhuo 	vring_free(&vq->vq);
2140947f9fcfSXuan Zhuo 
2141947f9fcfSXuan Zhuo 	virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback);
2142947f9fcfSXuan Zhuo 
2143947f9fcfSXuan Zhuo 	virtqueue_init(vq, vring_packed.vring.num);
2144947f9fcfSXuan Zhuo 	virtqueue_vring_attach_packed(vq, &vring_packed);
2145947f9fcfSXuan Zhuo 
2146947f9fcfSXuan Zhuo 	return 0;
2147947f9fcfSXuan Zhuo 
2148947f9fcfSXuan Zhuo err_state_extra:
21492713ea3cSJason Wang 	vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
2150947f9fcfSXuan Zhuo err_ring:
2151947f9fcfSXuan Zhuo 	virtqueue_reinit_packed(vq);
2152947f9fcfSXuan Zhuo 	return -ENOMEM;
2153947f9fcfSXuan Zhuo }
2154947f9fcfSXuan Zhuo 
2155ad48d53bSXuan Zhuo static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
2156ad48d53bSXuan Zhuo 					 void (*recycle)(struct virtqueue *vq, void *buf))
2157ad48d53bSXuan Zhuo {
2158ad48d53bSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2159ad48d53bSXuan Zhuo 	struct virtio_device *vdev = vq->vq.vdev;
2160ad48d53bSXuan Zhuo 	void *buf;
2161ad48d53bSXuan Zhuo 	int err;
2162ad48d53bSXuan Zhuo 
2163ad48d53bSXuan Zhuo 	if (!vq->we_own_ring)
2164ad48d53bSXuan Zhuo 		return -EPERM;
2165ad48d53bSXuan Zhuo 
2166ad48d53bSXuan Zhuo 	if (!vdev->config->disable_vq_and_reset)
2167ad48d53bSXuan Zhuo 		return -ENOENT;
2168ad48d53bSXuan Zhuo 
2169ad48d53bSXuan Zhuo 	if (!vdev->config->enable_vq_after_reset)
2170ad48d53bSXuan Zhuo 		return -ENOENT;
2171ad48d53bSXuan Zhuo 
2172ad48d53bSXuan Zhuo 	err = vdev->config->disable_vq_and_reset(_vq);
2173ad48d53bSXuan Zhuo 	if (err)
2174ad48d53bSXuan Zhuo 		return err;
2175ad48d53bSXuan Zhuo 
2176ad48d53bSXuan Zhuo 	while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
2177ad48d53bSXuan Zhuo 		recycle(_vq, buf);
2178ad48d53bSXuan Zhuo 
2179ad48d53bSXuan Zhuo 	return 0;
2180ad48d53bSXuan Zhuo }
2181ad48d53bSXuan Zhuo 
2182ad48d53bSXuan Zhuo static int virtqueue_enable_after_reset(struct virtqueue *_vq)
2183ad48d53bSXuan Zhuo {
2184ad48d53bSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2185ad48d53bSXuan Zhuo 	struct virtio_device *vdev = vq->vq.vdev;
2186ad48d53bSXuan Zhuo 
2187ad48d53bSXuan Zhuo 	if (vdev->config->enable_vq_after_reset(_vq))
2188ad48d53bSXuan Zhuo 		return -EBUSY;
2189ad48d53bSXuan Zhuo 
2190ad48d53bSXuan Zhuo 	return 0;
2191ad48d53bSXuan Zhuo }
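
/*
 * These two helpers bracket a ring reset.  A sketch of the flow, roughly
 * what the exported virtqueue_resize() does (error handling trimmed):
 *
 *	err = virtqueue_disable_and_recycle(_vq, recycle);
 *	if (err)
 *		return err;
 *	if (vq->packed_ring)
 *		err = virtqueue_resize_packed(_vq, num);
 *	else
 *		err = virtqueue_resize_split(_vq, num);
 *	return virtqueue_enable_after_reset(_vq);
 */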
21921ce9e605STiwei Bie 
21931ce9e605STiwei Bie /*
2194e6f633e5STiwei Bie  * Generic functions and exported symbols.
2195e6f633e5STiwei Bie  */
2196e6f633e5STiwei Bie 
2197e6f633e5STiwei Bie static inline int virtqueue_add(struct virtqueue *_vq,
2198e6f633e5STiwei Bie 				struct scatterlist *sgs[],
2199e6f633e5STiwei Bie 				unsigned int total_sg,
2200e6f633e5STiwei Bie 				unsigned int out_sgs,
2201e6f633e5STiwei Bie 				unsigned int in_sgs,
2202e6f633e5STiwei Bie 				void *data,
2203e6f633e5STiwei Bie 				void *ctx,
2204e6f633e5STiwei Bie 				gfp_t gfp)
2205e6f633e5STiwei Bie {
22061ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
22071ce9e605STiwei Bie 
22081ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
22091ce9e605STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp) :
22101ce9e605STiwei Bie 				 virtqueue_add_split(_vq, sgs, total_sg,
2211e6f633e5STiwei Bie 					out_sgs, in_sgs, data, ctx, gfp);
2212e6f633e5STiwei Bie }
2213e6f633e5STiwei Bie 
2214e6f633e5STiwei Bie /**
2215e6f633e5STiwei Bie  * virtqueue_add_sgs - expose buffers to other end
2216a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2217e6f633e5STiwei Bie  * @sgs: array of terminated scatterlists.
2218a5581206SJiang Biao  * @out_sgs: the number of scatterlists readable by other side
2219a5581206SJiang Biao  * @in_sgs: the number of scatterlists which are writable (after readable ones)
2220e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2221e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2222e6f633e5STiwei Bie  *
2223e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2224e6f633e5STiwei Bie  * at the same time (except where noted).
2225e6f633e5STiwei Bie  *
2226e6f633e5STiwei Bie  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
2227e6f633e5STiwei Bie  */
2228e6f633e5STiwei Bie int virtqueue_add_sgs(struct virtqueue *_vq,
2229e6f633e5STiwei Bie 		      struct scatterlist *sgs[],
2230e6f633e5STiwei Bie 		      unsigned int out_sgs,
2231e6f633e5STiwei Bie 		      unsigned int in_sgs,
2232e6f633e5STiwei Bie 		      void *data,
2233e6f633e5STiwei Bie 		      gfp_t gfp)
2234e6f633e5STiwei Bie {
2235e6f633e5STiwei Bie 	unsigned int i, total_sg = 0;
2236e6f633e5STiwei Bie 
2237e6f633e5STiwei Bie 	/* Count them first. */
2238e6f633e5STiwei Bie 	for (i = 0; i < out_sgs + in_sgs; i++) {
2239e6f633e5STiwei Bie 		struct scatterlist *sg;
2240e6f633e5STiwei Bie 
2241e6f633e5STiwei Bie 		for (sg = sgs[i]; sg; sg = sg_next(sg))
2242e6f633e5STiwei Bie 			total_sg++;
2243e6f633e5STiwei Bie 	}
2244e6f633e5STiwei Bie 	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
2245e6f633e5STiwei Bie 			     data, NULL, gfp);
2246e6f633e5STiwei Bie }
2247e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
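
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * driver): queue one device-readable request and one device-writable
 * response with virtqueue_add_sgs(). `req', `resp' and their lengths are
 * assumed to be caller-owned.
 */
static int example_queue_request(struct virtqueue *vq,
				 void *req, unsigned int req_len,
				 void *resp, unsigned int resp_len)
{
	struct scatterlist out, in;
	struct scatterlist *sgs[2] = { &out, &in };

	sg_init_one(&out, req, req_len);	/* read by the device */
	sg_init_one(&in, resp, resp_len);	/* written by the device */

	/* `req' doubles as the token later returned by virtqueue_get_buf(). */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}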
2248e6f633e5STiwei Bie 
2249e6f633e5STiwei Bie /**
2250e6f633e5STiwei Bie  * virtqueue_add_outbuf - expose output buffers to other end
2251e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2252e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2253e6f633e5STiwei Bie  * @num: the number of entries in @sg readable by other side
2254e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2255e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2256e6f633e5STiwei Bie  *
2257e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2258e6f633e5STiwei Bie  * at the same time (except where noted).
2259e6f633e5STiwei Bie  *
2260e6f633e5STiwei Bie  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
2261e6f633e5STiwei Bie  */
2262e6f633e5STiwei Bie int virtqueue_add_outbuf(struct virtqueue *vq,
2263e6f633e5STiwei Bie 			 struct scatterlist *sg, unsigned int num,
2264e6f633e5STiwei Bie 			 void *data,
2265e6f633e5STiwei Bie 			 gfp_t gfp)
2266e6f633e5STiwei Bie {
2267e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
2268e6f633e5STiwei Bie }
2269e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
2270e6f633e5STiwei Bie 
2271e6f633e5STiwei Bie /**
2272e6f633e5STiwei Bie  * virtqueue_add_inbuf - expose input buffers to other end
2273e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2274e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2275e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
2276e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2277e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2278e6f633e5STiwei Bie  *
2279e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2280e6f633e5STiwei Bie  * at the same time (except where noted).
2281e6f633e5STiwei Bie  *
2282e6f633e5STiwei Bie  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
2283e6f633e5STiwei Bie  */
2284e6f633e5STiwei Bie int virtqueue_add_inbuf(struct virtqueue *vq,
2285e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
2286e6f633e5STiwei Bie 			void *data,
2287e6f633e5STiwei Bie 			gfp_t gfp)
2288e6f633e5STiwei Bie {
2289e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
2290e6f633e5STiwei Bie }
2291e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
2292e6f633e5STiwei Bie 
2293e6f633e5STiwei Bie /**
2294e6f633e5STiwei Bie  * virtqueue_add_inbuf_ctx - expose input buffers to other end
2295e6f633e5STiwei Bie  * @vq: the struct virtqueue we're talking about.
2296e6f633e5STiwei Bie  * @sg: scatterlist (must be well-formed and terminated!)
2297e6f633e5STiwei Bie  * @num: the number of entries in @sg writable by other side
2298e6f633e5STiwei Bie  * @data: the token identifying the buffer.
2299e6f633e5STiwei Bie  * @ctx: extra context for the token
2300e6f633e5STiwei Bie  * @gfp: how to do memory allocations (if necessary).
2301e6f633e5STiwei Bie  *
2302e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue operations
2303e6f633e5STiwei Bie  * at the same time (except where noted).
2304e6f633e5STiwei Bie  *
2305e6f633e5STiwei Bie  * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
2306e6f633e5STiwei Bie  */
2307e6f633e5STiwei Bie int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
2308e6f633e5STiwei Bie 			struct scatterlist *sg, unsigned int num,
2309e6f633e5STiwei Bie 			void *data,
2310e6f633e5STiwei Bie 			void *ctx,
2311e6f633e5STiwei Bie 			gfp_t gfp)
2312e6f633e5STiwei Bie {
2313e6f633e5STiwei Bie 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
2314e6f633e5STiwei Bie }
2315e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
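
/*
 * A companion sketch (hypothetical): replenish a receive queue with one
 * device-writable buffer, using the buffer pointer itself as the token.
 */
static int example_post_rx_buffer(struct virtqueue *vq, void *buf,
				  unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}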
2316e6f633e5STiwei Bie 
2317e6f633e5STiwei Bie /**
23182df64759SXuan Zhuo  * virtqueue_dma_dev - get the dma dev
23192df64759SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
23202df64759SXuan Zhuo  *
23212df64759SXuan Zhuo  * Returns the DMA device, which can be used with the DMA API, or NULL if
23212df64759SXuan Zhuo  * the vring does not use the DMA API.
23222df64759SXuan Zhuo  */
23232df64759SXuan Zhuo struct device *virtqueue_dma_dev(struct virtqueue *_vq)
23242df64759SXuan Zhuo {
23252df64759SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
23262df64759SXuan Zhuo 
23272df64759SXuan Zhuo 	if (vq->use_dma_api)
23282df64759SXuan Zhuo 		return vring_dma_dev(vq);
23292df64759SXuan Zhuo 	else
23302df64759SXuan Zhuo 		return NULL;
23312df64759SXuan Zhuo }
23322df64759SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
23332df64759SXuan Zhuo 
23342df64759SXuan Zhuo /**
2335e6f633e5STiwei Bie  * virtqueue_kick_prepare - first half of split virtqueue_kick call.
2336a5581206SJiang Biao  * @_vq: the struct virtqueue
2337e6f633e5STiwei Bie  *
2338e6f633e5STiwei Bie  * Instead of virtqueue_kick(), you can do:
2339e6f633e5STiwei Bie  *	if (virtqueue_kick_prepare(vq))
2340e6f633e5STiwei Bie  *		virtqueue_notify(vq);
2341e6f633e5STiwei Bie  *
2342e6f633e5STiwei Bie  * This is sometimes useful because virtqueue_kick_prepare() needs
2343e6f633e5STiwei Bie  * to be serialized, but the actual virtqueue_notify() call does not.
2344e6f633e5STiwei Bie  */
2345e6f633e5STiwei Bie bool virtqueue_kick_prepare(struct virtqueue *_vq)
2346e6f633e5STiwei Bie {
23471ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
23481ce9e605STiwei Bie 
23491ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
23501ce9e605STiwei Bie 				 virtqueue_kick_prepare_split(_vq);
2351e6f633e5STiwei Bie }
2352e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
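
/*
 * Sketch of the split-kick pattern described above (hypothetical driver
 * code): the serialized prepare step runs under the driver's own lock,
 * while the potentially slow notify step runs after the lock is dropped.
 */
static void example_kick_outside_lock(struct virtqueue *vq, spinlock_t *lock)
{
	bool needs_kick;

	spin_lock(lock);
	/* ... virtqueue_add_*() calls would go here ... */
	needs_kick = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (needs_kick)
		virtqueue_notify(vq);
}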
2353e6f633e5STiwei Bie 
2354e6f633e5STiwei Bie /**
2355e6f633e5STiwei Bie  * virtqueue_notify - second half of split virtqueue_kick call.
2356a5581206SJiang Biao  * @_vq: the struct virtqueue
2357e6f633e5STiwei Bie  *
2358e6f633e5STiwei Bie  * This does not need to be serialized.
2359e6f633e5STiwei Bie  *
2360e6f633e5STiwei Bie  * Returns false if host notify failed or queue is broken, otherwise true.
2361e6f633e5STiwei Bie  */
2362e6f633e5STiwei Bie bool virtqueue_notify(struct virtqueue *_vq)
2363e6f633e5STiwei Bie {
2364e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2365e6f633e5STiwei Bie 
2366e6f633e5STiwei Bie 	if (unlikely(vq->broken))
2367e6f633e5STiwei Bie 		return false;
2368e6f633e5STiwei Bie 
2369e6f633e5STiwei Bie 	/* Prod other side to tell it about changes. */
2370e6f633e5STiwei Bie 	if (!vq->notify(_vq)) {
2371e6f633e5STiwei Bie 		vq->broken = true;
2372e6f633e5STiwei Bie 		return false;
2373e6f633e5STiwei Bie 	}
2374e6f633e5STiwei Bie 	return true;
2375e6f633e5STiwei Bie }
2376e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_notify);
2377e6f633e5STiwei Bie 
2378e6f633e5STiwei Bie /**
2379e6f633e5STiwei Bie  * virtqueue_kick - update after add_buf
2380e6f633e5STiwei Bie  * @vq: the struct virtqueue
2381e6f633e5STiwei Bie  *
2382e6f633e5STiwei Bie  * After one or more virtqueue_add_* calls, invoke this to kick
2383e6f633e5STiwei Bie  * the other side.
2384e6f633e5STiwei Bie  *
2385e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2386e6f633e5STiwei Bie  * operations at the same time (except where noted).
2387e6f633e5STiwei Bie  *
2388e6f633e5STiwei Bie  * Returns false if kick failed, otherwise true.
2389e6f633e5STiwei Bie  */
2390e6f633e5STiwei Bie bool virtqueue_kick(struct virtqueue *vq)
2391e6f633e5STiwei Bie {
2392e6f633e5STiwei Bie 	if (virtqueue_kick_prepare(vq))
2393e6f633e5STiwei Bie 		return virtqueue_notify(vq);
2394e6f633e5STiwei Bie 	return true;
2395e6f633e5STiwei Bie }
2396e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_kick);
2397e6f633e5STiwei Bie 
2398e6f633e5STiwei Bie /**
239931c11db6SYang Li  * virtqueue_get_buf_ctx - get the next used buffer
2400a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2401e6f633e5STiwei Bie  * @len: the length written into the buffer
2402a5581206SJiang Biao  * @ctx: extra context for the token
2403e6f633e5STiwei Bie  *
2404e6f633e5STiwei Bie  * If the device wrote data into the buffer, @len will be set to the
2405e6f633e5STiwei Bie  * amount written.  This means you don't need to clear the buffer
2406e6f633e5STiwei Bie  * beforehand to ensure there's no data leakage in the case of short
2407e6f633e5STiwei Bie  * writes.
2408e6f633e5STiwei Bie  *
2409e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2410e6f633e5STiwei Bie  * operations at the same time (except where noted).
2411e6f633e5STiwei Bie  *
2412e6f633e5STiwei Bie  * Returns NULL if there are no used buffers, or the "data" token
2413e6f633e5STiwei Bie  * handed to virtqueue_add_*().
2414e6f633e5STiwei Bie  */
2415e6f633e5STiwei Bie void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
2416e6f633e5STiwei Bie 			    void **ctx)
2417e6f633e5STiwei Bie {
24181ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24191ce9e605STiwei Bie 
24201ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
24211ce9e605STiwei Bie 				 virtqueue_get_buf_ctx_split(_vq, len, ctx);
2422e6f633e5STiwei Bie }
2423e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
2424e6f633e5STiwei Bie 
2425e6f633e5STiwei Bie void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
2426e6f633e5STiwei Bie {
2427e6f633e5STiwei Bie 	return virtqueue_get_buf_ctx(_vq, len, NULL);
2428e6f633e5STiwei Bie }
2429e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_get_buf);
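
/*
 * Sketch (hypothetical): drain all completed buffers. The returned token is
 * whatever `data' pointer was handed to virtqueue_add_*(), and `len' is the
 * number of bytes the device wrote.
 */
static void example_drain_used(struct virtqueue *vq)
{
	unsigned int len;
	void *buf;

	while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
		/* ... process `len' bytes of `buf', then recycle it ... */
	}
}
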
2430e6f633e5STiwei Bie /**
2431e6f633e5STiwei Bie  * virtqueue_disable_cb - disable callbacks
2432a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2433e6f633e5STiwei Bie  *
2434e6f633e5STiwei Bie  * Note that this is not necessarily synchronous, hence unreliable and only
2435e6f633e5STiwei Bie  * useful as an optimization.
2436e6f633e5STiwei Bie  *
2437e6f633e5STiwei Bie  * Unlike other operations, this need not be serialized.
2438e6f633e5STiwei Bie  */
2439e6f633e5STiwei Bie void virtqueue_disable_cb(struct virtqueue *_vq)
2440e6f633e5STiwei Bie {
24411ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24421ce9e605STiwei Bie 
24431ce9e605STiwei Bie 	if (vq->packed_ring)
24441ce9e605STiwei Bie 		virtqueue_disable_cb_packed(_vq);
24451ce9e605STiwei Bie 	else
2446e6f633e5STiwei Bie 		virtqueue_disable_cb_split(_vq);
2447e6f633e5STiwei Bie }
2448e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
2449e6f633e5STiwei Bie 
2450e6f633e5STiwei Bie /**
2451e6f633e5STiwei Bie  * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
2452a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2453e6f633e5STiwei Bie  *
2454e6f633e5STiwei Bie  * This re-enables callbacks; it returns current queue state
2455e6f633e5STiwei Bie  * in an opaque unsigned value. This value should later be passed to
2456e6f633e5STiwei Bie  * virtqueue_poll(), to detect a possible race between the driver checking for
2457e6f633e5STiwei Bie  * more work, and enabling callbacks.
2458e6f633e5STiwei Bie  *
2459e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2460e6f633e5STiwei Bie  * operations at the same time (except where noted).
2461e6f633e5STiwei Bie  */
246231532340SSolomon Tan unsigned int virtqueue_enable_cb_prepare(struct virtqueue *_vq)
2463e6f633e5STiwei Bie {
24641ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
24651ce9e605STiwei Bie 
24668d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
24678d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
24688d622d21SMichael S. Tsirkin 
24691ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
24701ce9e605STiwei Bie 				 virtqueue_enable_cb_prepare_split(_vq);
2471e6f633e5STiwei Bie }
2472e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
2473e6f633e5STiwei Bie 
2474e6f633e5STiwei Bie /**
2475e6f633e5STiwei Bie  * virtqueue_poll - query pending used buffers
2476a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2477e6f633e5STiwei Bie  * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
2478e6f633e5STiwei Bie  *
2479e6f633e5STiwei Bie  * Returns "true" if there are pending used buffers in the queue.
2480e6f633e5STiwei Bie  *
2481e6f633e5STiwei Bie  * This does not need to be serialized.
2482e6f633e5STiwei Bie  */
248331532340SSolomon Tan bool virtqueue_poll(struct virtqueue *_vq, unsigned int last_used_idx)
2484e6f633e5STiwei Bie {
2485e6f633e5STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
2486e6f633e5STiwei Bie 
2487481a0d74SMao Wenan 	if (unlikely(vq->broken))
2488481a0d74SMao Wenan 		return false;
2489481a0d74SMao Wenan 
2490e6f633e5STiwei Bie 	virtio_mb(vq->weak_barriers);
24911ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
24921ce9e605STiwei Bie 				 virtqueue_poll_split(_vq, last_used_idx);
2493e6f633e5STiwei Bie }
2494e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_poll);
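
/*
 * Sketch of the enable/poll pattern (hypothetical): re-enable callbacks,
 * then close the race against buffers that were used while callbacks were
 * off. Returns true if the caller should keep processing instead of
 * sleeping.
 */
static bool example_reenable_or_repoll(struct virtqueue *vq)
{
	unsigned int opaque = virtqueue_enable_cb_prepare(vq);

	if (virtqueue_poll(vq, opaque)) {
		/* More work arrived meanwhile; keep callbacks off. */
		virtqueue_disable_cb(vq);
		return true;
	}
	return false;
}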
2495e6f633e5STiwei Bie 
2496e6f633e5STiwei Bie /**
2497e6f633e5STiwei Bie  * virtqueue_enable_cb - restart callbacks after disable_cb.
2498a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2499e6f633e5STiwei Bie  *
2500e6f633e5STiwei Bie  * This re-enables callbacks; it returns "false" if there are pending
2501e6f633e5STiwei Bie  * buffers in the queue, to detect a possible race between the driver
2502e6f633e5STiwei Bie  * checking for more work, and enabling callbacks.
2503e6f633e5STiwei Bie  *
2504e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2505e6f633e5STiwei Bie  * operations at the same time (except where noted).
2506e6f633e5STiwei Bie  */
2507e6f633e5STiwei Bie bool virtqueue_enable_cb(struct virtqueue *_vq)
2508e6f633e5STiwei Bie {
250931532340SSolomon Tan 	unsigned int last_used_idx = virtqueue_enable_cb_prepare(_vq);
2510e6f633e5STiwei Bie 
2511e6f633e5STiwei Bie 	return !virtqueue_poll(_vq, last_used_idx);
2512e6f633e5STiwei Bie }
2513e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
2514e6f633e5STiwei Bie 
2515e6f633e5STiwei Bie /**
2516e6f633e5STiwei Bie  * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
2517a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2518e6f633e5STiwei Bie  *
2519e6f633e5STiwei Bie  * This re-enables callbacks but hints to the other side to delay
2520e6f633e5STiwei Bie  * interrupts until most of the available buffers have been processed;
2521e6f633e5STiwei Bie  * it returns "false" if there are many pending buffers in the queue,
2522e6f633e5STiwei Bie  * to detect a possible race between the driver checking for more work,
2523e6f633e5STiwei Bie  * and enabling callbacks.
2524e6f633e5STiwei Bie  *
2525e6f633e5STiwei Bie  * Caller must ensure we don't call this with other virtqueue
2526e6f633e5STiwei Bie  * operations at the same time (except where noted).
2527e6f633e5STiwei Bie  */
2528e6f633e5STiwei Bie bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
2529e6f633e5STiwei Bie {
25301ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
25311ce9e605STiwei Bie 
25328d622d21SMichael S. Tsirkin 	if (vq->event_triggered)
25338d622d21SMichael S. Tsirkin 		vq->event_triggered = false;
25348d622d21SMichael S. Tsirkin 
25351ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
25361ce9e605STiwei Bie 				 virtqueue_enable_cb_delayed_split(_vq);
2537e6f633e5STiwei Bie }
2538e6f633e5STiwei Bie EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
2539e6f633e5STiwei Bie 
2540138fd251STiwei Bie /**
2541138fd251STiwei Bie  * virtqueue_detach_unused_buf - detach first unused buffer
2542a5581206SJiang Biao  * @_vq: the struct virtqueue we're talking about.
2543138fd251STiwei Bie  *
2544138fd251STiwei Bie  * Returns NULL or the "data" token handed to virtqueue_add_*().
2545a62eecb3SXuan Zhuo  * This is not valid on an active queue; it is useful for device
2546a62eecb3SXuan Zhuo  * shutdown or after a queue reset.
2547138fd251STiwei Bie  */
2548138fd251STiwei Bie void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
2549138fd251STiwei Bie {
25501ce9e605STiwei Bie 	struct vring_virtqueue *vq = to_vvq(_vq);
25511ce9e605STiwei Bie 
25521ce9e605STiwei Bie 	return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
25531ce9e605STiwei Bie 				 virtqueue_detach_unused_buf_split(_vq);
2554138fd251STiwei Bie }
25557c5e9ed0SMichael S. Tsirkin EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
2556c021eac4SShirley Ma 
2557138fd251STiwei Bie static inline bool more_used(const struct vring_virtqueue *vq)
2558138fd251STiwei Bie {
25591ce9e605STiwei Bie 	return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
2560138fd251STiwei Bie }
2561138fd251STiwei Bie 
25625c669c4aSRicardo Cañuelo /**
25635c669c4aSRicardo Cañuelo  * vring_interrupt - notify a virtqueue on an interrupt
25645c669c4aSRicardo Cañuelo  * @irq: the IRQ number (ignored)
25655c669c4aSRicardo Cañuelo  * @_vq: the struct virtqueue to notify
25665c669c4aSRicardo Cañuelo  *
25675c669c4aSRicardo Cañuelo  * Calls the callback function of @_vq to process the virtqueue
25685c669c4aSRicardo Cañuelo  * notification.
25695c669c4aSRicardo Cañuelo  */
25700a8a69ddSRusty Russell irqreturn_t vring_interrupt(int irq, void *_vq)
25710a8a69ddSRusty Russell {
25720a8a69ddSRusty Russell 	struct vring_virtqueue *vq = to_vvq(_vq);
25730a8a69ddSRusty Russell 
25740a8a69ddSRusty Russell 	if (!more_used(vq)) {
25750a8a69ddSRusty Russell 		pr_debug("virtqueue interrupt with no work for %p\n", vq);
25760a8a69ddSRusty Russell 		return IRQ_NONE;
25770a8a69ddSRusty Russell 	}
25780a8a69ddSRusty Russell 
25798b4ec69dSJason Wang 	if (unlikely(vq->broken)) {
2580c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
25818b4ec69dSJason Wang 		dev_warn_once(&vq->vq.vdev->dev,
25828b4ec69dSJason Wang 			      "virtio vring IRQ raised before DRIVER_OK");
25838b4ec69dSJason Wang 		return IRQ_NONE;
2584c346dae4SJason Wang #else
2585c346dae4SJason Wang 		return IRQ_HANDLED;
2586c346dae4SJason Wang #endif
25878b4ec69dSJason Wang 	}
25880a8a69ddSRusty Russell 
25898d622d21SMichael S. Tsirkin 	/* Just a hint for performance: so it's ok that this can be racy! */
25908d622d21SMichael S. Tsirkin 	if (vq->event)
25918d622d21SMichael S. Tsirkin 		vq->event_triggered = true;
25928d622d21SMichael S. Tsirkin 
25930a8a69ddSRusty Russell 	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
259418445c4dSRusty Russell 	if (vq->vq.callback)
259518445c4dSRusty Russell 		vq->vq.callback(&vq->vq);
25960a8a69ddSRusty Russell 
25970a8a69ddSRusty Russell 	return IRQ_HANDLED;
25980a8a69ddSRusty Russell }
2599c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_interrupt);
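
/*
 * Sketch (hypothetical transport code, assumes <linux/interrupt.h>):
 * vring_interrupt() has the irq_handler_t signature, so a transport can
 * wire a per-virtqueue interrupt line straight to it. `irq' is assumed to
 * come from the transport.
 */
static int example_wire_vq_irq(unsigned int irq, struct virtqueue *vq)
{
	return request_irq(irq, vring_interrupt, 0, "example-vq", vq);
}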
26000a8a69ddSRusty Russell 
26011ce9e605STiwei Bie /* Only available for split ring */
260207d9629dSXuan Zhuo static struct virtqueue *__vring_new_virtqueue(unsigned int index,
2603cd4c812aSXuan Zhuo 					       struct vring_virtqueue_split *vring_split,
26040a8a69ddSRusty Russell 					       struct virtio_device *vdev,
26057b21e34fSRusty Russell 					       bool weak_barriers,
2606f94682ddSMichael S. Tsirkin 					       bool context,
260746f9c2b9SHeinz Graalfs 					       bool (*notify)(struct virtqueue *),
26089499f5e7SRusty Russell 					       void (*callback)(struct virtqueue *),
26092713ea3cSJason Wang 					       const char *name,
26102713ea3cSJason Wang 					       struct device *dma_dev)
26110a8a69ddSRusty Russell {
26122a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq;
2613a2b36c8dSXuan Zhuo 	int err;
26140a8a69ddSRusty Russell 
26151ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
26161ce9e605STiwei Bie 		return NULL;
26171ce9e605STiwei Bie 
2618cbeedb72STiwei Bie 	vq = kmalloc(sizeof(*vq), GFP_KERNEL);
26190a8a69ddSRusty Russell 	if (!vq)
26200a8a69ddSRusty Russell 		return NULL;
26210a8a69ddSRusty Russell 
26221ce9e605STiwei Bie 	vq->packed_ring = false;
26230a8a69ddSRusty Russell 	vq->vq.callback = callback;
26240a8a69ddSRusty Russell 	vq->vq.vdev = vdev;
26259499f5e7SRusty Russell 	vq->vq.name = name;
262606ca287dSRusty Russell 	vq->vq.index = index;
26274913e854SXuan Zhuo 	vq->vq.reset = false;
26282a2d1382SAndy Lutomirski 	vq->we_own_ring = false;
26290a8a69ddSRusty Russell 	vq->notify = notify;
26307b21e34fSRusty Russell 	vq->weak_barriers = weak_barriers;
2631c346dae4SJason Wang #ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
26328b4ec69dSJason Wang 	vq->broken = true;
2633c346dae4SJason Wang #else
2634c346dae4SJason Wang 	vq->broken = false;
2635c346dae4SJason Wang #endif
26362713ea3cSJason Wang 	vq->dma_dev = dma_dev;
2637fb3fba6bSTiwei Bie 	vq->use_dma_api = vring_use_dma_api(vdev);
26388daafe9eSXuan Zhuo 	vq->premapped = false;
2639b319940fSXuan Zhuo 	vq->do_unmap = vq->use_dma_api;
26400a8a69ddSRusty Russell 
26415a08b04fSMichael S. Tsirkin 	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
26425a08b04fSMichael S. Tsirkin 		!context;
2643a5c262c5SMichael S. Tsirkin 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
26449fa29b9dSMark McLoughlin 
264545383fb0STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
264645383fb0STiwei Bie 		vq->weak_barriers = false;
264745383fb0STiwei Bie 
2648a2b36c8dSXuan Zhuo 	err = vring_alloc_state_extra_split(vring_split);
2649a2b36c8dSXuan Zhuo 	if (err) {
2650a2b36c8dSXuan Zhuo 		kfree(vq);
2651a2b36c8dSXuan Zhuo 		return NULL;
2652a2b36c8dSXuan Zhuo 	}
265372b5e895SJason Wang 
2654198fa7beSXuan Zhuo 	virtqueue_vring_init_split(vring_split, vq);
2655198fa7beSXuan Zhuo 
2656cd4c812aSXuan Zhuo 	virtqueue_init(vq, vring_split->vring.num);
2657e1d6a423SXuan Zhuo 	virtqueue_vring_attach_split(vq, vring_split);
26583a897128SXuan Zhuo 
26590e566c8fSParav Pandit 	spin_lock(&vdev->vqs_list_lock);
2660e152d8afSDan Carpenter 	list_add_tail(&vq->vq.list, &vdev->vqs);
26610e566c8fSParav Pandit 	spin_unlock(&vdev->vqs_list_lock);
26620a8a69ddSRusty Russell 	return &vq->vq;
26630a8a69ddSRusty Russell }
26642a2d1382SAndy Lutomirski 
26652a2d1382SAndy Lutomirski struct virtqueue *vring_create_virtqueue(
26662a2d1382SAndy Lutomirski 	unsigned int index,
26672a2d1382SAndy Lutomirski 	unsigned int num,
26682a2d1382SAndy Lutomirski 	unsigned int vring_align,
26692a2d1382SAndy Lutomirski 	struct virtio_device *vdev,
26702a2d1382SAndy Lutomirski 	bool weak_barriers,
26712a2d1382SAndy Lutomirski 	bool may_reduce_num,
2672f94682ddSMichael S. Tsirkin 	bool context,
26732a2d1382SAndy Lutomirski 	bool (*notify)(struct virtqueue *),
26742a2d1382SAndy Lutomirski 	void (*callback)(struct virtqueue *),
26752a2d1382SAndy Lutomirski 	const char *name)
26762a2d1382SAndy Lutomirski {
26771ce9e605STiwei Bie 
26781ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
26791ce9e605STiwei Bie 		return vring_create_virtqueue_packed(index, num, vring_align,
26801ce9e605STiwei Bie 				vdev, weak_barriers, may_reduce_num,
26812713ea3cSJason Wang 				context, notify, callback, name, vdev->dev.parent);
26821ce9e605STiwei Bie 
2683d79dca75STiwei Bie 	return vring_create_virtqueue_split(index, num, vring_align,
2684d79dca75STiwei Bie 			vdev, weak_barriers, may_reduce_num,
26852713ea3cSJason Wang 			context, notify, callback, name, vdev->dev.parent);
26862a2d1382SAndy Lutomirski }
26872a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(vring_create_virtqueue);
26882a2d1382SAndy Lutomirski 
26892713ea3cSJason Wang struct virtqueue *vring_create_virtqueue_dma(
26902713ea3cSJason Wang 	unsigned int index,
26912713ea3cSJason Wang 	unsigned int num,
26922713ea3cSJason Wang 	unsigned int vring_align,
26932713ea3cSJason Wang 	struct virtio_device *vdev,
26942713ea3cSJason Wang 	bool weak_barriers,
26952713ea3cSJason Wang 	bool may_reduce_num,
26962713ea3cSJason Wang 	bool context,
26972713ea3cSJason Wang 	bool (*notify)(struct virtqueue *),
26982713ea3cSJason Wang 	void (*callback)(struct virtqueue *),
26992713ea3cSJason Wang 	const char *name,
27002713ea3cSJason Wang 	struct device *dma_dev)
27012713ea3cSJason Wang {
27022713ea3cSJason Wang 
27032713ea3cSJason Wang 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
27042713ea3cSJason Wang 		return vring_create_virtqueue_packed(index, num, vring_align,
27052713ea3cSJason Wang 				vdev, weak_barriers, may_reduce_num,
27062713ea3cSJason Wang 				context, notify, callback, name, dma_dev);
27072713ea3cSJason Wang 
27082713ea3cSJason Wang 	return vring_create_virtqueue_split(index, num, vring_align,
27092713ea3cSJason Wang 			vdev, weak_barriers, may_reduce_num,
27102713ea3cSJason Wang 			context, notify, callback, name, dma_dev);
27112713ea3cSJason Wang }
27122713ea3cSJason Wang EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
27132713ea3cSJason Wang 
2714c790e8e1SXuan Zhuo /**
2715c790e8e1SXuan Zhuo  * virtqueue_resize - resize the vring of vq
2716c790e8e1SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
2717c790e8e1SXuan Zhuo  * @num: new ring size (number of descriptors)
27184d09f240SXuan Zhuo  * @recycle: callback to recycle unused buffers
2719c790e8e1SXuan Zhuo  *
2720c790e8e1SXuan Zhuo  * When it is really necessary to create a new vring, this puts the current vq
2721c790e8e1SXuan Zhuo  * into the reset state and then calls the passed callback to recycle the
2722c790e8e1SXuan Zhuo  * buffers that are no longer used. The old vring is released only after the
2723c790e8e1SXuan Zhuo  * new vring has been created successfully.
2724c790e8e1SXuan Zhuo  *
2725c790e8e1SXuan Zhuo  * Caller must ensure we don't call this with other virtqueue operations
2726c790e8e1SXuan Zhuo  * at the same time (except where noted).
2727c790e8e1SXuan Zhuo  *
2728c790e8e1SXuan Zhuo  * Returns zero or a negative error.
2729c790e8e1SXuan Zhuo  * 0: success.
2730c790e8e1SXuan Zhuo  * -ENOMEM: Failed to allocate a new ring; falls back to the original ring size,
2731c790e8e1SXuan Zhuo  *  and the vq can still work normally
2732c790e8e1SXuan Zhuo  * -EBUSY: Failed to sync with the device; the vq may not work properly
2733c790e8e1SXuan Zhuo  * -ENOENT: Transport or device does not support resizing
2734c790e8e1SXuan Zhuo  * -E2BIG/-EINVAL: @num is invalid (too large, or zero)
2735c790e8e1SXuan Zhuo  * -EPERM: Operation not permitted
2736c790e8e1SXuan Zhuo  *
2737c790e8e1SXuan Zhuo  */
2738c790e8e1SXuan Zhuo int virtqueue_resize(struct virtqueue *_vq, u32 num,
2739c790e8e1SXuan Zhuo 		     void (*recycle)(struct virtqueue *vq, void *buf))
2740c790e8e1SXuan Zhuo {
2741c790e8e1SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2742c790e8e1SXuan Zhuo 	int err;
2743c790e8e1SXuan Zhuo 
2744c790e8e1SXuan Zhuo 	if (num > vq->vq.num_max)
2745c790e8e1SXuan Zhuo 		return -E2BIG;
2746c790e8e1SXuan Zhuo 
2747c790e8e1SXuan Zhuo 	if (!num)
2748c790e8e1SXuan Zhuo 		return -EINVAL;
2749c790e8e1SXuan Zhuo 
2750c790e8e1SXuan Zhuo 	if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
2751c790e8e1SXuan Zhuo 		return 0;
2752c790e8e1SXuan Zhuo 
2753ad48d53bSXuan Zhuo 	err = virtqueue_disable_and_recycle(_vq, recycle);
2754c790e8e1SXuan Zhuo 	if (err)
2755c790e8e1SXuan Zhuo 		return err;
2756c790e8e1SXuan Zhuo 
2757c790e8e1SXuan Zhuo 	if (vq->packed_ring)
2758c790e8e1SXuan Zhuo 		err = virtqueue_resize_packed(_vq, num);
2759c790e8e1SXuan Zhuo 	else
2760c790e8e1SXuan Zhuo 		err = virtqueue_resize_split(_vq, num);
2761c790e8e1SXuan Zhuo 
2762ad48d53bSXuan Zhuo 	return virtqueue_enable_after_reset(_vq);
2763c790e8e1SXuan Zhuo }
2764c790e8e1SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_resize);
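
/*
 * Sketch (hypothetical): grow or shrink a queue. The recycle callback sees
 * every buffer still queued when the vq is reset; here it simply frees
 * them, assuming the driver kmalloc'ed them.
 */
static void example_recycle(struct virtqueue *vq, void *buf)
{
	kfree(buf);
}

static int example_resize_vq(struct virtqueue *vq, u32 new_num)
{
	return virtqueue_resize(vq, new_num, example_recycle);
}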
2765c790e8e1SXuan Zhuo 
27668daafe9eSXuan Zhuo /**
27678daafe9eSXuan Zhuo  * virtqueue_set_dma_premapped - set the vring premapped mode
27688daafe9eSXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
27698daafe9eSXuan Zhuo  *
27708daafe9eSXuan Zhuo  * Enable the premapped mode of the vq.
27718daafe9eSXuan Zhuo  *
27728daafe9eSXuan Zhuo  * A vring in premapped mode does not do DMA internally, so the driver must
27738daafe9eSXuan Zhuo  * do the DMA mapping in advance and pass the DMA address through the
27748daafe9eSXuan Zhuo  * dma_address field of the scatterlist. When the driver gets a used buffer
27758daafe9eSXuan Zhuo  * back from the vring, it has to unmap the DMA address itself.
27768daafe9eSXuan Zhuo  *
27778daafe9eSXuan Zhuo  * This function must be called immediately after creating the vq, or after vq
27788daafe9eSXuan Zhuo  * reset, and before adding any buffers to it.
27798daafe9eSXuan Zhuo  *
27808daafe9eSXuan Zhuo  * Caller must ensure we don't call this with other virtqueue operations
27818daafe9eSXuan Zhuo  * at the same time (except where noted).
27828daafe9eSXuan Zhuo  *
27838daafe9eSXuan Zhuo  * Returns zero or a negative error.
27848daafe9eSXuan Zhuo  * 0: success.
27858daafe9eSXuan Zhuo  * -EINVAL: the vring does not use the DMA API, or buffers were already added.
27868daafe9eSXuan Zhuo  */
27878daafe9eSXuan Zhuo int virtqueue_set_dma_premapped(struct virtqueue *_vq)
27888daafe9eSXuan Zhuo {
27898daafe9eSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
27908daafe9eSXuan Zhuo 	u32 num;
27918daafe9eSXuan Zhuo 
27928daafe9eSXuan Zhuo 	START_USE(vq);
27938daafe9eSXuan Zhuo 
27948daafe9eSXuan Zhuo 	num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
27958daafe9eSXuan Zhuo 
27968daafe9eSXuan Zhuo 	if (num != vq->vq.num_free) {
27978daafe9eSXuan Zhuo 		END_USE(vq);
27988daafe9eSXuan Zhuo 		return -EINVAL;
27998daafe9eSXuan Zhuo 	}
28008daafe9eSXuan Zhuo 
28018daafe9eSXuan Zhuo 	if (!vq->use_dma_api) {
28028daafe9eSXuan Zhuo 		END_USE(vq);
28038daafe9eSXuan Zhuo 		return -EINVAL;
28048daafe9eSXuan Zhuo 	}
28058daafe9eSXuan Zhuo 
28068daafe9eSXuan Zhuo 	vq->premapped = true;
2807b319940fSXuan Zhuo 	vq->do_unmap = false;
28088daafe9eSXuan Zhuo 
28098daafe9eSXuan Zhuo 	END_USE(vq);
28108daafe9eSXuan Zhuo 
28118daafe9eSXuan Zhuo 	return 0;
28128daafe9eSXuan Zhuo }
28138daafe9eSXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
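
/*
 * Sketch of the premapped flow (hypothetical): after a successful
 * virtqueue_set_dma_premapped(), the driver supplies the DMA address
 * itself via the scatterlist. `addr' is assumed to come from
 * virtqueue_dma_map_single_attrs() below or another mapping the driver made.
 */
static int example_add_premapped_rx(struct virtqueue *vq, void *buf,
				    dma_addr_t addr, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	sg_dma_address(&sg) = addr;	/* used instead of an internal mapping */
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}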
28148daafe9eSXuan Zhuo 
2815ba3e0c47SXuan Zhuo /**
2816ba3e0c47SXuan Zhuo  * virtqueue_reset - detach and recycle all unused buffers
2817ba3e0c47SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
2818ba3e0c47SXuan Zhuo  * @recycle: callback to recycle unused buffers
2819ba3e0c47SXuan Zhuo  *
2820ba3e0c47SXuan Zhuo  * Caller must ensure we don't call this with other virtqueue operations
2821ba3e0c47SXuan Zhuo  * at the same time (except where noted).
2822ba3e0c47SXuan Zhuo  *
2823ba3e0c47SXuan Zhuo  * Returns zero or a negative error.
2824ba3e0c47SXuan Zhuo  * 0: success.
2825ba3e0c47SXuan Zhuo  * -EBUSY: Failed to sync with device, vq may not work properly
2826ba3e0c47SXuan Zhuo  * -ENOENT: Transport or device not supported
2827ba3e0c47SXuan Zhuo  * -EPERM: Operation not permitted
2828ba3e0c47SXuan Zhuo  */
2829ba3e0c47SXuan Zhuo int virtqueue_reset(struct virtqueue *_vq,
2830ba3e0c47SXuan Zhuo 		    void (*recycle)(struct virtqueue *vq, void *buf))
2831ba3e0c47SXuan Zhuo {
2832ba3e0c47SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
2833ba3e0c47SXuan Zhuo 	int err;
2834ba3e0c47SXuan Zhuo 
2835ba3e0c47SXuan Zhuo 	err = virtqueue_disable_and_recycle(_vq, recycle);
2836ba3e0c47SXuan Zhuo 	if (err)
2837ba3e0c47SXuan Zhuo 		return err;
2838ba3e0c47SXuan Zhuo 
2839ba3e0c47SXuan Zhuo 	if (vq->packed_ring)
2840ba3e0c47SXuan Zhuo 		virtqueue_reinit_packed(vq);
2841ba3e0c47SXuan Zhuo 	else
2842ba3e0c47SXuan Zhuo 		virtqueue_reinit_split(vq);
2843ba3e0c47SXuan Zhuo 
2844ba3e0c47SXuan Zhuo 	return virtqueue_enable_after_reset(_vq);
2845ba3e0c47SXuan Zhuo }
2846ba3e0c47SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_reset);
2847ba3e0c47SXuan Zhuo 
28481ce9e605STiwei Bie /* Only available for split ring */
28492a2d1382SAndy Lutomirski struct virtqueue *vring_new_virtqueue(unsigned int index,
28502a2d1382SAndy Lutomirski 				      unsigned int num,
28512a2d1382SAndy Lutomirski 				      unsigned int vring_align,
28522a2d1382SAndy Lutomirski 				      struct virtio_device *vdev,
28532a2d1382SAndy Lutomirski 				      bool weak_barriers,
2854f94682ddSMichael S. Tsirkin 				      bool context,
28552a2d1382SAndy Lutomirski 				      void *pages,
28562a2d1382SAndy Lutomirski 				      bool (*notify)(struct virtqueue *vq),
28572a2d1382SAndy Lutomirski 				      void (*callback)(struct virtqueue *vq),
28582a2d1382SAndy Lutomirski 				      const char *name)
28592a2d1382SAndy Lutomirski {
2860cd4c812aSXuan Zhuo 	struct vring_virtqueue_split vring_split = {};
28611ce9e605STiwei Bie 
28621ce9e605STiwei Bie 	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
28631ce9e605STiwei Bie 		return NULL;
28641ce9e605STiwei Bie 
2865cd4c812aSXuan Zhuo 	vring_init(&vring_split.vring, num, pages, vring_align);
2866cd4c812aSXuan Zhuo 	return __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers,
28672713ea3cSJason Wang 				     context, notify, callback, name,
28682713ea3cSJason Wang 				     vdev->dev.parent);
28692a2d1382SAndy Lutomirski }
2870c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_new_virtqueue);
28710a8a69ddSRusty Russell 
28723ea19e32SXuan Zhuo static void vring_free(struct virtqueue *_vq)
28730a8a69ddSRusty Russell {
28742a2d1382SAndy Lutomirski 	struct vring_virtqueue *vq = to_vvq(_vq);
28752a2d1382SAndy Lutomirski 
28762a2d1382SAndy Lutomirski 	if (vq->we_own_ring) {
28771ce9e605STiwei Bie 		if (vq->packed_ring) {
28781ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
28791ce9e605STiwei Bie 					 vq->packed.ring_size_in_bytes,
28801ce9e605STiwei Bie 					 vq->packed.vring.desc,
28812713ea3cSJason Wang 					 vq->packed.ring_dma_addr,
28822713ea3cSJason Wang 					 vring_dma_dev(vq));
28831ce9e605STiwei Bie 
28841ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
28851ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
28861ce9e605STiwei Bie 					 vq->packed.vring.driver,
28872713ea3cSJason Wang 					 vq->packed.driver_event_dma_addr,
28882713ea3cSJason Wang 					 vring_dma_dev(vq));
28891ce9e605STiwei Bie 
28901ce9e605STiwei Bie 			vring_free_queue(vq->vq.vdev,
28911ce9e605STiwei Bie 					 vq->packed.event_size_in_bytes,
28921ce9e605STiwei Bie 					 vq->packed.vring.device,
28932713ea3cSJason Wang 					 vq->packed.device_event_dma_addr,
28942713ea3cSJason Wang 					 vring_dma_dev(vq));
28951ce9e605STiwei Bie 
28961ce9e605STiwei Bie 			kfree(vq->packed.desc_state);
28971ce9e605STiwei Bie 			kfree(vq->packed.desc_extra);
28981ce9e605STiwei Bie 		} else {
2899d79dca75STiwei Bie 			vring_free_queue(vq->vq.vdev,
2900d79dca75STiwei Bie 					 vq->split.queue_size_in_bytes,
2901d79dca75STiwei Bie 					 vq->split.vring.desc,
29022713ea3cSJason Wang 					 vq->split.queue_dma_addr,
29032713ea3cSJason Wang 					 vring_dma_dev(vq));
2904f13f09a1SSuman Anna 		}
2905f13f09a1SSuman Anna 	}
290672b5e895SJason Wang 	if (!vq->packed_ring) {
2907cbeedb72STiwei Bie 		kfree(vq->split.desc_state);
290872b5e895SJason Wang 		kfree(vq->split.desc_extra);
290972b5e895SJason Wang 	}
29103ea19e32SXuan Zhuo }
29113ea19e32SXuan Zhuo 
29123ea19e32SXuan Zhuo void vring_del_virtqueue(struct virtqueue *_vq)
29133ea19e32SXuan Zhuo {
29143ea19e32SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
29153ea19e32SXuan Zhuo 
29163ea19e32SXuan Zhuo 	spin_lock(&vq->vq.vdev->vqs_list_lock);
29173ea19e32SXuan Zhuo 	list_del(&_vq->list);
29183ea19e32SXuan Zhuo 	spin_unlock(&vq->vq.vdev->vqs_list_lock);
29193ea19e32SXuan Zhuo 
29203ea19e32SXuan Zhuo 	vring_free(_vq);
29213ea19e32SXuan Zhuo 
29222a2d1382SAndy Lutomirski 	kfree(vq);
29230a8a69ddSRusty Russell }
2924c6fd4701SRusty Russell EXPORT_SYMBOL_GPL(vring_del_virtqueue);
29250a8a69ddSRusty Russell 
2926af8ececdSViktor Prutyanov u32 vring_notification_data(struct virtqueue *_vq)
2927af8ececdSViktor Prutyanov {
2928af8ececdSViktor Prutyanov 	struct vring_virtqueue *vq = to_vvq(_vq);
2929af8ececdSViktor Prutyanov 	u16 next;
2930af8ececdSViktor Prutyanov 
2931af8ececdSViktor Prutyanov 	if (vq->packed_ring)
2932af8ececdSViktor Prutyanov 		next = (vq->packed.next_avail_idx &
2933af8ececdSViktor Prutyanov 				~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR))) |
2934af8ececdSViktor Prutyanov 			vq->packed.avail_wrap_counter <<
2935af8ececdSViktor Prutyanov 				VRING_PACKED_EVENT_F_WRAP_CTR;
2936af8ececdSViktor Prutyanov 	else
2937af8ececdSViktor Prutyanov 		next = vq->split.avail_idx_shadow;
2938af8ececdSViktor Prutyanov 
2939af8ececdSViktor Prutyanov 	return next << 16 | _vq->index;
2940af8ececdSViktor Prutyanov }
2941af8ececdSViktor Prutyanov EXPORT_SYMBOL_GPL(vring_notification_data);
2942af8ececdSViktor Prutyanov 
2943e34f8725SRusty Russell /* Manipulates transport-specific feature bits. */
2944e34f8725SRusty Russell void vring_transport_features(struct virtio_device *vdev)
2945e34f8725SRusty Russell {
2946e34f8725SRusty Russell 	unsigned int i;
2947e34f8725SRusty Russell 
2948e34f8725SRusty Russell 	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
2949e34f8725SRusty Russell 		switch (i) {
29509fa29b9dSMark McLoughlin 		case VIRTIO_RING_F_INDIRECT_DESC:
29519fa29b9dSMark McLoughlin 			break;
2952a5c262c5SMichael S. Tsirkin 		case VIRTIO_RING_F_EVENT_IDX:
2953a5c262c5SMichael S. Tsirkin 			break;
2954747ae34aSMichael S. Tsirkin 		case VIRTIO_F_VERSION_1:
2955747ae34aSMichael S. Tsirkin 			break;
2956321bd212SMichael S. Tsirkin 		case VIRTIO_F_ACCESS_PLATFORM:
29571a937693SMichael S. Tsirkin 			break;
2958f959a128STiwei Bie 		case VIRTIO_F_RING_PACKED:
2959f959a128STiwei Bie 			break;
296045383fb0STiwei Bie 		case VIRTIO_F_ORDER_PLATFORM:
296145383fb0STiwei Bie 			break;
2962af8ececdSViktor Prutyanov 		case VIRTIO_F_NOTIFICATION_DATA:
2963af8ececdSViktor Prutyanov 			break;
2964e34f8725SRusty Russell 		default:
2965e34f8725SRusty Russell 			/* We don't understand this bit. */
2966e16e12beSMichael S. Tsirkin 			__virtio_clear_bit(vdev, i);
2967e34f8725SRusty Russell 		}
2968e34f8725SRusty Russell 	}
2969e34f8725SRusty Russell }
2970e34f8725SRusty Russell EXPORT_SYMBOL_GPL(vring_transport_features);
2971e34f8725SRusty Russell 
29725dfc1762SRusty Russell /**
29735dfc1762SRusty Russell  * virtqueue_get_vring_size - return the size of the virtqueue's vring
2974a5581206SJiang Biao  * @_vq: the struct virtqueue containing the vring of interest.
29755dfc1762SRusty Russell  *
29765dfc1762SRusty Russell  * Returns the size of the vring.  This is mainly used for boasting to
29775dfc1762SRusty Russell  * userspace.  Unlike other operations, this need not be serialized.
29785dfc1762SRusty Russell  */
29794b6ec919SFeng Liu unsigned int virtqueue_get_vring_size(const struct virtqueue *_vq)
29808f9f4668SRick Jones {
29818f9f4668SRick Jones 
29824b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
29838f9f4668SRick Jones 
29841ce9e605STiwei Bie 	return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
29858f9f4668SRick Jones }
29868f9f4668SRick Jones EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
29878f9f4668SRick Jones 
298832510631SXuan Zhuo /*
298932510631SXuan Zhuo  * This function should only be called by the core, not directly by the driver.
299032510631SXuan Zhuo  */
299132510631SXuan Zhuo void __virtqueue_break(struct virtqueue *_vq)
299232510631SXuan Zhuo {
299332510631SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
299432510631SXuan Zhuo 
299532510631SXuan Zhuo 	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
299632510631SXuan Zhuo 	WRITE_ONCE(vq->broken, true);
299732510631SXuan Zhuo }
299832510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_break);
299932510631SXuan Zhuo 
300032510631SXuan Zhuo /*
300132510631SXuan Zhuo  * This function should only be called by the core, not directly by the driver.
300232510631SXuan Zhuo  */
300332510631SXuan Zhuo void __virtqueue_unbreak(struct virtqueue *_vq)
300432510631SXuan Zhuo {
300532510631SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
300632510631SXuan Zhuo 
300732510631SXuan Zhuo 	/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
300832510631SXuan Zhuo 	WRITE_ONCE(vq->broken, false);
300932510631SXuan Zhuo }
301032510631SXuan Zhuo EXPORT_SYMBOL_GPL(__virtqueue_unbreak);
301132510631SXuan Zhuo 
30124b6ec919SFeng Liu bool virtqueue_is_broken(const struct virtqueue *_vq)
3013b3b32c94SHeinz Graalfs {
30144b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
3015b3b32c94SHeinz Graalfs 
301660f07798SParav Pandit 	return READ_ONCE(vq->broken);
3017b3b32c94SHeinz Graalfs }
3018b3b32c94SHeinz Graalfs EXPORT_SYMBOL_GPL(virtqueue_is_broken);
3019b3b32c94SHeinz Graalfs 
3020e2dcdfe9SRusty Russell /*
3021e2dcdfe9SRusty Russell  * This should prevent the device from being used, allowing drivers to
3022e2dcdfe9SRusty Russell  * recover.  You may need to grab appropriate locks to flush.
3023e2dcdfe9SRusty Russell  */
3024e2dcdfe9SRusty Russell void virtio_break_device(struct virtio_device *dev)
3025e2dcdfe9SRusty Russell {
3026e2dcdfe9SRusty Russell 	struct virtqueue *_vq;
3027e2dcdfe9SRusty Russell 
30280e566c8fSParav Pandit 	spin_lock(&dev->vqs_list_lock);
3029e2dcdfe9SRusty Russell 	list_for_each_entry(_vq, &dev->vqs, list) {
3030e2dcdfe9SRusty Russell 		struct vring_virtqueue *vq = to_vvq(_vq);
303160f07798SParav Pandit 
303260f07798SParav Pandit 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
303360f07798SParav Pandit 		WRITE_ONCE(vq->broken, true);
3034e2dcdfe9SRusty Russell 	}
30350e566c8fSParav Pandit 	spin_unlock(&dev->vqs_list_lock);
3036e2dcdfe9SRusty Russell }
3037e2dcdfe9SRusty Russell EXPORT_SYMBOL_GPL(virtio_break_device);
3038e2dcdfe9SRusty Russell 
3039be83f04dSJason Wang /*
3040be83f04dSJason Wang  * This should allow the device to be used by the driver. You may
3041be83f04dSJason Wang  * need to grab appropriate locks to flush the write to
3042be83f04dSJason Wang  * vq->broken. This should only be used in some specific case e.g
3043be83f04dSJason Wang  * vq->broken. This should only be used in some specific cases, e.g.
3044be83f04dSJason Wang  * probing and restoring. This function should only be called by the
3045be83f04dSJason Wang  */
3046be83f04dSJason Wang void __virtio_unbreak_device(struct virtio_device *dev)
3047be83f04dSJason Wang {
3048be83f04dSJason Wang 	struct virtqueue *_vq;
3049be83f04dSJason Wang 
3050be83f04dSJason Wang 	spin_lock(&dev->vqs_list_lock);
3051be83f04dSJason Wang 	list_for_each_entry(_vq, &dev->vqs, list) {
3052be83f04dSJason Wang 		struct vring_virtqueue *vq = to_vvq(_vq);
3053be83f04dSJason Wang 
3054be83f04dSJason Wang 		/* Pairs with READ_ONCE() in virtqueue_is_broken(). */
3055be83f04dSJason Wang 		WRITE_ONCE(vq->broken, false);
3056be83f04dSJason Wang 	}
3057be83f04dSJason Wang 	spin_unlock(&dev->vqs_list_lock);
3058be83f04dSJason Wang }
3059be83f04dSJason Wang EXPORT_SYMBOL_GPL(__virtio_unbreak_device);
3060be83f04dSJason Wang 
30614b6ec919SFeng Liu dma_addr_t virtqueue_get_desc_addr(const struct virtqueue *_vq)
306289062652SCornelia Huck {
30634b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
306489062652SCornelia Huck 
30652a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
306689062652SCornelia Huck 
30671ce9e605STiwei Bie 	if (vq->packed_ring)
30681ce9e605STiwei Bie 		return vq->packed.ring_dma_addr;
30691ce9e605STiwei Bie 
3070d79dca75STiwei Bie 	return vq->split.queue_dma_addr;
30712a2d1382SAndy Lutomirski }
30722a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
30732a2d1382SAndy Lutomirski 
30744b6ec919SFeng Liu dma_addr_t virtqueue_get_avail_addr(const struct virtqueue *_vq)
307589062652SCornelia Huck {
30764b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
307789062652SCornelia Huck 
30782a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
30792a2d1382SAndy Lutomirski 
30801ce9e605STiwei Bie 	if (vq->packed_ring)
30811ce9e605STiwei Bie 		return vq->packed.driver_event_dma_addr;
30821ce9e605STiwei Bie 
3083d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
3084e593bf97STiwei Bie 		((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
308589062652SCornelia Huck }
30862a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
30872a2d1382SAndy Lutomirski 
30884b6ec919SFeng Liu dma_addr_t virtqueue_get_used_addr(const struct virtqueue *_vq)
30892a2d1382SAndy Lutomirski {
30904b6ec919SFeng Liu 	const struct vring_virtqueue *vq = to_vvq(_vq);
30912a2d1382SAndy Lutomirski 
30922a2d1382SAndy Lutomirski 	BUG_ON(!vq->we_own_ring);
30932a2d1382SAndy Lutomirski 
30941ce9e605STiwei Bie 	if (vq->packed_ring)
30951ce9e605STiwei Bie 		return vq->packed.device_event_dma_addr;
30961ce9e605STiwei Bie 
3097d79dca75STiwei Bie 	return vq->split.queue_dma_addr +
3098e593bf97STiwei Bie 		((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
30992a2d1382SAndy Lutomirski }
31002a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
31012a2d1382SAndy Lutomirski 
31021ce9e605STiwei Bie /* Only available for split ring */
31034b6ec919SFeng Liu const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
31042a2d1382SAndy Lutomirski {
3105e593bf97STiwei Bie 	return &to_vvq(vq)->split.vring;
31062a2d1382SAndy Lutomirski }
31072a2d1382SAndy Lutomirski EXPORT_SYMBOL_GPL(virtqueue_get_vring);
310889062652SCornelia Huck 
3109b6253b4eSXuan Zhuo /**
3110b6253b4eSXuan Zhuo  * virtqueue_dma_map_single_attrs - map DMA for _vq
3111b6253b4eSXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
3112b6253b4eSXuan Zhuo  * @ptr: the pointer to the buffer to map
3113b6253b4eSXuan Zhuo  * @size: the size of the buffer to map
3114b6253b4eSXuan Zhuo  * @dir: DMA direction
3115b6253b4eSXuan Zhuo  * @attrs: DMA Attrs
3116b6253b4eSXuan Zhuo  *
3117b6253b4eSXuan Zhuo  * The caller calls this to do the DMA mapping in advance. The resulting DMA
3118b6253b4eSXuan Zhuo  * address can then be passed to this _vq when it is in premapped mode.
3119b6253b4eSXuan Zhuo  *
3120b6253b4eSXuan Zhuo  * Returns the DMA address; check it with virtqueue_dma_mapping_error().
3121b6253b4eSXuan Zhuo  */
3122b6253b4eSXuan Zhuo dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
3123b6253b4eSXuan Zhuo 					  size_t size,
3124b6253b4eSXuan Zhuo 					  enum dma_data_direction dir,
3125b6253b4eSXuan Zhuo 					  unsigned long attrs)
3126b6253b4eSXuan Zhuo {
3127b6253b4eSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
3128b6253b4eSXuan Zhuo 
3129*c5b30148SXuan Zhuo 	if (!vq->use_dma_api) {
3130*c5b30148SXuan Zhuo 		kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
3131b6253b4eSXuan Zhuo 		return (dma_addr_t)virt_to_phys(ptr);
3132*c5b30148SXuan Zhuo 	}
3133b6253b4eSXuan Zhuo 
3134b6253b4eSXuan Zhuo 	return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
3135b6253b4eSXuan Zhuo }
3136b6253b4eSXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
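
/*
 * Sketch (hypothetical): map a driver buffer for a premapped vq and bail
 * out if the mapping failed.
 */
static dma_addr_t example_map_buf(struct virtqueue *vq, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_FROM_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return DMA_MAPPING_ERROR;

	return addr;
}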
3137b6253b4eSXuan Zhuo 
3138b6253b4eSXuan Zhuo /**
3139b6253b4eSXuan Zhuo  * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
3140b6253b4eSXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
3141b6253b4eSXuan Zhuo  * @addr: the dma address to unmap
3142b6253b4eSXuan Zhuo  * @size: the size of the buffer
3143b6253b4eSXuan Zhuo  * @dir: DMA direction
3144b6253b4eSXuan Zhuo  * @attrs: DMA Attrs
3145b6253b4eSXuan Zhuo  *
3146b6253b4eSXuan Zhuo  * Unmap an address that was mapped by the virtqueue_dma_map_* APIs.
3147b6253b4eSXuan Zhuo  *
3148b6253b4eSXuan Zhuo  */
3149b6253b4eSXuan Zhuo void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
3150b6253b4eSXuan Zhuo 				      size_t size, enum dma_data_direction dir,
3151b6253b4eSXuan Zhuo 				      unsigned long attrs)
3152b6253b4eSXuan Zhuo {
3153b6253b4eSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
3154b6253b4eSXuan Zhuo 
3155b6253b4eSXuan Zhuo 	if (!vq->use_dma_api)
3156b6253b4eSXuan Zhuo 		return;
3157b6253b4eSXuan Zhuo 
3158b6253b4eSXuan Zhuo 	dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
3159b6253b4eSXuan Zhuo }
3160b6253b4eSXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
3161b6253b4eSXuan Zhuo 
3162b6253b4eSXuan Zhuo /**
3163b6253b4eSXuan Zhuo  * virtqueue_dma_mapping_error - check dma address
3164b6253b4eSXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
3165b6253b4eSXuan Zhuo  * @addr: DMA address
3166b6253b4eSXuan Zhuo  *
3167b6253b4eSXuan Zhuo  * Returns 0 if the DMA address is valid; any other value means it is invalid.
3168b6253b4eSXuan Zhuo  */
3169b6253b4eSXuan Zhuo int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
3170b6253b4eSXuan Zhuo {
3171b6253b4eSXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
3172b6253b4eSXuan Zhuo 
3173b6253b4eSXuan Zhuo 	if (!vq->use_dma_api)
3174b6253b4eSXuan Zhuo 		return 0;
3175b6253b4eSXuan Zhuo 
3176b6253b4eSXuan Zhuo 	return dma_mapping_error(vring_dma_dev(vq), addr);
3177b6253b4eSXuan Zhuo }
3178b6253b4eSXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
3179b6253b4eSXuan Zhuo 
31808bd2f710SXuan Zhuo /**
31818bd2f710SXuan Zhuo  * virtqueue_dma_need_sync - check a dma address needs sync
31828bd2f710SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
31838bd2f710SXuan Zhuo  * @addr: DMA address
31848bd2f710SXuan Zhuo  *
31858bd2f710SXuan Zhuo  * Checks whether the DMA address mapped by the virtqueue_dma_map_* APIs
31868bd2f710SXuan Zhuo  * needs to be synchronized.
31878bd2f710SXuan Zhuo  *
31888bd2f710SXuan Zhuo  * Returns true if a sync is needed, false otherwise.
31898bd2f710SXuan Zhuo  */
31908bd2f710SXuan Zhuo bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
31918bd2f710SXuan Zhuo {
31928bd2f710SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
31938bd2f710SXuan Zhuo 
31948bd2f710SXuan Zhuo 	if (!vq->use_dma_api)
31958bd2f710SXuan Zhuo 		return false;
31968bd2f710SXuan Zhuo 
31978bd2f710SXuan Zhuo 	return dma_need_sync(vring_dma_dev(vq), addr);
31988bd2f710SXuan Zhuo }
31998bd2f710SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
32008bd2f710SXuan Zhuo 
32018bd2f710SXuan Zhuo /**
32028bd2f710SXuan Zhuo  * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
32038bd2f710SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
32048bd2f710SXuan Zhuo  * @addr: DMA address
32058bd2f710SXuan Zhuo  * @offset: DMA address offset
32068bd2f710SXuan Zhuo  * @size: buf size for sync
32078bd2f710SXuan Zhuo  * @dir: DMA direction
32088bd2f710SXuan Zhuo  *
32098bd2f710SXuan Zhuo  * Before calling this function, use virtqueue_dma_need_sync() to confirm that
32108bd2f710SXuan Zhuo  * the DMA address really needs to be synchronized.
32118bd2f710SXuan Zhuo  *
32128bd2f710SXuan Zhuo  */
32138bd2f710SXuan Zhuo void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
32148bd2f710SXuan Zhuo 					     dma_addr_t addr,
32158bd2f710SXuan Zhuo 					     unsigned long offset, size_t size,
32168bd2f710SXuan Zhuo 					     enum dma_data_direction dir)
32178bd2f710SXuan Zhuo {
32188bd2f710SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
32198bd2f710SXuan Zhuo 	struct device *dev = vring_dma_dev(vq);
32208bd2f710SXuan Zhuo 
32218bd2f710SXuan Zhuo 	if (!vq->use_dma_api)
32228bd2f710SXuan Zhuo 		return;
32238bd2f710SXuan Zhuo 
322428d6cde1SXuan Zhuo 	dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
32258bd2f710SXuan Zhuo }
32268bd2f710SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
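
/*
 * Sketch (hypothetical): before the CPU reads a device-written buffer on a
 * premapped vq, sync it only when the platform actually requires it.
 */
static void example_sync_before_read(struct virtqueue *vq, dma_addr_t addr,
				     size_t len)
{
	if (virtqueue_dma_need_sync(vq, addr))
		virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
							DMA_FROM_DEVICE);
}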
32278bd2f710SXuan Zhuo 
32288bd2f710SXuan Zhuo /**
32298bd2f710SXuan Zhuo  * virtqueue_dma_sync_single_range_for_device - dma sync for device
32308bd2f710SXuan Zhuo  * @_vq: the struct virtqueue we're talking about.
32318bd2f710SXuan Zhuo  * @addr: DMA address
32328bd2f710SXuan Zhuo  * @offset: DMA address offset
32338bd2f710SXuan Zhuo  * @size: buf size for sync
32348bd2f710SXuan Zhuo  * @dir: DMA direction
32358bd2f710SXuan Zhuo  *
32368bd2f710SXuan Zhuo  * Before calling this function, use virtqueue_dma_need_sync() to confirm that
32378bd2f710SXuan Zhuo  * the DMA address really needs to be synchronized.
32388bd2f710SXuan Zhuo  */
32398bd2f710SXuan Zhuo void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
32408bd2f710SXuan Zhuo 						dma_addr_t addr,
32418bd2f710SXuan Zhuo 						unsigned long offset, size_t size,
32428bd2f710SXuan Zhuo 						enum dma_data_direction dir)
32438bd2f710SXuan Zhuo {
32448bd2f710SXuan Zhuo 	struct vring_virtqueue *vq = to_vvq(_vq);
32458bd2f710SXuan Zhuo 	struct device *dev = vring_dma_dev(vq);
32468bd2f710SXuan Zhuo 
32478bd2f710SXuan Zhuo 	if (!vq->use_dma_api)
32488bd2f710SXuan Zhuo 		return;
32498bd2f710SXuan Zhuo 
325028d6cde1SXuan Zhuo 	dma_sync_single_range_for_device(dev, addr, offset, size, dir);
32518bd2f710SXuan Zhuo }
32528bd2f710SXuan Zhuo EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
32538bd2f710SXuan Zhuo 
3254c6fd4701SRusty Russell MODULE_LICENSE("GPL");
3255